/src/cryptsetup/lib/utils_storage_wrappers.c
Line | Count | Source (jump to first uncovered line) |
1 | | // SPDX-License-Identifier: LGPL-2.1-or-later |
2 | | /* |
3 | | * Generic wrapper for storage functions |
4 | | * (experimental only) |
5 | | * |
6 | | * Copyright (C) 2018-2025 Ondrej Kozina |
7 | | */ |
8 | | |
9 | | #include <errno.h> |
10 | | #include <stdio.h> |
11 | | #include <stddef.h> |
12 | | #include <stdint.h> |
13 | | #include <stdlib.h> |
14 | | #include <limits.h> |
15 | | #include <sys/stat.h> |
16 | | #include <sys/types.h> |
17 | | |
18 | | #include "utils_storage_wrappers.h" |
19 | | #include "internal.h" |
20 | | |
/*
 * Runtime state for one storage wrapper. Exactly one backend is active,
 * selected by 'type': a userspace cipher (USPACE), a temporary dm-crypt
 * mapping (DMCRYPT), or a plain pass-through (NONE, cipher_null).
 */
struct crypt_storage_wrapper {
	crypt_storage_wrapper_type type;
	int dev_fd;		/* fd of the underlying data device */
	int block_size;		/* device logical block size (for blockwise I/O) */
	size_t mem_alignment;	/* buffer alignment required by the device (O_DIRECT) */
	uint64_t data_offset;	/* start of the data area on dev_fd, in bytes */
	union {
		/* USPACE: userspace crypto backend */
		struct {
			struct crypt_storage *s;
			uint64_t iv_start;	/* IV offset of the first data sector */
		} cb;
		/* DMCRYPT: temporary device-mapper mapping */
		struct {
			int dmcrypt_fd;		/* fd of the temporary dm device */
			char name[PATH_MAX];	/* temporary dm device name */
		} dm;
	} u;
};
38 | | |
39 | | static int crypt_storage_backend_init(struct crypt_device *cd, |
40 | | struct crypt_storage_wrapper *w, |
41 | | uint64_t iv_start, |
42 | | int sector_size, |
43 | | const char *cipher, |
44 | | const char *cipher_mode, |
45 | | const struct volume_key *vk, |
46 | | uint32_t flags) |
47 | 0 | { |
48 | 0 | int r; |
49 | 0 | struct crypt_storage *s; |
50 | | |
51 | | /* iv_start, sector_size */ |
52 | 0 | r = crypt_storage_init(&s, sector_size, cipher, cipher_mode, |
53 | 0 | crypt_volume_key_get_key(vk), |
54 | 0 | crypt_volume_key_length(vk), flags & LARGE_IV); |
55 | 0 | if (r) |
56 | 0 | return r; |
57 | | |
58 | 0 | if ((flags & DISABLE_KCAPI) && crypt_storage_kernel_only(s)) { |
59 | 0 | log_dbg(cd, "Could not initialize userspace block cipher and kernel fallback is disabled."); |
60 | 0 | crypt_storage_destroy(s); |
61 | 0 | return -ENOTSUP; |
62 | 0 | } |
63 | | |
64 | 0 | w->type = USPACE; |
65 | 0 | w->u.cb.s = s; |
66 | 0 | w->u.cb.iv_start = iv_start; |
67 | |
|
68 | 0 | return 0; |
69 | 0 | } |
70 | | |
71 | | static int crypt_storage_dmcrypt_init( |
72 | | struct crypt_device *cd, |
73 | | struct crypt_storage_wrapper *cw, |
74 | | struct device *device, |
75 | | uint64_t device_offset, |
76 | | uint64_t iv_start, |
77 | | int sector_size, |
78 | | const char *cipher_spec, |
79 | | struct volume_key *vk, |
80 | | int open_flags) |
81 | 0 | { |
82 | 0 | static int counter = 0; |
83 | 0 | char path[PATH_MAX]; |
84 | 0 | struct crypt_dm_active_device dmd = { |
85 | 0 | .flags = CRYPT_ACTIVATE_PRIVATE, |
86 | 0 | }; |
87 | 0 | int mode, r, fd = -1; |
88 | |
|
89 | 0 | log_dbg(cd, "Using temporary dmcrypt to access data."); |
90 | |
|
91 | 0 | if (snprintf(cw->u.dm.name, sizeof(cw->u.dm.name), "temporary-cryptsetup-%d-%d", getpid(), counter++) < 0) |
92 | 0 | return -ENOMEM; |
93 | 0 | if (snprintf(path, sizeof(path), "%s/%s", dm_get_dir(), cw->u.dm.name) < 0) |
94 | 0 | return -ENOMEM; |
95 | | |
96 | 0 | r = device_block_adjust(cd, device, DEV_OK, |
97 | 0 | device_offset, &dmd.size, &dmd.flags); |
98 | 0 | if (r < 0) { |
99 | 0 | log_err(cd, _("Device %s does not exist or access denied."), |
100 | 0 | device_path(device)); |
101 | 0 | return -EIO; |
102 | 0 | } |
103 | | |
104 | 0 | mode = open_flags | O_DIRECT; |
105 | 0 | if (dmd.flags & CRYPT_ACTIVATE_READONLY) |
106 | 0 | mode = (open_flags & ~O_ACCMODE) | O_RDONLY; |
107 | |
|
108 | 0 | if (crypt_volume_key_description(vk)) |
109 | 0 | dmd.flags |= CRYPT_ACTIVATE_KEYRING_KEY; |
110 | |
|
111 | 0 | r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, device, vk, cipher_spec, iv_start, |
112 | 0 | device_offset, NULL, 0, 0, sector_size); |
113 | 0 | if (r) |
114 | 0 | return r; |
115 | | |
116 | 0 | r = dm_create_device(cd, cw->u.dm.name, "TEMP", &dmd); |
117 | 0 | if (r < 0) { |
118 | 0 | if (r != -EACCES && r != -ENOTSUP) |
119 | 0 | log_dbg(cd, "error hint would be nice"); |
120 | 0 | r = -EIO; |
121 | 0 | } |
122 | |
|
123 | 0 | dm_targets_free(cd, &dmd); |
124 | |
|
125 | 0 | if (r) |
126 | 0 | return r; |
127 | | |
128 | 0 | fd = open(path, mode); |
129 | 0 | if (fd < 0) { |
130 | 0 | log_dbg(cd, "Failed to open %s", path); |
131 | 0 | dm_remove_device(cd, cw->u.dm.name, CRYPT_DEACTIVATE_FORCE); |
132 | 0 | return -EINVAL; |
133 | 0 | } |
134 | | |
135 | 0 | cw->type = DMCRYPT; |
136 | 0 | cw->u.dm.dmcrypt_fd = fd; |
137 | |
|
138 | 0 | return 0; |
139 | 0 | } |
140 | | |
141 | | int crypt_storage_wrapper_init(struct crypt_device *cd, |
142 | | struct crypt_storage_wrapper **cw, |
143 | | struct device *device, |
144 | | uint64_t data_offset, |
145 | | uint64_t iv_start, |
146 | | int sector_size, |
147 | | const char *cipher, |
148 | | struct volume_key *vk, |
149 | | uint32_t flags) |
150 | 0 | { |
151 | 0 | int open_flags, r; |
152 | 0 | char _cipher[MAX_CIPHER_LEN], mode[MAX_CIPHER_LEN]; |
153 | 0 | struct crypt_storage_wrapper *w; |
154 | | |
155 | | /* device-mapper restrictions */ |
156 | 0 | if (data_offset & ((1 << SECTOR_SHIFT) - 1)) |
157 | 0 | return -EINVAL; |
158 | | |
159 | 0 | if (crypt_parse_name_and_mode(cipher, _cipher, NULL, mode)) |
160 | 0 | return -EINVAL; |
161 | | |
162 | 0 | open_flags = O_CLOEXEC | ((flags & OPEN_READONLY) ? O_RDONLY : O_RDWR); |
163 | |
|
164 | 0 | w = malloc(sizeof(*w)); |
165 | 0 | if (!w) |
166 | 0 | return -ENOMEM; |
167 | | |
168 | 0 | memset(w, 0, sizeof(*w)); |
169 | 0 | w->data_offset = data_offset; |
170 | 0 | w->mem_alignment = device_alignment(device); |
171 | 0 | w->block_size = device_block_size(cd, device); |
172 | 0 | if (!w->block_size || !w->mem_alignment) { |
173 | 0 | log_dbg(cd, "block size or alignment error."); |
174 | 0 | r = -EINVAL; |
175 | 0 | goto err; |
176 | 0 | } |
177 | | |
178 | 0 | w->dev_fd = device_open(cd, device, open_flags); |
179 | 0 | if (w->dev_fd < 0) { |
180 | 0 | r = -EINVAL; |
181 | 0 | goto err; |
182 | 0 | } |
183 | | |
184 | 0 | if (crypt_is_cipher_null(_cipher)) { |
185 | 0 | log_dbg(cd, "Requested cipher_null, switching to noop wrapper."); |
186 | 0 | w->type = NONE; |
187 | 0 | *cw = w; |
188 | 0 | return 0; |
189 | 0 | } |
190 | | |
191 | 0 | if (!vk) { |
192 | 0 | log_dbg(cd, "no key passed."); |
193 | 0 | r = -EINVAL; |
194 | 0 | goto err; |
195 | 0 | } |
196 | | |
197 | 0 | r = crypt_storage_backend_init(cd, w, iv_start, sector_size, _cipher, mode, vk, flags); |
198 | 0 | if (!r) { |
199 | 0 | *cw = w; |
200 | 0 | return 0; |
201 | 0 | } |
202 | | |
203 | 0 | log_dbg(cd, "Failed to initialize userspace block cipher."); |
204 | |
|
205 | 0 | if ((r != -ENOTSUP && r != -ENOENT) || (flags & DISABLE_DMCRYPT)) |
206 | 0 | goto err; |
207 | | |
208 | 0 | r = crypt_storage_dmcrypt_init(cd, w, device, data_offset >> SECTOR_SHIFT, iv_start, |
209 | 0 | sector_size, cipher, vk, open_flags); |
210 | 0 | if (r) { |
211 | 0 | log_dbg(cd, "Dm-crypt backend failed to initialize."); |
212 | 0 | goto err; |
213 | 0 | } |
214 | 0 | *cw = w; |
215 | 0 | return 0; |
216 | 0 | err: |
217 | 0 | crypt_storage_wrapper_destroy(w); |
218 | | /* wrapper destroy */ |
219 | 0 | return r; |
220 | 0 | } |
221 | | |
222 | | /* offset is relative to sector_start */ |
223 | | ssize_t crypt_storage_wrapper_read(struct crypt_storage_wrapper *cw, |
224 | | off_t offset, void *buffer, size_t buffer_length) |
225 | 0 | { |
226 | 0 | return read_lseek_blockwise(cw->dev_fd, |
227 | 0 | cw->block_size, |
228 | 0 | cw->mem_alignment, |
229 | 0 | buffer, |
230 | 0 | buffer_length, |
231 | 0 | cw->data_offset + offset); |
232 | 0 | } |
233 | | |
234 | | ssize_t crypt_storage_wrapper_read_decrypt(struct crypt_storage_wrapper *cw, |
235 | | off_t offset, void *buffer, size_t buffer_length) |
236 | 0 | { |
237 | 0 | int r; |
238 | 0 | ssize_t read; |
239 | |
|
240 | 0 | if (cw->type == DMCRYPT) |
241 | 0 | return read_lseek_blockwise(cw->u.dm.dmcrypt_fd, |
242 | 0 | cw->block_size, |
243 | 0 | cw->mem_alignment, |
244 | 0 | buffer, |
245 | 0 | buffer_length, |
246 | 0 | offset); |
247 | | |
248 | 0 | read = read_lseek_blockwise(cw->dev_fd, |
249 | 0 | cw->block_size, |
250 | 0 | cw->mem_alignment, |
251 | 0 | buffer, |
252 | 0 | buffer_length, |
253 | 0 | cw->data_offset + offset); |
254 | 0 | if (cw->type == NONE || read < 0) |
255 | 0 | return read; |
256 | | |
257 | 0 | r = crypt_storage_decrypt(cw->u.cb.s, |
258 | 0 | cw->u.cb.iv_start + (offset >> SECTOR_SHIFT), |
259 | 0 | read, |
260 | 0 | buffer); |
261 | 0 | if (r) |
262 | 0 | return -EINVAL; |
263 | | |
264 | 0 | return read; |
265 | 0 | } |
266 | | |
267 | | ssize_t crypt_storage_wrapper_decrypt(struct crypt_storage_wrapper *cw, |
268 | | off_t offset, void *buffer, size_t buffer_length) |
269 | 0 | { |
270 | 0 | int r; |
271 | 0 | ssize_t read; |
272 | |
|
273 | 0 | if (cw->type == NONE) |
274 | 0 | return 0; |
275 | | |
276 | 0 | if (cw->type == DMCRYPT) { |
277 | | /* there's nothing we can do, just read/decrypt via dm-crypt */ |
278 | 0 | read = crypt_storage_wrapper_read_decrypt(cw, offset, buffer, buffer_length); |
279 | 0 | if (read < 0 || (size_t)read != buffer_length) |
280 | 0 | return -EINVAL; |
281 | 0 | return 0; |
282 | 0 | } |
283 | | |
284 | 0 | r = crypt_storage_decrypt(cw->u.cb.s, |
285 | 0 | cw->u.cb.iv_start + (offset >> SECTOR_SHIFT), |
286 | 0 | buffer_length, |
287 | 0 | buffer); |
288 | 0 | if (r) |
289 | 0 | return r; |
290 | | |
291 | 0 | return 0; |
292 | 0 | } |
293 | | |
294 | | ssize_t crypt_storage_wrapper_write(struct crypt_storage_wrapper *cw, |
295 | | off_t offset, void *buffer, size_t buffer_length) |
296 | 0 | { |
297 | 0 | return write_lseek_blockwise(cw->dev_fd, |
298 | 0 | cw->block_size, |
299 | 0 | cw->mem_alignment, |
300 | 0 | buffer, |
301 | 0 | buffer_length, |
302 | 0 | cw->data_offset + offset); |
303 | 0 | } |
304 | | |
305 | | ssize_t crypt_storage_wrapper_encrypt_write(struct crypt_storage_wrapper *cw, |
306 | | off_t offset, void *buffer, size_t buffer_length) |
307 | 0 | { |
308 | 0 | if (cw->type == DMCRYPT) |
309 | 0 | return write_lseek_blockwise(cw->u.dm.dmcrypt_fd, |
310 | 0 | cw->block_size, |
311 | 0 | cw->mem_alignment, |
312 | 0 | buffer, |
313 | 0 | buffer_length, |
314 | 0 | offset); |
315 | | |
316 | 0 | if (cw->type == USPACE && |
317 | 0 | crypt_storage_encrypt(cw->u.cb.s, |
318 | 0 | cw->u.cb.iv_start + (offset >> SECTOR_SHIFT), |
319 | 0 | buffer_length, buffer)) |
320 | 0 | return -EINVAL; |
321 | | |
322 | 0 | return write_lseek_blockwise(cw->dev_fd, |
323 | 0 | cw->block_size, |
324 | 0 | cw->mem_alignment, |
325 | 0 | buffer, |
326 | 0 | buffer_length, |
327 | 0 | cw->data_offset + offset); |
328 | 0 | } |
329 | | |
330 | | ssize_t crypt_storage_wrapper_encrypt(struct crypt_storage_wrapper *cw, |
331 | | off_t offset, void *buffer, size_t buffer_length) |
332 | 0 | { |
333 | 0 | if (cw->type == NONE) |
334 | 0 | return 0; |
335 | | |
336 | 0 | if (cw->type == DMCRYPT) |
337 | 0 | return -ENOTSUP; |
338 | | |
339 | 0 | if (crypt_storage_encrypt(cw->u.cb.s, |
340 | 0 | cw->u.cb.iv_start + (offset >> SECTOR_SHIFT), |
341 | 0 | buffer_length, |
342 | 0 | buffer)) |
343 | 0 | return -EINVAL; |
344 | | |
345 | 0 | return 0; |
346 | 0 | } |
347 | | |
348 | | void crypt_storage_wrapper_destroy(struct crypt_storage_wrapper *cw) |
349 | 0 | { |
350 | 0 | if (!cw) |
351 | 0 | return; |
352 | | |
353 | 0 | if (cw->type == USPACE) |
354 | 0 | crypt_storage_destroy(cw->u.cb.s); |
355 | 0 | if (cw->type == DMCRYPT) { |
356 | 0 | close(cw->u.dm.dmcrypt_fd); |
357 | 0 | dm_remove_device(NULL, cw->u.dm.name, CRYPT_DEACTIVATE_FORCE); |
358 | 0 | } |
359 | |
|
360 | 0 | free(cw); |
361 | 0 | } |
362 | | |
363 | | int crypt_storage_wrapper_datasync(const struct crypt_storage_wrapper *cw) |
364 | 0 | { |
365 | 0 | if (!cw) |
366 | 0 | return -EINVAL; |
367 | 0 | if (cw->type == DMCRYPT) |
368 | 0 | return fdatasync(cw->u.dm.dmcrypt_fd); |
369 | 0 | else |
370 | 0 | return fdatasync(cw->dev_fd); |
371 | 0 | } |
372 | | |
373 | | crypt_storage_wrapper_type crypt_storage_wrapper_get_type(const struct crypt_storage_wrapper *cw) |
374 | 0 | { |
375 | 0 | return cw ? cw->type : NONE; |
376 | 0 | } |