/src/cryptsetup/lib/setup.c
Line | Count | Source |
1 | | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | | /* |
3 | | * libcryptsetup - cryptsetup library |
4 | | * |
5 | | * Copyright (C) 2004 Jana Saout <jana@saout.de> |
6 | | * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org> |
7 | | * Copyright (C) 2009-2025 Red Hat, Inc. All rights reserved. |
8 | | * Copyright (C) 2009-2025 Milan Broz |
9 | | */ |
10 | | |
11 | | #include <string.h> |
12 | | #include <stdio.h> |
13 | | #include <stdlib.h> |
14 | | #include <stdarg.h> |
15 | | #include <sys/utsname.h> |
16 | | #include <errno.h> |
17 | | |
18 | | #include "libcryptsetup.h" |
19 | | #include "luks1/luks.h" |
20 | | #include "luks2/luks2.h" |
21 | | #include "loopaes/loopaes.h" |
22 | | #include "verity/verity.h" |
23 | | #include "tcrypt/tcrypt.h" |
24 | | #include "integrity/integrity.h" |
25 | | #include "bitlk/bitlk.h" |
26 | | #include "fvault2/fvault2.h" |
27 | | #include "utils_device_locking.h" |
28 | | #include "internal.h" |
29 | | #include "keyslot_context.h" |
30 | | #include "luks2/hw_opal/hw_opal.h" |
31 | | |
32 | 0 | #define CRYPT_CD_UNRESTRICTED (1 << 0) |
33 | 0 | #define CRYPT_CD_QUIET (1 << 1) |
34 | | |
35 | | struct crypt_device { |
36 | | char *type; |
37 | | |
38 | | struct device *device; |
39 | | struct device *metadata_device; |
40 | | |
41 | | struct volume_key *volume_key; |
42 | | int rng_type; |
43 | | uint32_t compatibility; |
44 | | struct crypt_pbkdf_type pbkdf; |
45 | | |
46 | | /* global context scope settings */ |
47 | | unsigned key_in_keyring:1; |
48 | | |
49 | | bool link_vk_to_keyring; |
50 | | int32_t keyring_to_link_vk; |
51 | | const char *user_key_name1; |
52 | | const char *user_key_name2; |
53 | | key_type_t keyring_key_type; |
54 | | |
55 | | uint64_t data_offset; |
56 | | uint64_t metadata_size; /* Used in LUKS2 format */ |
57 | | uint64_t keyslots_size; /* Used in LUKS2 format */ |
58 | | |
59 | | /* Workaround for OOM during parallel activation (like in systemd) */ |
60 | | bool memory_hard_pbkdf_lock_enabled; |
61 | | struct crypt_lock_handle *pbkdf_memory_hard_lock; |
62 | | |
63 | | union { |
64 | | struct { /* used in CRYPT_LUKS1 */ |
65 | | struct luks_phdr hdr; |
66 | | char *cipher_spec; |
67 | | } luks1; |
68 | | struct { /* used in CRYPT_LUKS2 */ |
69 | | struct luks2_hdr hdr; |
70 | | char cipher[MAX_CIPHER_LEN]; /* only for compatibility */ |
71 | | char cipher_mode[MAX_CIPHER_LEN]; /* only for compatibility */ |
72 | | char *keyslot_cipher; |
73 | | unsigned int keyslot_key_size; |
74 | | struct luks2_reencrypt *rh; |
75 | | } luks2; |
76 | | struct { /* used in CRYPT_PLAIN */ |
77 | | struct crypt_params_plain hdr; |
78 | | char *cipher_spec; |
79 | | char *cipher; |
80 | | const char *cipher_mode; |
81 | | unsigned int key_size; |
82 | | } plain; |
83 | | struct { /* used in CRYPT_LOOPAES */ |
84 | | struct crypt_params_loopaes hdr; |
85 | | char *cipher_spec; |
86 | | char *cipher; |
87 | | const char *cipher_mode; |
88 | | unsigned int key_size; |
89 | | } loopaes; |
90 | | struct { /* used in CRYPT_VERITY */ |
91 | | struct crypt_params_verity hdr; |
92 | | const char *root_hash; |
93 | | unsigned int root_hash_size; |
94 | | char *uuid; |
95 | | struct device *fec_device; |
96 | | } verity; |
97 | | struct { /* used in CRYPT_TCRYPT */ |
98 | | struct crypt_params_tcrypt params; |
99 | | struct tcrypt_phdr hdr; |
100 | | } tcrypt; |
101 | | struct { /* used in CRYPT_INTEGRITY */ |
102 | | struct crypt_params_integrity params; |
103 | | struct volume_key *journal_mac_key; |
104 | | struct volume_key *journal_crypt_key; |
105 | | uint32_t sb_flags; |
106 | | } integrity; |
107 | | struct { /* used in CRYPT_BITLK */ |
108 | | struct bitlk_metadata params; |
109 | | char *cipher_spec; |
110 | | } bitlk; |
111 | | struct { /* used in CRYPT_FVAULT2 */ |
112 | | struct fvault2_params params; |
113 | | } fvault2; |
114 | | struct { /* used if initialized without header by name */ |
115 | | char *active_name; |
116 | | /* buffers, must refresh from kernel on every query */ |
117 | | char cipher_spec[MAX_CIPHER_LEN*2+1]; |
118 | | char cipher[MAX_CIPHER_LEN]; |
119 | | char integrity_spec[MAX_INTEGRITY_LEN]; |
120 | | const char *cipher_mode; |
121 | | unsigned int key_size; |
122 | | uint32_t sector_size; |
123 | | } none; |
124 | | } u; |
125 | | |
126 | | /* callbacks definitions */ |
127 | | void (*log)(int level, const char *msg, void *usrptr); |
128 | | void *log_usrptr; |
129 | | int (*confirm)(const char *msg, void *usrptr); |
130 | | void *confirm_usrptr; |
131 | | }; |
132 | | |
133 | | /* Just to suppress redundant messages about crypto backend */ |
134 | | static int _crypto_logged = 0; |
135 | | |
136 | | /* Log helper */ |
137 | | static void (*_default_log)(int level, const char *msg, void *usrptr) = NULL; |
138 | | static void *_default_log_usrptr = NULL; |
139 | | static int _debug_level = 0; |
140 | | |
141 | | /* Library can do metadata locking */ |
142 | | static int _metadata_locking = 1; |
143 | | |
144 | | /* Library scope detection for kernel keyring support */ |
145 | | static int _kernel_keyring_supported; |
146 | | |
147 | | /* Library allowed to use kernel keyring for loading VK in kernel crypto layer */ |
148 | | static int _vk_via_keyring = 1; |
149 | | |
150 | | void crypt_set_debug_level(int level) |
151 | 0 | { |
152 | 0 | _debug_level = level; |
153 | 0 | } |
154 | | |
155 | | int crypt_get_debug_level(void) |
156 | 0 | { |
157 | 0 | return _debug_level; |
158 | 0 | } |
159 | | |
160 | | void crypt_log(struct crypt_device *cd, int level, const char *msg) |
161 | 45.0k | { |
162 | 45.0k | if (!msg) |
163 | 0 | return; |
164 | | |
165 | 45.0k | if (level < _debug_level) |
166 | 43.6k | return; |
167 | | |
168 | 1.36k | if (cd && cd->log) |
169 | 0 | cd->log(level, msg, cd->log_usrptr); |
170 | 1.36k | else if (_default_log) |
171 | 1.36k | _default_log(level, msg, _default_log_usrptr); |
172 | | /* Default to stdout/stderr if there is no callback. */ |
173 | 0 | else |
174 | 0 | fprintf(level == CRYPT_LOG_ERROR ? stderr : stdout, "%s", msg); |
175 | 1.36k | } |
176 | | |
177 | | __attribute__((format(printf, 3, 4))) |
178 | | void crypt_logf(struct crypt_device *cd, int level, const char *format, ...) |
179 | 45.0k | { |
180 | 45.0k | va_list argp; |
181 | 45.0k | char target[LOG_MAX_LEN + 2]; |
182 | 45.0k | int len; |
183 | | |
184 | 45.0k | va_start(argp, format); |
185 | | |
186 | 45.0k | len = vsnprintf(&target[0], LOG_MAX_LEN, format, argp); |
187 | 45.0k | if (len > 0 && len < LOG_MAX_LEN) { |
188 | | /* All verbose and error messages in tools end with EOL. */ |
189 | 45.0k | if (level == CRYPT_LOG_VERBOSE || level == CRYPT_LOG_ERROR || |
190 | 45.0k | level == CRYPT_LOG_DEBUG || level == CRYPT_LOG_DEBUG_JSON) |
191 | 45.0k | strncat(target, "\n", LOG_MAX_LEN); |
192 | | |
193 | 45.0k | crypt_log(cd, level, target); |
194 | 45.0k | } |
195 | | |
196 | 45.0k | va_end(argp); |
197 | 45.0k | } |
198 | | |
199 | | static const char *mdata_device_path(struct crypt_device *cd) |
200 | 7.59k | { |
201 | 7.59k | return device_path(cd->metadata_device ?: cd->device); |
202 | 7.59k | } |
203 | | |
204 | | static const char *data_device_path(struct crypt_device *cd) |
205 | 0 | { |
206 | 0 | return device_path(cd->device); |
207 | 0 | } |
208 | | |
209 | | /* internal only */ |
210 | | struct device *crypt_metadata_device(struct crypt_device *cd) |
211 | 15.4k | { |
212 | 15.4k | return cd->metadata_device ?: cd->device; |
213 | 15.4k | } |
214 | | |
215 | | struct device *crypt_data_device(struct crypt_device *cd) |
216 | 2.37k | { |
217 | 2.37k | return cd->device; |
218 | 2.37k | } |
219 | | |
220 | | uint64_t crypt_get_metadata_size_bytes(struct crypt_device *cd) |
221 | 0 | { |
222 | 0 | assert(cd); |
223 | 0 | return cd->metadata_size; |
224 | 0 | } |
225 | | |
226 | | uint64_t crypt_get_keyslots_size_bytes(struct crypt_device *cd) |
227 | 0 | { |
228 | 0 | assert(cd); |
229 | 0 | return cd->keyslots_size; |
230 | 0 | } |
231 | | |
232 | | uint64_t crypt_get_data_offset_sectors(struct crypt_device *cd) |
233 | 0 | { |
234 | 0 | assert(cd); |
235 | 0 | return cd->data_offset; |
236 | 0 | } |
237 | | |
238 | | int crypt_opal_supported(struct crypt_device *cd, struct device *opal_device) |
239 | 0 | { |
240 | 0 | int r; |
241 | |
242 | 0 | assert(cd); |
243 | 0 | assert(opal_device); |
244 | | |
245 | 0 | r = opal_supported(cd, opal_device); |
246 | 0 | if (r <= 0) { |
247 | 0 | if (r == -ENOTSUP) |
248 | 0 | log_err(cd, _("OPAL support is disabled in libcryptsetup.")); |
249 | 0 | else |
250 | 0 | log_err(cd, _("Device %s or kernel does not support OPAL encryption."), |
251 | 0 | device_path(opal_device)); |
252 | 0 | r = -EINVAL; |
253 | 0 | } else |
254 | 0 | r = 0; |
255 | |
256 | 0 | return r; |
257 | 0 | } |
258 | | |
259 | | int init_crypto(struct crypt_device *ctx) |
260 | 9.49k | { |
261 | 9.49k | struct utsname uts; |
262 | 9.49k | int r; |
263 | | |
264 | 9.49k | r = crypt_random_init(ctx); |
265 | 9.49k | if (r < 0) { |
266 | 0 | log_err(ctx, _("Cannot initialize crypto RNG backend.")); |
267 | 0 | return r; |
268 | 0 | } |
269 | | |
270 | 9.49k | r = crypt_backend_init(crypt_fips_mode()); |
271 | 9.49k | if (r < 0) |
272 | 0 | log_err(ctx, _("Cannot initialize crypto backend.")); |
273 | | |
274 | 9.49k | if (!r && !_crypto_logged) { |
275 | 1 | log_dbg(ctx, "Crypto backend (%s%s) initialized in cryptsetup library version %s.", |
276 | 1 | crypt_backend_version(), crypt_argon2_version(), PACKAGE_VERSION); |
277 | | |
278 | 1 | if (!uname(&uts)) |
279 | 1 | log_dbg(ctx, "Detected kernel %s %s %s.", |
280 | 1 | uts.sysname, uts.release, uts.machine); |
281 | 1 | _crypto_logged = 1; |
282 | 1 | } |
283 | | |
284 | 9.49k | return r; |
285 | 9.49k | } |
286 | | |
287 | | static int process_key(struct crypt_device *cd, const char *hash_name, |
288 | | size_t key_size, const char *pass, size_t passLen, |
289 | | struct volume_key **vk) |
290 | 0 | { |
291 | 0 | int r; |
292 | 0 | void *key = NULL; |
293 | |
|
294 | 0 | if (!key_size) |
295 | 0 | return -EINVAL; |
296 | | |
297 | 0 | if (hash_name) { |
298 | 0 | key = crypt_safe_alloc(key_size); |
299 | 0 | if (!key) |
300 | 0 | return -ENOMEM; |
301 | | |
302 | 0 | r = crypt_plain_hash(cd, hash_name, key, key_size, pass, passLen); |
303 | 0 | if (r < 0) { |
304 | 0 | if (r == -ENOENT) |
305 | 0 | log_err(cd, _("Hash algorithm %s not supported."), |
306 | 0 | hash_name); |
307 | 0 | else |
308 | 0 | log_err(cd, _("Key processing error (using hash %s)."), |
309 | 0 | hash_name); |
310 | 0 | crypt_safe_free(key); |
311 | 0 | return -EINVAL; |
312 | 0 | } |
313 | 0 | *vk = crypt_alloc_volume_key_by_safe_alloc(&key); |
314 | 0 | } else if (passLen >= key_size) { |
315 | 0 | *vk = crypt_alloc_volume_key(key_size, pass); |
316 | 0 | } else { |
317 | 0 | key = crypt_safe_alloc(key_size); |
318 | 0 | if (!key) |
319 | 0 | return -ENOMEM; |
320 | | |
321 | 0 | crypt_safe_memcpy(key, pass, passLen); |
322 | |
323 | 0 | *vk = crypt_alloc_volume_key_by_safe_alloc(&key); |
324 | 0 | } |
325 | | |
326 | 0 | r = *vk ? 0 : -ENOMEM; |
327 | |
328 | 0 | crypt_safe_free(key); |
329 | |
330 | 0 | return r; |
331 | 0 | } |
332 | | |
333 | | static int isPLAIN(const char *type) |
334 | 5.19k | { |
335 | 5.19k | return (type && !strcmp(CRYPT_PLAIN, type)); |
336 | 5.19k | } |
337 | | |
338 | | static int isLUKS1(const char *type) |
339 | 14.6k | { |
340 | 14.6k | return (type && !strcmp(CRYPT_LUKS1, type)); |
341 | 14.6k | } |
342 | | |
343 | | static int isLUKS2(const char *type) |
344 | 12.7k | { |
345 | 12.7k | return (type && !strcmp(CRYPT_LUKS2, type)); |
346 | 12.7k | } |
347 | | |
348 | | static int isLUKS(const char *type) |
349 | 0 | { |
350 | 0 | return (isLUKS2(type) || isLUKS1(type)); |
351 | 0 | } |
352 | | |
353 | | static int isLOOPAES(const char *type) |
354 | 5.17k | { |
355 | 5.17k | return (type && !strcmp(CRYPT_LOOPAES, type)); |
356 | 5.17k | } |
357 | | |
358 | | static int isVERITY(const char *type) |
359 | 8.95k | { |
360 | 8.95k | return (type && !strcmp(CRYPT_VERITY, type)); |
361 | 8.95k | } |
362 | | |
363 | | static int isTCRYPT(const char *type) |
364 | 3.77k | { |
365 | 3.77k | return (type && !strcmp(CRYPT_TCRYPT, type)); |
366 | 3.77k | } |
367 | | |
368 | | static int isINTEGRITY(const char *type) |
369 | 8.95k | { |
370 | 8.95k | return (type && !strcmp(CRYPT_INTEGRITY, type)); |
371 | 8.95k | } |
372 | | |
373 | | static int isBITLK(const char *type) |
374 | 8.95k | { |
375 | 8.95k | return (type && !strcmp(CRYPT_BITLK, type)); |
376 | 8.95k | } |
377 | | |
378 | | static int isFVAULT2(const char *type) |
379 | 1.88k | { |
380 | 1.88k | return (type && !strcmp(CRYPT_FVAULT2, type)); |
381 | 1.88k | } |
382 | | |
383 | | static int _onlyLUKS(struct crypt_device *cd, uint32_t cdflags, uint32_t mask) |
384 | 0 | { |
385 | 0 | int r = 0; |
386 | |
387 | 0 | if (cd && !cd->type) { |
388 | 0 | if (!(cdflags & CRYPT_CD_QUIET)) |
389 | 0 | log_err(cd, _("Cannot determine device type. Incompatible activation of device?")); |
390 | 0 | r = -EINVAL; |
391 | 0 | } |
392 | |
393 | 0 | if (!cd || !isLUKS(cd->type)) { |
394 | 0 | if (!(cdflags & CRYPT_CD_QUIET)) |
395 | 0 | log_err(cd, _("This operation is supported only for LUKS device.")); |
396 | 0 | r = -EINVAL; |
397 | 0 | } |
398 | |
399 | 0 | if (r || (cdflags & CRYPT_CD_UNRESTRICTED) || isLUKS1(cd->type)) |
400 | 0 | return r; |
401 | | |
402 | 0 | return LUKS2_unmet_requirements(cd, &cd->u.luks2.hdr, mask, cdflags & CRYPT_CD_QUIET); |
403 | 0 | } |
404 | | |
405 | | static int onlyLUKSunrestricted(struct crypt_device *cd) |
406 | 0 | { |
407 | 0 | return _onlyLUKS(cd, CRYPT_CD_UNRESTRICTED, 0); |
408 | 0 | } |
409 | | |
410 | | static int onlyLUKSnoRequirements(struct crypt_device *cd) |
411 | 0 | { |
412 | 0 | return _onlyLUKS(cd, 0, 0); |
413 | 0 | } |
414 | | |
415 | | static int onlyLUKS(struct crypt_device *cd) |
416 | 0 | { |
417 | 0 | return _onlyLUKS(cd, 0, CRYPT_REQUIREMENT_OPAL | CRYPT_REQUIREMENT_INLINE_HW_TAGS); |
418 | 0 | } |
419 | | |
420 | | static int _onlyLUKS2(struct crypt_device *cd, uint32_t cdflags, uint32_t mask) |
421 | 0 | { |
422 | 0 | int r = 0; |
423 | |
424 | 0 | if (cd && !cd->type) { |
425 | 0 | if (!(cdflags & CRYPT_CD_QUIET)) |
426 | 0 | log_err(cd, _("Cannot determine device type. Incompatible activation of device?")); |
427 | 0 | r = -EINVAL; |
428 | 0 | } |
429 | |
430 | 0 | if (!cd || !isLUKS2(cd->type)) { |
431 | 0 | if (!(cdflags & CRYPT_CD_QUIET)) |
432 | 0 | log_err(cd, _("This operation is supported only for LUKS2 device.")); |
433 | 0 | r = -EINVAL; |
434 | 0 | } |
435 | |
436 | 0 | if (r || (cdflags & CRYPT_CD_UNRESTRICTED)) |
437 | 0 | return r; |
438 | | |
439 | 0 | return LUKS2_unmet_requirements(cd, &cd->u.luks2.hdr, mask, cdflags & CRYPT_CD_QUIET); |
440 | 0 | } |
441 | | |
442 | | static int onlyLUKS2unrestricted(struct crypt_device *cd) |
443 | 0 | { |
444 | 0 | return _onlyLUKS2(cd, CRYPT_CD_UNRESTRICTED, 0); |
445 | 0 | } |
446 | | |
447 | | /* Internal only */ |
448 | | int onlyLUKS2(struct crypt_device *cd) |
449 | 0 | { |
450 | 0 | return _onlyLUKS2(cd, 0, CRYPT_REQUIREMENT_OPAL | CRYPT_REQUIREMENT_INLINE_HW_TAGS); |
451 | 0 | } |
452 | | |
453 | | /* Internal only */ |
454 | | int onlyLUKS2reencrypt(struct crypt_device *cd) |
455 | 0 | { |
456 | 0 | return _onlyLUKS2(cd, 0, CRYPT_REQUIREMENT_ONLINE_REENCRYPT); |
457 | 0 | } |
458 | | |
459 | | static void crypt_set_null_type(struct crypt_device *cd) |
460 | 5.19k | { |
461 | 5.19k | free(cd->type); |
462 | 5.19k | cd->type = NULL; |
463 | 5.19k | cd->data_offset = 0; |
464 | 5.19k | cd->metadata_size = 0; |
465 | 5.19k | cd->keyslots_size = 0; |
466 | 5.19k | crypt_safe_memzero(&cd->u, sizeof(cd->u)); |
467 | 5.19k | } |
468 | | |
469 | | static void crypt_reset_null_type(struct crypt_device *cd) |
470 | 5.68k | { |
471 | 5.68k | if (cd->type) |
472 | 0 | return; |
473 | | |
474 | 5.68k | free(cd->u.none.active_name); |
475 | 5.68k | cd->u.none.active_name = NULL; |
476 | 5.68k | } |
477 | | |
478 | | /* keyslot helpers */ |
479 | | static int keyslot_verify_or_find_empty(struct crypt_device *cd, int *keyslot) |
480 | 0 | { |
481 | 0 | crypt_keyslot_info ki; |
482 | |
483 | 0 | if (*keyslot == CRYPT_ANY_SLOT) { |
484 | 0 | if (isLUKS1(cd->type)) |
485 | 0 | *keyslot = LUKS_keyslot_find_empty(&cd->u.luks1.hdr); |
486 | 0 | else |
487 | 0 | *keyslot = LUKS2_keyslot_find_empty(cd, &cd->u.luks2.hdr, 0); |
488 | 0 | if (*keyslot < 0) { |
489 | 0 | log_err(cd, _("All key slots full.")); |
490 | 0 | return -EINVAL; |
491 | 0 | } |
492 | 0 | } |
493 | | |
494 | 0 | if (isLUKS1(cd->type)) |
495 | 0 | ki = LUKS_keyslot_info(&cd->u.luks1.hdr, *keyslot); |
496 | 0 | else |
497 | 0 | ki = LUKS2_keyslot_info(&cd->u.luks2.hdr, *keyslot); |
498 | 0 | switch (ki) { |
499 | 0 | case CRYPT_SLOT_INVALID: |
500 | 0 | log_err(cd, _("Key slot %d is invalid, please select between 0 and %d."), |
501 | 0 | *keyslot, crypt_keyslot_max(cd->type) - 1); |
502 | 0 | return -EINVAL; |
503 | 0 | case CRYPT_SLOT_INACTIVE: |
504 | 0 | break; |
505 | 0 | default: |
506 | 0 | log_err(cd, _("Key slot %d is full, please select another one."), |
507 | 0 | *keyslot); |
508 | 0 | return -EINVAL; |
509 | 0 | } |
510 | | |
511 | 0 | log_dbg(cd, "Selected keyslot %d.", *keyslot); |
512 | 0 | return 0; |
513 | 0 | } |
514 | | |
515 | | int PLAIN_activate(struct crypt_device *cd, |
516 | | const char *name, |
517 | | struct volume_key *vk, |
518 | | uint64_t size, |
519 | | uint32_t flags) |
520 | 0 | { |
521 | 0 | int r; |
522 | 0 | struct crypt_dm_active_device dmd = { |
523 | 0 | .flags = flags, |
524 | 0 | .size = size, |
525 | 0 | }; |
526 | |
|
527 | 0 | log_dbg(cd, "Trying to activate PLAIN device %s using cipher %s.", |
528 | 0 | name, crypt_get_cipher_spec(cd)); |
529 | |
|
530 | 0 | if (MISALIGNED(size, device_block_size(cd, crypt_data_device(cd)) >> SECTOR_SHIFT)) { |
531 | 0 | log_err(cd, _("Device size is not aligned to device logical block size.")); |
532 | 0 | return -EINVAL; |
533 | 0 | } |
534 | | |
535 | 0 | r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd), |
536 | 0 | vk, crypt_get_cipher_spec(cd), crypt_get_iv_offset(cd), |
537 | 0 | crypt_get_data_offset(cd), NULL, 0, 0, crypt_get_sector_size(cd)); |
538 | 0 | if (r < 0) |
539 | 0 | return r; |
540 | | |
541 | 0 | r = create_or_reload_device(cd, name, CRYPT_PLAIN, &dmd); |
542 | |
543 | 0 | dm_targets_free(cd, &dmd); |
544 | 0 | return r; |
545 | 0 | } |
546 | | |
547 | | int crypt_confirm(struct crypt_device *cd, const char *msg) |
548 | 0 | { |
549 | 0 | if (!cd || !cd->confirm) |
550 | 0 | return 1; |
551 | 0 | else |
552 | 0 | return cd->confirm(msg, cd->confirm_usrptr); |
553 | 0 | } |
554 | | |
555 | | void crypt_set_log_callback(struct crypt_device *cd, |
556 | | void (*log)(int level, const char *msg, void *usrptr), |
557 | | void *usrptr) |
558 | 1.90k | { |
559 | 1.90k | if (!cd) { |
560 | 1.90k | _default_log = log; |
561 | 1.90k | _default_log_usrptr = usrptr; |
562 | 1.90k | } else { |
563 | 0 | cd->log = log; |
564 | 0 | cd->log_usrptr = usrptr; |
565 | 0 | } |
566 | 1.90k | } |
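The callback wiring above is the public hook for routing library messages into an application's own logger. A minimal caller-side sketch follows; the logger function and its behavior are illustrative, not part of setup.c:

#include <libcryptsetup.h>
#include <stdio.h>

static void my_log(int level, const char *msg, void *usrptr)
{
	/* verbose/error/debug messages already carry a trailing newline (see crypt_logf above) */
	fprintf(level == CRYPT_LOG_ERROR ? stderr : stdout, "%s", msg);
}

int main(void)
{
	/* raise the global debug level so CRYPT_LOG_DEBUG messages pass the level filter in crypt_log() */
	crypt_set_debug_level(CRYPT_DEBUG_ALL);
	/* a NULL context installs the default (global) callback used when cd->log is unset */
	crypt_set_log_callback(NULL, my_log, NULL);
	return 0;
}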
567 | | |
568 | | void crypt_set_confirm_callback(struct crypt_device *cd, |
569 | | int (*confirm)(const char *msg, void *usrptr), |
570 | | void *usrptr) |
571 | 0 | { |
572 | 0 | if (cd) { |
573 | 0 | cd->confirm = confirm; |
574 | 0 | cd->confirm_usrptr = usrptr; |
575 | 0 | } |
576 | 0 | } |
577 | | |
578 | | const char *crypt_get_dir(void) |
579 | 0 | { |
580 | 0 | return dm_get_dir(); |
581 | 0 | } |
582 | | |
583 | | int crypt_init(struct crypt_device **cd, const char *device) |
584 | 1.90k | { |
585 | 1.90k | struct crypt_device *h = NULL; |
586 | 1.90k | int r; |
587 | | |
588 | 1.90k | if (!cd) |
589 | 0 | return -EINVAL; |
590 | | |
591 | 1.90k | log_dbg(NULL, "Allocating context for crypt device %s.", device ?: "(none)"); |
592 | | #if !HAVE_DECL_O_CLOEXEC |
593 | | log_dbg(NULL, "Running without O_CLOEXEC."); |
594 | | #endif |
595 | | |
596 | 1.90k | if (!(h = malloc(sizeof(struct crypt_device)))) |
597 | 0 | return -ENOMEM; |
598 | | |
599 | 0 | memset(h, 0, sizeof(*h)); |
600 | |
601 | 0 | r = device_alloc(NULL, &h->device, device); |
602 | 0 | if (r < 0) { |
603 | 0 | free(h); |
604 | 0 | return r; |
605 | 0 | } |
606 | | |
607 | 0 | dm_backend_init(NULL); |
608 | |
609 | 0 | h->rng_type = crypt_random_default_key_rng(); |
610 | |
611 | 0 | *cd = h; |
612 | 0 | return 0; |
613 | 0 | } |
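For reference, a typical caller pairs crypt_init() with crypt_free(); a minimal sketch, in which the wrapper function and the device path are hypothetical:

#include <libcryptsetup.h>

static int open_context(struct crypt_device **out)
{
	struct crypt_device *cd = NULL;
	int r = crypt_init(&cd, "/dev/sdb1"); /* hypothetical block device path */

	if (r < 0)
		return r;  /* e.g. -ENOMEM from the context allocation or an error from device_alloc() */

	*out = cd;         /* the caller releases the context later with crypt_free(cd) */
	return 0;
}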
614 | | |
615 | | static int crypt_check_data_device_size(struct crypt_device *cd) |
616 | 0 | { |
617 | 0 | int r; |
618 | 0 | uint64_t size, size_min; |
619 | | |
620 | | /* Check data device size, require at least header or one sector */ |
621 | 0 | size_min = crypt_get_data_offset(cd) << SECTOR_SHIFT ?: SECTOR_SIZE; |
622 | |
623 | 0 | r = device_size(cd->device, &size); |
624 | 0 | if (r < 0) |
625 | 0 | return r; |
626 | | |
627 | 0 | if (size < size_min) { |
628 | 0 | log_err(cd, _("Header detected but device %s is too small."), |
629 | 0 | device_path(cd->device)); |
630 | 0 | return -EINVAL; |
631 | 0 | } |
632 | | |
633 | 0 | return r; |
634 | 0 | } |
635 | | |
636 | | static int _crypt_set_data_device(struct crypt_device *cd, const char *device) |
637 | 0 | { |
638 | 0 | struct device *dev = NULL; |
639 | 0 | int r; |
640 | |
641 | 0 | r = device_alloc(cd, &dev, device); |
642 | 0 | if (r < 0) |
643 | 0 | return r; |
644 | | |
645 | 0 | if (!cd->metadata_device) { |
646 | 0 | cd->metadata_device = cd->device; |
647 | 0 | } else |
648 | 0 | device_free(cd, cd->device); |
649 | |
650 | 0 | cd->device = dev; |
651 | |
652 | 0 | r = crypt_check_data_device_size(cd); |
653 | 0 | if (!r && isLUKS2(cd->type)) |
654 | 0 | device_set_block_size(crypt_data_device(cd), LUKS2_get_sector_size(&cd->u.luks2.hdr)); |
655 | |
656 | 0 | return r; |
657 | 0 | } |
658 | | |
659 | | int crypt_set_data_device(struct crypt_device *cd, const char *device) |
660 | 0 | { |
661 | | /* metadata device must be set */ |
662 | 0 | if (!cd || !cd->device || !device) |
663 | 0 | return -EINVAL; |
664 | | |
665 | 0 | log_dbg(cd, "Setting ciphertext data device to %s.", device ?: "(none)"); |
666 | |
667 | 0 | if (!isLUKS1(cd->type) && !isLUKS2(cd->type) && !isVERITY(cd->type) && |
668 | 0 | !isINTEGRITY(cd->type) && !isTCRYPT(cd->type)) { |
669 | 0 | log_err(cd, _("This operation is not supported for this device type.")); |
670 | 0 | return -EINVAL; |
671 | 0 | } |
672 | | |
673 | 0 | if (isLUKS2(cd->type) && crypt_get_luks2_reencrypt(cd)) { |
674 | 0 | log_err(cd, _("Illegal operation with reencryption in-progress.")); |
675 | 0 | return -EINVAL; |
676 | 0 | } |
677 | | |
678 | 0 | return _crypt_set_data_device(cd, device); |
679 | 0 | } |
680 | | |
681 | | int crypt_init_data_device(struct crypt_device **cd, const char *device, const char *data_device) |
682 | 0 | { |
683 | 0 | int r; |
684 | |
685 | 0 | if (!cd) |
686 | 0 | return -EINVAL; |
687 | | |
688 | 0 | r = crypt_init(cd, device); |
689 | 0 | if (r || !data_device || !strcmp(device, data_device)) |
690 | 0 | return r; |
691 | | |
692 | 0 | log_dbg(NULL, "Setting ciphertext data device to %s.", data_device); |
693 | 0 | r = _crypt_set_data_device(*cd, data_device); |
694 | 0 | if (r) { |
695 | 0 | crypt_free(*cd); |
696 | 0 | *cd = NULL; |
697 | 0 | } |
698 | |
699 | 0 | return r; |
700 | 0 | } |
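A sketch of the detached-header path handled above, assuming a header image plus a separate data device; the wrapper function and both paths are illustrative only:

#include <libcryptsetup.h>

static int open_detached(struct crypt_device **out)
{
	struct crypt_device *cd = NULL;
	/* metadata is read from header.img, ciphertext lives on /dev/sdc1 (illustrative paths) */
	int r = crypt_init_data_device(&cd, "/path/to/header.img", "/dev/sdc1");

	if (r < 0)
		return r;

	*out = cd;
	return 0;
}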
701 | | |
702 | | static void crypt_free_type(struct crypt_device *cd, const char *force_type) |
703 | 5.19k | { |
704 | 5.19k | const char *type = force_type ?: cd->type; |
705 | | |
706 | 5.19k | if (isPLAIN(type)) { |
707 | 0 | free(CONST_CAST(void*)cd->u.plain.hdr.hash); |
708 | 0 | free(cd->u.plain.cipher); |
709 | 0 | free(cd->u.plain.cipher_spec); |
710 | 1.90k | } else if (isLUKS2(type)) { |
711 | 0 | LUKS2_reencrypt_free(cd, cd->u.luks2.rh); |
712 | 0 | LUKS2_hdr_free(cd, &cd->u.luks2.hdr); |
713 | 0 | free(cd->u.luks2.keyslot_cipher); |
714 | 1.90k | } else if (isLUKS1(type)) { |
715 | 19 | free(cd->u.luks1.cipher_spec); |
716 | 1.88k | } else if (isLOOPAES(type)) { |
717 | 0 | free(CONST_CAST(void*)cd->u.loopaes.hdr.hash); |
718 | 0 | free(cd->u.loopaes.cipher); |
719 | 0 | free(cd->u.loopaes.cipher_spec); |
720 | 1.88k | } else if (isVERITY(type)) { |
721 | 0 | free(CONST_CAST(void*)cd->u.verity.hdr.hash_name); |
722 | 0 | free(CONST_CAST(void*)cd->u.verity.hdr.data_device); |
723 | 0 | free(CONST_CAST(void*)cd->u.verity.hdr.hash_device); |
724 | 0 | free(CONST_CAST(void*)cd->u.verity.hdr.fec_device); |
725 | 0 | free(CONST_CAST(void*)cd->u.verity.hdr.salt); |
726 | 0 | free(CONST_CAST(void*)cd->u.verity.root_hash); |
727 | 0 | free(cd->u.verity.uuid); |
728 | 0 | device_free(cd, cd->u.verity.fec_device); |
729 | 1.88k | } else if (isINTEGRITY(type)) { |
730 | 0 | free(CONST_CAST(void*)cd->u.integrity.params.integrity); |
731 | 0 | free(CONST_CAST(void*)cd->u.integrity.params.journal_integrity); |
732 | 0 | free(CONST_CAST(void*)cd->u.integrity.params.journal_crypt); |
733 | 0 | crypt_free_volume_key(cd->u.integrity.journal_crypt_key); |
734 | 0 | crypt_free_volume_key(cd->u.integrity.journal_mac_key); |
735 | 1.88k | } else if (isBITLK(type)) { |
736 | 1.88k | free(cd->u.bitlk.cipher_spec); |
737 | 1.88k | BITLK_bitlk_metadata_free(&cd->u.bitlk.params); |
738 | 1.88k | } else if (!type) { |
739 | 1.40k | free(cd->u.none.active_name); |
740 | 1.40k | cd->u.none.active_name = NULL; |
741 | 1.40k | } |
742 | | |
743 | 1.90k | crypt_set_null_type(cd); |
744 | 1.90k | } |
745 | | |
746 | | /* internal only */ |
747 | | struct crypt_pbkdf_type *crypt_get_pbkdf(struct crypt_device *cd) |
748 | 1.90k | { |
749 | 1.90k | return &cd->pbkdf; |
750 | 1.90k | } |
751 | | |
752 | | /* |
753 | | * crypt_load() helpers |
754 | | */ |
755 | | static int _crypt_load_luks2(struct crypt_device *cd, int reload, int repair) |
756 | 0 | { |
757 | 0 | int r; |
758 | 0 | char *type = NULL; |
759 | 0 | struct luks2_hdr hdr2 = {}; |
760 | |
761 | 0 | log_dbg(cd, "%soading LUKS2 header (repair %sabled).", reload ? "Rel" : "L", repair ? "en" : "dis"); |
762 | |
763 | 0 | r = LUKS2_hdr_read(cd, &hdr2, repair); |
764 | 0 | if (r) |
765 | 0 | return r; |
766 | | |
767 | 0 | if (!reload) { |
768 | 0 | type = strdup(CRYPT_LUKS2); |
769 | 0 | if (!type) { |
770 | 0 | r = -ENOMEM; |
771 | 0 | goto out; |
772 | 0 | } |
773 | 0 | } |
774 | | |
775 | 0 | if (verify_pbkdf_params(cd, &cd->pbkdf)) { |
776 | 0 | r = init_pbkdf_type(cd, NULL, CRYPT_LUKS2); |
777 | 0 | if (r) |
778 | 0 | goto out; |
779 | 0 | } |
780 | | |
781 | 0 | if (reload) { |
782 | 0 | LUKS2_hdr_free(cd, &cd->u.luks2.hdr); |
783 | 0 | free(cd->u.luks2.keyslot_cipher); |
784 | 0 | } else |
785 | 0 | cd->type = type; |
786 | |
787 | 0 | r = 0; |
788 | 0 | memcpy(&cd->u.luks2.hdr, &hdr2, sizeof(hdr2)); |
789 | 0 | cd->u.luks2.keyslot_cipher = NULL; |
790 | 0 | cd->u.luks2.rh = NULL; |
791 | |
792 | 0 | out: |
793 | 0 | if (r) { |
794 | 0 | free(type); |
795 | 0 | LUKS2_hdr_free(cd, &hdr2); |
796 | 0 | } |
797 | 0 | return r; |
798 | 0 | } |
799 | | |
800 | | static void _luks2_rollback(struct crypt_device *cd) |
801 | 0 | { |
802 | 0 | if (!cd || !isLUKS2(cd->type)) |
803 | 0 | return; |
804 | | |
805 | 0 | if (LUKS2_hdr_rollback(cd, &cd->u.luks2.hdr)) { |
806 | 0 | log_err(cd, _("Failed to rollback LUKS2 metadata in memory.")); |
807 | 0 | return; |
808 | 0 | } |
809 | | |
810 | 0 | free(cd->u.luks2.keyslot_cipher); |
811 | 0 | cd->u.luks2.keyslot_cipher = NULL; |
812 | 0 | } |
813 | | |
814 | | static int _crypt_load_luks(struct crypt_device *cd, const char *requested_type, |
815 | | bool quiet, bool repair) |
816 | 1.90k | { |
817 | 1.90k | char *cipher_spec; |
818 | 1.90k | struct luks_phdr hdr = {}; |
819 | 1.90k | int r, version; |
820 | | |
821 | 1.90k | r = init_crypto(cd); |
822 | 1.90k | if (r < 0) |
823 | 0 | return r; |
824 | | |
825 | | /* This will return 0 if primary LUKS2 header is damaged */ |
826 | 1.90k | version = LUKS2_hdr_version_unlocked(cd, NULL); |
827 | | |
828 | 1.90k | if ((isLUKS1(requested_type) && version == 2) || |
829 | 1.90k | (isLUKS2(requested_type) && version == 1)) |
830 | 2 | return -EINVAL; |
831 | | |
832 | 1.90k | if (requested_type) |
833 | 1.90k | version = 0; |
834 | | |
835 | 1.90k | if (isLUKS1(requested_type) || version == 1) { |
836 | 1.90k | if (isLUKS2(cd->type)) { |
837 | 0 | log_dbg(cd, "Context is already initialized to type %s", cd->type); |
838 | 0 | return -EINVAL; |
839 | 0 | } |
840 | | |
841 | 1.90k | if (verify_pbkdf_params(cd, &cd->pbkdf)) { |
842 | 1.90k | r = init_pbkdf_type(cd, NULL, CRYPT_LUKS1); |
843 | 1.90k | if (r) |
844 | 0 | return r; |
845 | 1.90k | } |
846 | | |
847 | 1.90k | r = LUKS_read_phdr(&hdr, !quiet, repair, cd); |
848 | 1.90k | if (r) |
849 | 1.88k | goto out; |
850 | | |
851 | 19 | if (!cd->type && !(cd->type = strdup(CRYPT_LUKS1))) { |
852 | 0 | r = -ENOMEM; |
853 | 0 | goto out; |
854 | 0 | } |
855 | | |
856 | | /* Set hash to the same as in the loaded header */ |
857 | 19 | if (!cd->pbkdf.hash || strcmp(cd->pbkdf.hash, hdr.hashSpec)) { |
858 | 17 | free(CONST_CAST(void*)cd->pbkdf.hash); |
859 | 17 | cd->pbkdf.hash = strdup(hdr.hashSpec); |
860 | 17 | if (!cd->pbkdf.hash) { |
861 | 0 | r = -ENOMEM; |
862 | 0 | goto out; |
863 | 0 | } |
864 | 17 | } |
865 | | |
866 | 19 | if (asprintf(&cipher_spec, "%s-%s", hdr.cipherName, hdr.cipherMode) < 0) { |
867 | 0 | r = -ENOMEM; |
868 | 0 | goto out; |
869 | 0 | } |
870 | | |
871 | 19 | free(cd->u.luks1.cipher_spec); |
872 | 19 | cd->u.luks1.cipher_spec = cipher_spec; |
873 | | |
874 | 19 | memcpy(&cd->u.luks1.hdr, &hdr, sizeof(hdr)); |
875 | 19 | } else if (isLUKS2(requested_type) || version == 2 || version == 0) { |
876 | 0 | if (isLUKS1(cd->type)) { |
877 | 0 | log_dbg(cd, "Context is already initialized to type %s", cd->type); |
878 | 0 | return -EINVAL; |
879 | 0 | } |
880 | | |
881 | | /* |
882 | | * Current LUKS2 repair just overrides blkid probes |
883 | | * and perform auto-recovery if possible. This is safe |
884 | | * unless future LUKS2 repair code do something more |
885 | | * sophisticated. In such case we would need to check |
886 | | * for LUKS2 requirements and decide if it's safe to |
887 | | * perform repair. |
888 | | */ |
889 | 0 | r = _crypt_load_luks2(cd, cd->type != NULL, repair); |
890 | 0 | if (!r) |
891 | 0 | device_set_block_size(crypt_data_device(cd), LUKS2_get_sector_size(&cd->u.luks2.hdr)); |
892 | 0 | else if (!quiet) |
893 | 0 | log_err(cd, _("Device %s is not a valid LUKS device."), mdata_device_path(cd)); |
894 | 0 | } else { |
895 | 0 | if (version > 2) |
896 | 0 | log_err(cd, _("Unsupported LUKS version %d."), version); |
897 | 0 | r = -EINVAL; |
898 | 0 | } |
899 | 1.90k | out: |
900 | 1.90k | crypt_safe_memzero(&hdr, sizeof(hdr)); |
901 | | |
902 | 1.90k | return r; |
903 | 1.90k | } |
904 | | |
905 | | static int _crypt_load_tcrypt(struct crypt_device *cd, struct crypt_params_tcrypt *params) |
906 | 0 | { |
907 | 0 | int r; |
908 | |
909 | 0 | if (!params) |
910 | 0 | return -EINVAL; |
911 | | |
912 | 0 | r = init_crypto(cd); |
913 | 0 | if (r < 0) |
914 | 0 | return r; |
915 | | |
916 | 0 | memcpy(&cd->u.tcrypt.params, params, sizeof(*params)); |
917 | |
918 | 0 | r = TCRYPT_read_phdr(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params); |
919 | |
920 | 0 | cd->u.tcrypt.params.passphrase = NULL; |
921 | 0 | cd->u.tcrypt.params.passphrase_size = 0; |
922 | 0 | cd->u.tcrypt.params.keyfiles = NULL; |
923 | 0 | cd->u.tcrypt.params.keyfiles_count = 0; |
924 | 0 | cd->u.tcrypt.params.veracrypt_pim = 0; |
925 | |
926 | 0 | if (r < 0) |
927 | 0 | goto out; |
928 | | |
929 | 0 | if (!cd->type && !(cd->type = strdup(CRYPT_TCRYPT))) |
930 | 0 | r = -ENOMEM; |
931 | 0 | out: |
932 | 0 | if (r < 0) |
933 | 0 | crypt_free_type(cd, CRYPT_TCRYPT); |
934 | 0 | return r; |
935 | 0 | } |
936 | | |
937 | | static int _crypt_load_verity(struct crypt_device *cd, struct crypt_params_verity *params) |
938 | 0 | { |
939 | 0 | int r; |
940 | 0 | uint64_t sb_offset = 0; |
941 | |
942 | 0 | r = init_crypto(cd); |
943 | 0 | if (r < 0) |
944 | 0 | return r; |
945 | | |
946 | 0 | if (params && params->flags & CRYPT_VERITY_NO_HEADER) |
947 | 0 | return -EINVAL; |
948 | | |
949 | 0 | if (params) |
950 | 0 | sb_offset = params->hash_area_offset; |
951 | |
952 | 0 | r = VERITY_read_sb(cd, sb_offset, &cd->u.verity.uuid, &cd->u.verity.hdr); |
953 | 0 | if (r < 0) |
954 | 0 | goto out; |
955 | | |
956 | 0 | if (!cd->type && !(cd->type = strdup(CRYPT_VERITY))) { |
957 | 0 | r = -ENOMEM; |
958 | 0 | goto out; |
959 | 0 | } |
960 | | |
961 | 0 | if (params) |
962 | 0 | cd->u.verity.hdr.flags = params->flags; |
963 | | |
964 | | /* Hash availability checked in sb load */ |
965 | 0 | cd->u.verity.root_hash_size = crypt_hash_size(cd->u.verity.hdr.hash_name); |
966 | 0 | if (cd->u.verity.root_hash_size > 4096) { |
967 | 0 | r = -EINVAL; |
968 | 0 | goto out; |
969 | 0 | } |
970 | | |
971 | 0 | if (params && params->data_device && |
972 | 0 | (r = crypt_set_data_device(cd, params->data_device)) < 0) |
973 | 0 | goto out; |
974 | | |
975 | 0 | if (params && params->fec_device) { |
976 | 0 | r = device_alloc(cd, &cd->u.verity.fec_device, params->fec_device); |
977 | 0 | if (r < 0) |
978 | 0 | goto out; |
979 | 0 | cd->u.verity.hdr.fec_area_offset = params->fec_area_offset; |
980 | 0 | cd->u.verity.hdr.fec_roots = params->fec_roots; |
981 | 0 | } |
982 | 0 | out: |
983 | 0 | if (r < 0) |
984 | 0 | crypt_free_type(cd, CRYPT_VERITY); |
985 | 0 | return r; |
986 | 0 | } |
987 | | |
988 | | static int _crypt_load_integrity(struct crypt_device *cd, |
989 | | struct crypt_params_integrity *params) |
990 | 0 | { |
991 | 0 | int r; |
992 | |
993 | 0 | r = init_crypto(cd); |
994 | 0 | if (r < 0) |
995 | 0 | return r; |
996 | | |
997 | 0 | r = INTEGRITY_read_sb(cd, &cd->u.integrity.params, &cd->u.integrity.sb_flags); |
998 | 0 | if (r < 0) |
999 | 0 | goto out; |
1000 | | |
1001 | | // FIXME: add checks for fields in integrity sb vs params |
1002 | | |
1003 | 0 | r = -ENOMEM; |
1004 | 0 | if (params) { |
1005 | 0 | cd->u.integrity.params.journal_watermark = params->journal_watermark; |
1006 | 0 | cd->u.integrity.params.journal_commit_time = params->journal_commit_time; |
1007 | 0 | cd->u.integrity.params.buffer_sectors = params->buffer_sectors; |
1008 | 0 | if (params->integrity && |
1009 | 0 | !(cd->u.integrity.params.integrity = strdup(params->integrity))) |
1010 | 0 | goto out; |
1011 | 0 | cd->u.integrity.params.integrity_key_size = params->integrity_key_size; |
1012 | 0 | if (params->journal_integrity && |
1013 | 0 | !(cd->u.integrity.params.journal_integrity = strdup(params->journal_integrity))) |
1014 | 0 | goto out; |
1015 | 0 | if (params->journal_crypt && |
1016 | 0 | !(cd->u.integrity.params.journal_crypt = strdup(params->journal_crypt))) |
1017 | 0 | goto out; |
1018 | | |
1019 | 0 | if (params->journal_crypt_key) { |
1020 | 0 | cd->u.integrity.journal_crypt_key = |
1021 | 0 | crypt_alloc_volume_key(params->journal_crypt_key_size, |
1022 | 0 | params->journal_crypt_key); |
1023 | 0 | if (!cd->u.integrity.journal_crypt_key) |
1024 | 0 | goto out; |
1025 | 0 | } |
1026 | 0 | if (params->journal_integrity_key) { |
1027 | 0 | cd->u.integrity.journal_mac_key = |
1028 | 0 | crypt_alloc_volume_key(params->journal_integrity_key_size, |
1029 | 0 | params->journal_integrity_key); |
1030 | 0 | if (!cd->u.integrity.journal_mac_key) |
1031 | 0 | goto out; |
1032 | 0 | } |
1033 | 0 | } |
1034 | | |
1035 | 0 | if (!cd->type && !(cd->type = strdup(CRYPT_INTEGRITY))) |
1036 | 0 | goto out; |
1037 | 0 | r = 0; |
1038 | 0 | out: |
1039 | 0 | if (r < 0) |
1040 | 0 | crypt_free_type(cd, CRYPT_INTEGRITY); |
1041 | 0 | return r; |
1042 | 0 | } |
1043 | | |
1044 | | static int _crypt_load_bitlk(struct crypt_device *cd) |
1045 | 1.88k | { |
1046 | 1.88k | int r; |
1047 | | |
1048 | 1.88k | r = init_crypto(cd); |
1049 | 1.88k | if (r < 0) |
1050 | 0 | return r; |
1051 | | |
1052 | 1.88k | r = BITLK_read_sb(cd, &cd->u.bitlk.params); |
1053 | 1.88k | if (r < 0) |
1054 | 1.40k | goto out; |
1055 | | |
1056 | 488 | if (asprintf(&cd->u.bitlk.cipher_spec, "%s-%s", |
1057 | 488 | cd->u.bitlk.params.cipher, cd->u.bitlk.params.cipher_mode) < 0) { |
1058 | 0 | cd->u.bitlk.cipher_spec = NULL; |
1059 | 0 | r = -ENOMEM; |
1060 | 0 | goto out; |
1061 | 0 | } |
1062 | | |
1063 | 488 | if (!cd->type && !(cd->type = strdup(CRYPT_BITLK))) { |
1064 | 0 | r = -ENOMEM; |
1065 | 0 | goto out; |
1066 | 0 | } |
1067 | | |
1068 | 488 | device_set_block_size(crypt_data_device(cd), cd->u.bitlk.params.sector_size); |
1069 | 1.88k | out: |
1070 | 1.88k | if (r < 0) |
1071 | 1.40k | crypt_free_type(cd, CRYPT_BITLK); |
1072 | 1.88k | return r; |
1073 | 488 | } |
1074 | | |
1075 | | static int _crypt_load_fvault2(struct crypt_device *cd) |
1076 | 1.88k | { |
1077 | 1.88k | int r; |
1078 | | |
1079 | 1.88k | r = init_crypto(cd); |
1080 | 1.88k | if (r < 0) |
1081 | 0 | return r; |
1082 | | |
1083 | 1.88k | r = FVAULT2_read_metadata(cd, &cd->u.fvault2.params); |
1084 | 1.88k | if (r < 0) |
1085 | 1.88k | goto out; |
1086 | | |
1087 | 0 | if (!cd->type && !(cd->type = strdup(CRYPT_FVAULT2))) |
1088 | 0 | r = -ENOMEM; |
1089 | 1.88k | out: |
1090 | 1.88k | if (r < 0) |
1091 | 1.88k | crypt_free_type(cd, CRYPT_FVAULT2); |
1092 | 1.88k | return r; |
1093 | 0 | } |
1094 | | |
1095 | | int crypt_load(struct crypt_device *cd, |
1096 | | const char *requested_type, |
1097 | | void *params) |
1098 | 5.68k | { |
1099 | 5.68k | int r; |
1100 | | |
1101 | 5.68k | if (!cd) |
1102 | 0 | return -EINVAL; |
1103 | | |
1104 | 5.68k | log_dbg(cd, "Trying to load %s crypt type from device %s.", |
1105 | 5.68k | requested_type ?: "any", mdata_device_path(cd) ?: "(none)"); |
1106 | | |
1107 | 18.4E | if (!crypt_metadata_device(cd)) |
1108 | 0 | return -EINVAL; |
1109 | | |
1110 | 18.4E | crypt_reset_null_type(cd); |
1111 | 18.4E | cd->data_offset = 0; |
1112 | 18.4E | cd->metadata_size = 0; |
1113 | 18.4E | cd->keyslots_size = 0; |
1114 | | |
1115 | 18.4E | if (!requested_type || isLUKS1(requested_type) || isLUKS2(requested_type)) { |
1116 | 1.90k | if (cd->type && !isLUKS1(cd->type) && !isLUKS2(cd->type)) { |
1117 | 0 | log_dbg(cd, "Context is already initialized to type %s", cd->type); |
1118 | 0 | return -EINVAL; |
1119 | 0 | } |
1120 | | |
1121 | 1.90k | r = _crypt_load_luks(cd, requested_type, true, false); |
1122 | 18.4E | } else if (isVERITY(requested_type)) { |
1123 | 0 | if (cd->type && !isVERITY(cd->type)) { |
1124 | 0 | log_dbg(cd, "Context is already initialized to type %s", cd->type); |
1125 | 0 | return -EINVAL; |
1126 | 0 | } |
1127 | 0 | r = _crypt_load_verity(cd, params); |
1128 | 18.4E | } else if (isTCRYPT(requested_type)) { |
1129 | 0 | if (cd->type && !isTCRYPT(cd->type)) { |
1130 | 0 | log_dbg(cd, "Context is already initialized to type %s", cd->type); |
1131 | 0 | return -EINVAL; |
1132 | 0 | } |
1133 | 0 | r = _crypt_load_tcrypt(cd, params); |
1134 | 18.4E | } else if (isINTEGRITY(requested_type)) { |
1135 | 0 | if (cd->type && !isINTEGRITY(cd->type)) { |
1136 | 0 | log_dbg(cd, "Context is already initialized to type %s", cd->type); |
1137 | 0 | return -EINVAL; |
1138 | 0 | } |
1139 | 0 | r = _crypt_load_integrity(cd, params); |
1140 | 18.4E | } else if (isBITLK(requested_type)) { |
1141 | 1.88k | if (cd->type && !isBITLK(cd->type)) { |
1142 | 0 | log_dbg(cd, "Context is already initialized to type %s", cd->type); |
1143 | 0 | return -EINVAL; |
1144 | 0 | } |
1145 | 1.88k | r = _crypt_load_bitlk(cd); |
1146 | 18.4E | } else if (isFVAULT2(requested_type)) { |
1147 | 1.88k | if (cd->type && !isFVAULT2(cd->type)) { |
1148 | 0 | log_dbg(cd, "Context is already initialized to type %s", cd->type); |
1149 | 0 | return -EINVAL; |
1150 | 0 | } |
1151 | 1.88k | r = _crypt_load_fvault2(cd); |
1152 | 1.88k | } else |
1153 | 18.4E | return -EINVAL; |
1154 | | |
1155 | 5.68k | return r; |
1156 | 18.4E | } |
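Taken together with crypt_init(), the type dispatch above is what a caller exercises when probing on-disk metadata. A sketch assuming a LUKS2-formatted device; the function name, path argument and error handling are illustrative:

#include <libcryptsetup.h>

static int probe_luks2(const char *path)
{
	struct crypt_device *cd = NULL;
	int r = crypt_init(&cd, path);

	if (r < 0)
		return r;

	/* passing NULL instead of CRYPT_LUKS2 would let the LUKS branch autodetect the header version */
	r = crypt_load(cd, CRYPT_LUKS2, NULL);

	crypt_free(cd);
	return r;
}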
1157 | | |
1158 | | /* |
1159 | | * crypt_init() helpers |
1160 | | */ |
1161 | | static int _init_by_name_crypt_none(struct crypt_device *cd) |
1162 | 0 | { |
1163 | 0 | int r; |
1164 | 0 | char _mode[MAX_CIPHER_LEN]; |
1165 | 0 | struct crypt_dm_active_device dmd; |
1166 | 0 | struct dm_target *tgt = &dmd.segment; |
1167 | |
|
1168 | 0 | if (cd->type || !cd->u.none.active_name) |
1169 | 0 | return -EINVAL; |
1170 | | |
1171 | 0 | r = dm_query_device(cd, cd->u.none.active_name, |
1172 | 0 | DM_ACTIVE_CRYPT_CIPHER | |
1173 | 0 | DM_ACTIVE_CRYPT_KEYSIZE, &dmd); |
1174 | 0 | if (r < 0) |
1175 | 0 | return r; |
1176 | 0 | if (!single_segment(&dmd) || tgt->type != DM_CRYPT) |
1177 | 0 | r = -EINVAL; |
1178 | 0 | if (r >= 0) |
1179 | 0 | r = crypt_parse_name_and_mode(tgt->u.crypt.cipher, |
1180 | 0 | cd->u.none.cipher, NULL, |
1181 | 0 | _mode); |
1182 | |
1183 | 0 | if (!r) { |
1184 | 0 | r = snprintf(cd->u.none.cipher_spec, sizeof(cd->u.none.cipher_spec), |
1185 | 0 | "%s-%s", cd->u.none.cipher, _mode); |
1186 | 0 | if (r < 0 || (size_t)r >= sizeof(cd->u.none.cipher_spec)) |
1187 | 0 | r = -EINVAL; |
1188 | 0 | else { |
1189 | 0 | cd->u.none.cipher_mode = cd->u.none.cipher_spec + strlen(cd->u.none.cipher) + 1; |
1190 | 0 | cd->u.none.key_size = crypt_volume_key_length(tgt->u.crypt.vk); |
1191 | 0 | r = 0; |
1192 | 0 | } |
1193 | 0 | } |
1194 | |
1195 | 0 | if (!r && tgt->u.crypt.integrity) { |
1196 | 0 | r = snprintf(cd->u.none.integrity_spec, sizeof(cd->u.none.integrity_spec), |
1197 | 0 | "%s", tgt->u.crypt.integrity); |
1198 | 0 | if (r < 0 || (size_t)r >= sizeof(cd->u.none.integrity_spec)) |
1199 | 0 | r = -EINVAL; |
1200 | 0 | else |
1201 | 0 | r = 0; |
1202 | 0 | } |
1203 | |
|
1204 | 0 | cd->u.none.sector_size = tgt->u.crypt.sector_size; |
1205 | |
1206 | 0 | dm_targets_free(cd, &dmd); |
1207 | 0 | return r; |
1208 | 0 | } |
1209 | | |
1210 | | static const char *LUKS_UUID(struct crypt_device *cd) |
1211 | 0 | { |
1212 | 0 | if (!cd) |
1213 | 0 | return NULL; |
1214 | 0 | else if (isLUKS1(cd->type)) |
1215 | 0 | return cd->u.luks1.hdr.uuid; |
1216 | 0 | else if (isLUKS2(cd->type)) |
1217 | 0 | return cd->u.luks2.hdr.uuid; |
1218 | | |
1219 | 0 | return NULL; |
1220 | 0 | } |
1221 | | |
1222 | | static int _init_by_name_crypt(struct crypt_device *cd, const char *name) |
1223 | 0 | { |
1224 | 0 | bool found = false; |
1225 | 0 | char **dep, *cipher_spec = NULL, cipher[MAX_CIPHER_LEN], cipher_mode[MAX_CIPHER_LEN]; |
1226 | 0 | char deps_uuid_prefix[40], *deps[MAX_DM_DEPS+1] = {}; |
1227 | 0 | const char *dev; |
1228 | 0 | char *iname = NULL; |
1229 | 0 | int key_nums, r; |
1230 | 0 | struct crypt_dm_active_device dmd, dmdi = {}, dmdep = {}; |
1231 | 0 | struct dm_target *tgt = &dmd.segment, *tgti = &dmdi.segment; |
1232 | |
1233 | 0 | r = dm_query_device(cd, name, |
1234 | 0 | DM_ACTIVE_DEVICE | |
1235 | 0 | DM_ACTIVE_UUID | |
1236 | 0 | DM_ACTIVE_CRYPT_CIPHER | |
1237 | 0 | DM_ACTIVE_CRYPT_KEYSIZE, &dmd); |
1238 | 0 | if (r < 0) |
1239 | 0 | return r; |
1240 | | |
1241 | 0 | if (tgt->type != DM_CRYPT && tgt->type != DM_LINEAR) { |
1242 | 0 | log_dbg(cd, "Unsupported device table detected in %s.", name); |
1243 | 0 | r = -EINVAL; |
1244 | 0 | goto out; |
1245 | 0 | } |
1246 | | |
1247 | 0 | r = -EINVAL; |
1248 | |
1249 | 0 | if (dmd.uuid) { |
1250 | 0 | r = snprintf(deps_uuid_prefix, sizeof(deps_uuid_prefix), CRYPT_SUBDEV "-%.32s", dmd.uuid + 6); |
1251 | 0 | if (r < 0 || (size_t)r != (sizeof(deps_uuid_prefix) - 1)) |
1252 | 0 | r = -EINVAL; |
1253 | 0 | } |
1254 | |
1255 | 0 | if (r >= 0) { |
1256 | 0 | r = dm_device_deps(cd, name, deps_uuid_prefix, deps, ARRAY_SIZE(deps)); |
1257 | 0 | if (r) |
1258 | 0 | goto out; |
1259 | 0 | } |
1260 | | |
1261 | 0 | r = crypt_parse_name_and_mode(tgt->type == DM_LINEAR ? "null" : tgt->u.crypt.cipher, cipher, |
1262 | 0 | &key_nums, cipher_mode); |
1263 | 0 | if (r < 0) { |
1264 | | /* Allow crypt null context with unknown cipher string */ |
1265 | 0 | if (tgt->type == DM_CRYPT && !tgt->u.crypt.integrity) { |
1266 | 0 | crypt_set_null_type(cd); |
1267 | 0 | r = 0; |
1268 | 0 | goto out; |
1269 | 0 | } |
1270 | 0 | log_err(cd, _("No known cipher specification pattern detected for active device %s."), name); |
1271 | 0 | goto out; |
1272 | 0 | } |
1273 | | |
1274 | 0 | dep = deps; |
1275 | |
1276 | 0 | if (tgt->type == DM_CRYPT && tgt->u.crypt.tag_size && |
1277 | 0 | (iname = dm_get_active_iname(cd, name))) { |
1278 | |
1279 | 0 | r = dm_query_device(cd, iname, DM_ACTIVE_DEVICE, &dmdi); |
1280 | 0 | free(iname); |
1281 | 0 | if (r < 0) |
1282 | 0 | goto out; |
1283 | | /* |
1284 | | * Data device for crypt with integrity is not dm-integrity device, |
1285 | | * but always the device underlying dm-integrity. |
1286 | | */ |
1287 | 0 | device_free(cd, cd->device); |
1288 | 0 | MOVE_REF(cd->device, tgti->data_device); |
1289 | 0 | } |
1290 | | |
1291 | | /* do not try to lookup LUKS2 header in detached header mode */ |
1292 | 0 | if (dmd.uuid && !cd->metadata_device && !found) { |
1293 | 0 | while (*dep && !found) { |
1294 | 0 | r = dm_query_device(cd, *dep, DM_ACTIVE_DEVICE, &dmdep); |
1295 | 0 | if (r < 0) |
1296 | 0 | goto out; |
1297 | | |
1298 | 0 | tgt = &dmdep.segment; |
1299 | |
1300 | 0 | while (tgt && !found) { |
1301 | 0 | dev = device_path(tgt->data_device); |
1302 | 0 | if (!dev) { |
1303 | 0 | tgt = tgt->next; |
1304 | 0 | continue; |
1305 | 0 | } |
1306 | 0 | if (!strstr(dev, dm_get_dir()) || |
1307 | 0 | !crypt_string_in(dev + strlen(dm_get_dir()) + 1, deps, ARRAY_SIZE(deps))) { |
1308 | 0 | device_free(cd, cd->device); |
1309 | 0 | MOVE_REF(cd->device, tgt->data_device); |
1310 | 0 | found = true; |
1311 | 0 | } |
1312 | 0 | tgt = tgt->next; |
1313 | 0 | } |
1314 | 0 | dep++; |
1315 | 0 | dm_targets_free(cd, &dmdep); |
1316 | 0 | } |
1317 | 0 | } |
1318 | | |
1319 | 0 | if (asprintf(&cipher_spec, "%s-%s", cipher, cipher_mode) < 0) { |
1320 | 0 | cipher_spec = NULL; |
1321 | 0 | r = -ENOMEM; |
1322 | 0 | goto out; |
1323 | 0 | } |
1324 | | |
1325 | 0 | tgt = &dmd.segment; |
1326 | 0 | r = 0; |
1327 | |
1328 | 0 | if (isPLAIN(cd->type) && single_segment(&dmd) && tgt->type == DM_CRYPT) { |
1329 | 0 | cd->u.plain.hdr.hash = NULL; /* no way to get this */ |
1330 | 0 | cd->u.plain.hdr.offset = tgt->u.crypt.offset; |
1331 | 0 | cd->u.plain.hdr.skip = tgt->u.crypt.iv_offset; |
1332 | 0 | cd->u.plain.hdr.sector_size = tgt->u.crypt.sector_size; |
1333 | 0 | cd->u.plain.key_size = crypt_volume_key_length(tgt->u.crypt.vk); |
1334 | 0 | cd->u.plain.cipher = strdup(cipher); |
1335 | 0 | MOVE_REF(cd->u.plain.cipher_spec, cipher_spec); |
1336 | 0 | cd->u.plain.cipher_mode = cd->u.plain.cipher_spec + strlen(cipher) + 1; |
1337 | 0 | if (dmd.flags & CRYPT_ACTIVATE_KEYRING_KEY) |
1338 | 0 | crypt_set_key_in_keyring(cd, 1); |
1339 | 0 | } else if (isLOOPAES(cd->type) && single_segment(&dmd) && tgt->type == DM_CRYPT) { |
1340 | 0 | cd->u.loopaes.hdr.offset = tgt->u.crypt.offset; |
1341 | 0 | cd->u.loopaes.cipher = strdup(cipher); |
1342 | 0 | MOVE_REF(cd->u.loopaes.cipher_spec, cipher_spec); |
1343 | 0 | cd->u.loopaes.cipher_mode = cd->u.loopaes.cipher_spec + strlen(cipher) + 1; |
1344 | | /* version 3 uses last key for IV */ |
1345 | 0 | if (crypt_volume_key_length(tgt->u.crypt.vk) % key_nums) |
1346 | 0 | key_nums++; |
1347 | 0 | cd->u.loopaes.key_size = crypt_volume_key_length(tgt->u.crypt.vk) / key_nums; |
1348 | 0 | } else if (isLUKS1(cd->type) || isLUKS2(cd->type)) { |
1349 | 0 | if (crypt_metadata_device(cd)) { |
1350 | 0 | r = _crypt_load_luks(cd, cd->type, true, false); |
1351 | 0 | if (r < 0) { |
1352 | 0 | log_dbg(cd, "LUKS device header does not match active device."); |
1353 | 0 | crypt_set_null_type(cd); |
1354 | 0 | device_close(cd, cd->metadata_device); |
1355 | 0 | device_close(cd, cd->device); |
1356 | 0 | r = 0; |
1357 | 0 | goto out; |
1358 | 0 | } |
1359 | | /* check whether UUIDs match each other */ |
1360 | 0 | r = dm_uuid_cmp(dmd.uuid, LUKS_UUID(cd)); |
1361 | 0 | if (r < 0) { |
1362 | 0 | log_dbg(cd, "LUKS device header uuid: %s mismatches DM returned uuid %s", |
1363 | 0 | LUKS_UUID(cd), dmd.uuid); |
1364 | 0 | crypt_free_type(cd, NULL); |
1365 | 0 | r = 0; |
1366 | 0 | goto out; |
1367 | 0 | } |
1368 | 0 | } else { |
1369 | 0 | log_dbg(cd, "LUKS device header not available."); |
1370 | 0 | crypt_set_null_type(cd); |
1371 | 0 | r = 0; |
1372 | 0 | } |
1373 | 0 | } else if (isTCRYPT(cd->type) && single_segment(&dmd) && tgt->type == DM_CRYPT) { |
1374 | 0 | r = TCRYPT_init_by_name(cd, name, dmd.uuid, tgt, &cd->device, |
1375 | 0 | &cd->u.tcrypt.params, &cd->u.tcrypt.hdr); |
1376 | 0 | } else if (isBITLK(cd->type)) { |
1377 | 0 | r = _crypt_load_bitlk(cd); |
1378 | 0 | if (r < 0) { |
1379 | 0 | log_dbg(cd, "BITLK device header not available."); |
1380 | 0 | crypt_set_null_type(cd); |
1381 | 0 | r = 0; |
1382 | 0 | } |
1383 | 0 | } else if (isFVAULT2(cd->type)) { |
1384 | 0 | r = _crypt_load_fvault2(cd); |
1385 | 0 | if (r < 0) { |
1386 | 0 | log_dbg(cd, "FVAULT2 device header not available."); |
1387 | 0 | crypt_set_null_type(cd); |
1388 | 0 | r = 0; |
1389 | 0 | } |
1390 | 0 | } |
1391 | 0 | out: |
1392 | 0 | dm_targets_free(cd, &dmd); |
1393 | 0 | dm_targets_free(cd, &dmdi); |
1394 | 0 | dm_targets_free(cd, &dmdep); |
1395 | 0 | free(CONST_CAST(void*)dmd.uuid); |
1396 | 0 | free(cipher_spec); |
1397 | 0 | dep = deps; |
1398 | 0 | while (*dep) |
1399 | 0 | free(*dep++); |
1400 | 0 | return r; |
1401 | 0 | } |
1402 | | |
1403 | | static int _init_by_name_verity(struct crypt_device *cd, const char *name) |
1404 | 0 | { |
1405 | 0 | struct crypt_dm_active_device dmd; |
1406 | 0 | struct dm_target *tgt = &dmd.segment; |
1407 | 0 | int r; |
1408 | |
1409 | 0 | r = dm_query_device(cd, name, |
1410 | 0 | DM_ACTIVE_DEVICE | |
1411 | 0 | DM_ACTIVE_VERITY_HASH_DEVICE | |
1412 | 0 | DM_ACTIVE_VERITY_ROOT_HASH | |
1413 | 0 | DM_ACTIVE_VERITY_PARAMS, &dmd); |
1414 | 0 | if (r < 0) |
1415 | 0 | return r; |
1416 | 0 | if (!single_segment(&dmd) || tgt->type != DM_VERITY) { |
1417 | 0 | log_dbg(cd, "Unsupported device table detected in %s.", name); |
1418 | 0 | r = -EINVAL; |
1419 | 0 | goto out; |
1420 | 0 | } |
1421 | 0 | if (r > 0) |
1422 | 0 | r = 0; |
1423 | |
1424 | 0 | if (isVERITY(cd->type)) { |
1425 | 0 | cd->u.verity.uuid = NULL; // FIXME |
1426 | 0 | cd->u.verity.hdr.flags = CRYPT_VERITY_NO_HEADER; //FIXME |
1427 | 0 | cd->u.verity.hdr.data_size = tgt->u.verity.vp->data_size; |
1428 | 0 | cd->u.verity.root_hash_size = tgt->u.verity.root_hash_size; |
1429 | 0 | MOVE_REF(cd->u.verity.hdr.hash_name, tgt->u.verity.vp->hash_name); |
1430 | 0 | cd->u.verity.hdr.data_device = NULL; |
1431 | 0 | cd->u.verity.hdr.hash_device = NULL; |
1432 | 0 | cd->u.verity.hdr.data_block_size = tgt->u.verity.vp->data_block_size; |
1433 | 0 | cd->u.verity.hdr.hash_block_size = tgt->u.verity.vp->hash_block_size; |
1434 | 0 | cd->u.verity.hdr.hash_area_offset = tgt->u.verity.hash_offset; |
1435 | 0 | cd->u.verity.hdr.fec_area_offset = tgt->u.verity.fec_offset; |
1436 | 0 | cd->u.verity.hdr.hash_type = tgt->u.verity.vp->hash_type; |
1437 | 0 | cd->u.verity.hdr.flags = tgt->u.verity.vp->flags; |
1438 | 0 | cd->u.verity.hdr.salt_size = tgt->u.verity.vp->salt_size; |
1439 | 0 | MOVE_REF(cd->u.verity.hdr.salt, tgt->u.verity.vp->salt); |
1440 | 0 | MOVE_REF(cd->u.verity.hdr.fec_device, tgt->u.verity.vp->fec_device); |
1441 | 0 | cd->u.verity.hdr.fec_roots = tgt->u.verity.vp->fec_roots; |
1442 | 0 | MOVE_REF(cd->u.verity.fec_device, tgt->u.verity.fec_device); |
1443 | 0 | MOVE_REF(cd->metadata_device, tgt->u.verity.hash_device); |
1444 | 0 | MOVE_REF(cd->u.verity.root_hash, tgt->u.verity.root_hash); |
1445 | 0 | } |
1446 | 0 | out: |
1447 | 0 | dm_targets_free(cd, &dmd); |
1448 | 0 | return r; |
1449 | 0 | } |
1450 | | |
1451 | | static int _init_by_name_integrity(struct crypt_device *cd, const char *name) |
1452 | 0 | { |
1453 | 0 | struct crypt_dm_active_device dmd; |
1454 | 0 | struct dm_target *tgt = &dmd.segment; |
1455 | 0 | int r; |
1456 | |
1457 | 0 | r = dm_query_device(cd, name, DM_ACTIVE_DEVICE | |
1458 | 0 | DM_ACTIVE_CRYPT_KEY | |
1459 | 0 | DM_ACTIVE_CRYPT_KEYSIZE | |
1460 | 0 | DM_ACTIVE_INTEGRITY_PARAMS, &dmd); |
1461 | 0 | if (r < 0) |
1462 | 0 | return r; |
1463 | 0 | if (!single_segment(&dmd) || tgt->type != DM_INTEGRITY) { |
1464 | 0 | log_dbg(cd, "Unsupported device table detected in %s.", name); |
1465 | 0 | r = -EINVAL; |
1466 | 0 | goto out; |
1467 | 0 | } |
1468 | 0 | if (r > 0) |
1469 | 0 | r = 0; |
1470 | |
1471 | 0 | if (isINTEGRITY(cd->type)) { |
1472 | 0 | cd->u.integrity.params.tag_size = tgt->u.integrity.tag_size; |
1473 | 0 | cd->u.integrity.params.sector_size = tgt->u.integrity.sector_size; |
1474 | 0 | cd->u.integrity.params.journal_size = tgt->u.integrity.journal_size; |
1475 | 0 | cd->u.integrity.params.journal_watermark = tgt->u.integrity.journal_watermark; |
1476 | 0 | cd->u.integrity.params.journal_commit_time = tgt->u.integrity.journal_commit_time; |
1477 | 0 | cd->u.integrity.params.interleave_sectors = tgt->u.integrity.interleave_sectors; |
1478 | 0 | cd->u.integrity.params.buffer_sectors = tgt->u.integrity.buffer_sectors; |
1479 | 0 | MOVE_REF(cd->u.integrity.params.integrity, tgt->u.integrity.integrity); |
1480 | 0 | MOVE_REF(cd->u.integrity.params.journal_integrity, tgt->u.integrity.journal_integrity); |
1481 | 0 | MOVE_REF(cd->u.integrity.params.journal_crypt, tgt->u.integrity.journal_crypt); |
1482 | |
1483 | 0 | if (tgt->u.integrity.vk) |
1484 | 0 | cd->u.integrity.params.integrity_key_size = crypt_volume_key_length(tgt->u.integrity.vk); |
1485 | 0 | if (tgt->u.integrity.journal_integrity_key) |
1486 | 0 | cd->u.integrity.params.journal_integrity_key_size = crypt_volume_key_length(tgt->u.integrity.journal_integrity_key); |
1487 | 0 | if (tgt->u.integrity.journal_crypt_key) |
1488 | 0 | cd->u.integrity.params.journal_crypt_key_size = crypt_volume_key_length(tgt->u.integrity.journal_crypt_key); |
1489 | 0 | MOVE_REF(cd->metadata_device, tgt->u.integrity.meta_device); |
1490 | 0 | } |
1491 | 0 | out: |
1492 | 0 | dm_targets_free(cd, &dmd); |
1493 | 0 | return r; |
1494 | 0 | } |
1495 | | |
1496 | | int crypt_init_by_name_and_header(struct crypt_device **cd, |
1497 | | const char *name, |
1498 | | const char *header_device) |
1499 | 0 | { |
1500 | 0 | crypt_status_info ci; |
1501 | 0 | struct crypt_dm_active_device dmd; |
1502 | 0 | struct dm_target *tgt = &dmd.segment; |
1503 | 0 | int r; |
1504 | |
1505 | 0 | if (!cd || !name) |
1506 | 0 | return -EINVAL; |
1507 | | |
1508 | 0 | log_dbg(NULL, "Allocating crypt device context by device %s.", name); |
1509 | |
1510 | 0 | ci = crypt_status(NULL, name); |
1511 | 0 | if (ci == CRYPT_INVALID) |
1512 | 0 | return -ENODEV; |
1513 | | |
1514 | 0 | if (ci < CRYPT_ACTIVE) { |
1515 | 0 | log_err(NULL, _("Device %s is not active."), name); |
1516 | 0 | return -ENODEV; |
1517 | 0 | } |
1518 | | |
1519 | 0 | r = dm_query_device(NULL, name, DM_ACTIVE_DEVICE | DM_ACTIVE_UUID, &dmd); |
1520 | 0 | if (r < 0) |
1521 | 0 | return r; |
1522 | | |
1523 | 0 | *cd = NULL; |
1524 | |
1525 | 0 | if (header_device) { |
1526 | 0 | r = crypt_init(cd, header_device); |
1527 | 0 | } else { |
1528 | 0 | r = crypt_init(cd, device_path(tgt->data_device)); |
1529 | | |
1530 | | /* Underlying device disappeared but mapping still active */ |
1531 | 0 | if (!tgt->data_device || r == -ENOTBLK) |
1532 | 0 | log_verbose(NULL, _("Underlying device for crypt device %s disappeared."), |
1533 | 0 | name); |
1534 | | |
1535 | | /* Underlying device is not readable but crypt mapping exists */ |
1536 | 0 | if (r == -ENOTBLK) |
1537 | 0 | r = crypt_init(cd, NULL); |
1538 | 0 | } |
1539 | |
1540 | 0 | if (r < 0) |
1541 | 0 | goto out; |
1542 | | |
1543 | 0 | if (dmd.uuid) { |
1544 | 0 | if (!strncmp(CRYPT_PLAIN, dmd.uuid, sizeof(CRYPT_PLAIN)-1)) |
1545 | 0 | (*cd)->type = strdup(CRYPT_PLAIN); |
1546 | 0 | else if (!strncmp(CRYPT_LOOPAES, dmd.uuid, sizeof(CRYPT_LOOPAES)-1)) |
1547 | 0 | (*cd)->type = strdup(CRYPT_LOOPAES); |
1548 | 0 | else if (!strncmp(CRYPT_LUKS1, dmd.uuid, sizeof(CRYPT_LUKS1)-1)) |
1549 | 0 | (*cd)->type = strdup(CRYPT_LUKS1); |
1550 | 0 | else if (!strncmp(CRYPT_LUKS2, dmd.uuid, sizeof(CRYPT_LUKS2)-1)) |
1551 | 0 | (*cd)->type = strdup(CRYPT_LUKS2); |
1552 | 0 | else if (!strncmp(CRYPT_VERITY, dmd.uuid, sizeof(CRYPT_VERITY)-1)) |
1553 | 0 | (*cd)->type = strdup(CRYPT_VERITY); |
1554 | 0 | else if (!strncmp(CRYPT_TCRYPT, dmd.uuid, sizeof(CRYPT_TCRYPT)-1)) |
1555 | 0 | (*cd)->type = strdup(CRYPT_TCRYPT); |
1556 | 0 | else if (!strncmp(CRYPT_INTEGRITY, dmd.uuid, sizeof(CRYPT_INTEGRITY)-1)) |
1557 | 0 | (*cd)->type = strdup(CRYPT_INTEGRITY); |
1558 | 0 | else if (!strncmp(CRYPT_BITLK, dmd.uuid, sizeof(CRYPT_BITLK)-1)) |
1559 | 0 | (*cd)->type = strdup(CRYPT_BITLK); |
1560 | 0 | else if (!strncmp(CRYPT_FVAULT2, dmd.uuid, sizeof(CRYPT_FVAULT2)-1)) |
1561 | 0 | (*cd)->type = strdup(CRYPT_FVAULT2); |
1562 | 0 | else |
1563 | 0 | log_dbg(NULL, "Unknown UUID set, some parameters are not set."); |
1564 | 0 | } else |
1565 | 0 | log_dbg(NULL, "Active device has no UUID set, some parameters are not set."); |
1566 | |
|
1567 | 0 | if (header_device) { |
1568 | 0 | r = crypt_set_data_device(*cd, device_path(tgt->data_device)); |
1569 | 0 | if (r < 0) |
1570 | 0 | goto out; |
1571 | 0 | } |
1572 | | |
1573 | | /* Try to initialize basic parameters from active device */ |
1574 | | |
1575 | 0 | if (tgt->type == DM_CRYPT || tgt->type == DM_LINEAR) |
1576 | 0 | r = _init_by_name_crypt(*cd, name); |
1577 | 0 | else if (tgt->type == DM_VERITY) |
1578 | 0 | r = _init_by_name_verity(*cd, name); |
1579 | 0 | else if (tgt->type == DM_INTEGRITY) |
1580 | 0 | r = _init_by_name_integrity(*cd, name); |
1581 | 0 | out: |
1582 | 0 | if (r < 0) { |
1583 | 0 | crypt_free(*cd); |
1584 | 0 | *cd = NULL; |
1585 | 0 | } else if (!(*cd)->type) { |
1586 | | /* For anonymous device (no header found) remember initialized name */ |
1587 | 0 | (*cd)->u.none.active_name = strdup(name); |
1588 | 0 | } |
1589 | |
|
1590 | 0 | free(CONST_CAST(void*)dmd.uuid); |
1591 | 0 | dm_targets_free(NULL, &dmd); |
1592 | 0 | return r; |
1593 | 0 | } |
1594 | | |
1595 | | int crypt_init_by_name(struct crypt_device **cd, const char *name) |
1596 | 0 | { |
1597 | 0 | return crypt_init_by_name_and_header(cd, name, NULL); |
1598 | 0 | } |
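
Illustrative note (not part of setup.c): crypt_init_by_name() above is the usual public entry point for inspecting an already-active mapping. A minimal caller-side sketch; the mapping name is supplied by the caller, and the GNU ?: fallbacks mirror the style used elsewhere in this file:

#include <inttypes.h>
#include <stdio.h>
#include <libcryptsetup.h>

/* Sketch: query an active device-mapper mapping by name. */
static int inspect_active(const char *name)
{
        struct crypt_device *cd = NULL;
        int r = crypt_init_by_name(&cd, name);  /* same as crypt_init_by_name_and_header(&cd, name, NULL) */

        if (r < 0)
                return r;       /* e.g. -ENODEV when the mapping is not active */

        printf("%s: type=%s, cipher=%s-%s, data offset=%" PRIu64 " sectors\n", name,
               crypt_get_type(cd) ?: "(no header)",
               crypt_get_cipher(cd) ?: "-",
               crypt_get_cipher_mode(cd) ?: "-",
               crypt_get_data_offset(cd));

        crypt_free(cd);
        return 0;
}
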
1599 | | |
1600 | | /* |
1601 | | * crypt_format() helpers |
1602 | | */ |
1603 | | static int _crypt_format_plain(struct crypt_device *cd, |
1604 | | const char *cipher, |
1605 | | const char *cipher_mode, |
1606 | | const char *uuid, |
1607 | | size_t volume_key_size, |
1608 | | struct crypt_params_plain *params) |
1609 | 0 | { |
1610 | 0 | unsigned int sector_size = params ? params->sector_size : SECTOR_SIZE; |
1611 | 0 | uint64_t dev_size; |
1612 | |
|
1613 | 0 | if (!cipher || !cipher_mode) { |
1614 | 0 | log_err(cd, _("Invalid plain crypt parameters.")); |
1615 | 0 | return -EINVAL; |
1616 | 0 | } |
1617 | | |
1618 | 0 | if (volume_key_size > 1024) { |
1619 | 0 | log_err(cd, _("Invalid key size.")); |
1620 | 0 | return -EINVAL; |
1621 | 0 | } |
1622 | | |
1623 | 0 | if (uuid) { |
1624 | 0 | log_err(cd, _("UUID is not supported for this crypt type.")); |
1625 | 0 | return -EINVAL; |
1626 | 0 | } |
1627 | | |
1628 | 0 | if (cd->metadata_device) { |
1629 | 0 | log_err(cd, _("Detached metadata device is not supported for this crypt type.")); |
1630 | 0 | return -EINVAL; |
1631 | 0 | } |
1632 | | |
1633 | | /* For compatibility with old params structure */ |
1634 | 0 | if (!sector_size) |
1635 | 0 | sector_size = SECTOR_SIZE; |
1636 | |
|
1637 | 0 | if (sector_size < SECTOR_SIZE || sector_size > MAX_SECTOR_SIZE || |
1638 | 0 | NOTPOW2(sector_size)) { |
1639 | 0 | log_err(cd, _("Unsupported encryption sector size.")); |
1640 | 0 | return -EINVAL; |
1641 | 0 | } |
1642 | | |
1643 | 0 | if (sector_size > SECTOR_SIZE && !device_size(cd->device, &dev_size)) { |
1644 | 0 | if (params && params->offset) |
1645 | 0 | dev_size -= (params->offset * SECTOR_SIZE); |
1646 | 0 | if (dev_size % sector_size) { |
1647 | 0 | log_err(cd, _("Device size is not aligned to requested sector size.")); |
1648 | 0 | return -EINVAL; |
1649 | 0 | } |
1650 | 0 | device_set_block_size(crypt_data_device(cd), sector_size); |
1651 | 0 | } |
1652 | | |
1653 | 0 | if (!(cd->type = strdup(CRYPT_PLAIN))) |
1654 | 0 | return -ENOMEM; |
1655 | | |
1656 | 0 | cd->u.plain.key_size = volume_key_size; |
1657 | 0 | cd->volume_key = crypt_alloc_volume_key(volume_key_size, NULL); |
1658 | 0 | if (!cd->volume_key) |
1659 | 0 | return -ENOMEM; |
1660 | | |
1661 | 0 | if (asprintf(&cd->u.plain.cipher_spec, "%s-%s", cipher, cipher_mode) < 0) { |
1662 | 0 | cd->u.plain.cipher_spec = NULL; |
1663 | 0 | return -ENOMEM; |
1664 | 0 | } |
1665 | 0 | cd->u.plain.cipher = strdup(cipher); |
1666 | 0 | cd->u.plain.cipher_mode = cd->u.plain.cipher_spec + strlen(cipher) + 1; |
1667 | |
|
1668 | 0 | if (params && params->hash) |
1669 | 0 | cd->u.plain.hdr.hash = strdup(params->hash); |
1670 | |
|
1671 | 0 | cd->u.plain.hdr.offset = params ? params->offset : 0; |
1672 | 0 | cd->u.plain.hdr.skip = params ? params->skip : 0; |
1673 | 0 | cd->u.plain.hdr.size = params ? params->size : 0; |
1674 | 0 | cd->u.plain.hdr.sector_size = sector_size; |
1675 | |
|
1676 | 0 | if (!cd->u.plain.cipher) |
1677 | 0 | return -ENOMEM; |
1678 | | |
1679 | 0 | return 0; |
1680 | 0 | } |
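
Illustrative note (not part of setup.c): _crypt_format_plain() only fills the in-memory context; nothing is written to disk for CRYPT_PLAIN. A hedged caller-side sketch, in which the device path, mapping name and key size are placeholders:

#include <libcryptsetup.h>

/* Sketch: plain dm-crypt mapping; "/dev/sdb" and "plain1" are hypothetical. */
static int plain_example(const char *pass, size_t pass_len)
{
        struct crypt_device *cd;
        struct crypt_params_plain params = {
                .hash = "sha256",       /* passphrase is hashed into the key */
                .sector_size = 512,     /* must be a power of two within the supported range */
        };
        int r = crypt_init(&cd, "/dev/sdb");

        if (r < 0)
                return r;

        r = crypt_format(cd, CRYPT_PLAIN, "aes", "xts-plain64", NULL, NULL, 64, &params);
        if (r >= 0)
                r = crypt_activate_by_passphrase(cd, "plain1", CRYPT_ANY_SLOT,
                                                 pass, pass_len, 0);
        crypt_free(cd);
        return r < 0 ? r : 0;
}
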
1681 | | |
1682 | | static int _crypt_format_luks1(struct crypt_device *cd, |
1683 | | const char *cipher, |
1684 | | const char *cipher_mode, |
1685 | | const char *uuid, |
1686 | | const char *volume_key, |
1687 | | size_t volume_key_size, |
1688 | | struct crypt_params_luks1 *params) |
1689 | 0 | { |
1690 | 0 | int r; |
1691 | 0 | unsigned long required_alignment = DEFAULT_DISK_ALIGNMENT; |
1692 | 0 | unsigned long alignment_offset = 0; |
1693 | 0 | uint64_t dev_size; |
1694 | |
|
1695 | 0 | if (!cipher || !cipher_mode) |
1696 | 0 | return -EINVAL; |
1697 | | |
1698 | 0 | if (!crypt_metadata_device(cd)) { |
1699 | 0 | log_err(cd, _("Can't format LUKS without device.")); |
1700 | 0 | return -EINVAL; |
1701 | 0 | } |
1702 | | |
1703 | 0 | if (device_is_zoned(crypt_metadata_device(cd)) > 0) { |
1704 | 0 | log_err(cd, _("Zoned device %s cannot be used for LUKS header."), |
1705 | 0 | device_path(crypt_metadata_device(cd))); |
1706 | 0 | return -EINVAL; |
1707 | 0 | } |
1708 | | |
1709 | 0 | if (params && cd->data_offset && params->data_alignment && |
1710 | 0 | (cd->data_offset % params->data_alignment)) { |
1711 | 0 | log_err(cd, _("Requested data alignment is not compatible with data offset.")); |
1712 | 0 | return -EINVAL; |
1713 | 0 | } |
1714 | | |
1715 | 0 | if (!(cd->type = strdup(CRYPT_LUKS1))) |
1716 | 0 | return -ENOMEM; |
1717 | | |
1718 | 0 | if (volume_key) |
1719 | 0 | cd->volume_key = crypt_alloc_volume_key(volume_key_size, |
1720 | 0 | volume_key); |
1721 | 0 | else |
1722 | 0 | cd->volume_key = crypt_generate_volume_key(cd, volume_key_size, KEY_QUALITY_KEY); |
1723 | |
|
1724 | 0 | if (!cd->volume_key) |
1725 | 0 | return -ENOMEM; |
1726 | | |
1727 | 0 | if (verify_pbkdf_params(cd, &cd->pbkdf)) { |
1728 | 0 | r = init_pbkdf_type(cd, NULL, CRYPT_LUKS1); |
1729 | 0 | if (r) |
1730 | 0 | return r; |
1731 | 0 | } |
1732 | | |
1733 | 0 | if (params && params->hash && strcmp(params->hash, cd->pbkdf.hash)) { |
1734 | 0 | free(CONST_CAST(void*)cd->pbkdf.hash); |
1735 | 0 | cd->pbkdf.hash = strdup(params->hash); |
1736 | 0 | if (!cd->pbkdf.hash) |
1737 | 0 | return -ENOMEM; |
1738 | 0 | } |
1739 | | |
1740 | 0 | if (params && params->data_device) { |
1741 | 0 | if (!cd->metadata_device) |
1742 | 0 | cd->metadata_device = cd->device; |
1743 | 0 | else |
1744 | 0 | device_free(cd, cd->device); |
1745 | 0 | cd->device = NULL; |
1746 | 0 | if (device_alloc(cd, &cd->device, params->data_device) < 0) |
1747 | 0 | return -ENOMEM; |
1748 | 0 | } |
1749 | | |
1750 | 0 | if (device_is_dax(crypt_data_device(cd)) > 0) |
1751 | 0 | log_std(cd, _("WARNING: DAX device can corrupt data as it does not guarantee atomic sector updates.\n")); |
1752 | |
|
1753 | 0 | if (params && cd->metadata_device) { |
1754 | | /* For detached header the alignment is used directly as data offset */ |
1755 | 0 | if (!cd->data_offset) |
1756 | 0 | cd->data_offset = params->data_alignment; |
1757 | 0 | required_alignment = params->data_alignment * SECTOR_SIZE; |
1758 | 0 | } else if (params && params->data_alignment) { |
1759 | 0 | required_alignment = params->data_alignment * SECTOR_SIZE; |
1760 | 0 | } else |
1761 | 0 | device_topology_alignment(cd, cd->device, |
1762 | 0 | &required_alignment, |
1763 | 0 | &alignment_offset, DEFAULT_DISK_ALIGNMENT); |
1764 | |
|
1765 | 0 | r = LUKS_check_cipher(cd, volume_key_size, cipher, cipher_mode); |
1766 | 0 | if (r < 0) |
1767 | 0 | return r; |
1768 | | |
1769 | 0 | r = LUKS_generate_phdr(&cd->u.luks1.hdr, cd->volume_key, cipher, cipher_mode, |
1770 | 0 | cd->pbkdf.hash, uuid, |
1771 | 0 | cd->data_offset * SECTOR_SIZE, |
1772 | 0 | alignment_offset, required_alignment, cd); |
1773 | 0 | if (r < 0) |
1774 | 0 | return r; |
1775 | | |
1776 | 0 | r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL); |
1777 | 0 | if (r < 0) |
1778 | 0 | return r; |
1779 | | |
1780 | | |
1781 | 0 | if (asprintf(&cd->u.luks1.cipher_spec, "%s-%s", cipher, cipher_mode) < 0) { |
1782 | 0 | cd->u.luks1.cipher_spec = NULL; |
1783 | 0 | return -ENOMEM; |
1784 | 0 | } |
1785 | | |
1786 | 0 | r = LUKS_wipe_header_areas(&cd->u.luks1.hdr, cd); |
1787 | 0 | if (r < 0) { |
1788 | 0 | free(cd->u.luks1.cipher_spec); |
1789 | 0 | log_err(cd, _("Cannot wipe header on device %s."), |
1790 | 0 | mdata_device_path(cd)); |
1791 | 0 | return r; |
1792 | 0 | } |
1793 | | |
1794 | 0 | r = LUKS_write_phdr(&cd->u.luks1.hdr, cd); |
1795 | 0 | if (r) { |
1796 | 0 | free(cd->u.luks1.cipher_spec); |
1797 | 0 | return r; |
1798 | 0 | } |
1799 | | |
1800 | 0 | if (!device_size(crypt_data_device(cd), &dev_size) && |
1801 | 0 | dev_size <= (crypt_get_data_offset(cd) * SECTOR_SIZE)) |
1802 | 0 | log_std(cd, _("Device %s is too small for activation, there is no remaining space for data.\n"), |
1803 | 0 | device_path(crypt_data_device(cd))); |
1804 | |
|
1805 | 0 | return 0; |
1806 | 0 | } |
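
Illustrative note (not part of setup.c): the corresponding public call path into _crypt_format_luks1(). Device path, cipher choice and key size are illustrative only:

#include <libcryptsetup.h>

/* Sketch: format a LUKS1 header and add one passphrase keyslot ("/dev/sdb" is hypothetical). */
static int luks1_example(const char *pass, size_t pass_len)
{
        struct crypt_device *cd;
        struct crypt_params_luks1 params = {
                .hash = "sha256",       /* hash for PBKDF2 and the AF splitter */
                .data_alignment = 0,    /* 0 = derive alignment from device topology */
        };
        int r = crypt_init(&cd, "/dev/sdb");

        if (r < 0)
                return r;

        r = crypt_format(cd, CRYPT_LUKS1, "aes", "xts-plain64",
                         NULL /* generate UUID */, NULL /* generate volume key */,
                         64, &params);
        if (r >= 0)
                r = crypt_keyslot_add_by_volume_key(cd, CRYPT_ANY_SLOT, NULL, 0,
                                                    pass, pass_len);
        crypt_free(cd);
        return r < 0 ? r : 0;
}
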
1807 | | |
1808 | | static int LUKS2_check_encryption_params(struct crypt_device *cd, |
1809 | | const char *cipher, |
1810 | | const char *cipher_mode, |
1811 | | const char *integrity, |
1812 | | size_t required_integrity_key_size, |
1813 | | size_t volume_key_size, |
1814 | | const struct crypt_params_luks2 *params, |
1815 | | const char **ret_integrity, |
1816 | | size_t *ret_integrity_key_size) |
1817 | 0 | { |
1818 | 0 | int r, integrity_key_size = 0; |
1819 | |
|
1820 | 0 | assert(cipher); |
1821 | 0 | assert(cipher_mode); |
1822 | 0 | assert(ret_integrity); |
1823 | | |
1824 | 0 | if (integrity) { |
1825 | 0 | if (params->integrity_params) { |
1826 | | /* Standalone dm-integrity must not be used */ |
1827 | 0 | if (params->integrity_params->integrity) |
1828 | 0 | return -EINVAL; |
1829 | | /* FIXME: journal encryption and MAC are not yet supported here */
1830 | 0 | if (params->integrity_params->journal_crypt || |
1831 | 0 | params->integrity_params->journal_integrity) |
1832 | 0 | return -ENOTSUP; |
1833 | 0 | } |
1834 | 0 | if (!INTEGRITY_tag_size(integrity, cipher, cipher_mode)) { |
1835 | | /* merge "none" string into NULL to make branching logic easier */
1836 | 0 | if (!strcmp(integrity, "none")) |
1837 | 0 | integrity = NULL; |
1838 | 0 | else |
1839 | 0 | return -EINVAL; |
1840 | 0 | } |
1841 | 0 | integrity_key_size = INTEGRITY_key_size(integrity, required_integrity_key_size); |
1842 | 0 | if ((integrity_key_size < 0) || (integrity_key_size >= (int)volume_key_size)) { |
1843 | 0 | log_err(cd, _("Volume key is too small for encryption with integrity extensions.")); |
1844 | 0 | return -EINVAL; |
1845 | 0 | } |
1846 | 0 | if (integrity_key_size && integrity_key_size < LUKS2_MIN_INTEGRITY_KEY_BYTES) { |
1847 | 0 | log_err(cd, _("Integrity key size is too small.")); |
1848 | 0 | return -EINVAL; |
1849 | 0 | } |
1850 | 0 | } |
1851 | | |
1852 | | /* FIXME: allow this later also for normal ciphers (check AF_ALG availability). */
1853 | 0 | if (integrity && integrity_key_size == 0) { |
1854 | 0 | r = crypt_cipher_check_kernel(cipher, cipher_mode, integrity, volume_key_size); |
1855 | 0 | if (r < 0) { |
1856 | 0 | log_err(cd, _("Cipher %s-%s (key size %zd bits) is not available."), |
1857 | 0 | cipher, cipher_mode, volume_key_size * 8); |
1858 | 0 | return r; |
1859 | 0 | } |
1860 | 0 | } |
1861 | | |
1862 | 0 | if ((!integrity || integrity_key_size) && !crypt_cipher_wrapped_key(cipher, cipher_mode) && |
1863 | 0 | !INTEGRITY_tag_size(NULL, cipher, cipher_mode)) { |
1864 | 0 | r = LUKS_check_cipher(cd, volume_key_size - integrity_key_size, |
1865 | 0 | cipher, cipher_mode); |
1866 | 0 | if (r < 0) |
1867 | 0 | return r; |
1868 | 0 | } |
1869 | | |
1870 | 0 | *ret_integrity = integrity; |
1871 | 0 | if (ret_integrity_key_size) |
1872 | 0 | *ret_integrity_key_size = required_integrity_key_size ? integrity_key_size : 0; |
1873 | |
|
1874 | 0 | return 0; |
1875 | 0 | } |
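
Illustrative note (not part of setup.c): for authenticated encryption the volume key buffer holds the cipher key followed by the integrity key, which is why LUKS_check_cipher() above is called with volume_key_size - integrity_key_size. A small numeric illustration (the sizes are an example, not a requirement):

#include <assert.h>
#include <stddef.h>

/* Example split for aes-xts-plain64 with hmac(sha256) integrity. */
int main(void)
{
        size_t volume_key_size = 96;    /* whole buffer kept in the keyslot */
        size_t integrity_key_size = 32; /* trailing hmac-sha256 key */
        size_t cipher_key_size = volume_key_size - integrity_key_size;

        assert(cipher_key_size == 64);  /* AES-256 in XTS mode */
        return 0;
}
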
1876 | | |
1877 | | static int LUKS2_check_encryption_sector(struct crypt_device *cd, uint64_t device_size_bytes, |
1878 | | uint64_t data_offset_bytes, uint32_t sector_size, bool modify_sector_size, |
1879 | | bool verify_data_area_alignment, uint32_t *ret_sector_size) |
1880 | 0 | { |
1881 | 0 | uint64_t dmc_flags; |
1882 | |
|
1883 | 0 | assert(ret_sector_size); |
1884 | | |
1885 | 0 | if (sector_size < SECTOR_SIZE || sector_size > MAX_SECTOR_SIZE || |
1886 | 0 | NOTPOW2(sector_size)) { |
1887 | 0 | log_err(cd, _("Unsupported encryption sector size.")); |
1888 | 0 | return -EINVAL; |
1889 | 0 | } |
1890 | | |
1891 | 0 | if (sector_size != SECTOR_SIZE && !dm_flags(cd, DM_CRYPT, &dmc_flags) && |
1892 | 0 | !(dmc_flags & DM_SECTOR_SIZE_SUPPORTED)) { |
1893 | 0 | if (modify_sector_size) { |
1894 | 0 | log_dbg(cd, "dm-crypt does not support encryption sector size option. Reverting to 512 bytes."); |
1895 | 0 | sector_size = SECTOR_SIZE; |
1896 | 0 | } else |
1897 | 0 | log_std(cd, _("WARNING: The device activation will fail, dm-crypt is missing " |
1898 | 0 | "support for requested encryption sector size.\n")); |
1899 | 0 | } |
1900 | |
|
1901 | 0 | if (modify_sector_size) { |
1902 | 0 | if (data_offset_bytes && MISALIGNED(data_offset_bytes, sector_size)) { |
1903 | 0 | log_dbg(cd, "Data offset not aligned to sector size. Reverting to 512 bytes."); |
1904 | 0 | sector_size = SECTOR_SIZE; |
1905 | 0 | } else if (MISALIGNED(device_size_bytes - data_offset_bytes, sector_size)) { |
1906 | | /* underflow does not affect misalignment checks */ |
1907 | 0 | log_dbg(cd, "Device size is not aligned to sector size. Reverting to 512 bytes."); |
1908 | 0 | sector_size = SECTOR_SIZE; |
1909 | 0 | } |
1910 | 0 | } |
1911 | | |
1912 | | /* underflow does not affect misalignment checks */ |
1913 | 0 | if (verify_data_area_alignment && |
1914 | 0 | sector_size > SECTOR_SIZE && |
1915 | 0 | MISALIGNED(device_size_bytes - data_offset_bytes, sector_size)) { |
1916 | 0 | log_err(cd, _("Device size is not aligned to requested sector size.")); |
1917 | 0 | return -EINVAL; |
1918 | 0 | } |
1919 | | |
1920 | 0 | *ret_sector_size = sector_size; |
1921 | |
|
1922 | 0 | return 0; |
1923 | 0 | } |
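
Illustrative note (not part of setup.c): the checks above boil down to a power-of-two test plus two modulo tests. A standalone sketch with assumed macro semantics; NOTPOW2() and MISALIGNED() are internal macros not defined in this file, and 512/4096 stand in for SECTOR_SIZE and MAX_SECTOR_SIZE:

#include <stdbool.h>
#include <stdint.h>

/* Mirror of the validation logic above, under the assumptions stated in the note. */
static bool encryption_sector_size_ok(uint32_t sector_size,
                                      uint64_t device_size_bytes,
                                      uint64_t data_offset_bytes)
{
        if (sector_size < 512 || sector_size > 4096 ||
            (sector_size & (sector_size - 1)))          /* not a power of two */
                return false;

        /* the data area must hold a whole number of encryption sectors */
        if (sector_size > 512 &&
            (device_size_bytes - data_offset_bytes) % sector_size)
                return false;

        return true;
}
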
1924 | | |
1925 | | static int _crypt_format_luks2(struct crypt_device *cd, |
1926 | | const char *cipher, |
1927 | | const char *cipher_mode, |
1928 | | const char *uuid, |
1929 | | const char *volume_key, |
1930 | | size_t volume_key_size, |
1931 | | struct crypt_params_luks2 *params, |
1932 | | bool sector_size_autodetect, bool integrity_inline) |
1933 | 0 | { |
1934 | 0 | int r; |
1935 | 0 | unsigned long required_alignment = DEFAULT_DISK_ALIGNMENT; |
1936 | 0 | unsigned long alignment_offset = 0; |
1937 | 0 | unsigned int sector_size; |
1938 | 0 | char cipher_spec[2*MAX_CAPI_ONE_LEN]; |
1939 | 0 | const char *integrity = params ? params->integrity : NULL; |
1940 | 0 | size_t integrity_key_size = 0; /* only for independent, separate key in HMAC */ |
1941 | 0 | struct volume_key *integrity_key = NULL; |
1942 | 0 | uint64_t data_offset_bytes, dev_size, metadata_size_bytes, keyslots_size_bytes; |
1943 | |
|
1944 | 0 | cd->u.luks2.hdr.jobj = NULL; |
1945 | 0 | cd->u.luks2.keyslot_cipher = NULL; |
1946 | |
|
1947 | 0 | if (!cipher || !cipher_mode) |
1948 | 0 | return -EINVAL; |
1949 | | |
1950 | 0 | if (!crypt_metadata_device(cd)) { |
1951 | 0 | log_err(cd, _("Can't format LUKS without device.")); |
1952 | 0 | return -EINVAL; |
1953 | 0 | } |
1954 | | |
1955 | 0 | if (device_is_zoned(crypt_metadata_device(cd)) > 0) { |
1956 | 0 | log_err(cd, _("Zoned device %s cannot be used for LUKS header."), |
1957 | 0 | device_path(crypt_metadata_device(cd))); |
1958 | 0 | return -EINVAL; |
1959 | 0 | } |
1960 | | |
1961 | 0 | if (params && cd->data_offset && params->data_alignment && |
1962 | 0 | (cd->data_offset % params->data_alignment)) { |
1963 | 0 | log_err(cd, _("Requested data alignment is not compatible with data offset.")); |
1964 | 0 | return -EINVAL; |
1965 | 0 | } |
1966 | | |
1967 | 0 | if (params && params->sector_size) |
1968 | 0 | sector_size_autodetect = false; |
1969 | |
|
1970 | 0 | if (params && params->data_device) { |
1971 | 0 | if (!cd->metadata_device) |
1972 | 0 | cd->metadata_device = cd->device; |
1973 | 0 | else |
1974 | 0 | device_free(cd, cd->device); |
1975 | 0 | cd->device = NULL; |
1976 | 0 | if (device_alloc(cd, &cd->device, params->data_device) < 0) |
1977 | 0 | return -ENOMEM; |
1978 | 0 | } |
1979 | | |
1980 | 0 | if (device_is_dax(crypt_data_device(cd)) > 0) |
1981 | 0 | log_std(cd, _("WARNING: DAX device can corrupt data as it does not guarantee atomic sector updates.\n")); |
1982 | |
|
1983 | 0 | if (sector_size_autodetect) { |
1984 | 0 | sector_size = device_optimal_encryption_sector_size(cd, crypt_data_device(cd)); |
1985 | 0 | log_dbg(cd, "Auto-detected optimal encryption sector size for device %s is %d bytes.", |
1986 | 0 | device_path(crypt_data_device(cd)), sector_size); |
1987 | 0 | } else |
1988 | 0 | sector_size = params ? params->sector_size : SECTOR_SIZE; |
1989 | |
|
1990 | 0 | r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL); |
1991 | 0 | if (r < 0) |
1992 | 0 | return r; |
1993 | | |
1994 | 0 | if (!(cd->type = strdup(CRYPT_LUKS2))) |
1995 | 0 | return -ENOMEM; |
1996 | | |
1997 | 0 | if (volume_key) |
1998 | 0 | cd->volume_key = crypt_alloc_volume_key(volume_key_size, |
1999 | 0 | volume_key); |
2000 | 0 | else |
2001 | 0 | cd->volume_key = crypt_generate_volume_key(cd, volume_key_size, KEY_QUALITY_KEY); |
2002 | |
|
2003 | 0 | if (!cd->volume_key) |
2004 | 0 | return -ENOMEM; |
2005 | | |
2006 | 0 | if (params && params->pbkdf) |
2007 | 0 | r = crypt_set_pbkdf_type(cd, params->pbkdf); |
2008 | 0 | else if (verify_pbkdf_params(cd, &cd->pbkdf)) |
2009 | 0 | r = init_pbkdf_type(cd, NULL, CRYPT_LUKS2); |
2010 | |
|
2011 | 0 | if (r < 0) |
2012 | 0 | return r; |
2013 | | |
2014 | 0 | if (params && cd->metadata_device) { |
2015 | | /* For detached header the alignment is used directly as data offset */ |
2016 | 0 | if (!cd->data_offset) |
2017 | 0 | cd->data_offset = params->data_alignment; |
2018 | 0 | required_alignment = params->data_alignment * SECTOR_SIZE; |
2019 | 0 | } else if (params && params->data_alignment) { |
2020 | 0 | required_alignment = params->data_alignment * SECTOR_SIZE; |
2021 | 0 | } else |
2022 | 0 | device_topology_alignment(cd, cd->device, |
2023 | 0 | &required_alignment, |
2024 | 0 | &alignment_offset, DEFAULT_DISK_ALIGNMENT); |
2025 | |
|
2026 | 0 | if (params && params->integrity_params && params->integrity_params->integrity_key_size) |
2027 | 0 | integrity_key_size = params->integrity_params->integrity_key_size; |
2028 | |
|
2029 | 0 | r = LUKS2_check_encryption_params(cd, cipher, cipher_mode, integrity, integrity_key_size, |
2030 | 0 | volume_key_size, params, &integrity, &integrity_key_size); |
2031 | 0 | if (r < 0) |
2032 | 0 | goto out; |
2033 | | |
2034 | 0 | r = device_size(crypt_data_device(cd), &dev_size); |
2035 | 0 | if (r < 0) |
2036 | 0 | goto out; |
2037 | | |
2038 | 0 | r = LUKS2_hdr_get_storage_params(cd, alignment_offset, required_alignment, |
2039 | 0 | &metadata_size_bytes, &keyslots_size_bytes, &data_offset_bytes); |
2040 | 0 | if (r < 0) |
2041 | 0 | goto out; |
2042 | | |
2043 | 0 | r = LUKS2_check_encryption_sector(cd, dev_size, data_offset_bytes, sector_size, |
2044 | 0 | sector_size_autodetect, integrity == NULL, |
2045 | 0 | &sector_size);
2046 | 0 | if (r < 0) |
2047 | 0 | goto out; |
2048 | | |
2049 | 0 | if (*cipher_mode != '\0') |
2050 | 0 | r = snprintf(cipher_spec, sizeof(cipher_spec), "%s-%s", cipher, cipher_mode); |
2051 | 0 | else |
2052 | 0 | r = snprintf(cipher_spec, sizeof(cipher_spec), "%s", cipher); |
2053 | 0 | if (r < 0 || (size_t)r >= sizeof(cipher_spec)) { |
2054 | 0 | r = -EINVAL; |
2055 | 0 | goto out; |
2056 | 0 | } |
2057 | | |
2058 | 0 | r = LUKS2_generate_hdr(cd, &cd->u.luks2.hdr, cd->volume_key, |
2059 | 0 | cipher_spec, |
2060 | 0 | integrity, integrity_key_size, |
2061 | 0 | uuid, |
2062 | 0 | sector_size, |
2063 | 0 | data_offset_bytes, |
2064 | 0 | metadata_size_bytes, keyslots_size_bytes, |
2065 | 0 | 0, 0, 0); |
2066 | 0 | if (r < 0) |
2067 | 0 | goto out; |
2068 | | |
2069 | 0 | if (integrity_inline) { |
2070 | 0 | log_dbg(cd, "Adding LUKS2 inline HW tags requirement flag."); |
2071 | 0 | r = LUKS2_config_set_requirement_version(cd, &cd->u.luks2.hdr, |
2072 | 0 | CRYPT_REQUIREMENT_INLINE_HW_TAGS, 1, false); |
2073 | 0 | if (r < 0) |
2074 | 0 | goto out; |
2075 | 0 | } |
2076 | | |
2077 | 0 | if (params && (params->label || params->subsystem)) { |
2078 | 0 | r = LUKS2_hdr_labels(cd, &cd->u.luks2.hdr, |
2079 | 0 | params->label, params->subsystem, 0); |
2080 | 0 | if (r < 0) |
2081 | 0 | goto out; |
2082 | 0 | } |
2083 | | |
2084 | 0 | device_set_block_size(crypt_data_device(cd), sector_size); |
2085 | |
|
2086 | 0 | r = LUKS2_wipe_header_areas(cd, &cd->u.luks2.hdr); |
2087 | 0 | if (r < 0) { |
2088 | 0 | log_err(cd, _("Cannot wipe header on device %s."), |
2089 | 0 | mdata_device_path(cd)); |
2090 | 0 | if (dev_size < LUKS2_hdr_and_areas_size(&cd->u.luks2.hdr)) |
2091 | 0 | log_err(cd, _("Device %s is too small."), device_path(crypt_metadata_device(cd))); |
2092 | 0 | goto out; |
2093 | 0 | } |
2094 | | |
2095 | | /* Wipe any existing integrity superblock before a new integrity superblock is created */
2096 | 0 | if (crypt_get_integrity_tag_size(cd)) { |
2097 | 0 | r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_ZERO, |
2098 | 0 | crypt_get_data_offset(cd) * SECTOR_SIZE, |
2099 | 0 | 8 * SECTOR_SIZE, 8 * SECTOR_SIZE, NULL, NULL); |
2100 | 0 | if (r < 0) { |
2101 | 0 | if (r == -EBUSY) |
2102 | 0 | log_err(cd, _("Cannot format device %s in use."), |
2103 | 0 | data_device_path(cd)); |
2104 | 0 | else if (r == -EACCES) { |
2105 | 0 | log_err(cd, _("Cannot format device %s, permission denied."), |
2106 | 0 | data_device_path(cd)); |
2107 | 0 | r = -EINVAL; |
2108 | 0 | } else |
2109 | 0 | log_err(cd, _("Cannot wipe header on device %s."), |
2110 | 0 | data_device_path(cd)); |
2111 | |
|
2112 | 0 | goto out; |
2113 | 0 | } |
2114 | 0 | } |
2115 | | |
2116 | | /* Format underlying virtual dm-integrity device */ |
2117 | 0 | if (!integrity_inline && crypt_get_integrity_tag_size(cd)) { |
2118 | 0 | if (integrity_key_size) { |
2119 | 0 | integrity_key = crypt_alloc_volume_key(integrity_key_size, |
2120 | 0 | crypt_volume_key_get_key(cd->volume_key) + volume_key_size - integrity_key_size); |
2121 | 0 | if (!integrity_key) { |
2122 | 0 | r = -ENOMEM; |
2123 | 0 | goto out; |
2124 | 0 | } |
2125 | 0 | } |
2126 | 0 | r = INTEGRITY_format(cd, params ? params->integrity_params : NULL, |
2127 | 0 | integrity_key, NULL, NULL, 0, NULL, false); |
2128 | 0 | if (r) |
2129 | 0 | log_err(cd, _("Cannot format integrity for device %s."), |
2130 | 0 | data_device_path(cd)); |
2131 | 0 | crypt_free_volume_key(integrity_key); |
2132 | 0 | } |
2133 | | |
2134 | 0 | if (r < 0) |
2135 | 0 | goto out; |
2136 | | |
2137 | | /* override sequence id check with format */ |
2138 | 0 | r = LUKS2_hdr_write_force(cd, &cd->u.luks2.hdr); |
2139 | 0 | if (r < 0) { |
2140 | 0 | if (r == -EBUSY) |
2141 | 0 | log_err(cd, _("Cannot format device %s in use."), |
2142 | 0 | mdata_device_path(cd)); |
2143 | 0 | else if (r == -EACCES) { |
2144 | 0 | log_err(cd, _("Cannot format device %s, permission denied."), |
2145 | 0 | mdata_device_path(cd)); |
2146 | 0 | r = -EINVAL; |
2147 | 0 | } else |
2148 | 0 | log_err(cd, _("Cannot format device %s."), |
2149 | 0 | mdata_device_path(cd)); |
2150 | 0 | } |
2151 | |
|
2152 | 0 | out: |
2153 | 0 | if (r) { |
2154 | 0 | LUKS2_hdr_free(cd, &cd->u.luks2.hdr); |
2155 | 0 | return r; |
2156 | 0 | } |
2157 | | |
2158 | | /* Device size can be larger now if it is a file container */ |
2159 | 0 | if (!device_size(crypt_data_device(cd), &dev_size) && |
2160 | 0 | dev_size <= (crypt_get_data_offset(cd) * SECTOR_SIZE)) |
2161 | 0 | log_std(cd, _("Device %s is too small for activation, there is no remaining space for data.\n"), |
2162 | 0 | device_path(crypt_data_device(cd))); |
2163 | |
|
2164 | 0 | return 0; |
2165 | 0 | } |
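
Illustrative note (not part of setup.c): a caller-side sketch for the LUKS2 path above. The device path, PBKDF costs, label and key size are placeholders:

#include <libcryptsetup.h>

/* Sketch: format LUKS2 with an explicit PBKDF ("/dev/sdb" is hypothetical). */
static int luks2_example(const char *pass, size_t pass_len)
{
        struct crypt_device *cd;
        struct crypt_pbkdf_type pbkdf = {
                .type = CRYPT_KDF_ARGON2ID,
                .hash = "sha256",
                .time_ms = 2000,
                .max_memory_kb = 256 * 1024,
                .parallel_threads = 2,
        };
        struct crypt_params_luks2 params = {
                .pbkdf = &pbkdf,
                .sector_size = 512,     /* 512 is always valid; larger sizes need device support */
                .label = "example-label",
        };
        int r = crypt_init(&cd, "/dev/sdb");

        if (r < 0)
                return r;

        r = crypt_format(cd, CRYPT_LUKS2, "aes", "xts-plain64", NULL, NULL, 64, &params);
        if (r >= 0)
                r = crypt_keyslot_add_by_volume_key(cd, CRYPT_ANY_SLOT, NULL, 0,
                                                    pass, pass_len);
        crypt_free(cd);
        return r < 0 ? r : 0;
}
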
2166 | | |
2167 | | static int opal_topology_alignment(struct crypt_device *cd, |
2168 | | uint64_t partition_offset_sectors, |
2169 | | uint64_t data_offset_sectors, |
2170 | | uint64_t required_alignment_sectors, |
2171 | | uint64_t default_alignment_bytes, |
2172 | | uint64_t *ret_alignment_offset_bytes, |
2173 | | uint64_t *ret_alignment_bytes, |
2174 | | uint32_t *ret_opal_block_bytes, |
2175 | | uint64_t *ret_opal_alignment_granularity_blocks) |
2176 | 0 | { |
2177 | 0 | bool opal_align; |
2178 | 0 | int r; |
2179 | 0 | uint32_t opal_block_bytes, device_block_bytes; |
2180 | 0 | uint64_t opal_alignment_granularity_blocks, opal_lowest_lba_blocks; |
2181 | |
|
2182 | 0 | assert(cd); |
2183 | 0 | assert(ret_alignment_offset_bytes); |
2184 | 0 | assert(ret_alignment_bytes); |
2185 | 0 | assert(ret_opal_block_bytes); |
2186 | 0 | assert(ret_opal_alignment_granularity_blocks); |
2187 | | |
2188 | 0 | r = opal_geometry(cd, crypt_data_device(cd), &opal_align, &opal_block_bytes, |
2189 | 0 | &opal_alignment_granularity_blocks, &opal_lowest_lba_blocks); |
2190 | 0 | if (r) { |
2191 | 0 | log_err(cd, _("Cannot get OPAL alignment parameters.")); |
2192 | 0 | return -EINVAL; |
2193 | 0 | } |
2194 | | |
2195 | 0 | device_block_bytes = device_block_size(cd, crypt_data_device(cd)); |
2196 | |
|
2197 | 0 | log_dbg(cd, "OPAL geometry: alignment: '%c', logical block size: %" PRIu32 "/%" PRIu32 |
2198 | 0 | ", alignment granularity: %" PRIu64 ", lowest aligned LBA: %" PRIu64, |
2199 | 0 | opal_align ? 'y' : 'n', opal_block_bytes, device_block_bytes, |
2200 | 0 | opal_alignment_granularity_blocks, opal_lowest_lba_blocks); |
2201 | |
|
2202 | 0 | if (opal_block_bytes < SECTOR_SIZE || NOTPOW2(opal_block_bytes)) { |
2203 | 0 | log_err(cd, _("Bogus OPAL logical block size.")); |
2204 | 0 | return -EINVAL; |
2205 | 0 | } |
2206 | | |
2207 | 0 | if (device_block_bytes != opal_block_bytes) { |
2208 | 0 | log_err(cd, _("Bogus OPAL logical block size differs from device block size.")); |
2209 | 0 | return -EINVAL; |
2210 | 0 | } |
2211 | | |
2212 | 0 | if (data_offset_sectors && |
2213 | 0 | MISALIGNED(data_offset_sectors + partition_offset_sectors, opal_block_bytes / SECTOR_SIZE)) { |
2214 | 0 | log_err(cd, _("Requested data offset is not compatible with OPAL block size.")); |
2215 | 0 | return -EINVAL; |
2216 | 0 | } |
2217 | | |
2218 | | /* Data offset has priority over data alignment parameter */ |
2219 | 0 | if (!data_offset_sectors && |
2220 | 0 | MISALIGNED(required_alignment_sectors, opal_block_bytes / SECTOR_SIZE)) { |
2221 | 0 | log_err(cd, _("Requested data alignment is not compatible with OPAL alignment.")); |
2222 | 0 | return -EINVAL; |
2223 | 0 | } |
2224 | | |
2225 | 0 | if (!opal_align) { |
2226 | | /* For detached header the alignment is used directly as data offset */ |
2227 | 0 | if (required_alignment_sectors || cd->metadata_device) |
2228 | 0 | *ret_alignment_bytes = required_alignment_sectors * SECTOR_SIZE; |
2229 | 0 | else |
2230 | 0 | *ret_alignment_bytes = default_alignment_bytes; |
2231 | 0 | *ret_alignment_offset_bytes = 0; |
2232 | 0 | *ret_opal_block_bytes = opal_block_bytes; |
2233 | 0 | *ret_opal_alignment_granularity_blocks = 1; |
2234 | 0 | return 0; |
2235 | 0 | } |
2236 | | |
2237 | 0 | if (data_offset_sectors) { |
2238 | 0 | if (MISALIGNED((((data_offset_sectors + partition_offset_sectors) * SECTOR_SIZE) / opal_block_bytes) - opal_lowest_lba_blocks, |
2239 | 0 | opal_alignment_granularity_blocks)) { |
2240 | | // FIXME: Add hint to user on how to fix it |
2241 | 0 | log_err(cd, _("Data offset does not satisfy OPAL alignment requirements.")); |
2242 | 0 | return -EINVAL; |
2243 | 0 | } |
2244 | | |
2245 | 0 | *ret_alignment_offset_bytes = 0; |
2246 | 0 | *ret_alignment_bytes = 0; |
2247 | 0 | *ret_opal_block_bytes = opal_block_bytes; |
2248 | 0 | *ret_opal_alignment_granularity_blocks = opal_alignment_granularity_blocks; |
2249 | |
|
2250 | 0 | return 0; |
2251 | 0 | } |
2252 | | |
2253 | 0 | if (MISALIGNED(required_alignment_sectors * SECTOR_SIZE, opal_block_bytes * opal_alignment_granularity_blocks)) { |
2254 | 0 | log_err(cd, _("Requested data alignment does not satisfy locking range alignment requirements.")); |
2255 | 0 | return -EINVAL; |
2256 | 0 | } |
2257 | | |
2258 | | /* For detached header the alignment is used directly as data offset */ |
2259 | 0 | if (required_alignment_sectors || cd->metadata_device) |
2260 | 0 | *ret_alignment_bytes = required_alignment_sectors * SECTOR_SIZE; |
2261 | 0 | else |
2262 | 0 | *ret_alignment_bytes = size_round_up(default_alignment_bytes, opal_block_bytes * opal_alignment_granularity_blocks); |
2263 | | |
2264 | | /* data offset is not set, calculate proper alignment */ |
2265 | 0 | *ret_alignment_offset_bytes = (partition_offset_sectors * SECTOR_SIZE) % (opal_block_bytes * opal_alignment_granularity_blocks); |
2266 | 0 | if (*ret_alignment_offset_bytes) |
2267 | 0 | *ret_alignment_offset_bytes = opal_block_bytes * opal_alignment_granularity_blocks - *ret_alignment_offset_bytes; |
2268 | |
|
2269 | 0 | if (*ret_alignment_offset_bytes) |
2270 | 0 | log_dbg(cd, "Compensating misaligned partition offset by %" PRIu64 " bytes.",
2271 | 0 | *ret_alignment_offset_bytes);
2272 | |
|
2273 | 0 | *ret_alignment_offset_bytes += (opal_lowest_lba_blocks * opal_block_bytes); |
2274 | 0 | *ret_opal_block_bytes = opal_block_bytes; |
2275 | 0 | *ret_opal_alignment_granularity_blocks = opal_alignment_granularity_blocks; |
2276 | |
|
2277 | 0 | log_dbg(cd, "OPAL alignment (%" PRIu32 "/%" PRIu64 "), offset = %" PRIu64 ". Required alignment is %" PRIu64 ".", |
2278 | 0 | opal_block_bytes, opal_alignment_granularity_blocks, *ret_alignment_offset_bytes, *ret_alignment_bytes); |
2279 | |
|
2280 | 0 | return 0; |
2281 | 0 | } |
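
Illustrative note (not part of setup.c): the compensation at the end of opal_topology_alignment() is plain modular arithmetic. A worked example with made-up drive geometry (4096-byte OPAL blocks, alignment granularity of 8 blocks, lowest aligned LBA 0, partition starting at sector 34):

#include <stdint.h>
#include <stdio.h>

/* Worked example of the alignment-offset computation above; all numbers are hypothetical. */
int main(void)
{
        const uint64_t partition_offset_bytes = 34 * 512;       /* 17408 */
        const uint64_t opal_block_bytes = 4096;
        const uint64_t granularity_blocks = 8;
        const uint64_t lowest_lba_blocks = 0;

        uint64_t granule = opal_block_bytes * granularity_blocks;      /* 32768 */
        uint64_t off = partition_offset_bytes % granule;               /* 17408 */

        if (off)
                off = granule - off;    /* pad so the locking range starts on a granule boundary */
        off += lowest_lba_blocks * opal_block_bytes;

        printf("alignment offset = %llu bytes\n", (unsigned long long)off);     /* 15360 */
        return 0;
}
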
2282 | | |
2283 | | int crypt_format_luks2_opal(struct crypt_device *cd, |
2284 | | const char *cipher, |
2285 | | const char *cipher_mode, |
2286 | | const char *uuid, |
2287 | | const char *volume_keys, |
2288 | | size_t volume_keys_size, |
2289 | | struct crypt_params_luks2 *params, |
2290 | | struct crypt_params_hw_opal *opal_params) |
2291 | 0 | { |
2292 | 0 | bool opal_range_reset = false, subsystem_overridden = false, sector_size_autodetect = cipher != NULL; |
2293 | 0 | int r; |
2294 | 0 | char cipher_spec[128]; |
2295 | 0 | const char *integrity = params ? params->integrity : NULL; |
2296 | 0 | size_t integrity_key_size = 0; /* only for independent, separate key in HMAC */ |
2297 | 0 | struct volume_key *integrity_key = NULL; |
2298 | 0 | uint32_t sector_size, opal_block_bytes, opal_segment_number = 1; /* We'll use the partition number if available later */ |
2299 | 0 | uint64_t alignment_offset_bytes, data_offset_bytes, device_size_bytes, opal_alignment_granularity_blocks, |
2300 | 0 | partition_offset_sectors, range_offset_blocks, range_size_bytes, |
2301 | 0 | required_alignment_bytes, metadata_size_bytes, keyslots_size_bytes, |
2302 | 0 | provided_data_sectors; |
2303 | 0 | struct volume_key *user_key = NULL; |
2304 | 0 | struct crypt_lock_handle *opal_lh = NULL; |
2305 | |
|
2306 | 0 | if (!cd || !params || !opal_params || |
2307 | 0 | !opal_params->admin_key || !opal_params->admin_key_size || !opal_params->user_key_size) |
2308 | 0 | return -EINVAL; |
2309 | | |
2310 | 0 | if (cd->type) { |
2311 | 0 | log_dbg(cd, "Context already formatted as %s.", cd->type); |
2312 | 0 | return -EINVAL; |
2313 | 0 | } |
2314 | | |
2315 | 0 | log_dbg(cd, "Formatting device %s as type LUKS2 with OPAL HW encryption.", mdata_device_path(cd) ?: "(none)"); |
2316 | |
|
2317 | 0 | r = init_crypto(cd); |
2318 | 0 | if (r < 0) |
2319 | 0 | return r; |
2320 | | |
2321 | 0 | if (volume_keys_size < opal_params->user_key_size) |
2322 | 0 | return -EINVAL; |
2323 | | |
2324 | 0 | if (cipher && (volume_keys_size == opal_params->user_key_size)) |
2325 | 0 | return -EINVAL; |
2326 | | |
2327 | 0 | if (!crypt_metadata_device(cd)) { |
2328 | 0 | log_err(cd, _("Can't format LUKS without device.")); |
2329 | 0 | return -EINVAL; |
2330 | 0 | } |
2331 | | |
2332 | 0 | if (params->data_alignment && |
2333 | 0 | MISALIGNED(cd->data_offset, params->data_alignment)) { |
2334 | 0 | log_err(cd, _("Requested data alignment is not compatible with data offset.")); |
2335 | 0 | return -EINVAL; |
2336 | 0 | } |
2337 | | |
2338 | 0 | if (params->data_device) { |
2339 | 0 | if (!cd->metadata_device) |
2340 | 0 | cd->metadata_device = cd->device; |
2341 | 0 | else |
2342 | 0 | device_free(cd, cd->device); |
2343 | 0 | cd->device = NULL; |
2344 | 0 | if (device_alloc(cd, &cd->device, params->data_device) < 0) |
2345 | 0 | return -ENOMEM; |
2346 | 0 | } |
2347 | | |
2348 | 0 | r = crypt_opal_supported(cd, crypt_data_device(cd)); |
2349 | 0 | if (r < 0) |
2350 | 0 | return r; |
2351 | | |
2352 | 0 | if (params->sector_size) |
2353 | 0 | sector_size_autodetect = false; |
2354 | |
|
2355 | 0 | partition_offset_sectors = crypt_dev_partition_offset(device_path(crypt_data_device(cd))); |
2356 | |
|
2357 | 0 | r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL); |
2358 | 0 | if (r < 0) |
2359 | 0 | return r; |
2360 | | |
2361 | | /* |
2362 | | * Check both data and metadata devices for exclusive access since |
2363 | | * we don't want to setup locking range on already used partition. |
2364 | | */ |
2365 | 0 | if (crypt_metadata_device(cd) != crypt_data_device(cd)) { |
2366 | 0 | r = device_check_access(cd, crypt_data_device(cd), DEV_EXCL); |
2367 | 0 | if (r < 0) |
2368 | 0 | return r; |
2369 | 0 | } |
2370 | | |
2371 | 0 | if (!(cd->type = strdup(CRYPT_LUKS2))) |
2372 | 0 | return -ENOMEM; |
2373 | | |
2374 | 0 | if (volume_keys) |
2375 | 0 | cd->volume_key = crypt_alloc_volume_key(volume_keys_size, volume_keys); |
2376 | 0 | else |
2377 | 0 | cd->volume_key = crypt_generate_volume_key(cd, volume_keys_size, KEY_QUALITY_KEY); |
2378 | |
|
2379 | 0 | if (!cd->volume_key) { |
2380 | 0 | r = -ENOMEM; |
2381 | 0 | goto out; |
2382 | 0 | } |
2383 | | |
2384 | 0 | if (cipher) { |
2385 | 0 | user_key = crypt_alloc_volume_key(opal_params->user_key_size, crypt_volume_key_get_key(cd->volume_key)); |
2386 | 0 | if (!user_key) { |
2387 | 0 | r = -ENOMEM; |
2388 | 0 | goto out; |
2389 | 0 | } |
2390 | 0 | } |
2391 | | |
2392 | 0 | r = 0; |
2393 | 0 | if (params->pbkdf) |
2394 | 0 | r = crypt_set_pbkdf_type(cd, params->pbkdf); |
2395 | 0 | else if (verify_pbkdf_params(cd, &cd->pbkdf)) |
2396 | 0 | r = init_pbkdf_type(cd, NULL, CRYPT_LUKS2); |
2397 | |
|
2398 | 0 | if (r < 0) |
2399 | 0 | goto out; |
2400 | | |
2401 | 0 | if (cd->metadata_device && !cd->data_offset) |
2402 | | /* For detached header the alignment is used directly as data offset */ |
2403 | 0 | cd->data_offset = params->data_alignment; |
2404 | |
|
2405 | 0 | r = opal_topology_alignment(cd, partition_offset_sectors, |
2406 | 0 | cd->data_offset, params->data_alignment, |
2407 | 0 | DEFAULT_DISK_ALIGNMENT, &alignment_offset_bytes, &required_alignment_bytes, |
2408 | 0 | &opal_block_bytes, &opal_alignment_granularity_blocks); |
2409 | 0 | if (r < 0) |
2410 | 0 | goto out; |
2411 | | |
2412 | 0 | if (sector_size_autodetect) { |
2413 | 0 | sector_size = device_optimal_encryption_sector_size(cd, crypt_data_device(cd)); |
2414 | 0 | if ((opal_block_bytes * opal_alignment_granularity_blocks) > sector_size) |
2415 | 0 | sector_size = opal_block_bytes * opal_alignment_granularity_blocks; |
2416 | 0 | if (sector_size > MAX_SECTOR_SIZE) |
2417 | 0 | sector_size = MAX_SECTOR_SIZE; |
2418 | 0 | log_dbg(cd, "Auto-detected optimal encryption sector size for device %s is %d bytes.", |
2419 | 0 | device_path(crypt_data_device(cd)), sector_size); |
2420 | 0 | } else |
2421 | 0 | sector_size = params->sector_size; |
2422 | | |
2423 | | /* To ensure it is obvious and explicit that OPAL is being used, set the |
2424 | | * subsystem tag if the user hasn't passed one. */ |
2425 | 0 | if (!params->subsystem) { |
2426 | 0 | params->subsystem = "HW-OPAL"; |
2427 | 0 | subsystem_overridden = true; |
2428 | 0 | } |
2429 | | |
2430 | | /* We need to give the drive a segment number - use the partition number if there is |
2431 | | * one, otherwise the first valid (1) number if it's a single-volume setup */ |
2432 | 0 | r = crypt_dev_get_partition_number(device_path(crypt_data_device(cd))); |
2433 | 0 | if (r > 0) |
2434 | 0 | opal_segment_number = r; |
2435 | |
|
2436 | 0 | if (cipher) { |
2437 | 0 | if (params->integrity_params && params->integrity_params->integrity_key_size) |
2438 | 0 | integrity_key_size = params->integrity_params->integrity_key_size; |
2439 | |
|
2440 | 0 | r = LUKS2_check_encryption_params(cd, cipher, cipher_mode, integrity, 0, |
2441 | 0 | volume_keys_size - opal_params->user_key_size, |
2442 | 0 | params, &integrity, &integrity_key_size); |
2443 | 0 | if (r < 0) |
2444 | 0 | goto out; |
2445 | 0 | } |
2446 | | |
2447 | 0 | r = device_size(crypt_data_device(cd), &device_size_bytes); |
2448 | 0 | if (r < 0) |
2449 | 0 | goto out; |
2450 | | |
2451 | 0 | r = LUKS2_hdr_get_storage_params(cd, alignment_offset_bytes, required_alignment_bytes, |
2452 | 0 | &metadata_size_bytes, &keyslots_size_bytes, &data_offset_bytes); |
2453 | 0 | if (r < 0) |
2454 | 0 | goto out; |
2455 | | |
2456 | 0 | r = -EINVAL; |
2457 | 0 | if (device_size_bytes < data_offset_bytes && !cd->metadata_device) { |
2458 | 0 | log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd))); |
2459 | 0 | goto out; |
2460 | 0 | } |
2461 | | |
2462 | 0 | device_size_bytes -= data_offset_bytes; |
2463 | 0 | range_size_bytes = device_size_bytes - (device_size_bytes % (opal_block_bytes * opal_alignment_granularity_blocks)); |
2464 | 0 | if (!range_size_bytes) |
2465 | 0 | goto out; |
2466 | | |
2467 | 0 | if (device_size_bytes != range_size_bytes) |
2468 | 0 | log_err(cd, _("Compensating device size by %" PRIu64 " sectors to align it with OPAL alignment granularity."), |
2469 | 0 | (device_size_bytes - range_size_bytes) / SECTOR_SIZE); |
2470 | |
|
2471 | 0 | if (cipher) { |
2472 | 0 | r = LUKS2_check_encryption_sector(cd, range_size_bytes, data_offset_bytes, sector_size, |
2473 | 0 | sector_size_autodetect, integrity == NULL, |
2474 | 0 | &sector_size);
2475 | 0 | if (r < 0) |
2476 | 0 | goto out; |
2477 | | |
2478 | 0 | if (*cipher_mode != '\0') |
2479 | 0 | r = snprintf(cipher_spec, sizeof(cipher_spec), "%s-%s", cipher, cipher_mode); |
2480 | 0 | else |
2481 | 0 | r = snprintf(cipher_spec, sizeof(cipher_spec), "%s", cipher); |
2482 | 0 | if (r < 0 || (size_t)r >= sizeof(cipher_spec)) { |
2483 | 0 | r = -EINVAL; |
2484 | 0 | goto out; |
2485 | 0 | } |
2486 | 0 | } |
2487 | | |
2488 | 0 | r = LUKS2_generate_hdr(cd, &cd->u.luks2.hdr, cd->volume_key, |
2489 | 0 | cipher ? cipher_spec : NULL, |
2490 | 0 | integrity, integrity_key_size, |
2491 | 0 | uuid, |
2492 | 0 | sector_size, |
2493 | 0 | data_offset_bytes, |
2494 | 0 | metadata_size_bytes, keyslots_size_bytes, |
2495 | 0 | range_size_bytes, |
2496 | 0 | opal_segment_number, |
2497 | 0 | opal_params->user_key_size); |
2498 | 0 | if (r < 0) |
2499 | 0 | goto out; |
2500 | | |
2501 | 0 | log_dbg(cd, "Adding LUKS2 OPAL requirement flag."); |
2502 | 0 | r = LUKS2_config_set_requirement_version(cd, &cd->u.luks2.hdr, CRYPT_REQUIREMENT_OPAL, 1, false); |
2503 | 0 | if (r < 0) |
2504 | 0 | goto out; |
2505 | | |
2506 | 0 | if (params->label || params->subsystem) { |
2507 | 0 | r = LUKS2_hdr_labels(cd, &cd->u.luks2.hdr, |
2508 | 0 | params->label, params->subsystem, 0); |
2509 | 0 | if (r < 0) |
2510 | 0 | goto out; |
2511 | 0 | } |
2512 | | |
2513 | 0 | device_set_block_size(crypt_data_device(cd), sector_size); |
2514 | |
|
2515 | 0 | r = LUKS2_wipe_header_areas(cd, &cd->u.luks2.hdr); |
2516 | 0 | if (r < 0) { |
2517 | 0 | log_err(cd, _("Cannot wipe header on device %s."), |
2518 | 0 | mdata_device_path(cd)); |
2519 | 0 | if (device_size_bytes < LUKS2_hdr_and_areas_size(&cd->u.luks2.hdr)) |
2520 | 0 | log_err(cd, _("Device %s is too small."), device_path(crypt_metadata_device(cd))); |
2521 | 0 | goto out; |
2522 | 0 | } |
2523 | | |
2524 | 0 | range_offset_blocks = (data_offset_bytes + partition_offset_sectors * SECTOR_SIZE) / opal_block_bytes; |
2525 | |
|
2526 | 0 | r = opal_exclusive_lock(cd, crypt_data_device(cd), &opal_lh); |
2527 | 0 | if (r < 0) { |
2528 | 0 | log_err(cd, _("Failed to acquire OPAL lock on device %s."), device_path(crypt_data_device(cd))); |
2529 | 0 | goto out; |
2530 | 0 | } |
2531 | | |
2532 | 0 | r = opal_setup_ranges(cd, crypt_data_device(cd), user_key ?: cd->volume_key, |
2533 | 0 | range_offset_blocks, range_size_bytes / opal_block_bytes, |
2534 | 0 | opal_block_bytes, opal_segment_number, |
2535 | 0 | opal_params->admin_key, opal_params->admin_key_size); |
2536 | 0 | if (r < 0) { |
2537 | 0 | if (r == -EPERM) |
2538 | 0 | log_err(cd, _("Incorrect OPAL Admin key.")); |
2539 | 0 | else |
2540 | 0 | log_err(cd, _("Cannot setup OPAL segment.")); |
2541 | 0 | goto out; |
2542 | 0 | } |
2543 | | |
2544 | 0 | opal_range_reset = true; |
2545 | | |
2546 | | /* integrity metadata goes in unlocked OPAL locking range */ |
2547 | 0 | if (crypt_get_integrity_tag_size(cd)) { |
2548 | 0 | r = opal_unlock(cd, crypt_data_device(cd), opal_segment_number, user_key ?: cd->volume_key); |
2549 | 0 | if (r < 0) |
2550 | 0 | goto out; |
2551 | | |
2552 | 0 | r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_ZERO, |
2553 | 0 | crypt_get_data_offset(cd) * SECTOR_SIZE, |
2554 | 0 | 8 * SECTOR_SIZE, 8 * SECTOR_SIZE, NULL, NULL); |
2555 | 0 | if (r < 0) { |
2556 | 0 | if (r == -EBUSY) |
2557 | 0 | log_err(cd, _("Cannot format device %s in use."), |
2558 | 0 | data_device_path(cd)); |
2559 | 0 | else if (r == -EACCES) { |
2560 | 0 | log_err(cd, _("Cannot format device %s, permission denied."), |
2561 | 0 | data_device_path(cd)); |
2562 | 0 | r = -EINVAL; |
2563 | 0 | } else |
2564 | 0 | log_err(cd, _("Cannot wipe header on device %s."), |
2565 | 0 | data_device_path(cd)); |
2566 | |
|
2567 | 0 | goto out; |
2568 | 0 | } |
2569 | | |
2570 | 0 | if (integrity_key_size) { |
2571 | 0 | integrity_key = crypt_alloc_volume_key(integrity_key_size, |
2572 | 0 | crypt_volume_key_get_key(cd->volume_key) + volume_keys_size - integrity_key_size); |
2573 | |
|
2574 | 0 | if (!integrity_key) { |
2575 | 0 | r = -ENOMEM; |
2576 | 0 | goto out; |
2577 | 0 | } |
2578 | 0 | } |
2579 | | |
2580 | 0 | r = INTEGRITY_format(cd, params->integrity_params, integrity_key, NULL, NULL, |
2581 | | /* |
2582 | | * Create reduced dm-integrity device only if locking range size does |
2583 | | * not match device size. |
2584 | | */ |
2585 | 0 | device_size_bytes != range_size_bytes ? range_size_bytes / SECTOR_SIZE : 0, NULL, false); |
2586 | 0 | if (r) |
2587 | 0 | log_err(cd, _("Cannot format integrity for device %s."), |
2588 | 0 | data_device_path(cd)); |
2589 | |
|
2590 | 0 | crypt_free_volume_key(integrity_key); |
2591 | 0 | if (r < 0) |
2592 | 0 | goto out; |
2593 | | |
2594 | 0 | r = INTEGRITY_data_sectors(cd, crypt_data_device(cd), |
2595 | 0 | crypt_get_data_offset(cd) * SECTOR_SIZE, |
2596 | 0 | &provided_data_sectors); |
2597 | 0 | if (r < 0) |
2598 | 0 | goto out; |
2599 | | |
2600 | 0 | if (!LUKS2_segment_set_size(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, |
2601 | 0 | &(uint64_t) {provided_data_sectors * SECTOR_SIZE})) { |
2602 | 0 | r = -EINVAL; |
2603 | 0 | goto out; |
2604 | 0 | } |
2605 | | |
2606 | 0 | r = opal_lock(cd, crypt_data_device(cd), opal_segment_number); |
2607 | 0 | if (r < 0) |
2608 | 0 | goto out; |
2609 | 0 | } |
2610 | | |
2611 | | /* override sequence id check with format */ |
2612 | 0 | r = LUKS2_hdr_write_force(cd, &cd->u.luks2.hdr); |
2613 | 0 | if (r < 0) { |
2614 | 0 | if (r == -EBUSY) |
2615 | 0 | log_err(cd, _("Cannot format device %s in use."), |
2616 | 0 | mdata_device_path(cd)); |
2617 | 0 | else if (r == -EACCES) { |
2618 | 0 | log_err(cd, _("Cannot format device %s, permission denied."), |
2619 | 0 | mdata_device_path(cd)); |
2620 | 0 | r = -EINVAL; |
2621 | 0 | } else if (r == -EIO) { |
2622 | 0 | log_err(cd, _("Cannot format device %s, OPAL device seems to be fully write-protected now."), |
2623 | 0 | mdata_device_path(cd)); |
2624 | 0 | log_err(cd, _("This is perhaps a bug in firmware. Run OPAL PSID reset and reconnect for recovery.")); |
2625 | 0 | } else |
2626 | 0 | log_err(cd, _("Cannot format device %s."), |
2627 | 0 | mdata_device_path(cd)); |
2628 | 0 | } |
2629 | |
|
2630 | 0 | out: |
2631 | 0 | crypt_free_volume_key(user_key); |
2632 | |
|
2633 | 0 | if (subsystem_overridden) |
2634 | 0 | params->subsystem = NULL; |
2635 | |
|
2636 | 0 | if (r >= 0) { |
2637 | 0 | opal_exclusive_unlock(cd, opal_lh); |
2638 | 0 | return 0; |
2639 | 0 | } |
2640 | | |
2641 | 0 | if (opal_range_reset && |
2642 | 0 | (opal_reset_segment(cd, crypt_data_device(cd), opal_segment_number, |
2643 | 0 | opal_params->admin_key, opal_params->admin_key_size) < 0)) |
2644 | 0 | log_err(cd, _("Locking range %d reset on device %s failed."), |
2645 | 0 | opal_segment_number, device_path(crypt_data_device(cd))); |
2646 | |
|
2647 | 0 | opal_exclusive_unlock(cd, opal_lh); |
2648 | 0 | LUKS2_hdr_free(cd, &cd->u.luks2.hdr); |
2649 | |
|
2650 | 0 | crypt_set_null_type(cd); |
2651 | 0 | crypt_free_volume_key(cd->volume_key); |
2652 | 0 | cd->volume_key = NULL; |
2653 | |
|
2654 | 0 | return r; |
2655 | 0 | } |
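
Illustrative note (not part of setup.c): a caller-side sketch for crypt_format_luks2_opal(). The device path, OPAL Admin PIN and key sizes are placeholders; the volume key buffer holds the software cipher key followed by the OPAL user key, which is why volume_keys_size must be larger than user_key_size when a cipher is given (see the checks near the top of the function):

#include <libcryptsetup.h>

/* Sketch: LUKS2 with OPAL hardware encryption plus a dm-crypt layer on top.
 * "/dev/nvme0n1p2" and the admin PIN are placeholders. */
static int opal_example(const char *admin_pin, size_t admin_pin_len,
                        const char *pass, size_t pass_len)
{
        struct crypt_device *cd;
        struct crypt_params_luks2 params = { .sector_size = 512 };
        struct crypt_params_hw_opal opal = {
                .admin_key = admin_pin,
                .admin_key_size = admin_pin_len,
                .user_key_size = 32,    /* key material handed to the drive */
        };
        int r = crypt_init(&cd, "/dev/nvme0n1p2");

        if (r < 0)
                return r;

        /* 96 bytes total: 64 for aes-xts-plain64 (software), 32 for the OPAL user key. */
        r = crypt_format_luks2_opal(cd, "aes", "xts-plain64", NULL, NULL, 96,
                                    &params, &opal);
        if (r >= 0)
                r = crypt_keyslot_add_by_volume_key(cd, CRYPT_ANY_SLOT, NULL, 0,
                                                    pass, pass_len);
        crypt_free(cd);
        return r < 0 ? r : 0;
}
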
2656 | | |
2657 | | static int _crypt_format_loopaes(struct crypt_device *cd, |
2658 | | const char *cipher, |
2659 | | const char *uuid, |
2660 | | size_t volume_key_size, |
2661 | | struct crypt_params_loopaes *params) |
2662 | 0 | { |
2663 | 0 | if (!crypt_metadata_device(cd)) { |
2664 | 0 | log_err(cd, _("Can't format LOOPAES without device.")); |
2665 | 0 | return -EINVAL; |
2666 | 0 | } |
2667 | | |
2668 | 0 | if (volume_key_size > 1024) { |
2669 | 0 | log_err(cd, _("Invalid key size.")); |
2670 | 0 | return -EINVAL; |
2671 | 0 | } |
2672 | | |
2673 | 0 | if (uuid) { |
2674 | 0 | log_err(cd, _("UUID is not supported for this crypt type.")); |
2675 | 0 | return -EINVAL; |
2676 | 0 | } |
2677 | | |
2678 | 0 | if (cd->metadata_device) { |
2679 | 0 | log_err(cd, _("Detached metadata device is not supported for this crypt type.")); |
2680 | 0 | return -EINVAL; |
2681 | 0 | } |
2682 | | |
2683 | 0 | if (!(cd->type = strdup(CRYPT_LOOPAES))) |
2684 | 0 | return -ENOMEM; |
2685 | | |
2686 | 0 | cd->u.loopaes.key_size = volume_key_size; |
2687 | |
|
2688 | 0 | cd->u.loopaes.cipher = strdup(cipher ?: DEFAULT_LOOPAES_CIPHER); |
2689 | |
|
2690 | 0 | if (params && params->hash) |
2691 | 0 | cd->u.loopaes.hdr.hash = strdup(params->hash); |
2692 | |
|
2693 | 0 | cd->u.loopaes.hdr.offset = params ? params->offset : 0; |
2694 | 0 | cd->u.loopaes.hdr.skip = params ? params->skip : 0; |
2695 | |
|
2696 | 0 | return 0; |
2697 | 0 | } |
2698 | | |
2699 | | static int _crypt_format_verity(struct crypt_device *cd, |
2700 | | const char *uuid, |
2701 | | struct crypt_params_verity *params) |
2702 | 0 | { |
2703 | 0 | int r = 0, hash_size; |
2704 | 0 | uint64_t data_device_size, hash_blocks_size; |
2705 | 0 | struct device *fec_device = NULL; |
2706 | 0 | char *fec_device_path = NULL, *hash_name = NULL, *root_hash = NULL, *salt = NULL; |
2707 | |
|
2708 | 0 | if (!crypt_metadata_device(cd)) { |
2709 | 0 | log_err(cd, _("Can't format VERITY without device.")); |
2710 | 0 | return -EINVAL; |
2711 | 0 | } |
2712 | | |
2713 | 0 | if (!params) |
2714 | 0 | return -EINVAL; |
2715 | | |
2716 | 0 | if (!params->data_device && !cd->metadata_device) |
2717 | 0 | return -EINVAL; |
2718 | | |
2719 | 0 | if (params->hash_type > VERITY_MAX_HASH_TYPE) { |
2720 | 0 | log_err(cd, _("Unsupported VERITY hash type %d."), params->hash_type); |
2721 | 0 | return -EINVAL; |
2722 | 0 | } |
2723 | | |
2724 | 0 | if (VERITY_BLOCK_SIZE_OK(params->data_block_size) || |
2725 | 0 | VERITY_BLOCK_SIZE_OK(params->hash_block_size)) { |
2726 | 0 | log_err(cd, _("Unsupported VERITY block size.")); |
2727 | 0 | return -EINVAL; |
2728 | 0 | } |
2729 | | |
2730 | 0 | if (MISALIGNED_512(params->hash_area_offset)) { |
2731 | 0 | log_err(cd, _("Unsupported VERITY hash offset.")); |
2732 | 0 | return -EINVAL; |
2733 | 0 | } |
2734 | | |
2735 | 0 | if (MISALIGNED_512(params->fec_area_offset)) { |
2736 | 0 | log_err(cd, _("Unsupported VERITY FEC offset.")); |
2737 | 0 | return -EINVAL; |
2738 | 0 | } |
2739 | | |
2740 | 0 | if (!(cd->type = strdup(CRYPT_VERITY))) |
2741 | 0 | return -ENOMEM; |
2742 | | |
2743 | 0 | if (params->data_device) { |
2744 | 0 | r = crypt_set_data_device(cd, params->data_device); |
2745 | 0 | if (r) |
2746 | 0 | return r; |
2747 | 0 | } |
2748 | | |
2749 | 0 | if (!params->data_size) { |
2750 | 0 | r = device_size(cd->device, &data_device_size); |
2751 | 0 | if (r < 0) |
2752 | 0 | return r; |
2753 | | |
2754 | 0 | cd->u.verity.hdr.data_size = data_device_size / params->data_block_size; |
2755 | 0 | } else |
2756 | 0 | cd->u.verity.hdr.data_size = params->data_size; |
2757 | | |
2758 | 0 | if (device_is_identical(crypt_metadata_device(cd), crypt_data_device(cd)) > 0 && |
2759 | 0 | (cd->u.verity.hdr.data_size * params->data_block_size) > params->hash_area_offset) { |
2760 | 0 | log_err(cd, _("Data area overlaps with hash area.")); |
2761 | 0 | return -EINVAL; |
2762 | 0 | } |
2763 | | |
2764 | 0 | hash_size = crypt_hash_size(params->hash_name); |
2765 | 0 | if (hash_size <= 0) { |
2766 | 0 | log_err(cd, _("Hash algorithm %s not supported."), |
2767 | 0 | params->hash_name); |
2768 | 0 | return -EINVAL; |
2769 | 0 | } |
2770 | 0 | cd->u.verity.root_hash_size = hash_size; |
2771 | |
|
2772 | 0 | if (params->fec_device) { |
2773 | 0 | fec_device_path = strdup(params->fec_device); |
2774 | 0 | if (!fec_device_path) |
2775 | 0 | return -ENOMEM; |
2776 | 0 | r = device_alloc(cd, &fec_device, params->fec_device); |
2777 | 0 | if (r < 0) { |
2778 | 0 | r = -ENOMEM; |
2779 | 0 | goto out; |
2780 | 0 | } |
2781 | | |
2782 | 0 | hash_blocks_size = VERITY_hash_blocks(cd, params) * params->hash_block_size; |
2783 | 0 | if (device_is_identical(crypt_metadata_device(cd), fec_device) > 0 && |
2784 | 0 | (params->hash_area_offset + hash_blocks_size) > params->fec_area_offset) { |
2785 | 0 | log_err(cd, _("Hash area overlaps with FEC area.")); |
2786 | 0 | r = -EINVAL; |
2787 | 0 | goto out; |
2788 | 0 | } |
2789 | | |
2790 | 0 | if (device_is_identical(crypt_data_device(cd), fec_device) > 0 && |
2791 | 0 | (cd->u.verity.hdr.data_size * params->data_block_size) > params->fec_area_offset) { |
2792 | 0 | log_err(cd, _("Data area overlaps with FEC area.")); |
2793 | 0 | r = -EINVAL; |
2794 | 0 | goto out; |
2795 | 0 | } |
2796 | 0 | } |
2797 | | |
2798 | 0 | root_hash = malloc(cd->u.verity.root_hash_size); |
2799 | 0 | hash_name = strdup(params->hash_name); |
2800 | 0 | salt = malloc(params->salt_size); |
2801 | |
|
2802 | 0 | if (!root_hash || !hash_name || !salt) { |
2803 | 0 | r = -ENOMEM; |
2804 | 0 | goto out; |
2805 | 0 | } |
2806 | | |
2807 | 0 | cd->u.verity.hdr.flags = params->flags; |
2808 | 0 | cd->u.verity.root_hash = root_hash; |
2809 | 0 | cd->u.verity.hdr.hash_name = hash_name; |
2810 | 0 | cd->u.verity.hdr.data_device = NULL; |
2811 | 0 | cd->u.verity.fec_device = fec_device; |
2812 | 0 | cd->u.verity.hdr.fec_device = fec_device_path; |
2813 | 0 | cd->u.verity.hdr.fec_roots = params->fec_roots; |
2814 | 0 | cd->u.verity.hdr.data_block_size = params->data_block_size; |
2815 | 0 | cd->u.verity.hdr.hash_block_size = params->hash_block_size; |
2816 | 0 | cd->u.verity.hdr.hash_area_offset = params->hash_area_offset; |
2817 | 0 | cd->u.verity.hdr.fec_area_offset = params->fec_area_offset; |
2818 | 0 | cd->u.verity.hdr.hash_type = params->hash_type; |
2819 | 0 | cd->u.verity.hdr.flags = params->flags; |
2820 | 0 | cd->u.verity.hdr.salt_size = params->salt_size; |
2821 | 0 | cd->u.verity.hdr.salt = salt; |
2822 | |
|
2823 | 0 | if (params->salt) |
2824 | 0 | memcpy(salt, params->salt, params->salt_size); |
2825 | 0 | else |
2826 | 0 | r = crypt_random_get(cd, salt, params->salt_size, CRYPT_RND_SALT); |
2827 | 0 | if (r) |
2828 | 0 | goto out; |
2829 | | |
2830 | 0 | if (params->flags & CRYPT_VERITY_CREATE_HASH) { |
2831 | 0 | r = VERITY_create(cd, &cd->u.verity.hdr, |
2832 | 0 | cd->u.verity.root_hash, cd->u.verity.root_hash_size); |
2833 | 0 | if (!r && params->fec_device) |
2834 | 0 | r = VERITY_FEC_process(cd, &cd->u.verity.hdr, cd->u.verity.fec_device, 0, NULL); |
2835 | 0 | if (r) |
2836 | 0 | goto out; |
2837 | 0 | } |
2838 | | |
2839 | 0 | if (!(params->flags & CRYPT_VERITY_NO_HEADER)) { |
2840 | 0 | if (uuid) { |
2841 | 0 | if (!(cd->u.verity.uuid = strdup(uuid))) |
2842 | 0 | r = -ENOMEM; |
2843 | 0 | } else |
2844 | 0 | r = VERITY_UUID_generate(&cd->u.verity.uuid); |
2845 | |
|
2846 | 0 | if (!r) |
2847 | 0 | r = VERITY_write_sb(cd, cd->u.verity.hdr.hash_area_offset, |
2848 | 0 | cd->u.verity.uuid, |
2849 | 0 | &cd->u.verity.hdr); |
2850 | 0 | } |
2851 | |
|
2852 | 0 | out: |
2853 | 0 | if (r) { |
2854 | 0 | device_free(cd, fec_device); |
2855 | 0 | free(root_hash); |
2856 | 0 | free(hash_name); |
2857 | 0 | free(fec_device_path); |
2858 | 0 | free(salt); |
2859 | 0 | } |
2860 | |
|
2861 | 0 | return r; |
2862 | 0 | } |
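
Illustrative note (not part of setup.c): the matching public call for the verity path above. Device paths, block sizes and salt handling are illustrative:

#include <libcryptsetup.h>

/* Sketch: build a dm-verity hash tree; /dev/sdb1 (data) and /dev/sdb2 (hash) are hypothetical. */
static int verity_example(void)
{
        struct crypt_device *cd;
        struct crypt_params_verity params = {
                .hash_name = "sha256",
                .data_device = "/dev/sdb1",
                .hash_type = 1,                 /* current on-disk format version */
                .data_block_size = 4096,
                .hash_block_size = 4096,
                .data_size = 0,                 /* 0 = derive from the data device size */
                .salt = NULL,                   /* NULL = generate a random salt */
                .salt_size = 32,
                .flags = CRYPT_VERITY_CREATE_HASH,
        };
        int r = crypt_init(&cd, "/dev/sdb2");   /* hash device carries the superblock */

        if (r < 0)
                return r;

        r = crypt_format(cd, CRYPT_VERITY, NULL, NULL, NULL, NULL, 0, &params);
        crypt_free(cd);
        return r;
}
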
2863 | | |
2864 | | static int _crypt_format_integrity(struct crypt_device *cd, |
2865 | | const char *uuid, |
2866 | | struct crypt_params_integrity *params, |
2867 | | const char *integrity_key, size_t integrity_key_size, |
2868 | | bool integrity_inline) |
2869 | 0 | { |
2870 | 0 | int r; |
2871 | 0 | uint32_t integrity_tag_size; |
2872 | 0 | char *integrity = NULL, *journal_integrity = NULL, *journal_crypt = NULL; |
2873 | 0 | struct volume_key *journal_crypt_key = NULL, *journal_mac_key = NULL, *ik = NULL; |
2874 | |
|
2875 | 0 | if (!params) |
2876 | 0 | return -EINVAL; |
2877 | | |
2878 | 0 | if (uuid) { |
2879 | 0 | log_err(cd, _("UUID is not supported for this crypt type.")); |
2880 | 0 | return -EINVAL; |
2881 | 0 | } |
2882 | | |
2883 | 0 | if (integrity_key_size && integrity_key_size != params->integrity_key_size) { |
2884 | 0 | log_err(cd, _("Integrity key size mismatch.")); |
2885 | 0 | return -EINVAL; |
2886 | 0 | } |
2887 | | |
2888 | 0 | r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL); |
2889 | 0 | if (r < 0) |
2890 | 0 | return r; |
2891 | | |
2892 | | /* Wipe first 8 sectors - fs magic numbers etc. */ |
2893 | 0 | r = crypt_wipe_device(cd, crypt_metadata_device(cd), CRYPT_WIPE_ZERO, 0, |
2894 | 0 | 8 * SECTOR_SIZE, 8 * SECTOR_SIZE, NULL, NULL); |
2895 | 0 | if (r < 0) { |
2896 | 0 | log_err(cd, _("Cannot wipe header on device %s."), |
2897 | 0 | mdata_device_path(cd)); |
2898 | 0 | return r; |
2899 | 0 | } |
2900 | | |
2901 | 0 | if (!(cd->type = strdup(CRYPT_INTEGRITY))) |
2902 | 0 | return -ENOMEM; |
2903 | | |
2904 | 0 | if (params->journal_crypt_key) { |
2905 | 0 | journal_crypt_key = crypt_alloc_volume_key(params->journal_crypt_key_size, |
2906 | 0 | params->journal_crypt_key); |
2907 | 0 | if (!journal_crypt_key) |
2908 | 0 | return -ENOMEM; |
2909 | 0 | } |
2910 | | |
2911 | 0 | if (params->journal_integrity_key) { |
2912 | 0 | journal_mac_key = crypt_alloc_volume_key(params->journal_integrity_key_size, |
2913 | 0 | params->journal_integrity_key); |
2914 | 0 | if (!journal_mac_key) { |
2915 | 0 | r = -ENOMEM; |
2916 | 0 | goto out; |
2917 | 0 | } |
2918 | 0 | } |
2919 | | |
2920 | 0 | if (params->integrity && !(integrity = strdup(params->integrity))) { |
2921 | 0 | r = -ENOMEM; |
2922 | 0 | goto out; |
2923 | 0 | } |
2924 | 0 | if (params->journal_integrity && !(journal_integrity = strdup(params->journal_integrity))) { |
2925 | 0 | r = -ENOMEM; |
2926 | 0 | goto out; |
2927 | 0 | } |
2928 | 0 | if (params->journal_crypt && !(journal_crypt = strdup(params->journal_crypt))) { |
2929 | 0 | r = -ENOMEM; |
2930 | 0 | goto out; |
2931 | 0 | } |
2932 | | |
2933 | 0 | integrity_tag_size = INTEGRITY_hash_tag_size(integrity); |
2934 | 0 | if (integrity_tag_size > 0 && params->tag_size && integrity_tag_size != params->tag_size) |
2935 | 0 | log_std(cd, _("WARNING: Requested tag size %d bytes differs from %s size output (%d bytes).\n"), |
2936 | 0 | params->tag_size, integrity, integrity_tag_size); |
2937 | |
|
2938 | 0 | if (params->tag_size) |
2939 | 0 | integrity_tag_size = params->tag_size; |
2940 | |
|
2941 | 0 | cd->u.integrity.journal_crypt_key = journal_crypt_key; |
2942 | 0 | cd->u.integrity.journal_mac_key = journal_mac_key; |
2943 | 0 | cd->u.integrity.params.journal_size = params->journal_size; |
2944 | 0 | cd->u.integrity.params.journal_watermark = params->journal_watermark; |
2945 | 0 | cd->u.integrity.params.journal_commit_time = params->journal_commit_time; |
2946 | 0 | cd->u.integrity.params.interleave_sectors = params->interleave_sectors; |
2947 | 0 | cd->u.integrity.params.buffer_sectors = params->buffer_sectors; |
2948 | 0 | cd->u.integrity.params.sector_size = params->sector_size; |
2949 | 0 | cd->u.integrity.params.tag_size = integrity_tag_size; |
2950 | 0 | cd->u.integrity.params.integrity = integrity; |
2951 | 0 | cd->u.integrity.params.journal_integrity = journal_integrity; |
2952 | 0 | cd->u.integrity.params.journal_crypt = journal_crypt; |
2953 | |
2954 | 0 | if (params->integrity_key_size) { |
2955 | 0 | if (!integrity_key) |
2956 | 0 | ik = crypt_generate_volume_key(cd, params->integrity_key_size, KEY_QUALITY_EMPTY); |
2957 | 0 | else |
2958 | 0 | ik = crypt_alloc_volume_key(params->integrity_key_size, integrity_key); |
2959 | 0 | if (!ik) { |
2960 | 0 | r = -ENOMEM; |
2961 | 0 | goto out; |
2962 | 0 | } |
2963 | 0 | } |
2964 | | |
2965 | 0 | r = INTEGRITY_format(cd, params, ik, cd->u.integrity.journal_crypt_key, |
2966 | 0 | cd->u.integrity.journal_mac_key, 0, &cd->u.integrity.sb_flags, |
2967 | 0 | integrity_inline); |
2968 | 0 | if (r) |
2969 | 0 | log_err(cd, _("Cannot format integrity for device %s."), mdata_device_path(cd)); |
2970 | |
2971 | 0 | crypt_free_volume_key(ik); |
2972 | 0 | out: |
2973 | 0 | if (r) { |
2974 | 0 | crypt_free_volume_key(journal_crypt_key); |
2975 | 0 | crypt_free_volume_key(journal_mac_key); |
2976 | 0 | free(integrity); |
2977 | 0 | free(journal_integrity); |
2978 | 0 | free(journal_crypt); |
2979 | 0 | } |
2980 | |
2981 | 0 | return r; |
2982 | 0 | } |
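
A minimal sketch of how the standalone dm-integrity path above is reached through the public API. The device path, checksum algorithm, and tag size are illustrative assumptions, not values taken from this file.

#include <libcryptsetup.h>

/* Hypothetical helper: format a standalone dm-integrity device. */
static int example_format_integrity(const char *device_path)
{
	struct crypt_device *cd;
	struct crypt_params_integrity params = {
		.integrity = "crc32c",	/* keyless checksum, so no integrity key is passed */
		.tag_size = 4,		/* crc32c produces 4-byte tags */
		.sector_size = 512,
	};
	int r = crypt_init(&cd, device_path);

	if (r < 0)
		return r;
	/* uuid must stay NULL; _crypt_format_integrity() above rejects it */
	r = crypt_format(cd, CRYPT_INTEGRITY, NULL, NULL, NULL, NULL, 0, &params);
	crypt_free(cd);
	return r;
}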
2983 | | |
2984 | | int crypt_format_inline(struct crypt_device *cd, |
2985 | | const char *type, |
2986 | | const char *cipher, |
2987 | | const char *cipher_mode, |
2988 | | const char *uuid, |
2989 | | const char *volume_key, |
2990 | | size_t volume_key_size, |
2991 | | void *params) |
2992 | 0 | { |
2993 | 0 | struct crypt_params_luks2 *lparams; |
2994 | 0 | const struct crypt_params_integrity *iparams; |
2995 | 0 | uint32_t device_tag_size, required_tag_size; |
2996 | 0 | struct device *idevice; |
2997 | 0 | size_t sector_size, required_sector_size; |
2998 | 0 | int r; |
2999 | |
3000 | 0 | if (!cd || !params) |
3001 | 0 | return -EINVAL; |
3002 | | |
3003 | 0 | if (cd->type) { |
3004 | 0 | log_dbg(cd, "Context already formatted as %s.", cd->type); |
3005 | 0 | return -EINVAL; |
3006 | 0 | } |
3007 | | |
3008 | 0 | log_dbg(cd, "Formatting device %s as type %s with inline tags.", mdata_device_path(cd) ?: "(none)", type); |
3009 | |
3010 | 0 | if (isINTEGRITY(type)) { |
3011 | 0 | lparams = NULL; |
3012 | 0 | iparams = params; |
3013 | 0 | idevice = crypt_metadata_device(cd); |
3014 | 0 | required_sector_size = iparams->sector_size; |
3015 | 0 | required_tag_size = iparams->tag_size; |
3016 | | |
3017 | | /* Unused in standalone integrity */ |
3018 | 0 | if (cipher || cipher_mode) |
3019 | 0 | return -EINVAL; |
3020 | 0 | } else if (isLUKS2(type)) { |
3021 | 0 | lparams = params; |
3022 | 0 | iparams = lparams->integrity_params; |
3023 | 0 | idevice = crypt_data_device(cd); |
3024 | 0 | required_sector_size = lparams->sector_size; |
3025 | |
3026 | 0 | if (!lparams->integrity || !idevice) |
3027 | 0 | return -EINVAL; |
3028 | | |
3029 | 0 | required_tag_size = INTEGRITY_tag_size(lparams->integrity, cipher, cipher_mode); |
3030 | 0 | } else { |
3031 | 0 | log_err(cd, _("Unknown or unsupported device type %s requested."), type); |
3032 | 0 | return -EINVAL; |
3033 | 0 | } |
3034 | | |
3035 | | /* In inline mode the journal will never be used, check that the journal params are not set */ |
3036 | 0 | if (iparams && (iparams->journal_size || iparams->journal_watermark || iparams->journal_commit_time || |
3037 | 0 | iparams->interleave_sectors || iparams->journal_integrity || iparams->journal_integrity_key || |
3038 | 0 | iparams->journal_integrity_key_size || iparams->journal_crypt || iparams->journal_crypt_key || |
3039 | 0 | iparams->journal_crypt_key_size)) |
3040 | 0 | return -EINVAL; |
3041 | | |
3042 | 0 | if (!device_is_nop_dif(idevice, &device_tag_size)) { |
3043 | 0 | log_err(cd, _("Device %s does not provide inline integrity data fields."), mdata_device_path(cd)); |
3044 | 0 | return -EINVAL; |
3045 | 0 | } |
3046 | | |
3047 | | /* We can get device_tag_size = 0 as kernel provides this info only for some block devices */ |
3048 | 0 | if (device_tag_size > 0 && device_tag_size < required_tag_size) { |
3049 | 0 | log_err(cd, _("Inline tag size %" PRIu32 " [bytes] is larger than %" PRIu32 " provided by device %s."), |
3050 | 0 | required_tag_size, device_tag_size, mdata_device_path(cd)); |
3051 | 0 | return -EINVAL; |
3052 | 0 | } |
3053 | 0 | log_dbg(cd, "Inline integrity is supported (%" PRIu32 ").", device_tag_size); |
3054 | | |
3055 | | /* Inline mode must use the same sector size as the underlying hardware device */ |
3056 | 0 | sector_size = device_block_size(cd, idevice); |
3057 | 0 | if (!sector_size) |
3058 | 0 | return -EINVAL; |
3059 | | |
3060 | | /* No autodetection, use device sector size */ |
3061 | 0 | if (isLUKS2(type) && lparams && !required_sector_size) |
3062 | 0 | lparams->sector_size = sector_size; |
3063 | 0 | else if (sector_size != required_sector_size) { |
3064 | 0 | log_err(cd, _("Sector must be the same as device hardware sector (%zu bytes)."), sector_size); |
3065 | 0 | return -EINVAL; |
3066 | 0 | } |
3067 | | |
3068 | 0 | if (isINTEGRITY(type)) |
3069 | 0 | r = _crypt_format_integrity(cd, uuid, params, volume_key, volume_key_size, true); |
3070 | 0 | else if (isLUKS2(type)) |
3071 | 0 | r = _crypt_format_luks2(cd, cipher, cipher_mode, |
3072 | 0 | uuid, volume_key, volume_key_size, params, false, true); |
3073 | 0 | else |
3074 | 0 | r = -EINVAL; |
3075 | |
3076 | 0 | if (r < 0) { |
3077 | 0 | crypt_set_null_type(cd); |
3078 | 0 | crypt_free_volume_key(cd->volume_key); |
3079 | 0 | cd->volume_key = NULL; |
3080 | 0 | } |
3081 | |
3082 | 0 | return r; |
3083 | 0 | } |
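
A hedged sketch of calling crypt_format_inline() for LUKS2 with authenticated encryption stored in the device's inline (DIF) integrity area. The cipher, integrity spec, and key size below are example values; the data device must advertise inline integrity fields, and sector_size 0 adopts the hardware sector size exactly as the code above does.

#include <libcryptsetup.h>

/* Hypothetical caller sketch, not part of this file. */
static int example_format_inline_luks2(struct crypt_device *cd)
{
	struct crypt_params_luks2 params = {
		.integrity = "hmac(sha256)",
		.sector_size = 0,	/* 0: use the device hardware sector size */
	};

	/* 96-byte volume key assumed: 64 bytes for aes-xts-plain64 plus 32 for the HMAC */
	return crypt_format_inline(cd, CRYPT_LUKS2, "aes", "xts-plain64",
				   NULL, NULL, 96, &params);
}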
3084 | | |
3085 | | static int _crypt_format(struct crypt_device *cd, |
3086 | | const char *type, |
3087 | | const char *cipher, |
3088 | | const char *cipher_mode, |
3089 | | const char *uuid, |
3090 | | const char *volume_key, |
3091 | | size_t volume_key_size, |
3092 | | void *params, |
3093 | | bool sector_size_autodetect) |
3094 | 0 | { |
3095 | 0 | int r; |
3096 | |
3097 | 0 | if (!cd || !type) |
3098 | 0 | return -EINVAL; |
3099 | | |
3100 | 0 | if (cd->type) { |
3101 | 0 | log_dbg(cd, "Context already formatted as %s.", cd->type); |
3102 | 0 | return -EINVAL; |
3103 | 0 | } |
3104 | | |
3105 | 0 | log_dbg(cd, "Formatting device %s as type %s.", mdata_device_path(cd) ?: "(none)", type); |
3106 | |
3107 | 0 | crypt_reset_null_type(cd); |
3108 | |
3109 | 0 | r = init_crypto(cd); |
3110 | 0 | if (r < 0) |
3111 | 0 | return r; |
3112 | | |
3113 | 0 | if (isPLAIN(type)) |
3114 | 0 | r = _crypt_format_plain(cd, cipher, cipher_mode, |
3115 | 0 | uuid, volume_key_size, params); |
3116 | 0 | else if (isLUKS1(type)) |
3117 | 0 | r = _crypt_format_luks1(cd, cipher, cipher_mode, |
3118 | 0 | uuid, volume_key, volume_key_size, params); |
3119 | 0 | else if (isLUKS2(type)) |
3120 | 0 | r = _crypt_format_luks2(cd, cipher, cipher_mode, |
3121 | 0 | uuid, volume_key, volume_key_size, params, sector_size_autodetect, false); |
3122 | 0 | else if (isLOOPAES(type)) |
3123 | 0 | r = _crypt_format_loopaes(cd, cipher, uuid, volume_key_size, params); |
3124 | 0 | else if (isVERITY(type)) |
3125 | 0 | r = _crypt_format_verity(cd, uuid, params); |
3126 | 0 | else if (isINTEGRITY(type)) |
3127 | 0 | r = _crypt_format_integrity(cd, uuid, params, volume_key, volume_key_size, false); |
3128 | 0 | else { |
3129 | 0 | log_err(cd, _("Unknown or unsupported device type %s requested."), type); |
3130 | 0 | r = -EINVAL; |
3131 | 0 | } |
3132 | |
3133 | 0 | if (r < 0) { |
3134 | 0 | crypt_set_null_type(cd); |
3135 | 0 | crypt_free_volume_key(cd->volume_key); |
3136 | 0 | cd->volume_key = NULL; |
3137 | 0 | } |
3138 | |
3139 | 0 | return r; |
3140 | 0 | } |
3141 | | |
3142 | | CRYPT_SYMBOL_EXPORT_NEW(int, crypt_format, 2, 4, |
3143 | | /* crypt_format parameters follow */ |
3144 | | struct crypt_device *cd, |
3145 | | const char *type, |
3146 | | const char *cipher, |
3147 | | const char *cipher_mode, |
3148 | | const char *uuid, |
3149 | | const char *volume_key, |
3150 | | size_t volume_key_size, |
3151 | | void *params) |
3152 | 0 | { |
3153 | 0 | return _crypt_format(cd, type, cipher, cipher_mode, uuid, volume_key, volume_key_size, params, true); |
3154 | 0 | } |
3155 | | |
3156 | | |
3157 | | CRYPT_SYMBOL_EXPORT_OLD(int, crypt_format, 2, 0, |
3158 | | /* crypt_format parameters follow */ |
3159 | | struct crypt_device *cd, |
3160 | | const char *type, |
3161 | | const char *cipher, |
3162 | | const char *cipher_mode, |
3163 | | const char *uuid, |
3164 | | const char *volume_key, |
3165 | | size_t volume_key_size, |
3166 | | void *params) |
3167 | 0 | { |
3168 | 0 | return _crypt_format(cd, type, cipher, cipher_mode, uuid, volume_key, volume_key_size, params, false); |
3169 | 0 | } |
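
The two versioned exports differ only in the final sector_size_autodetect argument: callers linked against the 2.4 symbol get automatic sector-size detection, while the old 2.0 symbol keeps the historical behaviour. A sketch of typical use, with illustrative cipher, key size, and passphrase handling:

#include <libcryptsetup.h>

/* Hypothetical caller: format LUKS2 and add a passphrase keyslot. */
static int example_format_luks2(struct crypt_device *cd,
				const char *passphrase, size_t passphrase_size)
{
	struct crypt_params_luks2 params = { .sector_size = 512 };
	int r = crypt_format(cd, CRYPT_LUKS2, "aes", "xts-plain64",
			     NULL, NULL, 64, &params);

	if (r < 0)
		return r;
	return crypt_keyslot_add_by_volume_key(cd, CRYPT_ANY_SLOT, NULL, 0,
					       passphrase, passphrase_size);
}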
3170 | | |
3171 | | int crypt_repair(struct crypt_device *cd, |
3172 | | const char *requested_type, |
3173 | | void *params __attribute__((unused))) |
3174 | 0 | { |
3175 | 0 | int r; |
3176 | |
3177 | 0 | if (!cd) |
3178 | 0 | return -EINVAL; |
3179 | | |
3180 | 0 | log_dbg(cd, "Trying to repair %s crypt type from device %s.", |
3181 | 0 | requested_type ?: "any", mdata_device_path(cd) ?: "(none)"); |
3182 | |
3183 | 0 | if (!crypt_metadata_device(cd)) |
3184 | 0 | return -EINVAL; |
3185 | | |
3186 | 0 | if (requested_type && !isLUKS(requested_type)) |
3187 | 0 | return -EINVAL; |
3188 | | |
3189 | | /* Load with repair */ |
3190 | 0 | r = _crypt_load_luks(cd, requested_type, false, true); |
3191 | 0 | if (r < 0) |
3192 | 0 | return r; |
3193 | | |
3194 | | /* cd->type and header must be set in context */ |
3195 | 0 | r = crypt_check_data_device_size(cd); |
3196 | 0 | if (r < 0) |
3197 | 0 | crypt_set_null_type(cd); |
3198 | |
3199 | 0 | return r; |
3200 | 0 | } |
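
A sketch of invoking the repair path; the device path is an example, and passing NULL as requested_type accepts any LUKS variant, as the check above shows.

#include <libcryptsetup.h>

/* Hypothetical helper: attempt in-place repair of LUKS metadata. */
static int example_repair(const char *device_path)
{
	struct crypt_device *cd;
	int r = crypt_init(&cd, device_path);

	if (r < 0)
		return r;
	r = crypt_repair(cd, NULL, NULL);
	crypt_free(cd);
	return r;
}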
3201 | | |
3202 | | /* compare volume keys */ |
3203 | | static int _compare_volume_keys(struct volume_key *svk, struct volume_key *tvk) |
3204 | 0 | { |
3205 | 0 | if (svk == tvk) |
3206 | 0 | return 0; |
3207 | | |
3208 | 0 | if (!svk || !tvk) |
3209 | 0 | return 1; |
3210 | | |
3211 | 0 | if (crypt_volume_key_length(svk) != crypt_volume_key_length(tvk)) |
3212 | 0 | return 1; |
3213 | | |
3214 | | /* No switch between keyring and direct key specification */ |
3215 | 0 | if ((!crypt_volume_key_description(svk) && crypt_volume_key_description(tvk)) || |
3216 | 0 | (crypt_volume_key_description(svk) && !crypt_volume_key_description(tvk)) || |
3217 | 0 | (!crypt_volume_key_is_set(svk) && crypt_volume_key_is_set(tvk)) || |
3218 | 0 | (crypt_volume_key_is_set(svk) && !crypt_volume_key_is_set(tvk))) |
3219 | 0 | return 1; |
3220 | | |
3221 | 0 | if (crypt_volume_key_description(svk) && |
3222 | 0 | (crypt_volume_key_kernel_key_type(svk) != crypt_volume_key_kernel_key_type(tvk) || |
3223 | 0 | strcmp(crypt_volume_key_description(svk), crypt_volume_key_description(tvk)))) |
3224 | 0 | return 1; |
3225 | | |
3226 | 0 | if (crypt_volume_key_is_set(svk) && |
3227 | 0 | crypt_backend_memeq(crypt_volume_key_get_key(svk), |
3228 | 0 | crypt_volume_key_get_key(tvk), |
3229 | 0 | crypt_volume_key_length(svk))) |
3230 | 0 | return 1; |
3231 | | |
3232 | 0 | return 0; |
3233 | 0 | } |
3234 | | |
3235 | | static int _compare_volume_keys_luks2(struct volume_key *svk, struct volume_key *tvk) |
3236 | 0 | { |
3237 | 0 | if (svk == tvk) |
3238 | 0 | return 0; |
3239 | | |
3240 | 0 | if (!svk || !tvk) |
3241 | 0 | return 1; |
3242 | | |
3243 | 0 | if (crypt_volume_key_length(svk) != crypt_volume_key_length(tvk)) |
3244 | 0 | return 1; |
3245 | | |
3246 | 0 | if ((!crypt_volume_key_is_set(svk) && !crypt_volume_key_description(svk)) || |
3247 | 0 | (!crypt_volume_key_is_set(tvk) && !crypt_volume_key_description(tvk))) |
3248 | 0 | return 1; |
3249 | | |
3250 | 0 | if (crypt_volume_key_is_set(svk) && crypt_volume_key_is_set(tvk) && |
3251 | 0 | crypt_backend_memeq(crypt_volume_key_get_key(svk), |
3252 | 0 | crypt_volume_key_get_key(tvk), |
3253 | 0 | crypt_volume_key_length(svk))) |
3254 | 0 | return 1; |
3255 | | |
3256 | 0 | if (crypt_volume_key_description(svk) && crypt_volume_key_description(tvk)) |
3257 | 0 | return (crypt_volume_key_kernel_key_type(svk) != crypt_volume_key_kernel_key_type(tvk) || |
3258 | 0 | strcmp(crypt_volume_key_description(svk), crypt_volume_key_description(tvk))); |
3259 | | |
3260 | 0 | return 0; |
3261 | 0 | } |
3262 | | |
3263 | | static int _compare_device_types(struct crypt_device *cd, |
3264 | | const struct crypt_dm_active_device *src, |
3265 | | const struct crypt_dm_active_device *tgt) |
3266 | 0 | { |
3267 | 0 | if (!tgt->uuid) { |
3268 | 0 | log_dbg(cd, "Missing device uuid in target device."); |
3269 | 0 | return -EINVAL; |
3270 | 0 | } |
3271 | | |
3272 | | /* |
3273 | | * FIXME: The CRYPT_SUBDEV prefix should be enough but we need |
3274 | | * to keep INTEGRITY- for dm-integrity subdevices opened with |
3275 | | * cryptsetup version < 2.8.0. Drop the INTEGRITY condition |
3276 | | * in the next Y release. |
3277 | | */ |
3278 | 0 | if (isLUKS2(cd->type) && |
3279 | 0 | (!strncmp("INTEGRITY-", tgt->uuid, strlen("INTEGRITY-")) || |
3280 | 0 | !strncmp(CRYPT_SUBDEV, tgt->uuid, strlen(CRYPT_SUBDEV)))) { |
3281 | 0 | if (dm_uuid_cmp(tgt->uuid, src->uuid)) { |
3282 | 0 | log_dbg(cd, "LUKS UUID mismatch."); |
3283 | 0 | return -EINVAL; |
3284 | 0 | } |
3285 | 0 | } else if (isLUKS(cd->type)) { |
3286 | 0 | if (!src->uuid || strncmp(cd->type, tgt->uuid, strlen(cd->type)) || |
3287 | 0 | dm_uuid_cmp(tgt->uuid, src->uuid)) { |
3288 | 0 | log_dbg(cd, "LUKS UUID mismatch."); |
3289 | 0 | return -EINVAL; |
3290 | 0 | } |
3291 | 0 | } else if (isPLAIN(cd->type) || isLOOPAES(cd->type)) { |
3292 | 0 | if (strncmp(cd->type, tgt->uuid, strlen(cd->type))) { |
3293 | 0 | log_dbg(cd, "Unexpected uuid prefix %s in target device.", tgt->uuid); |
3294 | 0 | return -EINVAL; |
3295 | 0 | } |
3296 | 0 | } else if (!isINTEGRITY(cd->type)) { |
3297 | 0 | log_dbg(cd, "Unsupported device type %s for reload.", cd->type ?: "<empty>"); |
3298 | 0 | return -ENOTSUP; |
3299 | 0 | } |
3300 | | |
3301 | 0 | return 0; |
3302 | 0 | } |
3303 | | |
3304 | | static int _compare_crypt_devices(struct crypt_device *cd, |
3305 | | const struct dm_target *src, |
3306 | | const struct dm_target *tgt) |
3307 | 0 | { |
3308 | 0 | char *src_cipher = NULL, *src_integrity = NULL; |
3309 | 0 | int r = -EINVAL; |
3310 | | |
3311 | | /* for crypt devices keys are mandatory */ |
3312 | 0 | if (!src->u.crypt.vk || !tgt->u.crypt.vk) |
3313 | 0 | return -EINVAL; |
3314 | | |
3315 | | /* CIPHER checks */ |
3316 | 0 | if (!src->u.crypt.cipher || !tgt->u.crypt.cipher) |
3317 | 0 | return -EINVAL; |
3318 | | |
3319 | | /* |
3320 | | * dm_query_target converts a capi: cipher specification to dm-crypt format. |
3321 | | * We need to do the same for the cipher specification requested in the source |
3322 | | * device. |
3323 | | */ |
3324 | 0 | if (crypt_capi_to_cipher(&src_cipher, &src_integrity, src->u.crypt.cipher, src->u.crypt.integrity)) |
3325 | 0 | return -EINVAL; |
3326 | | |
3327 | 0 | if (strcmp(src_cipher, tgt->u.crypt.cipher)) { |
3328 | 0 | log_dbg(cd, "Cipher specs do not match."); |
3329 | 0 | goto out; |
3330 | 0 | } |
3331 | | |
3332 | 0 | if (crypt_volume_key_length(tgt->u.crypt.vk) == 0 && crypt_is_cipher_null(tgt->u.crypt.cipher)) |
3333 | 0 | log_dbg(cd, "Existing device uses cipher null. Skipping key comparison."); |
3334 | 0 | else if (cd && isLUKS2(cd->type)) { |
3335 | 0 | if (_compare_volume_keys_luks2(src->u.crypt.vk, tgt->u.crypt.vk)) { |
3336 | 0 | log_dbg(cd, "Keys in LUKS2 context and target device do not match."); |
3337 | 0 | goto out; |
3338 | 0 | } |
3339 | 0 | } else if (_compare_volume_keys(src->u.crypt.vk, tgt->u.crypt.vk)) { |
3340 | 0 | log_dbg(cd, "Keys in context and target device do not match."); |
3341 | 0 | goto out; |
3342 | 0 | } |
3343 | | |
3344 | 0 | if (crypt_strcmp(src_integrity, tgt->u.crypt.integrity)) { |
3345 | 0 | log_dbg(cd, "Integrity parameters do not match."); |
3346 | 0 | goto out; |
3347 | 0 | } |
3348 | | |
3349 | 0 | if (src->u.crypt.offset != tgt->u.crypt.offset || |
3350 | 0 | src->u.crypt.sector_size != tgt->u.crypt.sector_size || |
3351 | 0 | src->u.crypt.iv_offset != tgt->u.crypt.iv_offset || |
3352 | 0 | src->u.crypt.tag_size != tgt->u.crypt.tag_size) { |
3353 | 0 | log_dbg(cd, "Integer parameters do not match."); |
3354 | 0 | goto out; |
3355 | 0 | } |
3356 | | |
3357 | 0 | if (device_is_identical(src->data_device, tgt->data_device) <= 0) |
3358 | 0 | log_dbg(cd, "Data devices do not match."); |
3359 | 0 | else |
3360 | 0 | r = 0; |
3361 | |
3362 | 0 | out: |
3363 | 0 | free(src_cipher); |
3364 | 0 | free(src_integrity); |
3365 | |
3366 | 0 | return r; |
3367 | 0 | } |
3368 | | |
3369 | | static int _compare_integrity_devices(struct crypt_device *cd, |
3370 | | const struct dm_target *src, |
3371 | | const struct dm_target *tgt) |
3372 | 0 | { |
3373 | | /* |
3374 | | * some parameters may be implicit (and set in dm-integrity ctor) |
3375 | | * |
3376 | | * journal_size |
3377 | | * journal_watermark |
3378 | | * journal_commit_time |
3379 | | * buffer_sectors |
3380 | | * interleave_sectors |
3381 | | */ |
3382 | | |
3383 | | /* check the remaining integer values that make sense */ |
3384 | 0 | if (src->u.integrity.tag_size != tgt->u.integrity.tag_size || |
3385 | 0 | src->u.integrity.offset != tgt->u.integrity.offset || |
3386 | 0 | src->u.integrity.sector_size != tgt->u.integrity.sector_size) { |
3387 | 0 | log_dbg(cd, "Integer parameters do not match."); |
3388 | 0 | return -EINVAL; |
3389 | 0 | } |
3390 | | |
3391 | 0 | if (crypt_strcmp(src->u.integrity.integrity, tgt->u.integrity.integrity) || |
3392 | 0 | crypt_strcmp(src->u.integrity.journal_integrity, tgt->u.integrity.journal_integrity) || |
3393 | 0 | crypt_strcmp(src->u.integrity.journal_crypt, tgt->u.integrity.journal_crypt)) { |
3394 | 0 | log_dbg(cd, "Journal parameters do not match."); |
3395 | 0 | return -EINVAL; |
3396 | 0 | } |
3397 | | |
3398 | | /* unfortunately dm-integrity doesn't support keyring */ |
3399 | 0 | if (_compare_volume_keys(src->u.integrity.vk, tgt->u.integrity.vk) || |
3400 | 0 | _compare_volume_keys(src->u.integrity.journal_integrity_key, tgt->u.integrity.journal_integrity_key) || |
3401 | 0 | _compare_volume_keys(src->u.integrity.journal_crypt_key, tgt->u.integrity.journal_crypt_key)) { |
3402 | 0 | log_dbg(cd, "Journal keys do not match."); |
3403 | 0 | return -EINVAL; |
3404 | 0 | } |
3405 | | |
3406 | 0 | if (device_is_identical(src->data_device, tgt->data_device) <= 0) { |
3407 | 0 | log_dbg(cd, "Data devices do not match."); |
3408 | 0 | return -EINVAL; |
3409 | 0 | } |
3410 | | |
3411 | 0 | return 0; |
3412 | 0 | } |
3413 | | |
3414 | | int crypt_compare_dm_devices(struct crypt_device *cd, |
3415 | | const struct crypt_dm_active_device *src, |
3416 | | const struct crypt_dm_active_device *tgt) |
3417 | 0 | { |
3418 | 0 | int r; |
3419 | 0 | const struct dm_target *s, *t; |
3420 | |
3421 | 0 | if (!src || !tgt) |
3422 | 0 | return -EINVAL; |
3423 | | |
3424 | 0 | r = _compare_device_types(cd, src, tgt); |
3425 | 0 | if (r) |
3426 | 0 | return r; |
3427 | | |
3428 | 0 | s = &src->segment; |
3429 | 0 | t = &tgt->segment; |
3430 | |
3431 | 0 | while (s || t) { |
3432 | 0 | if (!s || !t) { |
3433 | 0 | log_dbg(cd, "segments count mismatch."); |
3434 | 0 | return -EINVAL; |
3435 | 0 | } |
3436 | 0 | if (s->type != t->type) { |
3437 | 0 | log_dbg(cd, "segment type mismatch."); |
3438 | 0 | r = -EINVAL; |
3439 | 0 | break; |
3440 | 0 | } |
3441 | | |
3442 | 0 | switch (s->type) { |
3443 | 0 | case DM_CRYPT: |
3444 | 0 | r = _compare_crypt_devices(cd, s, t); |
3445 | 0 | break; |
3446 | 0 | case DM_INTEGRITY: |
3447 | 0 | r = _compare_integrity_devices(cd, s, t); |
3448 | 0 | break; |
3449 | 0 | case DM_LINEAR: |
3450 | 0 | r = (s->u.linear.offset == t->u.linear.offset) ? 0 : -EINVAL; |
3451 | 0 | break; |
3452 | 0 | default: |
3453 | 0 | r = -ENOTSUP; |
3454 | 0 | } |
3455 | | |
3456 | 0 | if (r) |
3457 | 0 | break; |
3458 | | |
3459 | 0 | s = s->next; |
3460 | 0 | t = t->next; |
3461 | 0 | } |
3462 | | |
3463 | 0 | return r; |
3464 | 0 | } |
3465 | | |
3466 | | static int _reload_device(struct crypt_device *cd, const char *name, |
3467 | | struct crypt_dm_active_device *sdmd, uint64_t dmflags) |
3468 | 0 | { |
3469 | 0 | int r; |
3470 | 0 | struct crypt_dm_active_device tdmd; |
3471 | 0 | struct dm_target *src, *tgt = &tdmd.segment; |
3472 | |
3473 | 0 | assert(cd); |
3474 | 0 | assert(sdmd); |
3475 | | |
3476 | 0 | if (!cd->type || !name || !(sdmd->flags & CRYPT_ACTIVATE_REFRESH)) |
3477 | 0 | return -EINVAL; |
3478 | | |
3479 | 0 | src = &sdmd->segment; |
3480 | |
3481 | 0 | r = dm_query_device(cd, name, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER | |
3482 | 0 | DM_ACTIVE_UUID | DM_ACTIVE_CRYPT_KEYSIZE | |
3483 | 0 | DM_ACTIVE_CRYPT_KEY | DM_ACTIVE_INTEGRITY_PARAMS | |
3484 | 0 | DM_ACTIVE_JOURNAL_CRYPT_KEY | DM_ACTIVE_JOURNAL_MAC_KEY, &tdmd); |
3485 | 0 | if (r < 0) { |
3486 | 0 | log_err(cd, _("Device %s is not active."), name); |
3487 | 0 | return -EINVAL; |
3488 | 0 | } |
3489 | | |
3490 | 0 | if (!single_segment(&tdmd) || |
3491 | 0 | (tgt->type != DM_CRYPT && tgt->type != DM_INTEGRITY) || |
3492 | 0 | (tgt->type == DM_CRYPT && tgt->u.crypt.tag_size)) { |
3493 | 0 | r = -ENOTSUP; |
3494 | 0 | log_err(cd, _("Unsupported parameters on device %s."), name); |
3495 | 0 | goto out; |
3496 | 0 | } |
3497 | | |
3498 | 0 | r = crypt_compare_dm_devices(cd, sdmd, &tdmd); |
3499 | 0 | if (r) { |
3500 | 0 | log_err(cd, _("Mismatching parameters on device %s."), name); |
3501 | 0 | goto out; |
3502 | 0 | } |
3503 | | |
3504 | | /* Changing the read-only flag for an active device makes no sense */ |
3505 | 0 | if (tdmd.flags & CRYPT_ACTIVATE_READONLY) |
3506 | 0 | sdmd->flags |= CRYPT_ACTIVATE_READONLY; |
3507 | 0 | else |
3508 | 0 | sdmd->flags &= ~CRYPT_ACTIVATE_READONLY; |
3509 | | |
3510 | | /* |
3511 | | * Only LUKS2 allows switching between a volume key |
3512 | | * passed in hex-byte representation and a reference |
3513 | | * to the kernel keyring service. |
3514 | | * |
3515 | | * To make it easier, pass the src key directly after |
3516 | | * it was properly verified by the crypt_compare_dm_devices |
3517 | | * call above. |
3518 | | */ |
3519 | 0 | if (isLUKS2(cd->type) && tgt->type == DM_CRYPT && src->u.crypt.vk) { |
3520 | 0 | crypt_free_volume_key(tgt->u.crypt.vk); |
3521 | 0 | tgt->u.crypt.vk = src->u.crypt.vk; |
3522 | 0 | } |
3523 | |
3524 | 0 | if (tgt->type == DM_CRYPT) |
3525 | 0 | r = device_block_adjust(cd, src->data_device, DEV_OK, |
3526 | 0 | src->u.crypt.offset, &sdmd->size, NULL); |
3527 | 0 | else if (tgt->type == DM_INTEGRITY) |
3528 | 0 | r = device_block_adjust(cd, src->data_device, DEV_OK, |
3529 | 0 | src->u.integrity.offset, &sdmd->size, NULL); |
3530 | 0 | else |
3531 | 0 | r = -EINVAL; |
3532 | |
3533 | 0 | if (r) |
3534 | 0 | goto out; |
3535 | | |
3536 | 0 | tdmd.flags = sdmd->flags; |
3537 | 0 | tgt->size = tdmd.size = sdmd->size; |
3538 | |
3539 | 0 | r = dm_reload_device(cd, name, &tdmd, dmflags, 1); |
3540 | 0 | out: |
3541 | | /* otherwise dm_targets_free would free src key */ |
3542 | 0 | if (src->u.crypt.vk == tgt->u.crypt.vk) |
3543 | 0 | tgt->u.crypt.vk = NULL; |
3544 | |
3545 | 0 | dm_targets_free(cd, &tdmd); |
3546 | 0 | free(CONST_CAST(void*)tdmd.uuid); |
3547 | |
3548 | 0 | return r; |
3549 | 0 | } |
3550 | | |
3551 | | static int _reload_device_with_integrity(struct crypt_device *cd, |
3552 | | const char *name, |
3553 | | const char *iname, |
3554 | | const char *ipath, |
3555 | | struct crypt_dm_active_device *sdmd, |
3556 | | struct crypt_dm_active_device *sdmdi) |
3557 | 0 | { |
3558 | 0 | int r; |
3559 | 0 | struct crypt_dm_active_device tdmd, tdmdi = {}; |
3560 | 0 | struct dm_target *src, *srci, *tgt = &tdmd.segment, *tgti = &tdmdi.segment; |
3561 | 0 | struct device *data_device = NULL; |
3562 | 0 | bool clear = false; |
3563 | |
3564 | 0 | assert(cd); |
3565 | 0 | assert(sdmd); |
3566 | 0 | assert(sdmdi); |
3567 | | |
3568 | 0 | if (!cd->type || !name || !iname || !(sdmd->flags & CRYPT_ACTIVATE_REFRESH)) |
3569 | 0 | return -EINVAL; |
3570 | | |
3571 | 0 | src = &sdmd->segment; |
3572 | 0 | srci = &sdmdi->segment; |
3573 | |
3574 | 0 | r = dm_query_device(cd, name, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER | |
3575 | 0 | DM_ACTIVE_UUID | DM_ACTIVE_CRYPT_KEYSIZE | |
3576 | 0 | DM_ACTIVE_CRYPT_KEY, &tdmd); |
3577 | 0 | if (r < 0) { |
3578 | 0 | log_err(cd, _("Device %s is not active."), name); |
3579 | 0 | return -EINVAL; |
3580 | 0 | } |
3581 | | |
3582 | 0 | if (!single_segment(&tdmd) || tgt->type != DM_CRYPT || !tgt->u.crypt.tag_size) { |
3583 | 0 | log_err(cd, _("Unsupported parameters on device %s."), name); |
3584 | 0 | r = -ENOTSUP; |
3585 | 0 | goto out; |
3586 | 0 | } |
3587 | | |
3588 | 0 | r = dm_query_device(cd, iname, DM_ACTIVE_DEVICE | DM_ACTIVE_UUID, &tdmdi); |
3589 | 0 | if (r < 0) { |
3590 | 0 | log_err(cd, _("Device %s is not active."), iname); |
3591 | 0 | r = -EINVAL; |
3592 | 0 | goto out; |
3593 | 0 | } |
3594 | | |
3595 | 0 | if (!single_segment(&tdmdi) || tgti->type != DM_INTEGRITY) { |
3596 | 0 | log_err(cd, _("Unsupported parameters on device %s."), iname); |
3597 | 0 | r = -ENOTSUP; |
3598 | 0 | goto out; |
3599 | 0 | } |
3600 | | |
3601 | 0 | r = crypt_compare_dm_devices(cd, sdmdi, &tdmdi); |
3602 | 0 | if (r) { |
3603 | 0 | log_err(cd, _("Mismatching parameters on device %s."), iname); |
3604 | 0 | goto out; |
3605 | 0 | } |
3606 | | |
3607 | | /* a separate metadata device is not supported underneath dm-crypt with authenticated encryption */ |
3608 | 0 | if (sdmdi->segment.u.integrity.meta_device || tdmdi.segment.u.integrity.meta_device) { |
3609 | 0 | r = -ENOTSUP; |
3610 | 0 | goto out; |
3611 | 0 | } |
3612 | | |
3613 | 0 | r = device_alloc(cd, &data_device, ipath); |
3614 | 0 | if (r < 0) |
3615 | 0 | goto out; |
3616 | | |
3617 | 0 | r = device_block_adjust(cd, srci->data_device, DEV_OK, |
3618 | 0 | srci->u.integrity.offset, &sdmdi->size, NULL); |
3619 | 0 | if (r) |
3620 | 0 | goto out; |
3621 | | |
3622 | 0 | src->data_device = data_device; |
3623 | |
3624 | 0 | r = crypt_compare_dm_devices(cd, sdmd, &tdmd); |
3625 | 0 | if (r) { |
3626 | 0 | log_err(cd, _("Crypt devices mismatch.")); |
3627 | 0 | goto out; |
3628 | 0 | } |
3629 | | |
3630 | | /* Changing the read-only flag for an active device makes no sense */ |
3631 | 0 | if (tdmd.flags & CRYPT_ACTIVATE_READONLY) |
3632 | 0 | sdmd->flags |= CRYPT_ACTIVATE_READONLY; |
3633 | 0 | else |
3634 | 0 | sdmd->flags &= ~CRYPT_ACTIVATE_READONLY; |
3635 | |
3636 | 0 | if (tdmdi.flags & CRYPT_ACTIVATE_READONLY) |
3637 | 0 | sdmdi->flags |= CRYPT_ACTIVATE_READONLY; |
3638 | 0 | else |
3639 | 0 | sdmdi->flags &= ~CRYPT_ACTIVATE_READONLY; |
3640 | | |
3641 | | /* |
3642 | | * To make it easier, pass the src key directly after |
3643 | | * it was properly verified by the crypt_compare_dm_devices |
3644 | | * call above. |
3645 | | */ |
3646 | 0 | crypt_free_volume_key(tgt->u.crypt.vk); |
3647 | 0 | tgt->u.crypt.vk = src->u.crypt.vk; |
3648 | |
3649 | 0 | r = device_block_adjust(cd, src->data_device, DEV_OK, |
3650 | 0 | src->u.crypt.offset, &sdmd->size, NULL); |
3651 | 0 | if (r) |
3652 | 0 | goto out; |
3653 | | |
3654 | 0 | tdmd.flags = sdmd->flags; |
3655 | 0 | tdmd.size = sdmd->size; |
3656 | |
3657 | 0 | if ((r = dm_reload_device(cd, iname, sdmdi, 0, 0))) { |
3658 | 0 | log_err(cd, _("Failed to reload device %s."), iname); |
3659 | 0 | goto out; |
3660 | 0 | } |
3661 | | |
3662 | 0 | if ((r = dm_reload_device(cd, name, &tdmd, 0, 0))) { |
3663 | 0 | log_err(cd, _("Failed to reload device %s."), name); |
3664 | 0 | clear = true; |
3665 | 0 | goto out; |
3666 | 0 | } |
3667 | | |
3668 | 0 | if ((r = dm_suspend_device(cd, name, 0))) { |
3669 | 0 | log_err(cd, _("Failed to suspend device %s."), name); |
3670 | 0 | clear = true; |
3671 | 0 | goto out; |
3672 | 0 | } |
3673 | | |
3674 | 0 | if ((r = dm_suspend_device(cd, iname, 0))) { |
3675 | 0 | log_err(cd, _("Failed to suspend device %s."), iname); |
3676 | 0 | clear = true; |
3677 | 0 | goto out; |
3678 | 0 | } |
3679 | | |
3680 | 0 | if ((r = dm_resume_device(cd, iname, act2dmflags(sdmdi->flags)))) { |
3681 | 0 | log_err(cd, _("Failed to resume device %s."), iname); |
3682 | 0 | clear = true; |
3683 | 0 | goto out; |
3684 | 0 | } |
3685 | | |
3686 | 0 | r = dm_resume_device(cd, name, act2dmflags(tdmd.flags)); |
3687 | 0 | if (!r) |
3688 | 0 | goto out; |
3689 | | |
3690 | | /* |
3691 | | * This is the worst-case scenario. We have an active underlying dm-integrity device with |
3692 | | * a new table, but the dm-crypt resume failed for some reason. Tear everything down and |
3693 | | * burn it for good. |
3694 | | */ |
3695 | | |
3696 | 0 | log_err(cd, _("Fatal error while reloading device %s (on top of device %s)."), name, iname); |
3697 | |
3698 | 0 | if (dm_error_device(cd, name)) |
3699 | 0 | log_err(cd, _("Failed to switch device %s to dm-error."), name); |
3700 | 0 | if (dm_error_device(cd, iname)) |
3701 | 0 | log_err(cd, _("Failed to switch device %s to dm-error."), iname); |
3702 | 0 | out: |
3703 | 0 | if (clear) { |
3704 | 0 | dm_clear_device(cd, name); |
3705 | 0 | dm_clear_device(cd, iname); |
3706 | |
3707 | 0 | if (dm_status_suspended(cd, name) > 0) |
3708 | 0 | dm_resume_device(cd, name, 0); |
3709 | 0 | if (dm_status_suspended(cd, iname) > 0) |
3710 | 0 | dm_resume_device(cd, iname, 0); |
3711 | 0 | } |
3712 | | |
3713 | | /* otherwise dm_targets_free would free src key */ |
3714 | 0 | if (tgt->u.crypt.vk == src->u.crypt.vk) |
3715 | 0 | tgt->u.crypt.vk = NULL; |
3716 | 0 | dm_targets_free(cd, &tdmd); |
3717 | 0 | dm_targets_free(cd, &tdmdi); |
3718 | 0 | free(CONST_CAST(void*)tdmdi.uuid); |
3719 | 0 | free(CONST_CAST(void*)tdmd.uuid); |
3720 | 0 | device_free(cd, data_device); |
3721 | |
3722 | 0 | return r; |
3723 | 0 | } |
3724 | | |
3725 | | int crypt_resize(struct crypt_device *cd, const char *name, uint64_t new_size) |
3726 | 0 | { |
3727 | 0 | struct crypt_dm_active_device dmdq, dmd = {}; |
3728 | 0 | struct dm_target *tgt = &dmdq.segment; |
3729 | 0 | struct crypt_params_integrity params = {}; |
3730 | 0 | uint64_t supported_flags = 0, dmflags = 0; |
3731 | 0 | uint64_t old_size; |
3732 | 0 | int r; |
3733 | | |
3734 | | /* Device context type must be initialized */ |
3735 | 0 | if (!cd || !cd->type || !name) |
3736 | 0 | return -EINVAL; |
3737 | | |
3738 | 0 | if (isTCRYPT(cd->type) || isBITLK(cd->type)) { |
3739 | 0 | log_err(cd, _("This operation is not supported for this device type.")); |
3740 | 0 | return -ENOTSUP; |
3741 | 0 | } |
3742 | | |
3743 | 0 | if (isLUKS2(cd->type) && !LUKS2_segments_dynamic_size(&cd->u.luks2.hdr)) { |
3744 | 0 | log_err(cd, _("Can not resize LUKS2 device with static size.")); |
3745 | 0 | return -EINVAL; |
3746 | 0 | } |
3747 | | |
3748 | 0 | if (isLUKS2(cd->type) && crypt_get_integrity_tag_size(cd)) { |
3749 | 0 | log_err(cd, _("Resize of LUKS2 device with integrity protection is not supported.")); |
3750 | 0 | return -ENOTSUP; |
3751 | 0 | } |
3752 | | |
3753 | 0 | if (new_size) |
3754 | 0 | log_dbg(cd, "Resizing device %s to %" PRIu64 " sectors.", name, new_size); |
3755 | 0 | else |
3756 | 0 | log_dbg(cd, "Resizing device %s to underlying device size.", name); |
3757 | |
3758 | 0 | r = dm_query_device(cd, name, DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY | |
3759 | 0 | DM_ACTIVE_INTEGRITY_PARAMS | DM_ACTIVE_JOURNAL_CRYPT_KEY | |
3760 | 0 | DM_ACTIVE_JOURNAL_MAC_KEY, &dmdq); |
3761 | 0 | if (r < 0) { |
3762 | 0 | log_err(cd, _("Device %s is not active."), name); |
3763 | 0 | return -EINVAL; |
3764 | 0 | } |
3765 | 0 | if (!single_segment(&dmdq) || (tgt->type != DM_CRYPT && tgt->type != DM_INTEGRITY)) { |
3766 | 0 | log_dbg(cd, "Unsupported device table detected in %s.", name); |
3767 | 0 | r = -EINVAL; |
3768 | 0 | goto out; |
3769 | 0 | } |
3770 | | |
3771 | 0 | if ((dmdq.flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_key_in_keyring(cd)) { |
3772 | 0 | r = -EPERM; |
3773 | 0 | goto out; |
3774 | 0 | } |
3775 | | |
3776 | 0 | if (crypt_key_in_keyring(cd)) { |
3777 | 0 | if (isLUKS2(cd->type)) |
3778 | 0 | r = LUKS2_key_description_by_segment(cd, &cd->u.luks2.hdr, |
3779 | 0 | tgt->u.crypt.vk, CRYPT_DEFAULT_SEGMENT); |
3780 | 0 | else if (isPLAIN(cd->type)) |
3781 | 0 | r = 0; /* key description was set on table load */ |
3782 | 0 | else |
3783 | 0 | r = -EINVAL; |
3784 | 0 | if (r < 0) |
3785 | 0 | goto out; |
3786 | | |
3787 | 0 | dmdq.flags |= CRYPT_ACTIVATE_KEYRING_KEY; |
3788 | 0 | } |
3789 | | |
3790 | 0 | if (crypt_loop_device(crypt_get_device_name(cd))) { |
3791 | 0 | log_dbg(cd, "Trying to resize underlying loop device %s.", |
3792 | 0 | crypt_get_device_name(cd)); |
3793 | | /* Here we always use the default size, not new_size */ |
3794 | 0 | if (crypt_loop_resize(crypt_get_device_name(cd))) |
3795 | 0 | log_err(cd, _("Cannot resize loop device.")); |
3796 | 0 | } |
3797 | | |
3798 | | |
3799 | | /* |
3800 | | * Integrity device metadata is maintained by the kernel. We need to |
3801 | | * reload the device (with the same parameters) and let the kernel |
3802 | | * calculate the maximum size of the integrity device and store it in the |
3803 | | * superblock. |
3804 | | */ |
3805 | 0 | if (!new_size && tgt->type == DM_INTEGRITY) { |
3806 | 0 | r = INTEGRITY_data_sectors(cd, crypt_metadata_device(cd), |
3807 | 0 | crypt_get_data_offset(cd) * SECTOR_SIZE, &old_size); |
3808 | 0 | if (r < 0) |
3809 | 0 | return r; |
3810 | | |
3811 | 0 | dmd.size = dmdq.size; |
3812 | 0 | dmd.flags = dmdq.flags | CRYPT_ACTIVATE_REFRESH | CRYPT_ACTIVATE_PRIVATE; |
3813 | |
|
3814 | 0 | r = crypt_get_integrity_info(cd, ¶ms); |
3815 | 0 | if (r) |
3816 | 0 | goto out; |
3817 | | |
3818 | 0 | r = dm_integrity_target_set(cd, &dmd.segment, 0, dmdq.segment.size, |
3819 | 0 | crypt_metadata_device(cd), crypt_data_device(cd), |
3820 | 0 | crypt_get_integrity_tag_size(cd), crypt_get_data_offset(cd), |
3821 | 0 | crypt_get_sector_size(cd), tgt->u.integrity.vk, tgt->u.integrity.journal_crypt_key, |
3822 | 0 | tgt->u.integrity.journal_integrity_key, ¶ms); |
3823 | 0 | if (r) |
3824 | 0 | goto out; |
3825 | | /* Backend device cannot be smaller here, device_block_adjust() will fail if so. */ |
3826 | 0 | r = _reload_device(cd, name, &dmd, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH); |
3827 | 0 | if (r) |
3828 | 0 | goto out; |
3829 | | |
3830 | 0 | r = INTEGRITY_data_sectors(cd, crypt_metadata_device(cd), |
3831 | 0 | crypt_get_data_offset(cd) * SECTOR_SIZE, &new_size); |
3832 | 0 | if (r < 0) |
3833 | 0 | return r; |
3834 | 0 | log_dbg(cd, "Maximum integrity device size from kernel %" PRIu64, new_size); |
3835 | |
3836 | 0 | if (old_size == new_size && new_size == dmdq.size && |
3837 | 0 | !dm_flags(cd, tgt->type, &supported_flags) && |
3838 | 0 | !(supported_flags & DM_INTEGRITY_RESIZE_SUPPORTED)) |
3839 | 0 | log_std(cd, _("WARNING: Maximum size already set or kernel doesn't support resize.\n")); |
3840 | 0 | } |
3841 | | |
3842 | 0 | r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK, |
3843 | 0 | crypt_get_data_offset(cd), &new_size, &dmdq.flags); |
3844 | 0 | if (r) |
3845 | 0 | goto out; |
3846 | | |
3847 | 0 | if (MISALIGNED(new_size, (tgt->type == DM_CRYPT ? tgt->u.crypt.sector_size : tgt->u.integrity.sector_size) >> SECTOR_SHIFT)) { |
3848 | 0 | log_err(cd, _("Device size is not aligned to requested sector size.")); |
3849 | 0 | r = -EINVAL; |
3850 | 0 | goto out; |
3851 | 0 | } |
3852 | | |
3853 | 0 | if (MISALIGNED(new_size, device_block_size(cd, crypt_data_device(cd)) >> SECTOR_SHIFT)) { |
3854 | 0 | log_err(cd, _("Device size is not aligned to device logical block size.")); |
3855 | 0 | r = -EINVAL; |
3856 | 0 | goto out; |
3857 | 0 | } |
3858 | | |
3859 | 0 | dmd.uuid = crypt_get_uuid(cd); |
3860 | 0 | dmd.size = new_size; |
3861 | 0 | dmd.flags = dmdq.flags | CRYPT_ACTIVATE_REFRESH; |
3862 | |
3863 | 0 | if (tgt->type == DM_CRYPT) { |
3864 | 0 | r = dm_crypt_target_set(&dmd.segment, 0, new_size, crypt_data_device(cd), |
3865 | 0 | tgt->u.crypt.vk, crypt_get_cipher_spec(cd), |
3866 | 0 | crypt_get_iv_offset(cd), crypt_get_data_offset(cd), |
3867 | 0 | crypt_get_integrity(cd), crypt_get_integrity_key_size(cd, true), crypt_get_integrity_tag_size(cd), |
3868 | 0 | crypt_get_sector_size(cd)); |
3869 | 0 | if (r < 0) |
3870 | 0 | goto out; |
3871 | 0 | } else if (tgt->type == DM_INTEGRITY) { |
3872 | 0 | r = crypt_get_integrity_info(cd, ¶ms); |
3873 | 0 | if (r) |
3874 | 0 | goto out; |
3875 | | |
3876 | 0 | r = dm_integrity_target_set(cd, &dmd.segment, 0, new_size, |
3877 | 0 | crypt_metadata_device(cd), crypt_data_device(cd), |
3878 | 0 | crypt_get_integrity_tag_size(cd), crypt_get_data_offset(cd), |
3879 | 0 | crypt_get_sector_size(cd), tgt->u.integrity.vk, tgt->u.integrity.journal_crypt_key, |
3880 | 0 | tgt->u.integrity.journal_integrity_key, ¶ms); |
3881 | 0 | if (r) |
3882 | 0 | goto out; |
3883 | 0 | } |
3884 | | |
3885 | 0 | if (new_size == dmdq.size) { |
3886 | 0 | log_dbg(cd, "Device has already requested size %" PRIu64 |
3887 | 0 | " sectors.", dmdq.size); |
3888 | 0 | r = 0; |
3889 | 0 | } else { |
3890 | 0 | if (isTCRYPT(cd->type)) |
3891 | 0 | r = -ENOTSUP; |
3892 | 0 | else if (isLUKS2(cd->type)) |
3893 | 0 | r = LUKS2_unmet_requirements(cd, &cd->u.luks2.hdr, 0, 0); |
3894 | |
3895 | 0 | if (!r) { |
3896 | | /* Skip flush and lockfs if extending device */ |
3897 | 0 | if (new_size > dmdq.size) |
3898 | 0 | dmflags = DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH; |
3899 | 0 | r = _reload_device(cd, name, &dmd, dmflags); |
3900 | 0 | } |
3901 | |
3902 | 0 | if (r && tgt->type == DM_INTEGRITY && |
3903 | 0 | !dm_flags(cd, tgt->type, &supported_flags) && |
3904 | 0 | !(supported_flags & DM_INTEGRITY_RESIZE_SUPPORTED)) |
3905 | 0 | log_err(cd, _("Resize failed, the kernel doesn't support it.")); |
3906 | 0 | } |
3907 | 0 | out: |
3908 | 0 | dm_targets_free(cd, &dmd); |
3909 | 0 | dm_targets_free(cd, &dmdq); |
3910 | |
3911 | 0 | return r; |
3912 | 0 | } |
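
A sketch of the usual resize flow: initialize the context from the active mapping name, then grow the mapping to the full size of the underlying device. The mapping name is supplied by the caller; new_size is given in 512-byte sectors and 0 means "use all available space".

#include <libcryptsetup.h>

/* Hypothetical helper: grow an active crypt mapping to its maximum size. */
static int example_resize(const char *name)
{
	struct crypt_device *cd;
	int r = crypt_init_by_name(&cd, name);

	if (r < 0)
		return r;
	r = crypt_resize(cd, name, 0);
	crypt_free(cd);
	return r;
}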
3913 | | |
3914 | | int crypt_set_uuid(struct crypt_device *cd, const char *uuid) |
3915 | 0 | { |
3916 | 0 | const char *active_uuid; |
3917 | 0 | int r; |
3918 | |
3919 | 0 | log_dbg(cd, "%s device uuid.", uuid ? "Setting new" : "Refreshing"); |
3920 | |
3921 | 0 | if ((r = onlyLUKS(cd))) |
3922 | 0 | return r; |
3923 | | |
3924 | 0 | active_uuid = crypt_get_uuid(cd); |
3925 | |
3926 | 0 | if (uuid && active_uuid && !strncmp(uuid, active_uuid, UUID_STRING_L)) { |
3927 | 0 | log_dbg(cd, "UUID is the same as requested (%s) for device %s.", |
3928 | 0 | uuid, mdata_device_path(cd)); |
3929 | 0 | return 0; |
3930 | 0 | } |
3931 | | |
3932 | 0 | if (uuid) |
3933 | 0 | log_dbg(cd, "Requested new UUID change to %s for %s.", uuid, mdata_device_path(cd)); |
3934 | 0 | else |
3935 | 0 | log_dbg(cd, "Requested new UUID refresh for %s.", mdata_device_path(cd)); |
3936 | |
3937 | 0 | if (!crypt_confirm(cd, _("Do you really want to change UUID of device?"))) |
3938 | 0 | return -EPERM; |
3939 | | |
3940 | 0 | if (isLUKS1(cd->type)) |
3941 | 0 | return LUKS_hdr_uuid_set(&cd->u.luks1.hdr, uuid, cd); |
3942 | 0 | else |
3943 | 0 | return LUKS2_hdr_uuid(cd, &cd->u.luks2.hdr, uuid); |
3944 | 0 | } |
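
A brief sketch; the UUID string below is an example value, and the "Refreshing" branch above indicates that passing NULL generates a fresh UUID instead.

#include <libcryptsetup.h>

/* Hypothetical helper: stamp a known UUID onto an existing LUKS header. */
static int example_set_uuid(struct crypt_device *cd)
{
	return crypt_set_uuid(cd, "11111111-2222-3333-4444-555555555555");
}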
3945 | | |
3946 | | int crypt_set_label(struct crypt_device *cd, const char *label, const char *subsystem) |
3947 | 0 | { |
3948 | 0 | int r; |
3949 | |
3950 | 0 | log_dbg(cd, "Setting new labels."); |
3951 | |
3952 | 0 | if ((r = onlyLUKS2(cd))) |
3953 | 0 | return r; |
3954 | | |
3955 | 0 | return LUKS2_hdr_labels(cd, &cd->u.luks2.hdr, label, subsystem, 1); |
3956 | 0 | } |
3957 | | |
3958 | | const char *crypt_get_label(struct crypt_device *cd) |
3959 | 0 | { |
3960 | 0 | if (_onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0)) |
3961 | 0 | return NULL; |
3962 | | |
3963 | 0 | return cd->u.luks2.hdr.label; |
3964 | 0 | } |
3965 | | |
3966 | | const char *crypt_get_subsystem(struct crypt_device *cd) |
3967 | 0 | { |
3968 | 0 | if (_onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0)) |
3969 | 0 | return NULL; |
3970 | | |
3971 | 0 | return cd->u.luks2.hdr.subsystem; |
3972 | 0 | } |
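
A sketch of setting and reading back LUKS2 labels; the label and subsystem strings are example values.

#include <errno.h>
#include <libcryptsetup.h>

/* Hypothetical helper: tag a LUKS2 header and verify the label is readable. */
static int example_labels(struct crypt_device *cd)
{
	int r = crypt_set_label(cd, "backup-disk", "example-subsystem");

	if (r < 0)
		return r;
	return crypt_get_label(cd) ? 0 : -EINVAL;
}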
3973 | | |
3974 | | int crypt_header_backup(struct crypt_device *cd, |
3975 | | const char *requested_type, |
3976 | | const char *backup_file) |
3977 | 0 | { |
3978 | 0 | int r; |
3979 | |
3980 | 0 | if (requested_type && !isLUKS(requested_type)) |
3981 | 0 | return -EINVAL; |
3982 | | |
3983 | 0 | if (!backup_file) |
3984 | 0 | return -EINVAL; |
3985 | | |
3986 | | /* Load the header (without repair) */ |
3987 | 0 | r = _crypt_load_luks(cd, requested_type, false, false); |
3988 | 0 | if (r < 0) |
3989 | 0 | return r; |
3990 | | |
3991 | 0 | log_dbg(cd, "Requested header backup of device %s (%s) to " |
3992 | 0 | "file %s.", mdata_device_path(cd), requested_type ?: "any type", backup_file); |
3993 | |
3994 | 0 | if (isLUKS1(cd->type) && (!requested_type || isLUKS1(requested_type))) |
3995 | 0 | r = LUKS_hdr_backup(backup_file, cd); |
3996 | 0 | else if (isLUKS2(cd->type) && (!requested_type || isLUKS2(requested_type))) |
3997 | 0 | r = LUKS2_hdr_backup(cd, &cd->u.luks2.hdr, backup_file); |
3998 | 0 | else |
3999 | 0 | r = -EINVAL; |
4000 | |
4001 | 0 | return r; |
4002 | 0 | } |
4003 | | |
4004 | | int crypt_header_restore(struct crypt_device *cd, |
4005 | | const char *requested_type, |
4006 | | const char *backup_file) |
4007 | 0 | { |
4008 | 0 | struct luks_phdr hdr1; |
4009 | 0 | struct luks2_hdr hdr2; |
4010 | 0 | int r, version; |
4011 | |
4012 | 0 | if (requested_type && !isLUKS(requested_type)) |
4013 | 0 | return -EINVAL; |
4014 | | |
4015 | 0 | if (!cd || (cd->type && !isLUKS(cd->type)) || !backup_file) |
4016 | 0 | return -EINVAL; |
4017 | | |
4018 | 0 | r = init_crypto(cd); |
4019 | 0 | if (r < 0) |
4020 | 0 | return r; |
4021 | | |
4022 | 0 | log_dbg(cd, "Requested header restore to device %s (%s) from " |
4023 | 0 | "file %s.", mdata_device_path(cd), requested_type ?: "any type", backup_file); |
4024 | |
4025 | 0 | version = LUKS2_hdr_version_unlocked(cd, backup_file); |
4026 | 0 | if (!version || |
4027 | 0 | (requested_type && version == 1 && !isLUKS1(requested_type)) || |
4028 | 0 | (requested_type && version == 2 && !isLUKS2(requested_type))) { |
4029 | 0 | log_err(cd, _("Header backup file does not contain compatible LUKS header.")); |
4030 | 0 | return -EINVAL; |
4031 | 0 | } |
4032 | | |
4033 | 0 | memset(&hdr2, 0, sizeof(hdr2)); |
4034 | |
4035 | 0 | if (!cd->type) { |
4036 | 0 | if (version == 1) |
4037 | 0 | r = LUKS_hdr_restore(backup_file, &hdr1, cd); |
4038 | 0 | else |
4039 | 0 | r = LUKS2_hdr_restore(cd, &hdr2, backup_file); |
4040 | |
4041 | 0 | crypt_safe_memzero(&hdr1, sizeof(hdr1)); |
4042 | 0 | crypt_safe_memzero(&hdr2, sizeof(hdr2)); |
4043 | 0 | } else if (isLUKS2(cd->type) && (!requested_type || isLUKS2(requested_type))) { |
4044 | 0 | r = LUKS2_hdr_restore(cd, &cd->u.luks2.hdr, backup_file); |
4045 | 0 | if (r) |
4046 | 0 | (void) _crypt_load_luks2(cd, 1, 0); |
4047 | 0 | } else if (isLUKS1(cd->type) && (!requested_type || isLUKS1(requested_type))) |
4048 | 0 | r = LUKS_hdr_restore(backup_file, &cd->u.luks1.hdr, cd); |
4049 | 0 | else |
4050 | 0 | r = -EINVAL; |
4051 | |
4052 | 0 | if (!r) |
4053 | 0 | r = _crypt_load_luks(cd, version == 1 ? CRYPT_LUKS1 : CRYPT_LUKS2, false, true); |
4054 | |
4055 | 0 | return r; |
4056 | 0 | } |
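
A sketch of a backup/restore round trip; the backup path comes from the caller and NULL requested_type accepts either LUKS1 or LUKS2, matching the checks above.

#include <libcryptsetup.h>

/* Hypothetical helper: save the LUKS header to a file and restore it later. */
static int example_header_roundtrip(struct crypt_device *cd, const char *backup_file)
{
	int r = crypt_header_backup(cd, NULL, backup_file);

	if (r < 0)
		return r;
	return crypt_header_restore(cd, NULL, backup_file);
}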
4057 | | |
4058 | | int crypt_header_is_detached(struct crypt_device *cd) |
4059 | 0 | { |
4060 | 0 | int r; |
4061 | |
4062 | 0 | if (!cd || (cd->type && !isLUKS(cd->type))) |
4063 | 0 | return -EINVAL; |
4064 | | |
4065 | 0 | r = device_is_identical(crypt_data_device(cd), crypt_metadata_device(cd)); |
4066 | 0 | if (r < 0) { |
4067 | 0 | log_dbg(cd, "Failed to compare data and metadata devices path."); |
4068 | 0 | return r; |
4069 | 0 | } |
4070 | | |
4071 | 0 | return r ? 0 : 1; |
4072 | 0 | } |
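
A small sketch relying on the return convention above: 1 means the header is detached from the data device, 0 means it is attached, and a negative value is an error.

#include <libcryptsetup.h>

/* Hypothetical helper: treat errors as "not detached". */
static int example_has_detached_header(struct crypt_device *cd)
{
	return crypt_header_is_detached(cd) > 0;
}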
4073 | | |
4074 | | void crypt_free(struct crypt_device *cd) |
4075 | 1.90k | { |
4076 | 1.90k | if (!cd) |
4077 | 0 | return; |
4078 | | |
4079 | 1.90k | log_dbg(cd, "Releasing crypt device %s context.", mdata_device_path(cd) ?: "empty"); |
4080 | | |
4081 | 1.90k | dm_backend_exit(cd); |
4082 | 0 | crypt_free_volume_key(cd->volume_key); |
4083 | |
4084 | 0 | crypt_free_type(cd, NULL); |
4085 | |
4086 | 0 | device_free(cd, cd->device); |
4087 | 0 | device_free(cd, cd->metadata_device); |
4088 | |
4089 | 0 | free(CONST_CAST(void*)cd->pbkdf.type); |
4090 | 0 | free(CONST_CAST(void*)cd->pbkdf.hash); |
4091 | 0 | free(CONST_CAST(void*)cd->user_key_name1); |
4092 | 0 | free(CONST_CAST(void*)cd->user_key_name2); |
4093 | | |
4094 | | /* Some structures can contain keys (TCRYPT), wipe the whole context */ |
4095 | 0 | crypt_safe_memzero(cd, sizeof(*cd)); |
4096 | 0 | free(cd); |
4097 | 0 | } |
4098 | | |
4099 | | int crypt_suspend(struct crypt_device *cd, |
4100 | | const char *name) |
4101 | 0 | { |
4102 | 0 | bool dm_opal_uuid; |
4103 | 0 | crypt_status_info ci; |
4104 | 0 | int r; |
4105 | 0 | struct crypt_dm_active_device dmd, dmdi = {}; |
4106 | 0 | uint32_t opal_segment_number = 1; |
4107 | 0 | uint64_t dmflags = DM_SUSPEND_WIPE_KEY; |
4108 | 0 | struct dm_target *tgt = &dmd.segment; |
4109 | 0 | char *iname = NULL; |
4110 | 0 | struct crypt_lock_handle *opal_lh = NULL; |
4111 | |
|
4112 | 0 | if (!cd || !name) |
4113 | 0 | return -EINVAL; |
4114 | | |
4115 | 0 | log_dbg(cd, "Suspending volume %s.", name); |
4116 | |
4117 | 0 | if (cd->type && ((r = onlyLUKS(cd)) < 0)) |
4118 | 0 | return r; |
4119 | | |
4120 | 0 | ci = crypt_status(cd, name); |
4121 | 0 | if (ci < CRYPT_ACTIVE) { |
4122 | 0 | log_err(cd, _("Volume %s is not active."), name); |
4123 | 0 | return -EINVAL; |
4124 | 0 | } |
4125 | | |
4126 | 0 | r = dm_query_device(cd, name, |
4127 | 0 | DM_ACTIVE_UUID | DM_ACTIVE_CRYPT_KEY | DM_ACTIVE_CRYPT_KEYSIZE, |
4128 | 0 | &dmd); |
4129 | 0 | if (r < 0) |
4130 | 0 | return r; |
4131 | | |
4132 | 0 | log_dbg(cd, "Checking if active device %s has UUID type LUKS.", name); |
4133 | |
4134 | 0 | r = dm_uuid_type_cmp(dmd.uuid, CRYPT_LUKS2); |
4135 | 0 | if (r < 0) |
4136 | 0 | r = dm_uuid_type_cmp(dmd.uuid, CRYPT_LUKS1); |
4137 | |
4138 | 0 | if (r < 0) { |
4139 | 0 | log_err(cd, _("This operation is supported only for LUKS device.")); |
4140 | 0 | goto out; |
4141 | 0 | } |
4142 | | |
4143 | 0 | r = -EINVAL; |
4144 | |
4145 | 0 | if (isLUKS2(cd->type) && dm_uuid_type_cmp(dmd.uuid, CRYPT_LUKS2)) { |
4146 | 0 | log_dbg(cd, "LUKS device header type: %s mismatches DM device type.", cd->type); |
4147 | 0 | goto out; |
4148 | 0 | } |
4149 | | |
4150 | 0 | if (isLUKS1(cd->type) && dm_uuid_type_cmp(dmd.uuid, CRYPT_LUKS1)) { |
4151 | 0 | log_dbg(cd, "LUKS device header type: %s mismatches DM device type.", cd->type); |
4152 | 0 | goto out; |
4153 | 0 | } |
4154 | | |
4155 | | /* check if active device has LUKS2-OPAL dm uuid prefix */ |
4156 | 0 | dm_opal_uuid = !dm_uuid_type_cmp(dmd.uuid, CRYPT_LUKS2_HW_OPAL); |
4157 | |
4158 | 0 | if (!dm_opal_uuid && isLUKS2(cd->type) && |
4159 | 0 | LUKS2_segment_is_hw_opal(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT)) |
4160 | 0 | goto out; |
4161 | | |
4162 | 0 | if (cd->type && (r = dm_uuid_cmp(dmd.uuid, LUKS_UUID(cd))) < 0) { |
4163 | 0 | log_dbg(cd, "LUKS device header uuid: %s mismatches DM returned uuid %s", |
4164 | 0 | LUKS_UUID(cd), dmd.uuid); |
4165 | 0 | goto out; |
4166 | 0 | } |
4167 | | |
4168 | | /* check UUID of integrity device underneath crypt device */ |
4169 | 0 | if (crypt_get_integrity_tag_size(cd)) |
4170 | 0 | iname = dm_get_active_iname(cd, name); |
4171 | |
4172 | 0 | r = dm_status_suspended(cd, name); |
4173 | 0 | if (r < 0) |
4174 | 0 | goto out; |
4175 | | |
4176 | 0 | if (r) { |
4177 | 0 | log_err(cd, _("Volume %s is already suspended."), name); |
4178 | 0 | r = -EINVAL; |
4179 | 0 | goto out; |
4180 | 0 | } |
4181 | | |
4182 | 0 | if (dm_opal_uuid && crypt_data_device(cd)) { |
4183 | 0 | if (isLUKS2(cd->type)) { |
4184 | 0 | r = LUKS2_get_opal_segment_number(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, &opal_segment_number); |
4185 | 0 | if (r < 0) |
4186 | 0 | goto out; |
4187 | 0 | } else { |
4188 | | /* Guess OPAL range number for LUKS2-OPAL device with missing header */ |
4189 | 0 | r = crypt_dev_get_partition_number(device_path(crypt_data_device(cd))); |
4190 | 0 | if (r > 0) |
4191 | 0 | opal_segment_number = r; |
4192 | 0 | } |
4193 | 0 | } |
4194 | | |
4195 | | /* We cannot simply wipe wrapped keys; HW OPAL-only encryption does not use the dm-crypt target */ |
4196 | 0 | if (crypt_cipher_wrapped_key(crypt_get_cipher(cd), crypt_get_cipher_mode(cd)) || |
4197 | 0 | (dm_opal_uuid && tgt->type == DM_LINEAR)) |
4198 | 0 | dmflags &= ~DM_SUSPEND_WIPE_KEY; |
4199 | |
4200 | 0 | r = dm_suspend_device(cd, name, dmflags); |
4201 | 0 | if (r) { |
4202 | 0 | if (r == -ENOTSUP) |
4203 | 0 | log_err(cd, _("Suspend is not supported for device %s."), name); |
4204 | 0 | else |
4205 | 0 | log_err(cd, _("Error during suspending device %s."), name); |
4206 | 0 | goto out; |
4207 | 0 | } |
4208 | | |
4209 | | /* Suspend integrity device underneath; keep crypt suspended if it fails */ |
4210 | 0 | if (iname) { |
4211 | 0 | r = dm_suspend_device(cd, iname, 0); |
4212 | 0 | if (r) |
4213 | 0 | log_err(cd, _("Error during suspending device %s."), iname); |
4214 | 0 | } |
4215 | |
4216 | 0 | if (single_segment(&dmd) && tgt->type == DM_CRYPT) |
4217 | 0 | crypt_volume_key_drop_kernel_key(cd, tgt->u.crypt.vk); |
4218 | |
4219 | 0 | if (dm_opal_uuid && crypt_data_device(cd)) { |
4220 | 0 | r = opal_exclusive_lock(cd, crypt_data_device(cd), &opal_lh); |
4221 | 0 | if (r < 0) { |
4222 | 0 | log_err(cd, _("Failed to acquire OPAL lock on device %s."), device_path(crypt_data_device(cd))); |
4223 | 0 | goto out; |
4224 | 0 | } |
4225 | 0 | } |
4226 | | |
4227 | 0 | if (dm_opal_uuid && (!crypt_data_device(cd) || opal_lock(cd, crypt_data_device(cd), opal_segment_number))) |
4228 | 0 | log_err(cd, _("Device %s was suspended but hardware OPAL device cannot be locked."), name); |
4229 | 0 | out: |
4230 | 0 | opal_exclusive_unlock(cd, opal_lh); |
4231 | 0 | free(iname); |
4232 | 0 | dm_targets_free(cd, &dmd); |
4233 | 0 | dm_targets_free(cd, &dmdi); |
4234 | 0 | free(CONST_CAST(void*)dmd.uuid); |
4235 | 0 | free(CONST_CAST(void*)dmdi.uuid); |
4236 | 0 | return r; |
4237 | 0 | } |
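
A sketch of the suspend/resume pair; the mapping name is an example and the passphrase handling is simplified.

#include <libcryptsetup.h>

/* Hypothetical helper: wipe the key and freeze I/O, then resume with a passphrase. */
static int example_suspend_resume(struct crypt_device *cd,
				  const char *passphrase, size_t passphrase_size)
{
	int r = crypt_suspend(cd, "luks-root");

	if (r < 0)
		return r;
	return crypt_resume_by_passphrase(cd, "luks-root", CRYPT_ANY_SLOT,
					  passphrase, passphrase_size);
}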
4238 | | |
4239 | | static int resume_luks1_by_volume_key(struct crypt_device *cd, |
4240 | | struct volume_key *vk, |
4241 | | const char *name) |
4242 | 0 | { |
4243 | 0 | int r; |
4244 | 0 | struct volume_key *zerokey = NULL; |
4245 | |
4246 | 0 | assert(vk && crypt_volume_key_get_id(vk) == 0); |
4247 | 0 | assert(name); |
4248 | | |
4249 | 0 | if (crypt_is_cipher_null(crypt_get_cipher_spec(cd))) { |
4250 | 0 | zerokey = crypt_alloc_volume_key(0, NULL); |
4251 | 0 | if (!zerokey) |
4252 | 0 | return -ENOMEM; |
4253 | 0 | vk = zerokey; |
4254 | 0 | } |
4255 | | |
4256 | 0 | r = dm_resume_and_reinstate_key(cd, name, vk); |
4257 | |
4258 | 0 | if (r == -ENOTSUP) |
4259 | 0 | log_err(cd, _("Resume is not supported for device %s."), name); |
4260 | 0 | else if (r) |
4261 | 0 | log_err(cd, _("Error during resuming device %s."), name); |
4262 | |
4263 | 0 | crypt_free_volume_key(zerokey); |
4264 | |
4265 | 0 | return r; |
4266 | 0 | } |
4267 | | |
4268 | | static void crypt_unlink_key_from_custom_keyring(struct crypt_device *cd, key_serial_t kid) |
4269 | 0 | { |
4270 | 0 | assert(cd); |
4271 | 0 | assert(cd->keyring_to_link_vk); |
4272 | | |
4273 | 0 | log_dbg(cd, "Unlinking volume key (id: %" PRIi32 ") from kernel keyring (id: %" PRIi32 ").", |
4274 | 0 | kid, cd->keyring_to_link_vk); |
4275 | |
4276 | 0 | if (!keyring_unlink_key_from_keyring(kid, cd->keyring_to_link_vk)) |
4277 | 0 | return; |
4278 | | |
4279 | 0 | log_dbg(cd, "keyring_unlink_key_from_keyring failed with errno %d.", errno); |
4280 | 0 | log_err(cd, _("Failed to unlink volume key from user specified keyring.")); |
4281 | 0 | } |
4282 | | |
4283 | | static key_serial_t crypt_single_volume_key_load_in_custom_keyring(struct crypt_device *cd, |
4284 | | struct volume_key *vk, |
4285 | | const char *user_key_name) |
4286 | 0 | { |
4287 | 0 | key_serial_t kid; |
4288 | 0 | const char *type_name; |
4289 | |
4290 | 0 | assert(cd); |
4291 | 0 | assert(cd->link_vk_to_keyring); |
4292 | | |
4293 | 0 | if (!vk || !(type_name = key_type_name(cd->keyring_key_type))) |
4294 | 0 | return -EINVAL; |
4295 | | |
4296 | 0 | log_dbg(cd, "Linking volume key (type %s, name %s) to the specified keyring", |
4297 | 0 | type_name, user_key_name); |
4298 | |
4299 | 0 | kid = keyring_add_key_to_keyring(cd->keyring_key_type, user_key_name, |
4300 | 0 | crypt_volume_key_get_key(vk), |
4301 | 0 | crypt_volume_key_length(vk), |
4302 | 0 | cd->keyring_to_link_vk); |
4303 | 0 | if (kid <= 0) |
4304 | 0 | log_dbg(cd, "The keyring_add_key_to_keyring function failed (error %d).", errno); |
4305 | |
4306 | 0 | return kid; |
4307 | 0 | } |
4308 | | |
4309 | | static int crypt_volume_key_load_in_custom_keyring(struct crypt_device *cd, |
4310 | | struct volume_key *vk, |
4311 | | key_serial_t *kid1_out, |
4312 | | key_serial_t *kid2_out) |
4313 | 0 | { |
4314 | 0 | key_serial_t kid1, kid2 = 0; |
4315 | |
4316 | 0 | assert(cd); |
4317 | 0 | assert(cd->link_vk_to_keyring); |
4318 | 0 | assert(cd->user_key_name1); |
4319 | | |
4320 | 0 | if (!vk || !key_type_name(cd->keyring_key_type)) |
4321 | 0 | return -EINVAL; |
4322 | | |
4323 | 0 | kid1 = crypt_single_volume_key_load_in_custom_keyring(cd, vk, cd->user_key_name1); |
4324 | 0 | if (kid1 <= 0) |
4325 | 0 | return -EINVAL; |
4326 | | |
4327 | 0 | vk = crypt_volume_key_next(vk); |
4328 | 0 | if (vk) { |
4329 | 0 | assert(cd->user_key_name2); |
4330 | 0 | kid2 = crypt_single_volume_key_load_in_custom_keyring(cd, vk, cd->user_key_name2); |
4331 | 0 | if (kid2 <= 0) { |
4332 | 0 | crypt_unlink_key_from_custom_keyring(cd, kid1); |
4333 | 0 | return -EINVAL; |
4334 | 0 | } |
4335 | 0 | } |
4336 | | |
4337 | 0 | *kid2_out = kid2; |
4338 | 0 | *kid1_out = kid1; |
4339 | 0 | return 0; |
4340 | 0 | } |
4341 | | |
4342 | | static int resume_luks2_by_volume_key(struct crypt_device *cd, |
4343 | | int digest, |
4344 | | struct volume_key *vk, |
4345 | | const char *name) |
4346 | 0 | { |
4347 | 0 | bool use_keyring; |
4348 | 0 | int r, enc_type; |
4349 | 0 | uint32_t opal_segment_number; |
4350 | 0 | struct volume_key *p_crypt = vk, *p_opal = NULL, *zerokey = NULL, *crypt_key = NULL, *opal_key = NULL; |
4351 | 0 | char *iname = NULL; |
4352 | 0 | struct crypt_lock_handle *opal_lh = NULL; |
4353 | 0 | key_serial_t kid1 = 0, kid2 = 0; |
4354 | |
4355 | 0 | assert(digest >= 0); |
4356 | 0 | assert(vk && crypt_volume_key_get_id(vk) == digest); |
4357 | 0 | assert(name); |
4358 | | |
4359 | 0 | enc_type = crypt_get_hw_encryption_type(cd); |
4360 | 0 | if (enc_type < 0) |
4361 | 0 | return enc_type; |
4362 | | |
4363 | 0 | use_keyring = crypt_use_keyring_for_vk(cd); |
4364 | |
4365 | 0 | if (enc_type == CRYPT_OPAL_HW_ONLY || enc_type == CRYPT_SW_AND_OPAL_HW) { |
4366 | 0 | r = LUKS2_get_opal_segment_number(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, |
4367 | 0 | &opal_segment_number); |
4368 | 0 | if (r < 0) |
4369 | 0 | return r; |
4370 | | |
4371 | 0 | r = LUKS2_split_crypt_and_opal_keys(cd, &cd->u.luks2.hdr, |
4372 | 0 | vk, &crypt_key, |
4373 | 0 | &opal_key); |
4374 | 0 | if (r < 0) |
4375 | 0 | return r; |
4376 | | |
4377 | 0 | p_crypt = crypt_key; |
4378 | 0 | p_opal = opal_key ?: vk; |
4379 | 0 | } |
4380 | | |
4381 | 0 | if (enc_type != CRYPT_OPAL_HW_ONLY && crypt_is_cipher_null(crypt_get_cipher_spec(cd))) { |
4382 | 0 | zerokey = crypt_alloc_volume_key(0, NULL); |
4383 | 0 | if (!zerokey) { |
4384 | 0 | r = -ENOMEM; |
4385 | 0 | goto out; |
4386 | 0 | } |
4387 | 0 | p_crypt = zerokey; |
4388 | 0 | use_keyring = false; |
4389 | 0 | } |
4390 | | |
4391 | 0 | if (use_keyring) { |
4392 | 0 | if (p_crypt) { |
4393 | 0 | r = LUKS2_volume_key_load_in_keyring_by_digest(cd, p_crypt, digest); |
4394 | 0 | if (r < 0) |
4395 | 0 | goto out; |
4396 | 0 | } |
4397 | | |
4398 | | /* upload volume key in custom keyring if requested */ |
4399 | 0 | if (cd->link_vk_to_keyring) { |
4400 | 0 | r = crypt_volume_key_load_in_custom_keyring(cd, vk, &kid1, &kid2); |
4401 | 0 | if (r < 0) { |
4402 | 0 | log_err(cd, _("Failed to link volume key in user defined keyring.")); |
4403 | 0 | goto out; |
4404 | 0 | } |
4405 | 0 | } |
4406 | 0 | } |
4407 | | |
4408 | 0 | if (p_opal) { |
4409 | 0 | r = opal_exclusive_lock(cd, crypt_data_device(cd), &opal_lh); |
4410 | 0 | if (r < 0) { |
4411 | 0 | log_err(cd, _("Failed to acquire OPAL lock on device %s."), device_path(crypt_data_device(cd))); |
4412 | 0 | goto out; |
4413 | 0 | } |
4414 | | |
4415 | 0 | r = opal_unlock(cd, crypt_data_device(cd), opal_segment_number, p_opal); |
4416 | 0 | if (r < 0) { |
4417 | 0 | p_opal = NULL; /* do not lock on error path */ |
4418 | 0 | goto out; |
4419 | 0 | } |
4420 | 0 | } |
4421 | | |
4422 | 0 | if (crypt_get_integrity_tag_size(cd) && |
4423 | 0 | (iname = dm_get_active_iname(cd, name))) { |
4424 | 0 | r = dm_resume_device(cd, iname, 0); |
4425 | 0 | if (r) |
4426 | 0 | log_err(cd, _("Error during resuming device %s."), iname); |
4427 | 0 | free(iname); |
4428 | 0 | } |
4429 | |
|
4430 | 0 | if (enc_type == CRYPT_OPAL_HW_ONLY) |
4431 | 0 | r = dm_resume_device(cd, name, 0); |
4432 | 0 | else |
4433 | 0 | r = dm_resume_and_reinstate_key(cd, name, p_crypt); |
4434 | |
|
4435 | 0 | if (r == -ENOTSUP) |
4436 | 0 | log_err(cd, _("Resume is not supported for device %s."), name); |
4437 | 0 | else if (r) |
4438 | 0 | log_err(cd, _("Error during resuming device %s."), name); |
4439 | |
|
4440 | 0 | out: |
4441 | 0 | if (r < 0) { |
4442 | 0 | crypt_drop_uploaded_keyring_key(cd, p_crypt); |
4443 | 0 | if (cd->link_vk_to_keyring && kid1) |
4444 | 0 | crypt_unlink_key_from_custom_keyring(cd, kid1); |
4445 | 0 | if (cd->link_vk_to_keyring && kid2) |
4446 | 0 | crypt_unlink_key_from_custom_keyring(cd, kid2); |
4447 | 0 | } |
4448 | |
|
4449 | 0 | if (r < 0 && p_opal) |
4450 | 0 | opal_lock(cd, crypt_data_device(cd), opal_segment_number); |
4451 | |
|
4452 | 0 | opal_exclusive_unlock(cd, opal_lh); |
4453 | 0 | crypt_free_volume_key(zerokey); |
4454 | 0 | crypt_free_volume_key(opal_key); |
4455 | 0 | crypt_free_volume_key(crypt_key); |
4456 | |
|
4457 | 0 | return r; |
4458 | 0 | } |
4459 | | |
4460 | | /* key must be properly verified */ |
4461 | | static int resume_by_volume_key(struct crypt_device *cd, |
4462 | | struct volume_key *vk, |
4463 | | const char *name) |
4464 | 0 | { |
4465 | 0 | assert(cd); |
4466 | | |
4467 | 0 | if (isLUKS2(cd->type)) |
4468 | 0 | return resume_luks2_by_volume_key(cd, |
4469 | 0 | LUKS2_digest_by_segment(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT), |
4470 | 0 | vk, name); |
4471 | | |
4472 | 0 | if (isLUKS1(cd->type)) |
4473 | 0 | return resume_luks1_by_volume_key(cd, vk, name); |
4474 | | |
4475 | 0 | return -EINVAL; |
4476 | 0 | } |
4477 | | |
4478 | | int crypt_resume_by_keyslot_context(struct crypt_device *cd, |
4479 | | const char *name, |
4480 | | int keyslot, |
4481 | | struct crypt_keyslot_context *kc) |
4482 | 0 | { |
4483 | 0 | int r; |
4484 | 0 | struct volume_key *vk = NULL; |
4485 | 0 | int unlocked_keyslot = -EINVAL; |
4486 | |
|
4487 | 0 | if (!name) |
4488 | 0 | return -EINVAL; |
4489 | | |
4490 | 0 | log_dbg(cd, "Resuming volume %s [keyslot %d] using %s.", name, keyslot, keyslot_context_type_string(kc)); |
4491 | |
|
4492 | 0 | if ((r = onlyLUKS(cd))) |
4493 | 0 | return r; |
4494 | | |
4495 | 0 | r = dm_status_suspended(cd, name); |
4496 | 0 | if (r < 0) |
4497 | 0 | return r; |
4498 | | |
4499 | 0 | if (!r) { |
4500 | 0 | log_err(cd, _("Volume %s is not suspended."), name); |
4501 | 0 | return -EINVAL; |
4502 | 0 | } |
4503 | | |
4504 | 0 | if (isLUKS1(cd->type) && kc->get_luks1_volume_key) |
4505 | 0 | r = kc->get_luks1_volume_key(cd, kc, keyslot, &vk); |
4506 | 0 | else if (isLUKS2(cd->type) && kc->get_luks2_volume_key) |
4507 | 0 | r = kc->get_luks2_volume_key(cd, kc, keyslot, &vk); |
4508 | 0 | else |
4509 | 0 | r = -EINVAL; |
4510 | 0 | if (r < 0) |
4511 | 0 | goto out; |
4512 | 0 | unlocked_keyslot = r; |
4513 | |
|
4514 | 0 | if (isLUKS1(cd->type)) { |
4515 | 0 | r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk); |
4516 | 0 | crypt_volume_key_set_id(vk, 0); |
4517 | 0 | } else if (isLUKS2(cd->type)) { |
4518 | 0 | r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk); |
4519 | 0 | crypt_volume_key_set_id(vk, r); |
4520 | 0 | } else |
4521 | 0 | r = -EINVAL; |
4522 | 0 | if (r < 0) |
4523 | 0 | goto out; |
4524 | | |
4525 | 0 | r = resume_by_volume_key(cd, vk, name); |
4526 | |
|
4527 | 0 | crypt_free_volume_key(vk); |
4528 | 0 | return r < 0 ? r : unlocked_keyslot; |
4529 | 0 | out: |
4530 | 0 | crypt_free_volume_key(vk); |
4531 | 0 | return r; |
4532 | 0 | } |
4533 | | |
4534 | | int crypt_resume_by_passphrase(struct crypt_device *cd, |
4535 | | const char *name, |
4536 | | int keyslot, |
4537 | | const char *passphrase, |
4538 | | size_t passphrase_size) |
4539 | 0 | { |
4540 | 0 | int r; |
4541 | 0 | struct crypt_keyslot_context kc = {}; |
4542 | |
|
4543 | 0 | crypt_keyslot_context_init_by_passphrase_internal(&kc, passphrase, passphrase_size); |
4544 | 0 | r = crypt_resume_by_keyslot_context(cd, name, keyslot, &kc); |
4545 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
4546 | |
|
4547 | 0 | return r; |
4548 | 0 | } |
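
A minimal caller-side sketch of the suspend/resume flow that ends up in the resume helpers above; the mapping name and passphrase are hypothetical and error handling is reduced to return codes:

#include <libcryptsetup.h>
#include <string.h>

static int suspend_and_resume(const char *name, const char *pass)
{
	struct crypt_device *cd = NULL;
	int r;

	/* Build a context from the active device-mapper mapping. */
	r = crypt_init_by_name(&cd, name);
	if (r < 0)
		return r;

	/* Suspend I/O and wipe the volume key from the kernel. */
	r = crypt_suspend(cd, name);
	if (r >= 0)
		/* Unlock any keyslot with the passphrase and resume I/O. */
		r = crypt_resume_by_passphrase(cd, name, CRYPT_ANY_SLOT,
					       pass, strlen(pass));

	crypt_free(cd);
	return r;
}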
4549 | | |
4550 | | int crypt_resume_by_keyfile_device_offset(struct crypt_device *cd, |
4551 | | const char *name, |
4552 | | int keyslot, |
4553 | | const char *keyfile, |
4554 | | size_t keyfile_size, |
4555 | | uint64_t keyfile_offset) |
4556 | 0 | { |
4557 | 0 | int r; |
4558 | 0 | struct crypt_keyslot_context kc = {}; |
4559 | |
|
4560 | 0 | crypt_keyslot_context_init_by_keyfile_internal(&kc, keyfile, keyfile_size, keyfile_offset); |
4561 | 0 | r = crypt_resume_by_keyslot_context(cd, name, keyslot, &kc); |
4562 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
4563 | |
|
4564 | 0 | return r; |
4565 | 0 | } |
4566 | | |
4567 | | int crypt_resume_by_keyfile(struct crypt_device *cd, |
4568 | | const char *name, |
4569 | | int keyslot, |
4570 | | const char *keyfile, |
4571 | | size_t keyfile_size) |
4572 | 0 | { |
4573 | 0 | return crypt_resume_by_keyfile_device_offset(cd, name, keyslot, |
4574 | 0 | keyfile, keyfile_size, 0); |
4575 | 0 | } |
4576 | | |
4577 | | int crypt_resume_by_keyfile_offset(struct crypt_device *cd, |
4578 | | const char *name, |
4579 | | int keyslot, |
4580 | | const char *keyfile, |
4581 | | size_t keyfile_size, |
4582 | | size_t keyfile_offset) |
4583 | 0 | { |
4584 | 0 | return crypt_resume_by_keyfile_device_offset(cd, name, keyslot, |
4585 | 0 | keyfile, keyfile_size, keyfile_offset); |
4586 | 0 | } |
4587 | | |
4588 | | int crypt_resume_by_volume_key(struct crypt_device *cd, |
4589 | | const char *name, |
4590 | | const char *volume_key, |
4591 | | size_t volume_key_size) |
4592 | 0 | { |
4593 | 0 | int r; |
4594 | 0 | struct crypt_keyslot_context kc = {}; |
4595 | |
|
4596 | 0 | crypt_keyslot_context_init_by_key_internal(&kc, volume_key, volume_key_size); |
4597 | 0 | r = crypt_resume_by_keyslot_context(cd, name, CRYPT_ANY_SLOT /* unused */, &kc); |
4598 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
4599 | |
|
4600 | 0 | if (r == -EPERM || r == -ENOENT) |
4601 | 0 | log_err(cd, _("Volume key does not match the volume.")); |
4602 | |
|
4603 | 0 | return r; |
4604 | 0 | } |
4605 | | |
4606 | | int crypt_resume_by_token_pin(struct crypt_device *cd, const char *name, |
4607 | | const char *type, int token, const char *pin, size_t pin_size, |
4608 | | void *usrptr) |
4609 | 0 | { |
4610 | 0 | int r; |
4611 | 0 | struct crypt_keyslot_context kc = {}; |
4612 | |
|
4613 | 0 | crypt_keyslot_context_init_by_token_internal(&kc, token, type, pin, pin_size, usrptr); |
4614 | 0 | r = crypt_resume_by_keyslot_context(cd, name, CRYPT_ANY_SLOT, &kc); |
4615 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
4616 | |
|
4617 | 0 | return r; |
4618 | 0 | } |
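
The token-based variant only needs a token id (or CRYPT_ANY_TOKEN) and an optional PIN; a sketch, assuming a keyslot token is already configured on the device:

#include <libcryptsetup.h>

static int resume_by_any_token(struct crypt_device *cd, const char *name)
{
	/* NULL type and CRYPT_ANY_TOKEN try every configured token, without a PIN. */
	return crypt_resume_by_token_pin(cd, name, NULL, CRYPT_ANY_TOKEN,
					 NULL, 0, NULL);
}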
4619 | | |
4620 | | /* |
4621 | | * Keyslot manipulation |
4622 | | */ |
4623 | | int crypt_keyslot_add_by_passphrase(struct crypt_device *cd, |
4624 | | int keyslot, // CRYPT_ANY_SLOT (-1) = use any free keyslot
4625 | | const char *passphrase, |
4626 | | size_t passphrase_size, |
4627 | | const char *new_passphrase, |
4628 | | size_t new_passphrase_size) |
4629 | 0 | { |
4630 | 0 | int r; |
4631 | 0 | struct crypt_keyslot_context kc = {}, new_kc = {}; |
4632 | |
|
4633 | 0 | if (!passphrase || !new_passphrase) |
4634 | 0 | return -EINVAL; |
4635 | | |
4636 | 0 | crypt_keyslot_context_init_by_passphrase_internal(&kc, passphrase, passphrase_size); |
4637 | 0 | crypt_keyslot_context_init_by_passphrase_internal(&new_kc, new_passphrase, new_passphrase_size); |
4638 | |
|
4639 | 0 | r = crypt_keyslot_add_by_keyslot_context(cd, CRYPT_ANY_SLOT, &kc, keyslot, &new_kc, 0); |
4640 | |
|
4641 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
4642 | 0 | crypt_keyslot_context_destroy_internal(&new_kc); |
4643 | |
|
4644 | 0 | return r; |
4645 | 0 | } |
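
A sketch of adding a second passphrase to a hypothetical LUKS2 device: the existing passphrase unlocks the volume key and the new one is written to the first free keyslot, whose number is returned on success.

#include <libcryptsetup.h>
#include <string.h>

static int add_backup_passphrase(const char *device,
				 const char *pass, const char *new_pass)
{
	struct crypt_device *cd = NULL;
	int r;

	if ((r = crypt_init(&cd, device)) < 0)
		return r;
	if ((r = crypt_load(cd, CRYPT_LUKS2, NULL)) < 0)
		goto out;

	r = crypt_keyslot_add_by_passphrase(cd, CRYPT_ANY_SLOT,
					    pass, strlen(pass),
					    new_pass, strlen(new_pass));
out:
	crypt_free(cd);
	return r;
}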
4646 | | |
4647 | | int crypt_keyslot_change_by_passphrase(struct crypt_device *cd, |
4648 | | int keyslot_old, |
4649 | | int keyslot_new, |
4650 | | const char *passphrase, |
4651 | | size_t passphrase_size, |
4652 | | const char *new_passphrase, |
4653 | | size_t new_passphrase_size) |
4654 | 0 | { |
4655 | 0 | bool keyslot_swap = false; |
4656 | 0 | int digest = -1, r; |
4657 | 0 | struct luks2_keyslot_params params; |
4658 | 0 | struct volume_key *vk = NULL; |
4659 | |
|
4660 | 0 | if (!passphrase || !new_passphrase) |
4661 | 0 | return -EINVAL; |
4662 | | |
4663 | 0 | log_dbg(cd, "Changing passphrase from old keyslot %d to new %d.", |
4664 | 0 | keyslot_old, keyslot_new); |
4665 | |
|
4666 | 0 | if ((r = onlyLUKS(cd))) |
4667 | 0 | return r; |
4668 | | |
4669 | 0 | if (isLUKS1(cd->type)) |
4670 | 0 | r = LUKS_open_key_with_hdr(keyslot_old, passphrase, passphrase_size, |
4671 | 0 | &cd->u.luks1.hdr, &vk, cd); |
4672 | 0 | else if (isLUKS2(cd->type)) { |
4673 | 0 | r = LUKS2_keyslot_open(cd, keyslot_old, CRYPT_ANY_SEGMENT, passphrase, passphrase_size, &vk); |
4674 | | /* will fail for keyslots without a digest; fix if this is supported in the future */
4675 | 0 | if (r >= 0) { |
4676 | 0 | digest = LUKS2_digest_by_keyslot(&cd->u.luks2.hdr, r); |
4677 | 0 | if (digest < 0) |
4678 | 0 | r = -EINVAL; |
4679 | 0 | } |
4680 | 0 | } else |
4681 | 0 | r = -EINVAL; |
4682 | 0 | if (r < 0) |
4683 | 0 | goto out; |
4684 | | |
4685 | 0 | if (keyslot_old != CRYPT_ANY_SLOT && keyslot_old != r) { |
4686 | 0 | log_dbg(cd, "Keyslot mismatch."); |
4687 | 0 | goto out; |
4688 | 0 | } |
4689 | 0 | keyslot_old = r; |
4690 | |
|
4691 | 0 | if (isLUKS1(cd->type)) { |
4692 | 0 | if (keyslot_new == CRYPT_ANY_SLOT) { |
4693 | 0 | keyslot_new = LUKS_keyslot_find_empty(&cd->u.luks1.hdr); |
4694 | 0 | if (keyslot_new < 0) |
4695 | 0 | keyslot_new = keyslot_old; |
4696 | 0 | } |
4697 | 0 | } else if (isLUKS2(cd->type)) { |
4698 | | /* If there is a free keyslot (both id and binary area), avoid an in-place keyslot area overwrite */
4699 | 0 | if (keyslot_new == CRYPT_ANY_SLOT || keyslot_new == keyslot_old) { |
4700 | 0 | keyslot_new = LUKS2_keyslot_find_empty(cd, &cd->u.luks2.hdr, crypt_volume_key_length(vk)); |
4701 | 0 | if (keyslot_new < 0) |
4702 | 0 | keyslot_new = keyslot_old; |
4703 | 0 | else |
4704 | 0 | keyslot_swap = true; |
4705 | 0 | } |
4706 | 0 | } |
4707 | 0 | log_dbg(cd, "Key change, old slot %d, new slot %d.", keyslot_old, keyslot_new); |
4708 | |
|
4709 | 0 | if (isLUKS1(cd->type)) { |
4710 | 0 | if (keyslot_old == keyslot_new) { |
4711 | 0 | log_dbg(cd, "Key slot %d is going to be overwritten.", keyslot_old); |
4712 | 0 | (void)crypt_keyslot_destroy(cd, keyslot_old); |
4713 | 0 | } |
4714 | 0 | r = LUKS_set_key(keyslot_new, new_passphrase, new_passphrase_size, |
4715 | 0 | &cd->u.luks1.hdr, vk, cd); |
4716 | 0 | } else if (isLUKS2(cd->type)) { |
4717 | 0 | r = LUKS2_keyslot_params_default(cd, &cd->u.luks2.hdr, ¶ms); |
4718 | 0 | if (r) |
4719 | 0 | goto out; |
4720 | | |
4721 | 0 | if (keyslot_old != keyslot_new) { |
4722 | 0 | r = LUKS2_digest_assign(cd, &cd->u.luks2.hdr, keyslot_new, digest, 1, 0); |
4723 | 0 | if (r < 0) |
4724 | 0 | goto out; |
4725 | 0 | r = LUKS2_token_assignment_copy(cd, &cd->u.luks2.hdr, keyslot_old, keyslot_new, 0); |
4726 | 0 | if (r < 0) |
4727 | 0 | goto out; |
4728 | 0 | } else |
4729 | 0 | log_dbg(cd, "Key slot %d is going to be overwritten.", keyslot_old); |
4730 | | |
4731 | 0 | r = LUKS2_keyslot_store(cd, &cd->u.luks2.hdr, |
4732 | 0 | keyslot_new, new_passphrase, |
4733 | 0 | new_passphrase_size, vk, ¶ms); |
4734 | 0 | if (r < 0) |
4735 | 0 | goto out; |
4736 | | |
4737 | | /* Swap old & new so the final keyslot number remains */ |
4738 | 0 | if (keyslot_swap && keyslot_old != keyslot_new) { |
4739 | 0 | r = LUKS2_keyslot_swap(cd, &cd->u.luks2.hdr, keyslot_old, keyslot_new); |
4740 | 0 | if (r < 0) |
4741 | 0 | goto out; |
4742 | | |
4743 | | /* Swap slot id */ |
4744 | 0 | r = keyslot_old; |
4745 | 0 | keyslot_old = keyslot_new; |
4746 | 0 | keyslot_new = r; |
4747 | 0 | } |
4748 | 0 | } else |
4749 | 0 | r = -EINVAL; |
4750 | | |
4751 | 0 | if (r >= 0 && keyslot_old != keyslot_new) |
4752 | 0 | r = crypt_keyslot_destroy(cd, keyslot_old); |
4753 | |
|
4754 | 0 | if (r < 0) |
4755 | 0 | log_err(cd, _("Failed to swap new key slot.")); |
4756 | 0 | out: |
4757 | 0 | crypt_free_volume_key(vk); |
4758 | 0 | if (r < 0) { |
4759 | 0 | _luks2_rollback(cd); |
4760 | 0 | return r; |
4761 | 0 | } |
4762 | 0 | return keyslot_new; |
4763 | 0 | } |
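
A sketch of rotating a passphrase in place with the function above; with CRYPT_ANY_SLOT for both slots, LUKS2 stores the new passphrase in a spare keyslot area and swaps it back so the original keyslot number is kept:

#include <libcryptsetup.h>
#include <string.h>

static int rotate_passphrase(struct crypt_device *cd,
			     const char *old_pass, const char *new_pass)
{
	/* Returns the (kept) keyslot number on success, negative errno on error. */
	return crypt_keyslot_change_by_passphrase(cd, CRYPT_ANY_SLOT, CRYPT_ANY_SLOT,
						  old_pass, strlen(old_pass),
						  new_pass, strlen(new_pass));
}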
4764 | | |
4765 | | int crypt_keyslot_add_by_keyfile_device_offset(struct crypt_device *cd, |
4766 | | int keyslot, |
4767 | | const char *keyfile, |
4768 | | size_t keyfile_size, |
4769 | | uint64_t keyfile_offset, |
4770 | | const char *new_keyfile, |
4771 | | size_t new_keyfile_size, |
4772 | | uint64_t new_keyfile_offset) |
4773 | 0 | { |
4774 | 0 | int r; |
4775 | 0 | struct crypt_keyslot_context kc = {}, new_kc = {}; |
4776 | |
|
4777 | 0 | if (!keyfile || !new_keyfile) |
4778 | 0 | return -EINVAL; |
4779 | | |
4780 | 0 | crypt_keyslot_context_init_by_keyfile_internal(&kc, keyfile, keyfile_size, keyfile_offset); |
4781 | 0 | crypt_keyslot_context_init_by_keyfile_internal(&new_kc, new_keyfile, new_keyfile_size, new_keyfile_offset); |
4782 | |
|
4783 | 0 | r = crypt_keyslot_add_by_keyslot_context(cd, CRYPT_ANY_SLOT, &kc, keyslot, &new_kc, 0); |
4784 | |
|
4785 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
4786 | 0 | crypt_keyslot_context_destroy_internal(&new_kc); |
4787 | |
|
4788 | 0 | return r; |
4789 | 0 | } |
4790 | | |
4791 | | int crypt_keyslot_add_by_keyfile(struct crypt_device *cd, |
4792 | | int keyslot, |
4793 | | const char *keyfile, |
4794 | | size_t keyfile_size, |
4795 | | const char *new_keyfile, |
4796 | | size_t new_keyfile_size) |
4797 | 0 | { |
4798 | 0 | return crypt_keyslot_add_by_keyfile_device_offset(cd, keyslot, |
4799 | 0 | keyfile, keyfile_size, 0, |
4800 | 0 | new_keyfile, new_keyfile_size, 0); |
4801 | 0 | } |
4802 | | |
4803 | | int crypt_keyslot_add_by_keyfile_offset(struct crypt_device *cd, |
4804 | | int keyslot, |
4805 | | const char *keyfile, |
4806 | | size_t keyfile_size, |
4807 | | size_t keyfile_offset, |
4808 | | const char *new_keyfile, |
4809 | | size_t new_keyfile_size, |
4810 | | size_t new_keyfile_offset) |
4811 | 0 | { |
4812 | 0 | return crypt_keyslot_add_by_keyfile_device_offset(cd, keyslot, |
4813 | 0 | keyfile, keyfile_size, keyfile_offset, |
4814 | 0 | new_keyfile, new_keyfile_size, new_keyfile_offset); |
4815 | 0 | } |
4816 | | |
4817 | | int crypt_keyslot_add_by_volume_key(struct crypt_device *cd, |
4818 | | int keyslot, |
4819 | | const char *volume_key, |
4820 | | size_t volume_key_size, |
4821 | | const char *passphrase, |
4822 | | size_t passphrase_size) |
4823 | 0 | { |
4824 | 0 | int r; |
4825 | 0 | struct crypt_keyslot_context kc = {}, new_kc = {}; |
4826 | |
|
4827 | 0 | if (!passphrase) |
4828 | 0 | return -EINVAL; |
4829 | | |
4830 | 0 | crypt_keyslot_context_init_by_key_internal(&kc, volume_key, volume_key_size); |
4831 | 0 | crypt_keyslot_context_init_by_passphrase_internal(&new_kc, passphrase, passphrase_size); |
4832 | |
|
4833 | 0 | r = crypt_keyslot_add_by_keyslot_context(cd, CRYPT_ANY_SLOT, &kc, keyslot, &new_kc, 0); |
4834 | |
|
4835 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
4836 | 0 | crypt_keyslot_context_destroy_internal(&new_kc); |
4837 | |
|
4838 | 0 | return r; |
4839 | 0 | } |
4840 | | |
4841 | | int crypt_keyslot_destroy(struct crypt_device *cd, int keyslot) |
4842 | 0 | { |
4843 | 0 | crypt_keyslot_info ki; |
4844 | 0 | int r; |
4845 | |
|
4846 | 0 | log_dbg(cd, "Destroying keyslot %d.", keyslot); |
4847 | |
|
4848 | 0 | if ((r = onlyLUKSunrestricted(cd))) |
4849 | 0 | return r; |
4850 | | |
4851 | 0 | ki = crypt_keyslot_status(cd, keyslot); |
4852 | 0 | if (ki == CRYPT_SLOT_INVALID) { |
4853 | 0 | log_err(cd, _("Key slot %d is invalid."), keyslot); |
4854 | 0 | return -EINVAL; |
4855 | 0 | } |
4856 | | |
4857 | 0 | if (isLUKS1(cd->type)) { |
4858 | 0 | if (ki == CRYPT_SLOT_INACTIVE) { |
4859 | 0 | log_err(cd, _("Keyslot %d is not active."), keyslot); |
4860 | 0 | return -EINVAL; |
4861 | 0 | } |
4862 | 0 | return LUKS_del_key(keyslot, &cd->u.luks1.hdr, cd); |
4863 | 0 | } |
4864 | | |
4865 | 0 | return LUKS2_keyslot_wipe(cd, &cd->u.luks2.hdr, keyslot); |
4866 | 0 | } |
4867 | | |
4868 | | static int _check_header_data_overlap(struct crypt_device *cd, const char *name) |
4869 | 0 | { |
4870 | 0 | if (!name || !isLUKS(cd->type)) |
4871 | 0 | return 0; |
4872 | | |
4873 | 0 | if (device_is_identical(crypt_data_device(cd), crypt_metadata_device(cd)) <= 0) |
4874 | 0 | return 0; |
4875 | | |
4876 | | /* FIXME: check real header size */ |
4877 | 0 | if (crypt_get_data_offset(cd) == 0) { |
4878 | 0 | log_err(cd, _("Device header overlaps with data area.")); |
4879 | 0 | return -EINVAL; |
4880 | 0 | } |
4881 | | |
4882 | 0 | return 0; |
4883 | 0 | } |
4884 | | |
4885 | | static int check_devices(struct crypt_device *cd, const char *name, const char *iname, uint32_t *flags) |
4886 | 0 | { |
4887 | 0 | int r; |
4888 | |
|
4889 | 0 | if (!flags || !name) |
4890 | 0 | return -EINVAL; |
4891 | | |
4892 | 0 | if (iname) { |
4893 | 0 | r = dm_status_device(cd, iname); |
4894 | 0 | if (r >= 0 && !(*flags & CRYPT_ACTIVATE_REFRESH)) |
4895 | 0 | return -EBUSY; |
4896 | 0 | if (r < 0 && r != -ENODEV) |
4897 | 0 | return r; |
4898 | 0 | if (r == -ENODEV) |
4899 | 0 | *flags &= ~CRYPT_ACTIVATE_REFRESH; |
4900 | 0 | } |
4901 | | |
4902 | 0 | r = dm_status_device(cd, name); |
4903 | 0 | if (r >= 0 && !(*flags & CRYPT_ACTIVATE_REFRESH)) |
4904 | 0 | return -EBUSY; |
4905 | 0 | if (r < 0 && r != -ENODEV) |
4906 | 0 | return r; |
4907 | 0 | if (r == -ENODEV) |
4908 | 0 | *flags &= ~CRYPT_ACTIVATE_REFRESH; |
4909 | |
|
4910 | 0 | return 0; |
4911 | 0 | } |
4912 | | |
4913 | | static int _create_device_with_integrity(struct crypt_device *cd, |
4914 | | const char *type, const char *name, const char *iname, |
4915 | | const char *ipath, struct crypt_dm_active_device *dmd, |
4916 | | struct crypt_dm_active_device *dmdi) |
4917 | 0 | { |
4918 | 0 | int r; |
4919 | 0 | enum devcheck device_check; |
4920 | 0 | struct dm_target *tgt; |
4921 | 0 | struct device *device = NULL; |
4922 | |
|
4923 | 0 | if (!single_segment(dmd)) |
4924 | 0 | return -EINVAL; |
4925 | | |
4926 | 0 | tgt = &dmd->segment; |
4927 | 0 | if (tgt->type != DM_CRYPT) |
4928 | 0 | return -EINVAL; |
4929 | | |
4930 | 0 | device_check = dmd->flags & CRYPT_ACTIVATE_SHARED ? DEV_OK : DEV_EXCL; |
4931 | |
|
4932 | 0 | r = INTEGRITY_activate_dmd_device(cd, iname, CRYPT_SUBDEV, dmdi, 0); |
4933 | 0 | if (r) |
4934 | 0 | return r; |
4935 | | |
4936 | 0 | r = device_alloc(cd, &device, ipath); |
4937 | 0 | if (r < 0) |
4938 | 0 | goto out; |
4939 | 0 | tgt->data_device = device; |
4940 | |
|
4941 | 0 | r = device_block_adjust(cd, tgt->data_device, device_check, |
4942 | 0 | tgt->u.crypt.offset, &dmd->size, &dmd->flags); |
4943 | |
|
4944 | 0 | if (!r) |
4945 | 0 | r = dm_create_device(cd, name, type, dmd); |
4946 | 0 | out: |
4947 | 0 | if (r < 0) |
4948 | 0 | dm_remove_device(cd, iname, 0); |
4949 | |
|
4950 | 0 | device_free(cd, device); |
4951 | 0 | return r; |
4952 | 0 | } |
4953 | | |
4954 | | static int kernel_keyring_support(void) |
4955 | 0 | { |
4956 | 0 | static unsigned _checked = 0; |
4957 | |
|
4958 | 0 | if (!_checked) { |
4959 | 0 | _kernel_keyring_supported = keyring_check(); |
4960 | 0 | _checked = 1; |
4961 | 0 | } |
4962 | |
|
4963 | 0 | return _kernel_keyring_supported; |
4964 | 0 | } |
4965 | | |
4966 | | static int dmcrypt_keyring_bug(void) |
4967 | 0 | { |
4968 | 0 | uint64_t kversion; |
4969 | |
|
4970 | 0 | if (kernel_version(&kversion)) |
4971 | 0 | return 1; |
4972 | 0 | return kversion < compact_version(4,15,0,0); |
4973 | 0 | } |
4974 | | |
4975 | | int create_or_reload_device(struct crypt_device *cd, const char *name, |
4976 | | const char *type, struct crypt_dm_active_device *dmd) |
4977 | 0 | { |
4978 | 0 | int r; |
4979 | 0 | enum devcheck device_check; |
4980 | 0 | struct dm_target *tgt; |
4981 | 0 | uint64_t offset, dmflags = 0; |
4982 | |
|
4983 | 0 | if (!type || !name || !single_segment(dmd)) |
4984 | 0 | return -EINVAL; |
4985 | | |
4986 | 0 | tgt = &dmd->segment; |
4987 | 0 | if (tgt->type != DM_CRYPT && tgt->type != DM_INTEGRITY && tgt->type != DM_LINEAR) |
4988 | 0 | return -EINVAL; |
4989 | | |
4990 | | /* drop CRYPT_ACTIVATE_REFRESH flag if any device is inactive */ |
4991 | 0 | r = check_devices(cd, name, NULL, &dmd->flags); |
4992 | 0 | if (r) |
4993 | 0 | return r; |
4994 | | |
4995 | 0 | if (dmd->flags & CRYPT_ACTIVATE_REFRESH) { |
4996 | | /* Refresh together with recalculate means the dm-integrity device is being enlarged */
4997 | 0 | if (tgt->type == DM_INTEGRITY && dmd->flags & CRYPT_ACTIVATE_RECALCULATE) |
4998 | 0 | dmflags = DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH; |
4999 | 0 | r = _reload_device(cd, name, dmd, dmflags); |
5000 | 0 | } else { |
5001 | 0 | if (tgt->type == DM_CRYPT || tgt->type == DM_LINEAR) { |
5002 | 0 | device_check = dmd->flags & CRYPT_ACTIVATE_SHARED ? DEV_OK : DEV_EXCL; |
5003 | 0 | offset = tgt->type == DM_CRYPT ? tgt->u.crypt.offset : tgt->u.linear.offset; |
5004 | |
|
5005 | 0 | r = device_block_adjust(cd, tgt->data_device, device_check, |
5006 | 0 | offset, &dmd->size, &dmd->flags); |
5007 | 0 | if (!r) { |
5008 | 0 | tgt->size = dmd->size; |
5009 | 0 | r = dm_create_device(cd, name, type, dmd); |
5010 | 0 | } |
5011 | 0 | } else if (tgt->type == DM_INTEGRITY) { |
5012 | 0 | r = device_block_adjust(cd, tgt->data_device, DEV_EXCL, |
5013 | 0 | tgt->u.integrity.offset, NULL, &dmd->flags); |
5014 | 0 | if (r) |
5015 | 0 | return r; |
5016 | | |
5017 | 0 | if (tgt->u.integrity.meta_device) { |
5018 | 0 | r = device_block_adjust(cd, tgt->u.integrity.meta_device, DEV_EXCL, 0, NULL, NULL); |
5019 | 0 | if (r) |
5020 | 0 | return r; |
5021 | 0 | } |
5022 | | |
5023 | 0 | r = dm_create_device(cd, name, type, dmd); |
5024 | 0 | } |
5025 | 0 | } |
5026 | | |
5027 | 0 | return r; |
5028 | 0 | } |
5029 | | |
5030 | | int create_or_reload_device_with_integrity(struct crypt_device *cd, const char *name, |
5031 | | const char *type, struct crypt_dm_active_device *dmd, |
5032 | | struct crypt_dm_active_device *dmdi) |
5033 | 0 | { |
5034 | 0 | int r; |
5035 | 0 | char *iname = NULL, *ipath = NULL; |
5036 | |
|
5037 | 0 | if (!type || !name || !dmd || !dmdi) |
5038 | 0 | return -EINVAL; |
5039 | | |
5040 | 0 | r = dm_get_iname(name, &iname, false); |
5041 | 0 | if (r) |
5042 | 0 | goto out; |
5043 | | |
5044 | 0 | r = dm_get_iname(name, &ipath, true); |
5045 | 0 | if (r) |
5046 | 0 | goto out; |
5047 | | |
5048 | | /* drop CRYPT_ACTIVATE_REFRESH flag if any device is inactive */ |
5049 | 0 | r = check_devices(cd, name, iname, &dmd->flags); |
5050 | 0 | if (r) |
5051 | 0 | goto out; |
5052 | | |
5053 | 0 | if (dmd->flags & CRYPT_ACTIVATE_REFRESH) |
5054 | 0 | r = _reload_device_with_integrity(cd, name, iname, ipath, dmd, dmdi); |
5055 | 0 | else |
5056 | 0 | r = _create_device_with_integrity(cd, type, name, iname, ipath, dmd, dmdi); |
5057 | 0 | out: |
5058 | 0 | free(ipath); |
5059 | 0 | free(iname); |
5060 | |
|
5061 | 0 | return r; |
5062 | 0 | } |
5063 | | |
5064 | | static int load_all_keys(struct crypt_device *cd, struct volume_key *vks) |
5065 | 0 | { |
5066 | 0 | int r; |
5067 | 0 | struct volume_key *vk = vks; |
5068 | |
|
5069 | 0 | while (vk) { |
5070 | 0 | r = LUKS2_volume_key_load_in_keyring_by_digest(cd, vk, crypt_volume_key_get_id(vk)); |
5071 | 0 | if (r < 0) |
5072 | 0 | return r; |
5073 | 0 | vk = crypt_volume_key_next(vk); |
5074 | 0 | } |
5075 | | |
5076 | 0 | return 0; |
5077 | 0 | } |
5078 | | |
5079 | | #if USE_LUKS2_REENCRYPTION |
5080 | | static int _activate_reencrypt_device_by_vk(struct crypt_device *cd, |
5081 | | struct luks2_hdr *hdr, |
5082 | | const char *name, |
5083 | | struct volume_key *vks, |
5084 | | uint32_t flags) |
5085 | 0 | { |
5086 | 0 | bool dynamic_size; |
5087 | 0 | crypt_reencrypt_info ri; |
5088 | 0 | uint64_t minimal_size, device_size; |
5089 | 0 | int r = 0; |
5090 | 0 | struct crypt_lock_handle *reencrypt_lock = NULL; |
5091 | 0 | struct volume_key *vk; |
5092 | |
|
5093 | 0 | assert(hdr); |
5094 | 0 | assert(vks); |
5095 | | |
5096 | 0 | r = LUKS2_reencrypt_lock(cd, &reencrypt_lock); |
5097 | 0 | if (r) { |
5098 | 0 | if (r == -EBUSY) |
5099 | 0 | log_err(cd, _("Reencryption in-progress. Cannot activate device.")); |
5100 | 0 | else |
5101 | 0 | log_err(cd, _("Failed to get reencryption lock.")); |
5102 | 0 | return r; |
5103 | 0 | } |
5104 | | |
5105 | 0 | if ((r = crypt_load(cd, CRYPT_LUKS2, NULL))) |
5106 | 0 | goto out; |
5107 | | |
5108 | 0 | ri = LUKS2_reencrypt_status(hdr); |
5109 | 0 | if (ri == CRYPT_REENCRYPT_INVALID) { |
5110 | 0 | r = -EINVAL; |
5111 | 0 | goto out; |
5112 | 0 | } |
5113 | | |
5114 | 0 | if (ri > CRYPT_REENCRYPT_NONE) { |
5115 | | /* it is sufficient to force re-verification of the reencryption digest only */
5116 | 0 | r = LUKS2_reencrypt_digest_verify(cd, &cd->u.luks2.hdr, vks); |
5117 | 0 | if (r < 0) |
5118 | 0 | goto out; |
5119 | | |
5120 | 0 | if (ri == CRYPT_REENCRYPT_CRASH) { |
5121 | 0 | r = LUKS2_reencrypt_locked_recovery_by_vks(cd, vks); |
5122 | 0 | if (r < 0) { |
5123 | 0 | log_err(cd, _("LUKS2 reencryption recovery using volume key(s) failed.")); |
5124 | 0 | goto out; |
5125 | 0 | } |
5126 | | |
5127 | 0 | ri = LUKS2_reencrypt_status(hdr); |
5128 | 0 | } |
5129 | 0 | } |
5130 | | |
5131 | | /* recovery finished the reencryption, or it was already finished after the metadata reload */
5132 | 0 | if (ri == CRYPT_REENCRYPT_NONE) { |
5133 | 0 | vk = crypt_volume_key_by_id(vks, LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT)); |
5134 | 0 | if (!vk) { |
5135 | 0 | r = -EPERM; |
5136 | 0 | goto out; |
5137 | 0 | } |
5138 | | |
5139 | 0 | r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk); |
5140 | 0 | if (r >= 0) |
5141 | 0 | r = LUKS2_activate(cd, name, vk, NULL, flags); |
5142 | 0 | goto out; |
5143 | 0 | } |
5144 | 0 | if (ri > CRYPT_REENCRYPT_CLEAN) { |
5145 | 0 | r = -EINVAL; |
5146 | 0 | goto out; |
5147 | 0 | } |
5148 | | |
5149 | 0 | if ((r = LUKS2_get_data_size(hdr, &minimal_size, &dynamic_size))) |
5150 | 0 | goto out; |
5151 | | |
5152 | 0 | log_dbg(cd, "Entering clean reencryption state mode."); |
5153 | |
|
5154 | 0 | r = LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, |
5155 | 0 | !(flags & CRYPT_ACTIVATE_SHARED), |
5156 | 0 | dynamic_size); |
5157 | 0 | if (r < 0) |
5158 | 0 | goto out; |
5159 | 0 | r = LUKS2_activate_multi(cd, name, vks, device_size >> SECTOR_SHIFT, flags); |
5160 | 0 | out: |
5161 | 0 | LUKS2_reencrypt_unlock(cd, reencrypt_lock); |
5162 | |
|
5163 | 0 | return r; |
5164 | 0 | } |
5165 | | |
5166 | | /* |
5167 | | * Activation/deactivation of a device |
5168 | | */ |
5169 | | static int _activate_luks2_by_volume_key(struct crypt_device *cd, |
5170 | | const char *name, |
5171 | | struct volume_key *vk, |
5172 | | struct volume_key *external_key, |
5173 | | uint32_t flags) |
5174 | 0 | { |
5175 | 0 | int r; |
5176 | 0 | crypt_reencrypt_info ri; |
5177 | 0 | ri = LUKS2_reencrypt_status(&cd->u.luks2.hdr); |
5178 | 0 | if (ri == CRYPT_REENCRYPT_INVALID) |
5179 | 0 | return -EINVAL; |
5180 | | |
5181 | 0 | if (ri > CRYPT_REENCRYPT_NONE) { |
5182 | | /* reencryption must reverify keys after taking the reencryption lock and reloading metadata */ |
5183 | 0 | r = _activate_reencrypt_device_by_vk(cd, &cd->u.luks2.hdr, name, vk, flags); |
5184 | 0 | } else { |
5185 | | /* the hw-opal data segment type does not require a volume key for activation */
5186 | 0 | assert(!vk || crypt_volume_key_get_id(vk) == LUKS2_digest_by_segment(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT)); |
5187 | 0 | r = LUKS2_activate(cd, name, vk, external_key, flags); |
5188 | 0 | } |
5189 | | |
5190 | 0 | return r; |
5191 | 0 | } |
5192 | | #else |
5193 | | static int _activate_luks2_by_volume_key(struct crypt_device *cd, |
5194 | | const char *name, |
5195 | | struct volume_key *vk, |
5196 | | struct volume_key *external_key, |
5197 | | uint32_t flags) |
5198 | | { |
5199 | | int r; |
5200 | | crypt_reencrypt_info ri; |
5201 | | ri = LUKS2_reencrypt_status(&cd->u.luks2.hdr); |
5202 | | if (ri == CRYPT_REENCRYPT_INVALID) |
5203 | | return -EINVAL; |
5204 | | |
5205 | | if (ri > CRYPT_REENCRYPT_NONE) { |
5206 | | log_err(cd, _("This operation is not supported for this device type.")); |
5207 | | r = -ENOTSUP; |
5208 | | } else { |
5209 | | assert(crypt_volume_key_get_id(vk) == LUKS2_digest_by_segment(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT)); |
5210 | | r = LUKS2_activate(cd, name, vk, external_key, flags); |
5211 | | } |
5212 | | |
5213 | | return r; |
5214 | | } |
5215 | | #endif |
5216 | | |
5217 | | static int _activate_loopaes(struct crypt_device *cd, |
5218 | | const char *name, |
5219 | | const char *buffer, |
5220 | | size_t buffer_size, |
5221 | | uint32_t flags) |
5222 | 0 | { |
5223 | 0 | int r; |
5224 | 0 | unsigned int key_count = 0; |
5225 | 0 | struct volume_key *vk = NULL; |
5226 | 0 | char *buffer_copy; |
5227 | |
|
5228 | 0 | buffer_copy = crypt_safe_alloc(buffer_size); |
5229 | 0 | if (!buffer_copy) |
5230 | 0 | return -ENOMEM; |
5231 | 0 | crypt_safe_memcpy(buffer_copy, buffer, buffer_size); |
5232 | |
|
5233 | 0 | r = LOOPAES_parse_keyfile(cd, &vk, cd->u.loopaes.hdr.hash, &key_count, |
5234 | 0 | buffer_copy, buffer_size); |
5235 | 0 | crypt_safe_free(buffer_copy); |
5236 | |
|
5237 | 0 | if (!r && name) |
5238 | 0 | r = LOOPAES_activate(cd, name, cd->u.loopaes.cipher, key_count, |
5239 | 0 | vk, flags); |
5240 | |
|
5241 | 0 | crypt_free_volume_key(vk); |
5242 | |
|
5243 | 0 | return r; |
5244 | 0 | } |
5245 | | |
5246 | | static int _activate_check_status(struct crypt_device *cd, const char *name, unsigned reload) |
5247 | 0 | { |
5248 | 0 | int r; |
5249 | |
|
5250 | 0 | if (!name) |
5251 | 0 | return 0; |
5252 | | |
5253 | 0 | r = dm_status_device(cd, name); |
5254 | |
|
5255 | 0 | if (r >= 0 && reload) |
5256 | 0 | return 0; |
5257 | | |
5258 | 0 | if (r >= 0 || r == -EEXIST) { |
5259 | 0 | log_err(cd, _("Device %s already exists."), name); |
5260 | 0 | return -EEXIST; |
5261 | 0 | } |
5262 | | |
5263 | 0 | if (r == -ENODEV) |
5264 | 0 | return 0; |
5265 | | |
5266 | 0 | log_err(cd, _("Cannot use device %s, name is invalid or still in use."), name); |
5267 | 0 | return r; |
5268 | 0 | } |
5269 | | |
5270 | | static int _verify_reencrypt_keys(struct crypt_device *cd, struct volume_key *vks) |
5271 | 0 | { |
5272 | 0 | int r; |
5273 | |
|
5274 | 0 | assert(cd && (isLUKS2(cd->type))); |
5275 | | |
5276 | 0 | r = LUKS2_reencrypt_digest_verify(cd, &cd->u.luks2.hdr, vks); |
5277 | 0 | if (r == -EPERM || r == -ENOENT || r == -EINVAL) |
5278 | 0 | log_err(cd, _("Reencryption volume keys do not match the volume.")); |
5279 | |
|
5280 | 0 | return r; |
5281 | 0 | } |
5282 | | |
5283 | | static int _verify_key(struct crypt_device *cd, |
5284 | | bool unbound_key, |
5285 | | struct volume_key *vk) |
5286 | 0 | { |
5287 | 0 | int r = -EINVAL; |
5288 | |
|
5289 | 0 | assert(cd); |
5290 | | |
5291 | 0 | if (isPLAIN(cd->type)) { |
5292 | 0 | if (vk && crypt_volume_key_length(vk) == cd->u.plain.key_size) { |
5293 | 0 | r = KEY_VERIFIED; |
5294 | 0 | } else |
5295 | 0 | log_err(cd, _("Incorrect volume key specified for plain device.")); |
5296 | 0 | } else if (isLUKS1(cd->type)) { |
5297 | 0 | if (!vk) |
5298 | 0 | return -EINVAL; |
5299 | | |
5300 | 0 | r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk); |
5301 | 0 | } else if (isLUKS2(cd->type)) { |
5302 | 0 | if (!vk) |
5303 | 0 | return -EINVAL; |
5304 | | |
5305 | 0 | if (unbound_key) |
5306 | 0 | r = LUKS2_digest_verify_by_any_matching(cd, vk); |
5307 | 0 | else |
5308 | 0 | r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk); |
5309 | 0 | } else if (isVERITY(cd->type)) |
5310 | 0 | r = KEY_VERIFIED; |
5311 | 0 | else if (isTCRYPT(cd->type)) |
5312 | 0 | r = KEY_VERIFIED; |
5313 | 0 | else if (isINTEGRITY(cd->type)) |
5314 | 0 | r = KEY_VERIFIED; |
5315 | 0 | else if (isBITLK(cd->type)) |
5316 | 0 | r = KEY_VERIFIED; |
5317 | 0 | else if (isFVAULT2(cd->type)) { |
5318 | 0 | if (vk && crypt_volume_key_length(vk) == FVAULT2_volume_key_size()) |
5319 | 0 | r = KEY_VERIFIED; |
5320 | 0 | } else |
5321 | 0 | log_err(cd, _("Device type is not properly initialized.")); |
5322 | | |
5323 | 0 | if (r >= KEY_VERIFIED) |
5324 | 0 | crypt_volume_key_set_id(vk, r); |
5325 | |
|
5326 | 0 | return r > 0 ? 0 : r; |
5327 | 0 | } |
5328 | | |
5329 | | /* activation/deactivation of device mapping */ |
5330 | | static int _activate_by_volume_key(struct crypt_device *cd, |
5331 | | const char *name, |
5332 | | struct volume_key *vk, |
5333 | | struct volume_key *external_key, |
5334 | | uint32_t flags) |
5335 | 0 | { |
5336 | 0 | int r; |
5337 | |
|
5338 | 0 | assert(cd); |
5339 | 0 | assert(name); |
5340 | | |
5341 | 0 | r = _check_header_data_overlap(cd, name); |
5342 | 0 | if (r < 0) |
5343 | 0 | return r; |
5344 | | |
5345 | | /* use key directly, no hash */ |
5346 | 0 | if (isPLAIN(cd->type)) { |
5347 | 0 | assert(!external_key); |
5348 | 0 | assert(crypt_volume_key_get_id(vk) == KEY_VERIFIED); |
5349 | | |
5350 | 0 | r = PLAIN_activate(cd, name, vk, cd->u.plain.hdr.size, flags); |
5351 | 0 | } else if (isLUKS1(cd->type)) { |
5352 | 0 | assert(!external_key); |
5353 | 0 | assert(crypt_volume_key_get_id(vk) == KEY_VERIFIED); |
5354 | | |
5355 | 0 | r = LUKS1_activate(cd, name, vk, flags); |
5356 | 0 | } else if (isLUKS2(cd->type)) { |
5357 | 0 | r = _activate_luks2_by_volume_key(cd, name, vk, external_key, flags); |
5358 | 0 | } else if (isVERITY(cd->type)) { |
5359 | 0 | assert(crypt_volume_key_get_id(vk) == KEY_VERIFIED); |
5360 | 0 | r = VERITY_activate(cd, name, vk, external_key, cd->u.verity.fec_device, |
5361 | 0 | &cd->u.verity.hdr, flags); |
5362 | 0 | } else if (isTCRYPT(cd->type)) { |
5363 | 0 | assert(!external_key); |
5364 | 0 | r = TCRYPT_activate(cd, name, &cd->u.tcrypt.hdr, |
5365 | 0 | &cd->u.tcrypt.params, flags); |
5366 | 0 | } else if (isINTEGRITY(cd->type)) { |
5367 | 0 | assert(!external_key); |
5368 | 0 | assert(!vk || crypt_volume_key_get_id(vk) == KEY_VERIFIED); |
5369 | 0 | r = INTEGRITY_activate(cd, name, &cd->u.integrity.params, vk, |
5370 | 0 | cd->u.integrity.journal_crypt_key, |
5371 | 0 | cd->u.integrity.journal_mac_key, flags, |
5372 | 0 | cd->u.integrity.sb_flags); |
5373 | 0 | } else if (isBITLK(cd->type)) { |
5374 | 0 | assert(!external_key); |
5375 | 0 | assert(crypt_volume_key_get_id(vk) == KEY_VERIFIED); |
5376 | 0 | r = BITLK_activate_by_volume_key(cd, name, vk, &cd->u.bitlk.params, flags); |
5377 | 0 | } else if (isFVAULT2(cd->type)) { |
5378 | 0 | assert(!external_key); |
5379 | 0 | assert(crypt_volume_key_get_id(vk) == KEY_VERIFIED); |
5380 | 0 | r = FVAULT2_activate_by_volume_key(cd, name, vk, &cd->u.fvault2.params, flags); |
5381 | 0 | } else { |
5382 | 0 | log_err(cd, _("Device type is not properly initialized.")); |
5383 | 0 | r = -EINVAL; |
5384 | 0 | } |
5385 | | |
5386 | 0 | return r; |
5387 | 0 | } |
5388 | | |
5389 | | int crypt_activate_by_keyslot_context(struct crypt_device *cd, |
5390 | | const char *name, |
5391 | | int keyslot, |
5392 | | struct crypt_keyslot_context *kc, |
5393 | | int additional_keyslot, |
5394 | | struct crypt_keyslot_context *additional_kc, |
5395 | | uint32_t flags) |
5396 | 0 | { |
5397 | 0 | bool use_keyring, luks2_reencryption = false; |
5398 | 0 | struct volume_key *p_ext_key, *crypt_key = NULL, *opal_key = NULL, *vk = NULL, |
5399 | 0 | *vk_sign = NULL, *p_crypt = NULL; |
5400 | 0 | size_t passphrase_size; |
5401 | 0 | const char *passphrase = NULL; |
5402 | 0 | int unlocked_keyslot, r = -EINVAL; |
5403 | 0 | key_serial_t kid1 = 0, kid2 = 0; |
5404 | 0 | struct luks2_hdr *hdr = &cd->u.luks2.hdr; |
5405 | |
|
5406 | 0 | if (!cd || !kc) |
5407 | 0 | return -EINVAL; |
5408 | | |
5409 | 0 | log_dbg(cd, "%s volume %s [keyslot %d] using %s.", |
5410 | 0 | name ? "Activating" : "Checking", name ?: "passphrase", keyslot, keyslot_context_type_string(kc)); |
5411 | 0 | if (!name && (flags & CRYPT_ACTIVATE_REFRESH)) |
5412 | 0 | return -EINVAL; |
5413 | 0 | if ((flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_use_keyring_for_vk(cd)) |
5414 | 0 | return -EINVAL; |
5415 | 0 | if ((flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) && name) |
5416 | 0 | return -EINVAL; |
5417 | 0 | if (!additional_kc && (additional_keyslot != CRYPT_ANY_SLOT)) |
5418 | 0 | return -EINVAL; |
5419 | 0 | if ((kc->type == CRYPT_KC_TYPE_KEYRING) && !kernel_keyring_support()) { |
5420 | 0 | log_err(cd, _("Kernel keyring is not supported by the kernel.")); |
5421 | 0 | return -EINVAL; |
5422 | 0 | } |
5423 | 0 | if ((kc->type == CRYPT_KC_TYPE_SIGNED_KEY) && !kernel_keyring_support()) { |
5424 | 0 | log_err(cd, _("Kernel keyring missing: required for passing signature to kernel.")); |
5425 | 0 | return -EINVAL; |
5426 | 0 | } |
5427 | 0 | r = _check_header_data_overlap(cd, name); |
5428 | 0 | if (r < 0) |
5429 | 0 | return r; |
5430 | 0 | r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH); |
5431 | 0 | if (r < 0) |
5432 | 0 | return r; |
5433 | | |
5434 | 0 | if (kc->get_passphrase && kc->type != CRYPT_KC_TYPE_TOKEN && |
5435 | 0 | isLOOPAES(cd->type)) { |
5436 | 0 | r = kc->get_passphrase(cd, kc, &passphrase, &passphrase_size); |
5437 | 0 | if (r < 0) |
5438 | 0 | return r; |
5439 | | |
5440 | 0 | return _activate_loopaes(cd, name, passphrase, passphrase_size, flags); |
5441 | 0 | } |
5442 | | |
5443 | | /* acquire the volume key(s) */ |
5444 | 0 | r = -EINVAL; |
5445 | 0 | if (isLUKS1(cd->type)) { |
5446 | 0 | if (kc->get_luks1_volume_key) |
5447 | 0 | r = kc->get_luks1_volume_key(cd, kc, keyslot, &vk); |
5448 | 0 | } else if (isLUKS2(cd->type)) { |
5449 | 0 | if (flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) { |
5450 | 0 | if (kc->get_luks2_key) |
5451 | 0 | r = kc->get_luks2_key(cd, kc, keyslot, CRYPT_ANY_SEGMENT, &vk); |
5452 | 0 | } else { |
5453 | 0 | switch (LUKS2_reencrypt_status(hdr)) { |
5454 | 0 | case CRYPT_REENCRYPT_NONE: |
5455 | 0 | if (kc->get_luks2_volume_key) |
5456 | 0 | r = kc->get_luks2_volume_key(cd, kc, keyslot, &vk); |
5457 | 0 | break; |
5458 | 0 | case CRYPT_REENCRYPT_CLEAN: /* fall-through */ |
5459 | 0 | case CRYPT_REENCRYPT_CRASH: |
5460 | 0 | luks2_reencryption = true; |
5461 | 0 | r = LUKS2_keyslot_context_open_all_segments(cd, keyslot, additional_keyslot, kc, additional_kc, &vk); |
5462 | | /* fall-through */ |
5463 | 0 | default: |
5464 | 0 | break; |
5465 | 0 | } |
5466 | 0 | } |
5467 | 0 | } else if (isTCRYPT(cd->type)) { |
5468 | 0 | r = 0; |
5469 | 0 | } else if (name && isPLAIN(cd->type)) { |
5470 | 0 | if (kc->type == CRYPT_KC_TYPE_VK_KEYRING) { |
5471 | 0 | vk = crypt_alloc_volume_key(cd->u.plain.key_size, NULL); |
5472 | 0 | if (!vk) |
5473 | 0 | return -ENOMEM; |
5474 | 0 | r = crypt_volume_key_set_description_by_name(vk, kc->u.vk_kr.key_description); |
5475 | 0 | if (r < 0) |
5476 | 0 | log_err(cd, _("Cannot use keyring key %s."), kc->u.vk_kr.key_description); |
5477 | 0 | } else if (kc->get_passphrase && kc->type != CRYPT_KC_TYPE_TOKEN) { |
5478 | 0 | r = kc->get_passphrase(cd, kc, &passphrase, &passphrase_size); |
5479 | 0 | if (r < 0) |
5480 | 0 | return r; |
5481 | 0 | r = process_key(cd, cd->u.plain.hdr.hash, |
5482 | 0 | cd->u.plain.key_size, |
5483 | 0 | passphrase, passphrase_size, &vk); |
5484 | 0 | } else if (kc->get_plain_volume_key) |
5485 | 0 | r = kc->get_plain_volume_key(cd, kc, &vk); |
5486 | 0 | } else if (isBITLK(cd->type)) { |
5487 | 0 | if (kc->get_bitlk_volume_key && (name || kc->type != CRYPT_KC_TYPE_KEY)) |
5488 | 0 | r = kc->get_bitlk_volume_key(cd, kc, &cd->u.bitlk.params, &vk); |
5489 | 0 | } else if (isFVAULT2(cd->type)) { |
5490 | 0 | if (kc->get_fvault2_volume_key) |
5491 | 0 | r = kc->get_fvault2_volume_key(cd, kc, &cd->u.fvault2.params, &vk); |
5492 | 0 | } else if (isVERITY(cd->type) && (name || kc->type != CRYPT_KC_TYPE_SIGNED_KEY)) { |
5493 | 0 | if (kc->get_verity_volume_key) |
5494 | 0 | r = kc->get_verity_volume_key(cd, kc, &vk, &vk_sign); |
5495 | 0 | if (r >= 0) |
5496 | 0 | r = VERITY_verify_params(cd, &cd->u.verity.hdr, vk_sign != NULL, |
5497 | 0 | cd->u.verity.fec_device, vk); |
5498 | |
|
5499 | 0 | free(CONST_CAST(void*)cd->u.verity.root_hash); |
5500 | 0 | cd->u.verity.root_hash = NULL; |
5501 | 0 | flags |= CRYPT_ACTIVATE_READONLY; |
5502 | 0 | } else if (isINTEGRITY(cd->type)) { |
5503 | 0 | if (kc->get_integrity_volume_key) |
5504 | 0 | r = kc->get_integrity_volume_key(cd, kc, &vk); |
5505 | 0 | } |
5506 | 0 | if (r < 0 && (r != -ENOENT || kc->type == CRYPT_KC_TYPE_TOKEN)) |
5507 | 0 | goto out; |
5508 | 0 | unlocked_keyslot = r; |
5509 | |
|
5510 | 0 | if (r == -ENOENT && isLUKS(cd->type) && cd->volume_key) { |
5511 | 0 | vk = crypt_alloc_volume_key(crypt_volume_key_length(cd->volume_key), |
5512 | 0 | crypt_volume_key_get_key(cd->volume_key)); |
5513 | 0 | r = vk ? 0 : -ENOMEM; |
5514 | 0 | } |
5515 | 0 | if (r == -ENOENT && isINTEGRITY(cd->type)) |
5516 | 0 | r = 0; |
5517 | |
|
5518 | 0 | if (r < 0) |
5519 | 0 | goto out; |
5520 | | |
5521 | 0 | if (luks2_reencryption) |
5522 | 0 | r = _verify_reencrypt_keys(cd, vk); |
5523 | 0 | else |
5524 | 0 | r = _verify_key(cd, flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY, vk); |
5525 | |
|
5526 | 0 | if (r < 0) |
5527 | 0 | goto out; |
5528 | | |
5529 | 0 | if (isLUKS2(cd->type)) { |
5530 | | /* split the key only if we do activation */ |
5531 | 0 | if (name && LUKS2_segment_is_hw_opal(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT)) { |
5532 | 0 | r = LUKS2_split_crypt_and_opal_keys(cd, &cd->u.luks2.hdr, |
5533 | 0 | vk, &crypt_key, |
5534 | 0 | &opal_key); |
5535 | 0 | if (r < 0) |
5536 | 0 | goto out; |
5537 | | |
5538 | | /* copy the volume key digest id into the crypt subkey */
5539 | 0 | crypt_volume_key_set_id(crypt_key, crypt_volume_key_get_id(vk)); |
5540 | |
|
5541 | 0 | p_crypt = crypt_key; |
5542 | 0 | p_ext_key = opal_key ?: vk; |
5543 | 0 | } else { |
5544 | 0 | p_crypt = vk; |
5545 | 0 | p_ext_key = NULL; |
5546 | 0 | } |
5547 | | |
5548 | 0 | if (!crypt_use_keyring_for_vk(cd)) |
5549 | 0 | use_keyring = false; |
5550 | 0 | else |
5551 | | /* Force keyring use for activation of LUKS2 device in reencryption */ |
5552 | 0 | use_keyring = (name && (luks2_reencryption || !crypt_is_cipher_null(crypt_get_cipher(cd)))) || |
5553 | 0 | (flags & CRYPT_ACTIVATE_KEYRING_KEY); |
5554 | |
|
5555 | 0 | if (use_keyring) { |
5556 | | /* upload the dm-crypt part of the volume key into the thread keyring if requested */
5557 | 0 | if (p_crypt) { |
5558 | 0 | r = load_all_keys(cd, p_crypt); |
5559 | 0 | if (r < 0) |
5560 | 0 | goto out; |
5561 | 0 | flags |= CRYPT_ACTIVATE_KEYRING_KEY; |
5562 | 0 | } |
5563 | | |
5564 | | /* upload the volume key into the custom user keyring if requested */
5565 | 0 | if (cd->link_vk_to_keyring) { |
5566 | 0 | r = crypt_volume_key_load_in_custom_keyring(cd, vk, &kid1, &kid2); |
5567 | 0 | if (r < 0) { |
5568 | 0 | log_err(cd, _("Failed to link volume key in user defined keyring.")); |
5569 | 0 | goto out; |
5570 | 0 | } |
5571 | 0 | } |
5572 | 0 | } |
5573 | 0 | } else { |
5574 | 0 | p_crypt = vk; |
5575 | 0 | p_ext_key = vk_sign; |
5576 | 0 | } |
5577 | | |
5578 | 0 | if (name) |
5579 | 0 | r = _activate_by_volume_key(cd, name, p_crypt, p_ext_key, flags); |
5580 | |
|
5581 | 0 | if (r >= 0 && unlocked_keyslot >= 0) |
5582 | 0 | r = unlocked_keyslot; |
5583 | 0 | out: |
5584 | 0 | if (r < 0) { |
5585 | 0 | crypt_drop_uploaded_keyring_key(cd, vk); |
5586 | 0 | crypt_drop_uploaded_keyring_key(cd, crypt_key); |
5587 | 0 | if (cd->link_vk_to_keyring && kid1) |
5588 | 0 | crypt_unlink_key_from_custom_keyring(cd, kid1); |
5589 | 0 | if (cd->link_vk_to_keyring && kid2) |
5590 | 0 | crypt_unlink_key_from_custom_keyring(cd, kid2); |
5591 | 0 | } |
5592 | |
|
5593 | 0 | crypt_free_volume_key(vk); |
5594 | 0 | crypt_free_volume_key(crypt_key); |
5595 | 0 | crypt_free_volume_key(opal_key); |
5596 | 0 | crypt_free_volume_key(vk_sign); |
5597 | 0 | return r; |
5598 | 0 | } |
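
A sketch of driving this unified entry point through the public keyslot-context API (available in recent libcryptsetup releases); the context wraps the passphrase and the function picks the matching keyslot:

#include <libcryptsetup.h>
#include <string.h>

static int activate_with_context(struct crypt_device *cd,
				 const char *name, const char *pass)
{
	struct crypt_keyslot_context *kc = NULL;
	int r;

	r = crypt_keyslot_context_init_by_passphrase(cd, pass, strlen(pass), &kc);
	if (r < 0)
		return r;

	/* No additional context is needed unless the device is in reencryption. */
	r = crypt_activate_by_keyslot_context(cd, name, CRYPT_ANY_SLOT, kc,
					      CRYPT_ANY_SLOT, NULL, 0);
	crypt_keyslot_context_free(kc);
	return r;
}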
5599 | | |
5600 | | int crypt_activate_by_passphrase(struct crypt_device *cd, |
5601 | | const char *name, |
5602 | | int keyslot, |
5603 | | const char *passphrase, |
5604 | | size_t passphrase_size, |
5605 | | uint32_t flags) |
5606 | 0 | { |
5607 | 0 | int r; |
5608 | 0 | struct crypt_keyslot_context kc = {}; |
5609 | |
|
5610 | 0 | crypt_keyslot_context_init_by_passphrase_internal(&kc, passphrase, passphrase_size); |
5611 | 0 | r = crypt_activate_by_keyslot_context(cd, name, keyslot, &kc, CRYPT_ANY_SLOT, &kc, flags); |
5612 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
5613 | |
|
5614 | 0 | return r; |
5615 | 0 | } |
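
As the wrapper above shows, passing a NULL name only checks the passphrase without activating anything; a sketch of using that to test a passphrase:

#include <libcryptsetup.h>
#include <string.h>

/* Returns the keyslot number the passphrase unlocks, or a negative errno. */
static int check_passphrase(struct crypt_device *cd, const char *pass)
{
	return crypt_activate_by_passphrase(cd, NULL, CRYPT_ANY_SLOT,
					    pass, strlen(pass), 0);
}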
5616 | | |
5617 | | int crypt_activate_by_keyfile_device_offset(struct crypt_device *cd, |
5618 | | const char *name, |
5619 | | int keyslot, |
5620 | | const char *keyfile, |
5621 | | size_t keyfile_size, |
5622 | | uint64_t keyfile_offset, |
5623 | | uint32_t flags) |
5624 | 0 | { |
5625 | 0 | int r; |
5626 | 0 | struct crypt_keyslot_context kc = {}; |
5627 | |
|
5628 | 0 | crypt_keyslot_context_init_by_keyfile_internal(&kc, keyfile, keyfile_size, keyfile_offset); |
5629 | 0 | r = crypt_activate_by_keyslot_context(cd, name, keyslot, &kc, CRYPT_ANY_SLOT, &kc, flags); |
5630 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
5631 | |
|
5632 | 0 | return r; |
5633 | 0 | } |
5634 | | |
5635 | | int crypt_activate_by_keyfile(struct crypt_device *cd, |
5636 | | const char *name, |
5637 | | int keyslot, |
5638 | | const char *keyfile, |
5639 | | size_t keyfile_size, |
5640 | | uint32_t flags) |
5641 | 0 | { |
5642 | 0 | return crypt_activate_by_keyfile_device_offset(cd, name, keyslot, keyfile, |
5643 | 0 | keyfile_size, 0, flags); |
5644 | 0 | } |
5645 | | |
5646 | | int crypt_activate_by_keyfile_offset(struct crypt_device *cd, |
5647 | | const char *name, |
5648 | | int keyslot, |
5649 | | const char *keyfile, |
5650 | | size_t keyfile_size, |
5651 | | size_t keyfile_offset, |
5652 | | uint32_t flags) |
5653 | 0 | { |
5654 | 0 | return crypt_activate_by_keyfile_device_offset(cd, name, keyslot, keyfile, |
5655 | 0 | keyfile_size, keyfile_offset, flags); |
5656 | 0 | } |
5657 | | |
5658 | | int crypt_activate_by_volume_key(struct crypt_device *cd, |
5659 | | const char *name, |
5660 | | const char *volume_key, |
5661 | | size_t volume_key_size, |
5662 | | uint32_t flags) |
5663 | 0 | { |
5664 | 0 | int r; |
5665 | 0 | struct crypt_keyslot_context kc = {}; |
5666 | |
|
5667 | 0 | crypt_keyslot_context_init_by_key_internal(&kc, volume_key, volume_key_size); |
5668 | 0 | r = crypt_activate_by_keyslot_context(cd, name, CRYPT_ANY_SLOT /* unused */, &kc, CRYPT_ANY_SLOT, &kc, flags); |
5669 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
5670 | |
|
5671 | 0 | return r; |
5672 | 0 | } |
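
A sketch of activation with a binary volume key (for LUKS the key is first verified against the keyslot digest, as implemented above); the key buffer is assumed to come from an earlier crypt_volume_key_get() call:

#include <libcryptsetup.h>

static int activate_with_saved_key(struct crypt_device *cd, const char *name,
				   const char *vk, size_t vk_size)
{
	return crypt_activate_by_volume_key(cd, name, vk, vk_size, 0);
}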
5673 | | |
5674 | | int crypt_activate_by_signed_key(struct crypt_device *cd, |
5675 | | const char *name, |
5676 | | const char *volume_key, |
5677 | | size_t volume_key_size, |
5678 | | const char *signature, |
5679 | | size_t signature_size, |
5680 | | uint32_t flags) |
5681 | 0 | { |
5682 | 0 | int r; |
5683 | 0 | struct crypt_keyslot_context kc = {}; |
5684 | |
|
5685 | 0 | if (!cd || !isVERITY(cd->type)) |
5686 | 0 | return -EINVAL; |
5687 | | |
5688 | 0 | if (!volume_key || !volume_key_size || (!name && signature)) { |
5689 | 0 | log_err(cd, _("Incorrect root hash specified for verity device.")); |
5690 | 0 | return -EINVAL; |
5691 | 0 | } |
5692 | | |
5693 | 0 | if (signature) |
5694 | 0 | crypt_keyslot_context_init_by_signed_key_internal(&kc, volume_key, volume_key_size, |
5695 | 0 | signature, signature_size); |
5696 | 0 | else |
5697 | 0 | crypt_keyslot_context_init_by_key_internal(&kc, volume_key, volume_key_size); |
5698 | 0 | r = crypt_activate_by_keyslot_context(cd, name, -2 /* unused */, &kc, CRYPT_ANY_SLOT, NULL, flags); |
5699 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
5700 | |
|
5701 | 0 | return r; |
5702 | 0 | } |
5703 | | |
5704 | | int crypt_deactivate_by_name(struct crypt_device *cd, const char *name, uint32_t flags) |
5705 | 0 | { |
5706 | 0 | struct crypt_device *fake_cd = NULL; |
5707 | 0 | struct luks2_hdr *hdr2 = NULL; |
5708 | 0 | struct crypt_dm_active_device dmd = {}; |
5709 | 0 | int r; |
5710 | 0 | uint64_t get_flags = DM_ACTIVE_DEVICE | DM_ACTIVE_UUID | DM_ACTIVE_HOLDERS; |
5711 | |
|
5712 | 0 | if (!name) |
5713 | 0 | return -EINVAL; |
5714 | | |
5715 | 0 | if ((flags & CRYPT_DEACTIVATE_DEFERRED) && (flags & CRYPT_DEACTIVATE_DEFERRED_CANCEL)) |
5716 | 0 | return -EINVAL; |
5717 | | |
5718 | 0 | log_dbg(cd, "Deactivating volume %s.", name); |
5719 | |
|
5720 | 0 | if (!cd) { |
5721 | 0 | r = crypt_init_by_name(&fake_cd, name); |
5722 | 0 | if (r < 0) |
5723 | 0 | return r; |
5724 | 0 | cd = fake_cd; |
5725 | 0 | } |
5726 | | |
5727 | 0 | if (flags & (CRYPT_DEACTIVATE_DEFERRED | CRYPT_DEACTIVATE_DEFERRED_CANCEL)) { |
5728 | 0 | r = crypt_get_hw_encryption_type(cd); |
5729 | 0 | if (r == CRYPT_SW_AND_OPAL_HW || r == CRYPT_OPAL_HW_ONLY) { |
5730 | 0 | log_err(cd, _("OPAL does not support deferred deactivation.")); |
5731 | 0 | return -EINVAL; |
5732 | 0 | } |
5733 | 0 | } |
5734 | | |
5735 | | /* skip holders detection and early abort when some flags raised */ |
5736 | 0 | if (flags & (CRYPT_DEACTIVATE_FORCE | CRYPT_DEACTIVATE_DEFERRED | CRYPT_DEACTIVATE_DEFERRED_CANCEL)) |
5737 | 0 | get_flags &= ~DM_ACTIVE_HOLDERS; |
5738 | |
|
5739 | 0 | switch (crypt_status(cd, name)) { |
5740 | 0 | case CRYPT_ACTIVE: |
5741 | 0 | case CRYPT_BUSY: |
5742 | 0 | r = dm_query_device(cd, name, get_flags, &dmd); |
5743 | 0 | if (r >= 0) { |
5744 | 0 | if (dmd.holders) { |
5745 | 0 | log_err(cd, _("Device %s is still in use."), name); |
5746 | 0 | r = -EBUSY; |
5747 | 0 | break; |
5748 | 0 | } |
5749 | 0 | } |
5750 | | |
5751 | | /* For the detached header case or missing metadata, we need to check for OPAL2 devices
5752 | | * from the DM UUID */
5753 | 0 | if (dmd.uuid && (flags & (CRYPT_DEACTIVATE_DEFERRED | CRYPT_DEACTIVATE_DEFERRED_CANCEL)) && |
5754 | 0 | !strncmp(CRYPT_LUKS2_HW_OPAL, dmd.uuid, sizeof(CRYPT_LUKS2_HW_OPAL)-1)) { |
5755 | 0 | log_err(cd, _("OPAL does not support deferred deactivation.")); |
5756 | 0 | r = -EINVAL; |
5757 | 0 | break; |
5758 | 0 | } |
5759 | | |
5760 | 0 | if (flags & CRYPT_DEACTIVATE_DEFERRED_CANCEL) { |
5761 | 0 | r = dm_cancel_deferred_removal(name); |
5762 | 0 | if (r < 0) |
5763 | 0 | log_err(cd, _("Could not cancel deferred remove from device %s."), name); |
5764 | 0 | break; |
5765 | 0 | } |
5766 | | |
5767 | 0 | hdr2 = crypt_get_hdr(cd, CRYPT_LUKS2); |
5768 | |
|
5769 | 0 | if ((dmd.uuid && !strncmp(CRYPT_LUKS2, dmd.uuid, sizeof(CRYPT_LUKS2)-1)) || hdr2) |
5770 | 0 | r = LUKS2_deactivate(cd, name, hdr2, &dmd, flags); |
5771 | 0 | else if (isTCRYPT(cd->type)) |
5772 | 0 | r = TCRYPT_deactivate(cd, name, flags); |
5773 | 0 | else |
5774 | 0 | r = dm_remove_device(cd, name, flags); |
5775 | 0 | if (r < 0 && crypt_status(cd, name) == CRYPT_BUSY) { |
5776 | 0 | log_err(cd, _("Device %s is still in use."), name); |
5777 | 0 | r = -EBUSY; |
5778 | 0 | } |
5779 | 0 | break; |
5780 | 0 | case CRYPT_INACTIVE: |
5781 | 0 | log_err(cd, _("Device %s is not active."), name); |
5782 | 0 | r = -ENODEV; |
5783 | 0 | break; |
5784 | 0 | default: |
5785 | 0 | log_err(cd, _("Invalid device %s."), name); |
5786 | 0 | r = -EINVAL; |
5787 | 0 | } |
5788 | | |
5789 | 0 | dm_targets_free(cd, &dmd); |
5790 | 0 | free(CONST_CAST(void*)dmd.uuid); |
5791 | 0 | crypt_free(fake_cd); |
5792 | |
|
5793 | 0 | return r; |
5794 | 0 | } |
5795 | | |
5796 | | int crypt_deactivate(struct crypt_device *cd, const char *name) |
5797 | 0 | { |
5798 | 0 | return crypt_deactivate_by_name(cd, name, 0); |
5799 | 0 | } |
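
A sketch of closing a mapping, optionally with deferred removal (which, as the code above shows, is refused for OPAL-backed devices); passing a NULL context lets the library look the device up by name:

#include <libcryptsetup.h>

static int close_mapping(const char *name, int deferred)
{
	return crypt_deactivate_by_name(NULL, name,
					deferred ? CRYPT_DEACTIVATE_DEFERRED : 0);
}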
5800 | | |
5801 | | int crypt_get_active_device(struct crypt_device *cd, const char *name, |
5802 | | struct crypt_active_device *cad) |
5803 | 0 | { |
5804 | 0 | int r; |
5805 | 0 | struct crypt_dm_active_device dmd, dmdi = {}; |
5806 | 0 | char *iname = NULL; |
5807 | 0 | struct dm_target *tgt = &dmd.segment; |
5808 | 0 | uint64_t min_offset = UINT64_MAX; |
5809 | |
|
5810 | 0 | if (!cd || !name || !cad) |
5811 | 0 | return -EINVAL; |
5812 | | |
5813 | 0 | r = dm_query_device(cd, name, DM_ACTIVE_DEVICE, &dmd); |
5814 | 0 | if (r < 0) |
5815 | 0 | return r; |
5816 | | |
5817 | | /* For LUKS2 with integrity, we need flags from the underlying dm-integrity device */
5818 | 0 | if (isLUKS2(cd->type) && crypt_get_integrity_tag_size(cd) && |
5819 | 0 | (iname = dm_get_active_iname(cd, name))) { |
5820 | 0 | if (dm_query_device(cd, iname, 0, &dmdi) >= 0) |
5821 | 0 | dmd.flags |= dmdi.flags; |
5822 | 0 | free(iname); |
5823 | 0 | } |
5824 | |
|
5825 | 0 | if (cd && isTCRYPT(cd->type)) { |
5826 | 0 | cad->offset = TCRYPT_get_data_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params); |
5827 | 0 | cad->iv_offset = TCRYPT_get_iv_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params); |
5828 | 0 | } else { |
5829 | 0 | while (tgt) { |
5830 | 0 | if (tgt->type == DM_CRYPT && (min_offset > tgt->u.crypt.offset)) { |
5831 | 0 | min_offset = tgt->u.crypt.offset; |
5832 | 0 | cad->iv_offset = tgt->u.crypt.iv_offset; |
5833 | 0 | } else if (tgt->type == DM_INTEGRITY && (min_offset > tgt->u.integrity.offset)) { |
5834 | 0 | min_offset = tgt->u.integrity.offset; |
5835 | 0 | cad->iv_offset = 0; |
5836 | 0 | } else if (tgt->type == DM_LINEAR && (min_offset > tgt->u.linear.offset)) { |
5837 | 0 | min_offset = tgt->u.linear.offset; |
5838 | 0 | cad->iv_offset = 0; |
5839 | 0 | } |
5840 | 0 | tgt = tgt->next; |
5841 | 0 | } |
5842 | 0 | } |
5843 | |
|
5844 | 0 | if (min_offset != UINT64_MAX) |
5845 | 0 | cad->offset = min_offset; |
5846 | |
|
5847 | 0 | cad->size = dmd.size; |
5848 | 0 | cad->flags = dmd.flags; |
5849 | |
|
5850 | 0 | r = 0; |
5851 | 0 | dm_targets_free(cd, &dmd); |
5852 | 0 | dm_targets_free(cd, &dmdi); |
5853 | |
|
5854 | 0 | return r; |
5855 | 0 | } |
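
A sketch of querying the active mapping parameters collected above; field names follow struct crypt_active_device from the public header:

#include <libcryptsetup.h>
#include <inttypes.h>
#include <stdio.h>

static int print_mapping_info(struct crypt_device *cd, const char *name)
{
	struct crypt_active_device cad;
	int r;

	r = crypt_get_active_device(cd, name, &cad);
	if (r < 0)
		return r;

	printf("offset=%" PRIu64 " iv_offset=%" PRIu64 " size=%" PRIu64 " flags=0x%" PRIx32 "\n",
	       cad.offset, cad.iv_offset, cad.size, cad.flags);
	return 0;
}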
5856 | | |
5857 | | uint64_t crypt_get_active_integrity_failures(struct crypt_device *cd, const char *name) |
5858 | 0 | { |
5859 | 0 | struct crypt_dm_active_device dmd; |
5860 | 0 | uint64_t failures = 0; |
5861 | |
|
5862 | 0 | if (!name) |
5863 | 0 | return 0; |
5864 | | |
5865 | | /* LUKS2 / dm-crypt does not provide this count. */ |
5866 | 0 | if (dm_query_device(cd, name, 0, &dmd) < 0) |
5867 | 0 | return 0; |
5868 | | |
5869 | 0 | if (single_segment(&dmd) && dmd.segment.type == DM_INTEGRITY) |
5870 | 0 | (void)dm_status_integrity_failures(cd, name, &failures); |
5871 | |
|
5872 | 0 | dm_targets_free(cd, &dmd); |
5873 | |
|
5874 | 0 | return failures; |
5875 | 0 | } |
5876 | | |
5877 | | /* |
5878 | | * Volume key handling |
5879 | | */ |
5880 | | int crypt_volume_key_get(struct crypt_device *cd, |
5881 | | int keyslot, |
5882 | | char *volume_key, |
5883 | | size_t *volume_key_size, |
5884 | | const char *passphrase, |
5885 | | size_t passphrase_size) |
5886 | 0 | { |
5887 | 0 | int r; |
5888 | 0 | struct crypt_keyslot_context kc = {}; |
5889 | |
|
5890 | 0 | if (!passphrase) |
5891 | 0 | return crypt_volume_key_get_by_keyslot_context(cd, keyslot, volume_key, volume_key_size, NULL); |
5892 | | |
5893 | 0 | crypt_keyslot_context_init_by_passphrase_internal(&kc, passphrase, passphrase_size); |
5894 | |
|
5895 | 0 | r = crypt_volume_key_get_by_keyslot_context(cd, keyslot, volume_key, volume_key_size, &kc); |
5896 | |
|
5897 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
5898 | |
|
5899 | 0 | return r; |
5900 | 0 | } |
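
A sketch of extracting the volume key with a passphrase; the caller provides a buffer of at least crypt_get_volume_key_size(cd) bytes, and *key_size is updated to the real key length on success:

#include <libcryptsetup.h>
#include <string.h>

static int fetch_volume_key(struct crypt_device *cd, const char *pass,
			    char *key, size_t *key_size /* in/out */)
{
	return crypt_volume_key_get(cd, CRYPT_ANY_SLOT, key, key_size,
				    pass, strlen(pass));
}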
5901 | | |
5902 | | int crypt_volume_key_get_by_keyslot_context(struct crypt_device *cd, |
5903 | | int keyslot, |
5904 | | char *volume_key, |
5905 | | size_t *volume_key_size, |
5906 | | struct crypt_keyslot_context *kc) |
5907 | 0 | { |
5908 | 0 | size_t passphrase_size; |
5909 | 0 | int key_len, r; |
5910 | 0 | const char *passphrase = NULL; |
5911 | 0 | struct volume_key *vk = NULL; |
5912 | |
|
5913 | 0 | if (!cd || !volume_key || !volume_key_size || |
5914 | 0 | (!kc && !isLUKS(cd->type) && !isTCRYPT(cd->type) && !isVERITY(cd->type))) |
5915 | 0 | return -EINVAL; |
5916 | | |
5917 | 0 | if (isLUKS2(cd->type) && keyslot != CRYPT_ANY_SLOT) |
5918 | 0 | key_len = LUKS2_get_keyslot_stored_key_size(&cd->u.luks2.hdr, keyslot); |
5919 | 0 | else |
5920 | 0 | key_len = crypt_get_volume_key_size(cd); |
5921 | |
|
5922 | 0 | if (key_len < 0) |
5923 | 0 | return -EINVAL; |
5924 | | |
5925 | 0 | if (key_len > (int)*volume_key_size) { |
5926 | 0 | log_err(cd, _("Volume key buffer too small.")); |
5927 | 0 | return -ENOMEM; |
5928 | 0 | } |
5929 | | |
5930 | 0 | if (kc && (!kc->get_passphrase || kc->type == CRYPT_KC_TYPE_KEY)) |
5931 | 0 | return -EINVAL; |
5932 | | |
5933 | 0 | r = -EINVAL; |
5934 | |
|
5935 | 0 | if (isLUKS2(cd->type)) { |
5936 | 0 | if (kc && !kc->get_luks2_key) |
5937 | 0 | log_err(cd, _("Cannot retrieve volume key for LUKS2 device.")); |
5938 | 0 | else if (!kc) |
5939 | 0 | r = -ENOENT; |
5940 | 0 | else |
5941 | 0 | r = kc->get_luks2_key(cd, kc, keyslot, |
5942 | 0 | keyslot == CRYPT_ANY_SLOT ? CRYPT_DEFAULT_SEGMENT : CRYPT_ANY_SEGMENT, |
5943 | 0 | &vk); |
5944 | 0 | } else if (isLUKS1(cd->type)) { |
5945 | 0 | if (kc && !kc->get_luks1_volume_key) |
5946 | 0 | log_err(cd, _("Cannot retrieve volume key for LUKS1 device.")); |
5947 | 0 | else if (!kc) |
5948 | 0 | r = -ENOENT; |
5949 | 0 | else |
5950 | 0 | r = kc->get_luks1_volume_key(cd, kc, keyslot, &vk); |
5951 | 0 | } else if (isPLAIN(cd->type) && cd->u.plain.hdr.hash) { |
5952 | 0 | if (kc && kc->get_passphrase && kc->type != CRYPT_KC_TYPE_TOKEN) { |
5953 | 0 | r = kc->get_passphrase(cd, kc, &passphrase, &passphrase_size); |
5954 | 0 | if (r < 0) |
5955 | 0 | return r; |
5956 | 0 | r = process_key(cd, cd->u.plain.hdr.hash, key_len, |
5957 | 0 | passphrase, passphrase_size, &vk); |
5958 | 0 | } |
5959 | 0 | if (r < 0) |
5960 | 0 | log_err(cd, _("Cannot retrieve volume key for plain device.")); |
5961 | 0 | } else if (isVERITY(cd->type)) { |
5962 | | /* volume_key == root hash */ |
5963 | 0 | if (cd->u.verity.root_hash) { |
5964 | 0 | crypt_safe_memcpy(volume_key, cd->u.verity.root_hash, cd->u.verity.root_hash_size); |
5965 | 0 | *volume_key_size = cd->u.verity.root_hash_size; |
5966 | 0 | r = 0; |
5967 | 0 | } else |
5968 | 0 | log_err(cd, _("Cannot retrieve root hash for verity device.")); |
5969 | 0 | } else if (isTCRYPT(cd->type)) { |
5970 | 0 | r = TCRYPT_get_volume_key(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params, &vk); |
5971 | 0 | } else if (isBITLK(cd->type)) { |
5972 | 0 | if (kc && kc->get_bitlk_volume_key) |
5973 | 0 | r = kc->get_bitlk_volume_key(cd, kc, &cd->u.bitlk.params, &vk); |
5974 | 0 | if (r < 0) |
5975 | 0 | log_err(cd, _("Cannot retrieve volume key for BITLK device.")); |
5976 | 0 | } else if (isFVAULT2(cd->type)) { |
5977 | 0 | if (kc && kc->get_fvault2_volume_key) |
5978 | 0 | r = kc->get_fvault2_volume_key(cd, kc, &cd->u.fvault2.params, &vk); |
5979 | 0 | if (r < 0) |
5980 | 0 | log_err(cd, _("Cannot retrieve volume key for FVAULT2 device.")); |
5981 | 0 | } else |
5982 | 0 | log_err(cd, _("This operation is not supported for %s crypt device."), cd->type ?: "(none)"); |
5983 | | |
5984 | 0 | if (r == -ENOENT && isLUKS(cd->type) && cd->volume_key) { |
5985 | 0 | vk = crypt_alloc_volume_key(crypt_volume_key_length(cd->volume_key), |
5986 | 0 | crypt_volume_key_get_key(cd->volume_key)); |
5987 | 0 | r = vk ? 0 : -ENOMEM; |
5988 | 0 | } |
5989 | |
|
5990 | 0 | if (r >= 0 && vk) { |
5991 | 0 | crypt_safe_memcpy(volume_key, crypt_volume_key_get_key(vk), crypt_volume_key_length(vk)); |
5992 | 0 | *volume_key_size = crypt_volume_key_length(vk); |
5993 | 0 | } |
5994 | |
|
5995 | 0 | crypt_free_volume_key(vk); |
5996 | 0 | return r; |
5997 | 0 | } |
5998 | | |
5999 | | int crypt_volume_key_verify(struct crypt_device *cd, |
6000 | | const char *volume_key, |
6001 | | size_t volume_key_size) |
6002 | 0 | { |
6003 | 0 | struct volume_key *vk; |
6004 | 0 | int r; |
6005 | |
|
6006 | 0 | if ((r = onlyLUKSunrestricted(cd))) |
6007 | 0 | return r; |
6008 | | |
6009 | 0 | vk = crypt_alloc_volume_key(volume_key_size, volume_key); |
6010 | 0 | if (!vk) |
6011 | 0 | return -ENOMEM; |
6012 | | |
6013 | 0 | if (isLUKS1(cd->type)) |
6014 | 0 | r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk); |
6015 | 0 | else if (isLUKS2(cd->type)) |
6016 | 0 | r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk); |
6017 | 0 | else |
6018 | 0 | r = -EINVAL; |
6019 | |
|
6020 | 0 | crypt_free_volume_key(vk); |
6021 | |
|
6022 | 0 | return r >= 0 ? 0 : r; |
6023 | 0 | } |
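/*
 * Usage sketch (illustrative): verifying a candidate volume key (e.g. an
 * escrow copy read back from secure storage) against the LUKS digest before
 * using it for activation; "name" is the caller-chosen device-mapper name.
 */
static int example_activate_with_escrow_key(struct crypt_device *cd,
					    const char *name,
					    const char *key, size_t key_size)
{
	int r = crypt_volume_key_verify(cd, key, key_size);

	if (r < 0)
		return r;

	return crypt_activate_by_volume_key(cd, name, key, key_size, 0);
}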
6024 | | |
6025 | | /* |
6026 | | * RNG and memory locking |
6027 | | */ |
6028 | | void crypt_set_rng_type(struct crypt_device *cd, int rng_type) |
6029 | 0 | { |
6030 | 0 | if (!cd) |
6031 | 0 | return; |
6032 | | |
6033 | 0 | switch (rng_type) { |
6034 | 0 | case CRYPT_RNG_URANDOM: |
6035 | 0 | case CRYPT_RNG_RANDOM: |
6036 | 0 | log_dbg(cd, "RNG set to %d (%s).", rng_type, rng_type ? "random" : "urandom"); |
6037 | 0 | cd->rng_type = rng_type; |
6038 | 0 | } |
6039 | 0 | } |
6040 | | |
6041 | | int crypt_get_rng_type(struct crypt_device *cd) |
6042 | 0 | { |
6043 | 0 | if (!cd) |
6044 | 0 | return -EINVAL; |
6045 | | |
6046 | 0 | return cd->rng_type; |
6047 | 0 | } |
6048 | | |
6049 | | int crypt_memory_lock(struct crypt_device *cd, int lock) |
6050 | 0 | { |
6051 | 0 | UNUSED(cd); |
6052 | 0 | UNUSED(lock); |
6053 | |
|
6054 | 0 | return 0; |
6055 | 0 | } |
6056 | | |
6057 | | void crypt_set_compatibility(struct crypt_device *cd, uint32_t flags) |
6058 | 0 | { |
6059 | 0 | if (cd) |
6060 | 0 | cd->compatibility = flags; |
6061 | 0 | } |
6062 | | |
6063 | | uint32_t crypt_get_compatibility(struct crypt_device *cd) |
6064 | 0 | { |
6065 | 0 | if (cd) |
6066 | 0 | return cd->compatibility; |
6067 | | |
6068 | 0 | return 0; |
6069 | 0 | } |
6070 | | |
6071 | | /* |
6072 | | * Reporting |
6073 | | */ |
6074 | | crypt_status_info crypt_status(struct crypt_device *cd, const char *name) |
6075 | 0 | { |
6076 | 0 | int r; |
6077 | |
|
6078 | 0 | if (!name) |
6079 | 0 | return CRYPT_INVALID; |
6080 | | |
6081 | 0 | if (!cd) |
6082 | 0 | dm_backend_init(cd); |
6083 | |
|
6084 | 0 | r = dm_status_device(cd, name); |
6085 | |
|
6086 | 0 | if (!cd) |
6087 | 0 | dm_backend_exit(cd); |
6088 | |
|
6089 | 0 | if (r < 0 && r != -ENODEV) |
6090 | 0 | return CRYPT_INVALID; |
6091 | | |
6092 | 0 | if (r == 0) |
6093 | 0 | return CRYPT_ACTIVE; |
6094 | | |
6095 | 0 | if (r > 0) |
6096 | 0 | return CRYPT_BUSY; |
6097 | | |
6098 | 0 | return CRYPT_INACTIVE; |
6099 | 0 | } |
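/*
 * Usage sketch (illustrative): mapping crypt_status() results to strings.
 * Note that crypt_status() also works with cd == NULL; the DM backend is
 * then initialized and torn down internally, as seen above.
 */
static const char *example_status_string(struct crypt_device *cd, const char *name)
{
	switch (crypt_status(cd, name)) {
	case CRYPT_ACTIVE:   return "active";
	case CRYPT_BUSY:     return "active and in use";
	case CRYPT_INACTIVE: return "inactive";
	case CRYPT_INVALID:  /* fall through */
	default:             return "invalid";
	}
}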
6100 | | |
6101 | | static int _luks_dump(struct crypt_device *cd) |
6102 | 0 | { |
6103 | 0 | int i; |
6104 | |
|
6105 | 0 | log_std(cd, "LUKS header information for %s\n\n", mdata_device_path(cd)); |
6106 | 0 | log_std(cd, "Version: \t%" PRIu16 "\n", cd->u.luks1.hdr.version); |
6107 | 0 | log_std(cd, "Cipher name: \t%s\n", cd->u.luks1.hdr.cipherName); |
6108 | 0 | log_std(cd, "Cipher mode: \t%s\n", cd->u.luks1.hdr.cipherMode); |
6109 | 0 | log_std(cd, "Hash spec: \t%s\n", cd->u.luks1.hdr.hashSpec); |
6110 | 0 | log_std(cd, "Payload offset:\t%" PRIu32 "\n", cd->u.luks1.hdr.payloadOffset); |
6111 | 0 | log_std(cd, "MK bits: \t%" PRIu32 "\n", cd->u.luks1.hdr.keyBytes * 8); |
6112 | 0 | log_std(cd, "MK digest: \t"); |
6113 | 0 | crypt_log_hex(cd, cd->u.luks1.hdr.mkDigest, LUKS_DIGESTSIZE, " ", 0, NULL); |
6114 | 0 | log_std(cd, "\n"); |
6115 | 0 | log_std(cd, "MK salt: \t"); |
6116 | 0 | crypt_log_hex(cd, cd->u.luks1.hdr.mkDigestSalt, LUKS_SALTSIZE/2, " ", 0, NULL); |
6117 | 0 | log_std(cd, "\n \t"); |
6118 | 0 | crypt_log_hex(cd, cd->u.luks1.hdr.mkDigestSalt+LUKS_SALTSIZE/2, LUKS_SALTSIZE/2, " ", 0, NULL); |
6119 | 0 | log_std(cd, "\n"); |
6120 | 0 | log_std(cd, "MK iterations: \t%" PRIu32 "\n", cd->u.luks1.hdr.mkDigestIterations); |
6121 | 0 | log_std(cd, "UUID: \t%s\n\n", cd->u.luks1.hdr.uuid); |
6122 | 0 | for(i = 0; i < LUKS_NUMKEYS; i++) { |
6123 | 0 | if(cd->u.luks1.hdr.keyblock[i].active == LUKS_KEY_ENABLED) { |
6124 | 0 | log_std(cd, "Key Slot %d: ENABLED\n",i); |
6125 | 0 | log_std(cd, "\tIterations: \t%" PRIu32 "\n", |
6126 | 0 | cd->u.luks1.hdr.keyblock[i].passwordIterations); |
6127 | 0 | log_std(cd, "\tSalt: \t"); |
6128 | 0 | crypt_log_hex(cd, cd->u.luks1.hdr.keyblock[i].passwordSalt, |
6129 | 0 | LUKS_SALTSIZE/2, " ", 0, NULL); |
6130 | 0 | log_std(cd, "\n\t \t"); |
6131 | 0 | crypt_log_hex(cd, cd->u.luks1.hdr.keyblock[i].passwordSalt + |
6132 | 0 | LUKS_SALTSIZE/2, LUKS_SALTSIZE/2, " ", 0, NULL); |
6133 | 0 | log_std(cd, "\n"); |
6134 | |
|
6135 | 0 | log_std(cd, "\tKey material offset:\t%" PRIu32 "\n", |
6136 | 0 | cd->u.luks1.hdr.keyblock[i].keyMaterialOffset); |
6137 | 0 | log_std(cd, "\tAF stripes: \t%" PRIu32 "\n", |
6138 | 0 | cd->u.luks1.hdr.keyblock[i].stripes); |
6139 | 0 | } |
6140 | 0 | else |
6141 | 0 | log_std(cd, "Key Slot %d: DISABLED\n", i); |
6142 | 0 | } |
6143 | 0 | return 0; |
6144 | 0 | } |
6145 | | |
6146 | | int crypt_dump(struct crypt_device *cd) |
6147 | 0 | { |
6148 | 0 | if (!cd) |
6149 | 0 | return -EINVAL; |
6150 | 0 | if (isLUKS1(cd->type)) |
6151 | 0 | return _luks_dump(cd); |
6152 | 0 | else if (isLUKS2(cd->type)) |
6153 | 0 | return LUKS2_hdr_dump(cd, &cd->u.luks2.hdr); |
6154 | 0 | else if (isVERITY(cd->type)) |
6155 | 0 | return VERITY_dump(cd, &cd->u.verity.hdr, |
6156 | 0 | cd->u.verity.root_hash, cd->u.verity.root_hash_size, |
6157 | 0 | cd->u.verity.fec_device); |
6158 | 0 | else if (isTCRYPT(cd->type)) |
6159 | 0 | return TCRYPT_dump(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params); |
6160 | 0 | else if (isINTEGRITY(cd->type)) |
6161 | 0 | return INTEGRITY_dump(cd, crypt_data_device(cd), 0); |
6162 | 0 | else if (isBITLK(cd->type)) |
6163 | 0 | return BITLK_dump(cd, crypt_data_device(cd), &cd->u.bitlk.params); |
6164 | 0 | else if (isFVAULT2(cd->type)) |
6165 | 0 | return FVAULT2_dump(cd, crypt_data_device(cd), &cd->u.fvault2.params); |
6166 | | |
6167 | 0 | log_err(cd, _("Dump operation is not supported for this device type.")); |
6168 | 0 | return -EINVAL; |
6169 | 0 | } |
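/*
 * Usage sketch (illustrative): dumping header information for a device,
 * roughly what `cryptsetup luksDump` does. "/dev/sdb1" is a placeholder.
 */
static int example_dump(void)
{
	struct crypt_device *cd;
	int r;

	if (crypt_init(&cd, "/dev/sdb1") < 0)
		return -EINVAL;

	r = crypt_load(cd, CRYPT_LUKS, NULL);	/* either LUKS1 or LUKS2 */
	if (r >= 0)
		r = crypt_dump(cd);

	crypt_free(cd);
	return r;
}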
6170 | | |
6171 | | int crypt_dump_json(struct crypt_device *cd, const char **json, uint32_t flags) |
6172 | 0 | { |
6173 | 0 | if (!cd || flags) |
6174 | 0 | return -EINVAL; |
6175 | 0 | if (isLUKS2(cd->type)) |
6176 | 0 | return LUKS2_hdr_dump_json(cd, &cd->u.luks2.hdr, json); |
6177 | | |
6178 | 0 | log_err(cd, _("Dump operation is not supported for this device type.")); |
6179 | 0 | return -EINVAL; |
6180 | 0 | } |
6181 | | |
6182 | | /* internal only */ |
6183 | | const char *crypt_get_cipher_spec(struct crypt_device *cd) |
6184 | 0 | { |
6185 | 0 | if (!cd) |
6186 | 0 | return NULL; |
6187 | 0 | else if (isLUKS2(cd->type)) |
6188 | 0 | return LUKS2_get_cipher(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT); |
6189 | 0 | else if (isLUKS1(cd->type)) |
6190 | 0 | return cd->u.luks1.cipher_spec; |
6191 | 0 | else if (isPLAIN(cd->type)) |
6192 | 0 | return cd->u.plain.cipher_spec; |
6193 | 0 | else if (isLOOPAES(cd->type)) |
6194 | 0 | return cd->u.loopaes.cipher_spec; |
6195 | 0 | else if (isBITLK(cd->type)) |
6196 | 0 | return cd->u.bitlk.cipher_spec; |
6197 | 0 | else if (!cd->type && !_init_by_name_crypt_none(cd)) |
6198 | 0 | return cd->u.none.cipher_spec; |
6199 | | |
6200 | 0 | return NULL; |
6201 | 0 | } |
6202 | | |
6203 | | const char *crypt_get_cipher(struct crypt_device *cd) |
6204 | 0 | { |
6205 | 0 | if (!cd) |
6206 | 0 | return NULL; |
6207 | | |
6208 | 0 | if (isPLAIN(cd->type)) |
6209 | 0 | return cd->u.plain.cipher; |
6210 | | |
6211 | 0 | if (isLUKS1(cd->type)) |
6212 | 0 | return cd->u.luks1.hdr.cipherName; |
6213 | | |
6214 | 0 | if (isLUKS2(cd->type)) { |
6215 | 0 | if (crypt_parse_name_and_mode(LUKS2_get_cipher(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT), |
6216 | 0 | cd->u.luks2.cipher, NULL, cd->u.luks2.cipher_mode)) |
6217 | 0 | return NULL; |
6218 | 0 | return cd->u.luks2.cipher; |
6219 | 0 | } |
6220 | | |
6221 | 0 | if (isLOOPAES(cd->type)) |
6222 | 0 | return cd->u.loopaes.cipher; |
6223 | | |
6224 | 0 | if (isTCRYPT(cd->type)) |
6225 | 0 | return cd->u.tcrypt.params.cipher; |
6226 | | |
6227 | 0 | if (isBITLK(cd->type)) |
6228 | 0 | return cd->u.bitlk.params.cipher; |
6229 | | |
6230 | 0 | if (isFVAULT2(cd->type)) |
6231 | 0 | return cd->u.fvault2.params.cipher; |
6232 | | |
6233 | 0 | if (!cd->type && !_init_by_name_crypt_none(cd)) |
6234 | 0 | return cd->u.none.cipher; |
6235 | | |
6236 | 0 | return NULL; |
6237 | 0 | } |
6238 | | |
6239 | | const char *crypt_get_cipher_mode(struct crypt_device *cd) |
6240 | 0 | { |
6241 | 0 | if (!cd) |
6242 | 0 | return NULL; |
6243 | | |
6244 | 0 | if (isPLAIN(cd->type)) |
6245 | 0 | return cd->u.plain.cipher_mode; |
6246 | | |
6247 | 0 | if (isLUKS1(cd->type)) |
6248 | 0 | return cd->u.luks1.hdr.cipherMode; |
6249 | | |
6250 | 0 | if (isLUKS2(cd->type)) { |
6251 | 0 | if (crypt_parse_name_and_mode(LUKS2_get_cipher(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT), |
6252 | 0 | cd->u.luks2.cipher, NULL, cd->u.luks2.cipher_mode)) |
6253 | 0 | return NULL; |
6254 | 0 | return cd->u.luks2.cipher_mode; |
6255 | 0 | } |
6256 | | |
6257 | 0 | if (isLOOPAES(cd->type)) |
6258 | 0 | return cd->u.loopaes.cipher_mode; |
6259 | | |
6260 | 0 | if (isTCRYPT(cd->type)) |
6261 | 0 | return cd->u.tcrypt.params.mode; |
6262 | | |
6263 | 0 | if (isBITLK(cd->type)) |
6264 | 0 | return cd->u.bitlk.params.cipher_mode; |
6265 | | |
6266 | 0 | if (isFVAULT2(cd->type)) |
6267 | 0 | return cd->u.fvault2.params.cipher_mode; |
6268 | | |
6269 | 0 | if (!cd->type && !_init_by_name_crypt_none(cd)) |
6270 | 0 | return cd->u.none.cipher_mode; |
6271 | | |
6272 | 0 | return NULL; |
6273 | 0 | } |
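/*
 * Usage sketch (illustrative): the cipher is reported in two parts, so an
 * "aes-xts-plain64"-style specification has to be reassembled by the caller
 * (crypt_get_cipher_spec() above is internal only).
 */
static void example_print_cipher(struct crypt_device *cd)
{
	const char *cipher = crypt_get_cipher(cd);
	const char *mode = crypt_get_cipher_mode(cd);

	if (cipher && mode)
		printf("cipher: %s-%s\n", cipher, mode);
}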
6274 | | |
6275 | | /* INTERNAL only */ |
6276 | | const char *crypt_get_integrity(struct crypt_device *cd) |
6277 | 0 | { |
6278 | 0 | if (!cd) |
6279 | 0 | return NULL; |
6280 | | |
6281 | 0 | if (isINTEGRITY(cd->type)) |
6282 | 0 | return cd->u.integrity.params.integrity; |
6283 | | |
6284 | 0 | if (isLUKS2(cd->type)) |
6285 | 0 | return LUKS2_get_integrity(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT); |
6286 | | |
6287 | 0 | if (!cd->type && *cd->u.none.integrity_spec) |
6288 | 0 | return cd->u.none.integrity_spec; |
6289 | | |
6290 | 0 | return NULL; |
6291 | 0 | } |
6292 | | |
6293 | | /* INTERNAL only */ |
6294 | | int crypt_get_integrity_key_size(struct crypt_device *cd, bool dm_compat) |
6295 | 0 | { |
6296 | 0 | int key_size = 0; |
6297 | |
|
6298 | 0 | if (isLUKS2(cd->type)) { |
6299 | 0 | key_size = INTEGRITY_key_size(crypt_get_integrity(cd), |
6300 | 0 | LUKS2_get_integrity_key_size(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT)); |
6301 | 0 | if (dm_compat && key_size > 0 && |
6302 | 0 | key_size == INTEGRITY_key_size(crypt_get_integrity(cd), 0)) |
6303 | 0 | return 0; |
6304 | 0 | } |
6305 | | |
6306 | 0 | if (isINTEGRITY(cd->type) || !cd->type) |
6307 | 0 | key_size = INTEGRITY_key_size(crypt_get_integrity(cd), 0); |
6308 | |
|
6309 | 0 | return key_size > 0 ? key_size : 0; |
6310 | 0 | } |
6311 | | |
6312 | | /* INTERNAL only */ |
6313 | | int crypt_get_integrity_tag_size(struct crypt_device *cd) |
6314 | 0 | { |
6315 | 0 | if (isINTEGRITY(cd->type)) |
6316 | 0 | return cd->u.integrity.params.tag_size; |
6317 | | |
6318 | 0 | if (isLUKS2(cd->type) || !cd->type) |
6319 | 0 | return INTEGRITY_tag_size(crypt_get_integrity(cd), |
6320 | 0 | crypt_get_cipher(cd), |
6321 | 0 | crypt_get_cipher_mode(cd)); |
6322 | 0 | return 0; |
6323 | 0 | } |
6324 | | |
6325 | | int crypt_get_sector_size(struct crypt_device *cd) |
6326 | 0 | { |
6327 | 0 | if (!cd) |
6328 | 0 | return SECTOR_SIZE; |
6329 | | |
6330 | 0 | if (isPLAIN(cd->type)) |
6331 | 0 | return cd->u.plain.hdr.sector_size; |
6332 | | |
6333 | 0 | if (isINTEGRITY(cd->type)) |
6334 | 0 | return cd->u.integrity.params.sector_size; |
6335 | | |
6336 | 0 | if (isLUKS2(cd->type)) |
6337 | 0 | return LUKS2_get_sector_size(&cd->u.luks2.hdr); |
6338 | | |
6339 | 0 | if (!cd->type && cd->u.none.sector_size) |
6340 | 0 | return cd->u.none.sector_size; |
6341 | | |
6342 | 0 | return SECTOR_SIZE; |
6343 | 0 | } |
6344 | | |
6345 | | const char *crypt_get_uuid(struct crypt_device *cd) |
6346 | 0 | { |
6347 | 0 | if (!cd) |
6348 | 0 | return NULL; |
6349 | | |
6350 | 0 | if (isLUKS1(cd->type)) |
6351 | 0 | return cd->u.luks1.hdr.uuid; |
6352 | | |
6353 | 0 | if (isLUKS2(cd->type)) |
6354 | 0 | return cd->u.luks2.hdr.uuid; |
6355 | | |
6356 | 0 | if (isVERITY(cd->type)) |
6357 | 0 | return cd->u.verity.uuid; |
6358 | | |
6359 | 0 | if (isBITLK(cd->type)) |
6360 | 0 | return cd->u.bitlk.params.guid; |
6361 | | |
6362 | 0 | if (isFVAULT2(cd->type)) |
6363 | 0 | return cd->u.fvault2.params.family_uuid; |
6364 | | |
6365 | 0 | return NULL; |
6366 | 0 | } |
6367 | | |
6368 | | const char *crypt_get_device_name(struct crypt_device *cd) |
6369 | 0 | { |
6370 | 0 | const char *path; |
6371 | |
|
6372 | 0 | if (!cd) |
6373 | 0 | return NULL; |
6374 | | |
6375 | 0 | path = device_block_path(cd->device); |
6376 | 0 | if (!path) |
6377 | 0 | path = device_path(cd->device); |
6378 | |
|
6379 | 0 | return path; |
6380 | 0 | } |
6381 | | |
6382 | | const char *crypt_get_metadata_device_name(struct crypt_device *cd) |
6383 | 0 | { |
6384 | 0 | const char *path; |
6385 | |
|
6386 | 0 | if (!cd || !cd->metadata_device) |
6387 | 0 | return NULL; |
6388 | | |
6389 | 0 | path = device_block_path(cd->metadata_device); |
6390 | 0 | if (!path) |
6391 | 0 | path = device_path(cd->metadata_device); |
6392 | |
|
6393 | 0 | return path; |
6394 | 0 | } |
6395 | | |
6396 | | int crypt_get_volume_key_size(struct crypt_device *cd) |
6397 | 0 | { |
6398 | 0 | int r; |
6399 | |
|
6400 | 0 | if (!cd) |
6401 | 0 | return 0; |
6402 | | |
6403 | 0 | if (isPLAIN(cd->type)) |
6404 | 0 | return cd->u.plain.key_size; |
6405 | | |
6406 | 0 | if (isLUKS1(cd->type)) |
6407 | 0 | return cd->u.luks1.hdr.keyBytes; |
6408 | | |
6409 | 0 | if (isLUKS2(cd->type)) { |
6410 | 0 | r = LUKS2_get_volume_key_size(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT); |
6411 | 0 | if (r < 0 && cd->volume_key) |
6412 | 0 | r = crypt_volume_key_length(cd->volume_key); |
6413 | 0 | return r < 0 ? 0 : r; |
6414 | 0 | } |
6415 | | |
6416 | 0 | if (isLOOPAES(cd->type)) |
6417 | 0 | return cd->u.loopaes.key_size; |
6418 | | |
6419 | 0 | if (isVERITY(cd->type)) |
6420 | 0 | return cd->u.verity.root_hash_size; |
6421 | | |
6422 | 0 | if (isTCRYPT(cd->type)) |
6423 | 0 | return cd->u.tcrypt.params.key_size; |
6424 | | |
6425 | 0 | if (isBITLK(cd->type)) |
6426 | 0 | return cd->u.bitlk.params.key_size / 8; |
6427 | | |
6428 | 0 | if (isFVAULT2(cd->type)) |
6429 | 0 | return cd->u.fvault2.params.key_size; |
6430 | | |
6431 | 0 | if (!cd->type && !_init_by_name_crypt_none(cd)) |
6432 | 0 | return cd->u.none.key_size; |
6433 | | |
6434 | 0 | return 0; |
6435 | 0 | } |
6436 | | |
6437 | | int crypt_get_old_volume_key_size(struct crypt_device *cd) |
6438 | 0 | { |
6439 | 0 | int r = _onlyLUKS2(cd, CRYPT_CD_QUIET, |
6440 | 0 | CRYPT_REQUIREMENT_ONLINE_REENCRYPT | CRYPT_REQUIREMENT_OPAL); |
6441 | |
|
6442 | 0 | if (r < 0) |
6443 | 0 | return 0; |
6444 | | |
6445 | 0 | r = LUKS2_get_old_volume_key_size(&cd->u.luks2.hdr); |
6446 | |
|
6447 | 0 | return r < 0 ? 0 : r; |
6448 | 0 | } |
6449 | | |
6450 | | int crypt_get_hw_encryption_key_size(struct crypt_device *cd) |
6451 | 0 | { |
6452 | 0 | if (!cd || !isLUKS2(cd->type)) |
6453 | 0 | return 0; |
6454 | | |
6455 | 0 | return LUKS2_get_opal_key_size(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT); |
6456 | 0 | } |
6457 | | |
6458 | | int crypt_keyslot_get_key_size(struct crypt_device *cd, int keyslot) |
6459 | 0 | { |
6460 | 0 | if (!cd || !isLUKS(cd->type)) |
6461 | 0 | return -EINVAL; |
6462 | | |
6463 | 0 | if (keyslot < 0 || keyslot >= crypt_keyslot_max(cd->type)) |
6464 | 0 | return -EINVAL; |
6465 | | |
6466 | 0 | if (isLUKS1(cd->type)) |
6467 | 0 | return cd->u.luks1.hdr.keyBytes; |
6468 | | |
6469 | 0 | if (isLUKS2(cd->type)) |
6470 | 0 | return LUKS2_get_keyslot_stored_key_size(&cd->u.luks2.hdr, keyslot); |
6471 | | |
6472 | 0 | return -EINVAL; |
6473 | 0 | } |
6474 | | |
6475 | | int crypt_keyslot_set_encryption(struct crypt_device *cd, |
6476 | | const char *cipher, |
6477 | | size_t key_size) |
6478 | 0 | { |
6479 | 0 | char *tmp; |
6480 | |
|
6481 | 0 | if (!cd || !cipher || !key_size || !isLUKS2(cd->type)) |
6482 | 0 | return -EINVAL; |
6483 | | |
6484 | 0 | if (LUKS2_keyslot_cipher_incompatible(cd, cipher)) |
6485 | 0 | return -EINVAL; |
6486 | | |
6487 | 0 | if (!(tmp = strdup(cipher))) |
6488 | 0 | return -ENOMEM; |
6489 | | |
6490 | 0 | free(cd->u.luks2.keyslot_cipher); |
6491 | 0 | cd->u.luks2.keyslot_cipher = tmp; |
6492 | 0 | cd->u.luks2.keyslot_key_size = key_size; |
6493 | |
|
6494 | 0 | return 0; |
6495 | 0 | } |
6496 | | |
6497 | | const char *crypt_keyslot_get_encryption(struct crypt_device *cd, int keyslot, size_t *key_size) |
6498 | 0 | { |
6499 | 0 | const char *cipher; |
6500 | |
|
6501 | 0 | if (!cd || !isLUKS(cd->type) || !key_size) |
6502 | 0 | return NULL; |
6503 | | |
6504 | 0 | if (isLUKS1(cd->type)) { |
6505 | 0 | if (keyslot != CRYPT_ANY_SLOT && |
6506 | 0 | LUKS_keyslot_info(&cd->u.luks1.hdr, keyslot) < CRYPT_SLOT_ACTIVE) |
6507 | 0 | return NULL; |
6508 | 0 | *key_size = crypt_get_volume_key_size(cd); |
6509 | 0 | return cd->u.luks1.cipher_spec; |
6510 | 0 | } |
6511 | | |
6512 | 0 | if (keyslot != CRYPT_ANY_SLOT) |
6513 | 0 | return LUKS2_get_keyslot_cipher(&cd->u.luks2.hdr, keyslot, key_size); |
6514 | | |
6515 | | /* Keyslot encryption was set through crypt_keyslot_set_encryption() */ |
6516 | 0 | if (cd->u.luks2.keyslot_cipher) { |
6517 | 0 | *key_size = cd->u.luks2.keyslot_key_size; |
6518 | 0 | return cd->u.luks2.keyslot_cipher; |
6519 | 0 | } |
6520 | | |
6521 | 0 | if (LUKS2_segment_is_hw_opal(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT)) { |
6522 | | /* Fallback to default LUKS2 keyslot encryption */ |
6523 | 0 | *key_size = DEFAULT_LUKS2_KEYSLOT_KEYBITS / 8; |
6524 | 0 | return DEFAULT_LUKS2_KEYSLOT_CIPHER; |
6525 | 0 | } |
6526 | | |
6527 | | /* Try to reuse volume encryption parameters */ |
6528 | 0 | cipher = LUKS2_get_cipher(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT); |
6529 | 0 | if (!LUKS2_keyslot_cipher_incompatible(cd, cipher)) { |
6530 | 0 | *key_size = crypt_get_volume_key_size(cd); |
6531 | 0 | if (*key_size) |
6532 | 0 | return cipher; |
6533 | 0 | } |
6534 | | |
6535 | | /* Fallback to default LUKS2 keyslot encryption */ |
6536 | 0 | *key_size = DEFAULT_LUKS2_KEYSLOT_KEYBITS / 8; |
6537 | 0 | return DEFAULT_LUKS2_KEYSLOT_CIPHER; |
6538 | 0 | } |
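/*
 * Usage sketch (illustrative): forcing a specific keyslot encryption before
 * adding a new passphrase instead of reusing the data segment parameters.
 * The cipher and key size are just an example choice; the setting applies
 * to keyslots created afterwards (LUKS2 only).
 */
static int example_set_keyslot_cipher(struct crypt_device *cd,
				      const char *old_pass, size_t old_pass_size,
				      const char *new_pass, size_t new_pass_size)
{
	int r = crypt_keyslot_set_encryption(cd, "aes-xts-plain64", 512 / 8);

	if (r < 0)
		return r;

	return crypt_keyslot_add_by_passphrase(cd, CRYPT_ANY_SLOT,
					       old_pass, old_pass_size,
					       new_pass, new_pass_size);
}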
6539 | | |
6540 | | int crypt_keyslot_get_pbkdf(struct crypt_device *cd, int keyslot, struct crypt_pbkdf_type *pbkdf) |
6541 | 0 | { |
6542 | 0 | if (!cd || !pbkdf || keyslot == CRYPT_ANY_SLOT) |
6543 | 0 | return -EINVAL; |
6544 | | |
6545 | 0 | if (isLUKS1(cd->type)) |
6546 | 0 | return LUKS_keyslot_pbkdf(&cd->u.luks1.hdr, keyslot, pbkdf); |
6547 | 0 | else if (isLUKS2(cd->type)) |
6548 | 0 | return LUKS2_keyslot_pbkdf(&cd->u.luks2.hdr, keyslot, pbkdf); |
6549 | | |
6550 | 0 | return -EINVAL; |
6551 | 0 | } |
6552 | | |
6553 | | int crypt_set_data_offset(struct crypt_device *cd, uint64_t data_offset) |
6554 | 0 | { |
6555 | 0 | if (!cd) |
6556 | 0 | return -EINVAL; |
6557 | 0 | if (data_offset % (MAX_SECTOR_SIZE >> SECTOR_SHIFT)) { |
6558 | 0 | log_err(cd, _("Data offset is not a multiple of %u bytes."), MAX_SECTOR_SIZE); |
6559 | 0 | return -EINVAL; |
6560 | 0 | } |
6561 | | |
6562 | 0 | cd->data_offset = data_offset; |
6563 | 0 | log_dbg(cd, "Data offset set to %" PRIu64 " (512-byte) sectors.", data_offset); |
6564 | |
|
6565 | 0 | return 0; |
6566 | 0 | } |
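/*
 * Usage sketch (illustrative): crypt_set_data_offset() takes 512-byte sectors,
 * so placing the data area 16 MiB into the device means 16 MiB / 512 = 32768
 * sectors; the value must stay aligned to the maximum sector size as checked
 * above, and has to be set before crypt_format(). Cipher and key size are
 * example choices only.
 */
static int example_format_with_offset(struct crypt_device *cd,
				      const char *passphrase, size_t passphrase_size)
{
	int r = crypt_set_data_offset(cd, 16 * 1024 * 2);	/* 16 MiB in 512-byte sectors */

	if (r < 0)
		return r;

	r = crypt_format(cd, CRYPT_LUKS2, "aes", "xts-plain64",
			 NULL, NULL, 512 / 8, NULL);	/* random volume key */
	if (r < 0)
		return r;

	return crypt_keyslot_add_by_volume_key(cd, CRYPT_ANY_SLOT, NULL, 0,
					       passphrase, passphrase_size);
}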
6567 | | |
6568 | | int crypt_set_metadata_size(struct crypt_device *cd, |
6569 | | uint64_t metadata_size, |
6570 | | uint64_t keyslots_size) |
6571 | 0 | { |
6572 | 0 | if (!cd) |
6573 | 0 | return -EINVAL; |
6574 | | |
6575 | 0 | if (cd->type && !isLUKS2(cd->type)) |
6576 | 0 | return -EINVAL; |
6577 | | |
6578 | 0 | if (metadata_size && LUKS2_check_metadata_area_size(metadata_size)) |
6579 | 0 | return -EINVAL; |
6580 | | |
6581 | 0 | if (keyslots_size && LUKS2_check_keyslots_area_size(keyslots_size)) |
6582 | 0 | return -EINVAL; |
6583 | | |
6584 | 0 | cd->metadata_size = metadata_size; |
6585 | 0 | cd->keyslots_size = keyslots_size; |
6586 | |
|
6587 | 0 | return 0; |
6588 | 0 | } |
6589 | | |
6590 | | int crypt_get_metadata_size(struct crypt_device *cd, |
6591 | | uint64_t *metadata_size, |
6592 | | uint64_t *keyslots_size) |
6593 | 0 | { |
6594 | 0 | uint64_t msize, ksize; |
6595 | |
|
6596 | 0 | if (!cd) |
6597 | 0 | return -EINVAL; |
6598 | | |
6599 | 0 | if (!cd->type) { |
6600 | 0 | msize = cd->metadata_size; |
6601 | 0 | ksize = cd->keyslots_size; |
6602 | 0 | } else if (isLUKS1(cd->type)) { |
6603 | 0 | msize = LUKS_ALIGN_KEYSLOTS; |
6604 | 0 | ksize = LUKS_device_sectors(&cd->u.luks1.hdr) * SECTOR_SIZE - msize; |
6605 | 0 | } else if (isLUKS2(cd->type)) { |
6606 | 0 | msize = LUKS2_metadata_size(&cd->u.luks2.hdr); |
6607 | 0 | ksize = LUKS2_keyslots_size(&cd->u.luks2.hdr); |
6608 | 0 | } else |
6609 | 0 | return -EINVAL; |
6610 | | |
6611 | 0 | if (metadata_size) |
6612 | 0 | *metadata_size = msize; |
6613 | 0 | if (keyslots_size) |
6614 | 0 | *keyslots_size = ksize; |
6615 | |
|
6616 | 0 | return 0; |
6617 | 0 | } |
6618 | | |
6619 | | uint64_t crypt_get_data_offset(struct crypt_device *cd) |
6620 | 0 | { |
6621 | 0 | if (!cd) |
6622 | 0 | return 0; |
6623 | | |
6624 | 0 | if (isPLAIN(cd->type)) |
6625 | 0 | return cd->u.plain.hdr.offset; |
6626 | | |
6627 | 0 | if (isLUKS1(cd->type)) |
6628 | 0 | return cd->u.luks1.hdr.payloadOffset; |
6629 | | |
6630 | 0 | if (isLUKS2(cd->type)) |
6631 | 0 | return LUKS2_get_data_offset(&cd->u.luks2.hdr); |
6632 | | |
6633 | 0 | if (isLOOPAES(cd->type)) |
6634 | 0 | return cd->u.loopaes.hdr.offset; |
6635 | | |
6636 | 0 | if (isTCRYPT(cd->type)) |
6637 | 0 | return TCRYPT_get_data_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params); |
6638 | | |
6639 | 0 | if (isBITLK(cd->type)) |
6640 | 0 | return cd->u.bitlk.params.volume_header_size / SECTOR_SIZE; |
6641 | | |
6642 | 0 | if (isFVAULT2(cd->type)) |
6643 | 0 | return cd->u.fvault2.params.log_vol_off / SECTOR_SIZE; |
6644 | | |
6645 | 0 | return cd->data_offset; |
6646 | 0 | } |
6647 | | |
6648 | | uint64_t crypt_get_iv_offset(struct crypt_device *cd) |
6649 | 0 | { |
6650 | 0 | if (!cd) |
6651 | 0 | return 0; |
6652 | | |
6653 | 0 | if (isPLAIN(cd->type)) |
6654 | 0 | return cd->u.plain.hdr.skip; |
6655 | | |
6656 | 0 | if (isLOOPAES(cd->type)) |
6657 | 0 | return cd->u.loopaes.hdr.skip; |
6658 | | |
6659 | 0 | if (isTCRYPT(cd->type)) |
6660 | 0 | return TCRYPT_get_iv_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params); |
6661 | | |
6662 | 0 | return 0; |
6663 | 0 | } |
6664 | | |
6665 | | crypt_keyslot_info crypt_keyslot_status(struct crypt_device *cd, int keyslot) |
6666 | 0 | { |
6667 | 0 | if (_onlyLUKS(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0) < 0) |
6668 | 0 | return CRYPT_SLOT_INVALID; |
6669 | | |
6670 | 0 | if (isLUKS1(cd->type)) |
6671 | 0 | return LUKS_keyslot_info(&cd->u.luks1.hdr, keyslot); |
6672 | 0 | else if(isLUKS2(cd->type)) |
6673 | 0 | return LUKS2_keyslot_info(&cd->u.luks2.hdr, keyslot); |
6674 | | |
6675 | 0 | return CRYPT_SLOT_INVALID; |
6676 | 0 | } |
6677 | | |
6678 | | int crypt_keyslot_max(const char *type) |
6679 | 0 | { |
6680 | 0 | if (isLUKS1(type)) |
6681 | 0 | return LUKS_NUMKEYS; |
6682 | | |
6683 | 0 | if (isLUKS2(type)) |
6684 | 0 | return LUKS2_KEYSLOTS_MAX; |
6685 | | |
6686 | 0 | return -EINVAL; |
6687 | 0 | } |
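/*
 * Usage sketch (illustrative): counting keyslots in use on a loaded LUKS
 * device, using crypt_keyslot_max() for the per-type upper bound and
 * crypt_keyslot_status() defined above.
 */
static int example_count_active_keyslots(struct crypt_device *cd)
{
	int i, active = 0, max = crypt_keyslot_max(crypt_get_type(cd));

	if (max < 0)
		return max;

	for (i = 0; i < max; i++) {
		crypt_keyslot_info ki = crypt_keyslot_status(cd, i);

		if (ki == CRYPT_SLOT_ACTIVE || ki == CRYPT_SLOT_ACTIVE_LAST)
			active++;
	}

	return active;
}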
6688 | | |
6689 | | int crypt_keyslot_area(struct crypt_device *cd, |
6690 | | int keyslot, |
6691 | | uint64_t *offset, |
6692 | | uint64_t *length) |
6693 | 0 | { |
6694 | 0 | if (_onlyLUKS(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0) || !offset || !length) |
6695 | 0 | return -EINVAL; |
6696 | | |
6697 | 0 | if (isLUKS2(cd->type)) |
6698 | 0 | return LUKS2_keyslot_area(&cd->u.luks2.hdr, keyslot, offset, length); |
6699 | | |
6700 | 0 | return LUKS_keyslot_area(&cd->u.luks1.hdr, keyslot, offset, length); |
6701 | 0 | } |
6702 | | |
6703 | | crypt_keyslot_priority crypt_keyslot_get_priority(struct crypt_device *cd, int keyslot) |
6704 | 0 | { |
6705 | 0 | if (_onlyLUKS(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0)) |
6706 | 0 | return CRYPT_SLOT_PRIORITY_INVALID; |
6707 | | |
6708 | 0 | if (keyslot < 0 || keyslot >= crypt_keyslot_max(cd->type)) |
6709 | 0 | return CRYPT_SLOT_PRIORITY_INVALID; |
6710 | | |
6711 | 0 | if (isLUKS2(cd->type)) |
6712 | 0 | return LUKS2_keyslot_priority_get(&cd->u.luks2.hdr, keyslot); |
6713 | | |
6714 | 0 | return CRYPT_SLOT_PRIORITY_NORMAL; |
6715 | 0 | } |
6716 | | |
6717 | | int crypt_keyslot_set_priority(struct crypt_device *cd, int keyslot, crypt_keyslot_priority priority) |
6718 | 0 | { |
6719 | 0 | int r; |
6720 | |
|
6721 | 0 | log_dbg(cd, "Setting keyslot %d to priority %d.", keyslot, priority); |
6722 | |
|
6723 | 0 | if (priority == CRYPT_SLOT_PRIORITY_INVALID) |
6724 | 0 | return -EINVAL; |
6725 | | |
6726 | 0 | if (keyslot < 0 || keyslot >= crypt_keyslot_max(cd->type)) |
6727 | 0 | return -EINVAL; |
6728 | | |
6729 | 0 | if ((r = onlyLUKS2(cd))) |
6730 | 0 | return r; |
6731 | | |
6732 | 0 | return LUKS2_keyslot_priority_set(cd, &cd->u.luks2.hdr, keyslot, priority, 1); |
6733 | 0 | } |
6734 | | |
6735 | | const char *crypt_get_type(struct crypt_device *cd) |
6736 | 1.90k | { |
6737 | 1.90k | return cd ? cd->type : NULL; |
6738 | 1.90k | } |
6739 | | |
6740 | | const char *crypt_get_default_type(void) |
6741 | 0 | { |
6742 | 0 | return DEFAULT_LUKS_FORMAT; |
6743 | 0 | } |
6744 | | |
6745 | | int crypt_get_hw_encryption_type(struct crypt_device *cd) |
6746 | 0 | { |
6747 | 0 | if (!cd) |
6748 | 0 | return -EINVAL; |
6749 | | |
6750 | 0 | if (isLUKS2(cd->type)) { |
6751 | 0 | if (LUKS2_segment_is_hw_opal_crypt(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT)) |
6752 | 0 | return CRYPT_SW_AND_OPAL_HW; |
6753 | 0 | else if (LUKS2_segment_is_hw_opal_only(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT)) |
6754 | 0 | return CRYPT_OPAL_HW_ONLY; |
6755 | 0 | } |
6756 | | |
6757 | 0 | return CRYPT_SW_ONLY; |
6758 | 0 | } |
6759 | | |
6760 | | int crypt_get_verity_info(struct crypt_device *cd, |
6761 | | struct crypt_params_verity *vp) |
6762 | 0 | { |
6763 | 0 | if (!cd || !isVERITY(cd->type) || !vp) |
6764 | 0 | return -EINVAL; |
6765 | | |
6766 | 0 | vp->data_device = device_path(cd->device); |
6767 | 0 | vp->hash_device = mdata_device_path(cd); |
6768 | 0 | vp->fec_device = device_path(cd->u.verity.fec_device); |
6769 | 0 | vp->fec_area_offset = cd->u.verity.hdr.fec_area_offset; |
6770 | 0 | vp->fec_roots = cd->u.verity.hdr.fec_roots; |
6771 | 0 | vp->hash_name = cd->u.verity.hdr.hash_name; |
6772 | 0 | vp->salt = cd->u.verity.hdr.salt; |
6773 | 0 | vp->salt_size = cd->u.verity.hdr.salt_size; |
6774 | 0 | vp->data_block_size = cd->u.verity.hdr.data_block_size; |
6775 | 0 | vp->hash_block_size = cd->u.verity.hdr.hash_block_size; |
6776 | 0 | vp->data_size = cd->u.verity.hdr.data_size; |
6777 | 0 | vp->hash_area_offset = cd->u.verity.hdr.hash_area_offset; |
6778 | 0 | vp->hash_type = cd->u.verity.hdr.hash_type; |
6779 | 0 | vp->flags = cd->u.verity.hdr.flags & (CRYPT_VERITY_NO_HEADER | CRYPT_VERITY_ROOT_HASH_SIGNATURE); |
6780 | 0 | return 0; |
6781 | 0 | } |
6782 | | |
6783 | | int crypt_get_integrity_info(struct crypt_device *cd, |
6784 | | struct crypt_params_integrity *ip) |
6785 | 0 | { |
6786 | 0 | if (!cd || !ip) |
6787 | 0 | return -EINVAL; |
6788 | | |
6789 | 0 | if (isINTEGRITY(cd->type)) { |
6790 | 0 | ip->journal_size = cd->u.integrity.params.journal_size; |
6791 | 0 | ip->journal_watermark = cd->u.integrity.params.journal_watermark; |
6792 | 0 | ip->journal_commit_time = cd->u.integrity.params.journal_commit_time; |
6793 | 0 | ip->interleave_sectors = cd->u.integrity.params.interleave_sectors; |
6794 | 0 | ip->tag_size = cd->u.integrity.params.tag_size; |
6795 | 0 | ip->sector_size = cd->u.integrity.params.sector_size; |
6796 | 0 | ip->buffer_sectors = cd->u.integrity.params.buffer_sectors; |
6797 | |
|
6798 | 0 | ip->integrity = cd->u.integrity.params.integrity; |
6799 | 0 | ip->integrity_key_size = crypt_get_integrity_key_size(cd, false); |
6800 | |
|
6801 | 0 | ip->journal_integrity = cd->u.integrity.params.journal_integrity; |
6802 | 0 | ip->journal_integrity_key_size = cd->u.integrity.params.journal_integrity_key_size; |
6803 | 0 | ip->journal_integrity_key = NULL; |
6804 | |
|
6805 | 0 | ip->journal_crypt = cd->u.integrity.params.journal_crypt; |
6806 | 0 | ip->journal_crypt_key_size = cd->u.integrity.params.journal_crypt_key_size; |
6807 | 0 | ip->journal_crypt_key = NULL; |
6808 | 0 | return 0; |
6809 | 0 | } else if (isLUKS2(cd->type)) { |
6810 | 0 | ip->journal_size = 0; // FIXME |
6811 | 0 | ip->journal_watermark = 0; // FIXME |
6812 | 0 | ip->journal_commit_time = 0; // FIXME |
6813 | 0 | ip->interleave_sectors = 0; // FIXME |
6814 | 0 | ip->sector_size = crypt_get_sector_size(cd); |
6815 | 0 | ip->buffer_sectors = 0; // FIXME |
6816 | |
|
6817 | 0 | ip->integrity = LUKS2_get_integrity(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT); |
6818 | 0 | ip->integrity_key_size = crypt_get_integrity_key_size(cd, false); |
6819 | 0 | ip->tag_size = INTEGRITY_tag_size(ip->integrity, crypt_get_cipher(cd), crypt_get_cipher_mode(cd)); |
6820 | |
|
6821 | 0 | ip->journal_integrity = NULL; |
6822 | 0 | ip->journal_integrity_key_size = 0; |
6823 | 0 | ip->journal_integrity_key = NULL; |
6824 | |
|
6825 | 0 | ip->journal_crypt = NULL; |
6826 | 0 | ip->journal_crypt_key_size = 0; |
6827 | 0 | ip->journal_crypt_key = NULL; |
6828 | 0 | return 0; |
6829 | 0 | } else if (!cd->type) { |
6830 | 0 | memset(ip, 0, sizeof(*ip)); |
6831 | 0 | ip->integrity = crypt_get_integrity(cd); |
6832 | 0 | ip->integrity_key_size = crypt_get_integrity_key_size(cd, false); |
6833 | 0 | ip->tag_size = crypt_get_integrity_tag_size(cd); |
6834 | 0 | } |
6835 | | |
6836 | 0 | return -ENOTSUP; |
6837 | 0 | } |
6838 | | |
6839 | | int crypt_convert(struct crypt_device *cd, |
6840 | | const char *type, |
6841 | | void *params) |
6842 | 0 | { |
6843 | 0 | struct luks_phdr hdr1; |
6844 | 0 | struct luks2_hdr hdr2; |
6845 | 0 | int r; |
6846 | |
|
6847 | 0 | if (!type) |
6848 | 0 | return -EINVAL; |
6849 | | |
6850 | 0 | log_dbg(cd, "Converting LUKS device to type %s", type); |
6851 | |
|
6852 | 0 | if ((r = onlyLUKSnoRequirements(cd))) |
6853 | 0 | return r; |
6854 | | |
6855 | 0 | if (isLUKS1(cd->type) && isLUKS2(type)) |
6856 | 0 | r = LUKS2_luks1_to_luks2(cd, &cd->u.luks1.hdr, &hdr2); |
6857 | 0 | else if (isLUKS2(cd->type) && isLUKS1(type)) |
6858 | 0 | r = LUKS2_luks2_to_luks1(cd, &cd->u.luks2.hdr, &hdr1); |
6859 | 0 | else |
6860 | 0 | return -EINVAL; |
6861 | | |
6862 | 0 | if (r < 0) { |
6863 | | /* in-memory header may be invalid after failed conversion */ |
6864 | 0 | _luks2_rollback(cd); |
6865 | 0 | if (r == -EBUSY) |
6866 | 0 | log_err(cd, _("Cannot convert device %s which is still in use."), mdata_device_path(cd)); |
6867 | 0 | return r; |
6868 | 0 | } |
6869 | | |
6870 | 0 | crypt_free_type(cd, NULL); |
6871 | |
|
6872 | 0 | return crypt_load(cd, type, params); |
6873 | 0 | } |
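/*
 * Usage sketch (illustrative): in-place conversion of a LUKS1 header to
 * LUKS2, the library-level equivalent of `cryptsetup convert --type luks2`.
 * Conversion fails with -EBUSY while the device is in use, as handled above.
 */
static int example_convert_to_luks2(struct crypt_device *cd)
{
	const char *type = crypt_get_type(cd);

	if (!type || strcmp(type, CRYPT_LUKS1))
		return -EINVAL;	/* nothing to convert */

	return crypt_convert(cd, CRYPT_LUKS2, NULL);
}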
6874 | | |
6875 | | /* Internal access function to header pointer */ |
6876 | | void *crypt_get_hdr(struct crypt_device *cd, const char *type) |
6877 | 0 | { |
6878 | 0 | assert(cd); |
6879 | 0 | assert(type); |
6880 | | |
6881 | | /* If requested type differs, ignore it */ |
6882 | 0 | if (!cd->type || strcmp(cd->type, type)) |
6883 | 0 | return NULL; |
6884 | | |
6885 | 0 | if (isPLAIN(cd->type)) |
6886 | 0 | return &cd->u.plain; |
6887 | | |
6888 | 0 | if (isLUKS1(cd->type)) |
6889 | 0 | return &cd->u.luks1.hdr; |
6890 | | |
6891 | 0 | if (isLUKS2(type)) |
6892 | 0 | return &cd->u.luks2.hdr; |
6893 | | |
6894 | 0 | if (isLOOPAES(cd->type)) |
6895 | 0 | return &cd->u.loopaes; |
6896 | | |
6897 | 0 | if (isVERITY(cd->type)) |
6898 | 0 | return &cd->u.verity; |
6899 | | |
6900 | 0 | if (isTCRYPT(cd->type)) |
6901 | 0 | return &cd->u.tcrypt; |
6902 | | |
6903 | 0 | return NULL; |
6904 | 0 | } |
6905 | | |
6906 | | /* internal only */ |
6907 | | struct luks2_reencrypt *crypt_get_luks2_reencrypt(struct crypt_device *cd) |
6908 | 0 | { |
6909 | 0 | return cd->u.luks2.rh; |
6910 | 0 | } |
6911 | | |
6912 | | /* internal only */ |
6913 | | void crypt_set_luks2_reencrypt(struct crypt_device *cd, struct luks2_reencrypt *rh) |
6914 | 0 | { |
6915 | 0 | cd->u.luks2.rh = rh; |
6916 | 0 | } |
6917 | | |
6918 | | /* |
6919 | | * Token handling |
6920 | | */ |
6921 | | int crypt_activate_by_token_pin(struct crypt_device *cd, const char *name, |
6922 | | const char *type, int token, const char *pin, size_t pin_size, |
6923 | | void *usrptr, uint32_t flags) |
6924 | 0 | { |
6925 | 0 | int r; |
6926 | 0 | struct crypt_keyslot_context kc = {}; |
6927 | |
|
6928 | 0 | crypt_keyslot_context_init_by_token_internal(&kc, token, type, pin, pin_size, usrptr); |
6929 | 0 | r = crypt_activate_by_keyslot_context(cd, name, CRYPT_ANY_SLOT, &kc, CRYPT_ANY_SLOT, &kc, flags); |
6930 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
6931 | |
|
6932 | 0 | return r; |
6933 | 0 | } |
6934 | | |
6935 | | int crypt_activate_by_token(struct crypt_device *cd, |
6936 | | const char *name, int token, void *usrptr, uint32_t flags) |
6937 | 0 | { |
6938 | 0 | return crypt_activate_by_token_pin(cd, name, NULL, token, NULL, 0, usrptr, flags); |
6939 | 0 | } |
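/*
 * Usage sketch (illustrative): activating a device with whichever token can
 * unlock it (CRYPT_ANY_TOKEN) and retrying through the PIN variant when the
 * token handler reports that a PIN is required (-ENOANO, per the token API
 * documentation). Name and PIN are placeholders.
 */
static int example_activate_by_token(struct crypt_device *cd, const char *name)
{
	int r = crypt_activate_by_token(cd, name, CRYPT_ANY_TOKEN, NULL, 0);

	if (r == -ENOANO)	/* token found, but a PIN is needed */
		r = crypt_activate_by_token_pin(cd, name, NULL, CRYPT_ANY_TOKEN,
						"1234", 4, NULL, 0);
	return r;
}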
6940 | | |
6941 | | int crypt_token_json_get(struct crypt_device *cd, int token, const char **json) |
6942 | 0 | { |
6943 | 0 | int r; |
6944 | |
|
6945 | 0 | if (!json) |
6946 | 0 | return -EINVAL; |
6947 | | |
6948 | 0 | log_dbg(cd, "Requesting JSON for token %d.", token); |
6949 | |
|
6950 | 0 | if ((r = onlyLUKS2unrestricted(cd))) |
6951 | 0 | return r; |
6952 | | |
6953 | 0 | return LUKS2_token_json_get(&cd->u.luks2.hdr, token, json) ?: token; |
6954 | 0 | } |
6955 | | |
6956 | | int crypt_token_json_set(struct crypt_device *cd, int token, const char *json) |
6957 | 0 | { |
6958 | 0 | int r; |
6959 | |
|
6960 | 0 | log_dbg(cd, "Updating JSON for token %d.", token); |
6961 | |
|
6962 | 0 | if ((r = onlyLUKS2(cd))) |
6963 | 0 | return r; |
6964 | | |
6965 | 0 | return LUKS2_token_create(cd, &cd->u.luks2.hdr, token, json, 1); |
6966 | 0 | } |
6967 | | |
6968 | | crypt_token_info crypt_token_status(struct crypt_device *cd, int token, const char **type) |
6969 | 0 | { |
6970 | 0 | if (_onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0)) |
6971 | 0 | return CRYPT_TOKEN_INVALID; |
6972 | | |
6973 | 0 | return LUKS2_token_status(cd, &cd->u.luks2.hdr, token, type); |
6974 | 0 | } |
6975 | | |
6976 | | int crypt_token_max(const char *type) |
6977 | 0 | { |
6978 | 0 | if (isLUKS2(type)) |
6979 | 0 | return LUKS2_TOKENS_MAX; |
6980 | | |
6981 | 0 | return -EINVAL; |
6982 | 0 | } |
6983 | | |
6984 | | int crypt_token_luks2_keyring_get(struct crypt_device *cd, |
6985 | | int token, |
6986 | | struct crypt_token_params_luks2_keyring *params) |
6987 | 0 | { |
6988 | 0 | crypt_token_info token_info; |
6989 | 0 | const char *type; |
6990 | 0 | int r; |
6991 | |
|
6992 | 0 | if (!params) |
6993 | 0 | return -EINVAL; |
6994 | | |
6995 | 0 | log_dbg(cd, "Requesting LUKS2 keyring token %d.", token); |
6996 | |
|
6997 | 0 | if ((r = onlyLUKS2unrestricted(cd))) |
6998 | 0 | return r; |
6999 | | |
7000 | 0 | token_info = LUKS2_token_status(cd, &cd->u.luks2.hdr, token, &type); |
7001 | 0 | switch (token_info) { |
7002 | 0 | case CRYPT_TOKEN_INVALID: |
7003 | 0 | log_dbg(cd, "Token %d is invalid.", token); |
7004 | 0 | return -EINVAL; |
7005 | 0 | case CRYPT_TOKEN_INACTIVE: |
7006 | 0 | log_dbg(cd, "Token %d is inactive.", token); |
7007 | 0 | return -EINVAL; |
7008 | 0 | case CRYPT_TOKEN_INTERNAL: |
7009 | 0 | if (!strcmp(type, LUKS2_TOKEN_KEYRING)) |
7010 | 0 | break; |
7011 | | /* Fall through */ |
7012 | 0 | case CRYPT_TOKEN_INTERNAL_UNKNOWN: |
7013 | 0 | case CRYPT_TOKEN_EXTERNAL: |
7014 | 0 | case CRYPT_TOKEN_EXTERNAL_UNKNOWN: |
7015 | 0 | log_dbg(cd, "Token %d has unexpected type %s.", token, type); |
7016 | 0 | return -EINVAL; |
7017 | 0 | } |
7018 | | |
7019 | 0 | return LUKS2_token_keyring_get(&cd->u.luks2.hdr, token, params); |
7020 | 0 | } |
7021 | | |
7022 | | int crypt_token_luks2_keyring_set(struct crypt_device *cd, |
7023 | | int token, |
7024 | | const struct crypt_token_params_luks2_keyring *params) |
7025 | 0 | { |
7026 | 0 | int r; |
7027 | 0 | char json[4096]; |
7028 | |
|
7029 | 0 | if (!params || !params->key_description) |
7030 | 0 | return -EINVAL; |
7031 | | |
7032 | 0 | log_dbg(cd, "Creating new LUKS2 keyring token (%d).", token); |
7033 | |
|
7034 | 0 | if ((r = onlyLUKS2(cd))) |
7035 | 0 | return r; |
7036 | | |
7037 | 0 | r = LUKS2_token_keyring_json(json, sizeof(json), params); |
7038 | 0 | if (r < 0) |
7039 | 0 | return r; |
7040 | | |
7041 | 0 | return LUKS2_token_create(cd, &cd->u.luks2.hdr, token, json, 1); |
7042 | 0 | } |
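/*
 * Usage sketch (illustrative): creating a luks2-keyring token that makes the
 * library look the passphrase up under a kernel keyring key description, then
 * tying it to keyslot 0 with crypt_token_assign_keyslot() (defined below).
 * The description string is a placeholder.
 */
static int example_add_keyring_token(struct crypt_device *cd)
{
	const struct crypt_token_params_luks2_keyring params = {
		.key_description = "cryptsetup:my-volume-passphrase"
	};
	int token = crypt_token_luks2_keyring_set(cd, CRYPT_ANY_TOKEN, &params);

	if (token < 0)
		return token;

	return crypt_token_assign_keyslot(cd, token, 0);
}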
7043 | | |
7044 | | int crypt_token_assign_keyslot(struct crypt_device *cd, int token, int keyslot) |
7045 | 0 | { |
7046 | 0 | int r; |
7047 | |
|
7048 | 0 | if ((r = onlyLUKS2(cd))) |
7049 | 0 | return r; |
7050 | | |
7051 | 0 | if (token == CRYPT_ANY_TOKEN) |
7052 | 0 | return -EINVAL; |
7053 | | |
7054 | 0 | return LUKS2_token_assign(cd, &cd->u.luks2.hdr, keyslot, token, 1, 1); |
7055 | 0 | } |
7056 | | |
7057 | | int crypt_token_unassign_keyslot(struct crypt_device *cd, int token, int keyslot) |
7058 | 0 | { |
7059 | 0 | int r; |
7060 | |
|
7061 | 0 | if ((r = onlyLUKS2(cd))) |
7062 | 0 | return r; |
7063 | | |
7064 | 0 | if (token == CRYPT_ANY_TOKEN) |
7065 | 0 | return -EINVAL; |
7066 | | |
7067 | 0 | return LUKS2_token_assign(cd, &cd->u.luks2.hdr, keyslot, token, 0, 1); |
7068 | 0 | } |
7069 | | |
7070 | | int crypt_token_is_assigned(struct crypt_device *cd, int token, int keyslot) |
7071 | 0 | { |
7072 | 0 | int r; |
7073 | |
|
7074 | 0 | if ((r = _onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0))) |
7075 | 0 | return r; |
7076 | | |
7077 | 0 | return LUKS2_token_is_assigned(&cd->u.luks2.hdr, keyslot, token); |
7078 | 0 | } |
7079 | | |
7080 | | /* Internal only */ |
7081 | | int crypt_metadata_locking_enabled(void) |
7082 | 0 | { |
7083 | 0 | return _metadata_locking; |
7084 | 0 | } |
7085 | | |
7086 | | int crypt_metadata_locking(struct crypt_device *cd __attribute__((unused)), int enable) |
7087 | 0 | { |
7088 | 0 | if (enable && !_metadata_locking) |
7089 | 0 | return -EPERM; |
7090 | | |
7091 | 0 | _metadata_locking = enable ? 1 : 0; |
7092 | 0 | return 0; |
7093 | 0 | } |
7094 | | |
7095 | | int crypt_persistent_flags_set(struct crypt_device *cd, crypt_flags_type type, uint32_t flags) |
7096 | 0 | { |
7097 | 0 | int r; |
7098 | |
|
7099 | 0 | if ((r = onlyLUKS2(cd))) |
7100 | 0 | return r; |
7101 | | |
7102 | 0 | if (type == CRYPT_FLAGS_ACTIVATION) |
7103 | 0 | return LUKS2_config_set_flags(cd, &cd->u.luks2.hdr, flags); |
7104 | | |
7105 | 0 | if (type == CRYPT_FLAGS_REQUIREMENTS) |
7106 | 0 | return LUKS2_config_set_requirements(cd, &cd->u.luks2.hdr, flags, true); |
7107 | | |
7108 | 0 | return -EINVAL; |
7109 | 0 | } |
7110 | | |
7111 | | int crypt_persistent_flags_get(struct crypt_device *cd, crypt_flags_type type, uint32_t *flags) |
7112 | 0 | { |
7113 | 0 | int r; |
7114 | |
|
7115 | 0 | if (!flags) |
7116 | 0 | return -EINVAL; |
7117 | | |
7118 | 0 | if ((r = onlyLUKS2unrestricted(cd))) |
7119 | 0 | return r; |
7120 | | |
7121 | 0 | if (type == CRYPT_FLAGS_ACTIVATION) |
7122 | 0 | return LUKS2_config_get_flags(cd, &cd->u.luks2.hdr, flags); |
7123 | | |
7124 | 0 | if (type == CRYPT_FLAGS_REQUIREMENTS) { |
7125 | 0 | LUKS2_config_get_requirements(cd, &cd->u.luks2.hdr, flags); |
7126 | 0 | return 0; |
7127 | 0 | } |
7128 | | |
7129 | 0 | return -EINVAL; |
7130 | 0 | } |
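/*
 * Usage sketch (illustrative): honouring the LUKS2 requirements mechanism by
 * refusing to modify metadata that carries requirement flags this build does
 * not understand.
 */
static int example_check_requirements(struct crypt_device *cd)
{
	uint32_t reqs = 0;
	int r = crypt_persistent_flags_get(cd, CRYPT_FLAGS_REQUIREMENTS, &reqs);

	if (r < 0)
		return r;

	return (reqs & CRYPT_REQUIREMENT_UNKNOWN) ? -ENOTSUP : 0;
}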
7131 | | |
7132 | | static int update_volume_key_segment_digest(struct crypt_device *cd, struct luks2_hdr *hdr, int digest, int commit) |
7133 | 0 | { |
7134 | 0 | int r; |
7135 | | |
7136 | | /* Remove any assignments in memory */ |
7137 | 0 | r = LUKS2_digest_segment_assign(cd, hdr, CRYPT_DEFAULT_SEGMENT, CRYPT_ANY_DIGEST, 0, 0); |
7138 | 0 | if (r) |
7139 | 0 | return r; |
7140 | | |
7141 | | /* Assign it to the specific digest */ |
7142 | 0 | return LUKS2_digest_segment_assign(cd, hdr, CRYPT_DEFAULT_SEGMENT, digest, 1, commit); |
7143 | 0 | } |
7144 | | |
7145 | | static int verify_and_update_segment_digest(struct crypt_device *cd, |
7146 | | struct luks2_hdr *hdr, int keyslot, struct crypt_keyslot_context *kc) |
7147 | 0 | { |
7148 | 0 | int digest, r; |
7149 | 0 | struct volume_key *vk = NULL; |
7150 | |
|
7151 | 0 | assert(kc); |
7152 | 0 | assert(kc->get_luks2_key); |
7153 | 0 | assert(keyslot >= 0); |
7154 | | |
7155 | 0 | r = kc->get_luks2_key(cd, kc, keyslot, CRYPT_ANY_SEGMENT, &vk); |
7156 | 0 | if (r < 0) |
7157 | 0 | return r; |
7158 | | |
7159 | | /* check volume_key (param) digest matches keyslot digest */ |
7160 | 0 | r = LUKS2_digest_verify(cd, hdr, vk, keyslot); |
7161 | 0 | if (r < 0) |
7162 | 0 | goto out; |
7163 | 0 | digest = r; |
7164 | | |
7165 | | /* nothing to do, volume key in keyslot is already assigned to default segment */ |
7166 | 0 | r = LUKS2_digest_verify_by_segment(cd, hdr, CRYPT_DEFAULT_SEGMENT, vk); |
7167 | 0 | if (r >= 0) |
7168 | 0 | goto out; |
7169 | | |
7170 | | /* FIXME: check new volume key is usable with current default segment */ |
7171 | | |
7172 | 0 | r = update_volume_key_segment_digest(cd, &cd->u.luks2.hdr, digest, 1); |
7173 | 0 | if (r) |
7174 | 0 | log_err(cd, _("Failed to assign keyslot %u as the new volume key."), keyslot); |
7175 | 0 | out: |
7176 | 0 | crypt_free_volume_key(vk); |
7177 | |
|
7178 | 0 | return r < 0 ? r : keyslot; |
7179 | 0 | } |
7180 | | |
7181 | | static int luks2_keyslot_add_by_verified_volume_key(struct crypt_device *cd, |
7182 | | int keyslot_new, |
7183 | | const char *new_passphrase, |
7184 | | size_t new_passphrase_size, |
7185 | | struct volume_key *vk) |
7186 | 0 | { |
7187 | 0 | int r; |
7188 | 0 | struct luks2_keyslot_params params; |
7189 | |
|
7190 | 0 | assert(cd); |
7191 | 0 | assert(keyslot_new >= 0); |
7192 | 0 | assert(new_passphrase); |
7193 | 0 | assert(vk); |
7194 | 0 | assert(crypt_volume_key_get_id(vk) >= 0); |
7195 | | |
7196 | 0 | r = LUKS2_keyslot_params_default(cd, &cd->u.luks2.hdr, ¶ms); |
7197 | 0 | if (r < 0) { |
7198 | 0 | log_err(cd, _("Failed to initialize default LUKS2 keyslot parameters.")); |
7199 | 0 | return r; |
7200 | 0 | } |
7201 | | |
7202 | 0 | r = LUKS2_digest_assign(cd, &cd->u.luks2.hdr, keyslot_new, crypt_volume_key_get_id(vk), 1, 0); |
7203 | 0 | if (r < 0) { |
7204 | 0 | log_err(cd, _("Failed to assign keyslot %d to digest."), keyslot_new); |
7205 | 0 | return r; |
7206 | 0 | } |
7207 | | |
7208 | 0 | r = LUKS2_keyslot_store(cd, &cd->u.luks2.hdr, keyslot_new, |
7209 | 0 | CONST_CAST(char*)new_passphrase, |
7210 | 0 | new_passphrase_size, vk, ¶ms); |
7211 | |
|
7212 | 0 | return r < 0 ? r : keyslot_new; |
7213 | 0 | } |
7214 | | |
7215 | | static int luks2_keyslot_add_by_volume_key(struct crypt_device *cd, |
7216 | | int keyslot_new, |
7217 | | const char *new_passphrase, |
7218 | | size_t new_passphrase_size, |
7219 | | struct volume_key *vk) |
7220 | 0 | { |
7221 | 0 | int r; |
7222 | |
|
7223 | 0 | assert(cd); |
7224 | 0 | assert(keyslot_new >= 0); |
7225 | 0 | assert(new_passphrase); |
7226 | 0 | assert(vk); |
7227 | | |
7228 | 0 | r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk); |
7229 | 0 | if (r >= 0) |
7230 | 0 | crypt_volume_key_set_id(vk, r); |
7231 | |
|
7232 | 0 | if (r < 0) { |
7233 | 0 | log_err(cd, _("Volume key does not match the volume.")); |
7234 | 0 | return r; |
7235 | 0 | } |
7236 | | |
7237 | 0 | return luks2_keyslot_add_by_verified_volume_key(cd, keyslot_new, new_passphrase, new_passphrase_size, vk); |
7238 | 0 | } |
7239 | | |
7240 | | static int luks1_keyslot_add_by_volume_key(struct crypt_device *cd, |
7241 | | int keyslot_new, |
7242 | | const char *new_passphrase, |
7243 | | size_t new_passphrase_size, |
7244 | | struct volume_key *vk) |
7245 | 0 | { |
7246 | 0 | int r; |
7247 | |
|
7248 | 0 | assert(cd); |
7249 | 0 | assert(keyslot_new >= 0); |
7250 | 0 | assert(new_passphrase); |
7251 | 0 | assert(vk); |
7252 | | |
7253 | 0 | r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk); |
7254 | 0 | if (r < 0) { |
7255 | 0 | log_err(cd, _("Volume key does not match the volume.")); |
7256 | 0 | return r; |
7257 | 0 | } |
7258 | | |
7259 | 0 | r = LUKS_set_key(keyslot_new, CONST_CAST(char*)new_passphrase, |
7260 | 0 | new_passphrase_size, &cd->u.luks1.hdr, vk, cd); |
7261 | |
|
7262 | 0 | return r < 0 ? r : keyslot_new; |
7263 | 0 | } |
7264 | | |
7265 | | static int keyslot_add_by_key(struct crypt_device *cd, |
7266 | | bool is_luks1, |
7267 | | int keyslot_new, |
7268 | | const char *new_passphrase, |
7269 | | size_t new_passphrase_size, |
7270 | | struct volume_key *vk, |
7271 | | uint32_t flags) |
7272 | 0 | { |
7273 | 0 | int r, digest; |
7274 | |
|
7275 | 0 | assert(cd); |
7276 | 0 | assert(keyslot_new >= 0); |
7277 | 0 | assert(new_passphrase); |
7278 | 0 | assert(vk); |
7279 | | |
7280 | 0 | if (!flags) |
7281 | 0 | return is_luks1 ? luks1_keyslot_add_by_volume_key(cd, keyslot_new, new_passphrase, new_passphrase_size, vk) : |
7282 | 0 | luks2_keyslot_add_by_volume_key(cd, keyslot_new, new_passphrase, new_passphrase_size, vk); |
7283 | | |
7284 | 0 | if (is_luks1) |
7285 | 0 | return -EINVAL; |
7286 | | |
7287 | 0 | digest = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk); |
7288 | 0 | if (digest >= 0) /* if the key matches the volume key digest, clear the new vk flag */ |
7289 | 0 | flags &= ~CRYPT_VOLUME_KEY_SET; |
7290 | 0 | else if (digest == -EPERM) { |
7291 | | /* if key matches any existing digest, do not create new digest */ |
7292 | 0 | if ((flags & CRYPT_VOLUME_KEY_DIGEST_REUSE)) |
7293 | 0 | digest = LUKS2_digest_verify_by_any_matching(cd, vk); |
7294 | | |
7295 | | /* Anything other than -EPERM or -ENOENT suggests broken metadata. Abort */ |
7296 | 0 | if (digest < 0 && digest != -ENOENT && digest != -EPERM) |
7297 | 0 | return digest; |
7298 | | |
7299 | | /* no segment flag or new vk flag requires new key digest */ |
7300 | 0 | if (flags & (CRYPT_VOLUME_KEY_NO_SEGMENT | CRYPT_VOLUME_KEY_SET)) { |
7301 | 0 | if (digest < 0 || !(flags & CRYPT_VOLUME_KEY_DIGEST_REUSE)) |
7302 | 0 | digest = LUKS2_digest_create(cd, "pbkdf2", &cd->u.luks2.hdr, vk); |
7303 | 0 | } |
7304 | 0 | } else /* Anything other than -EPERM suggests broken metadata. Abort */ |
7305 | 0 | return digest; |
7306 | | |
7307 | 0 | r = digest; |
7308 | 0 | if (r < 0) { |
7309 | 0 | log_err(cd, _("Volume key does not match the volume.")); |
7310 | 0 | return r; |
7311 | 0 | } |
7312 | | |
7313 | 0 | crypt_volume_key_set_id(vk, digest); |
7314 | |
|
7315 | 0 | if (flags & CRYPT_VOLUME_KEY_SET) { |
7316 | 0 | r = update_volume_key_segment_digest(cd, &cd->u.luks2.hdr, digest, 0); |
7317 | 0 | if (r < 0) |
7318 | 0 | log_err(cd, _("Failed to assign keyslot %u as the new volume key."), keyslot_new); |
7319 | 0 | } |
7320 | |
|
7321 | 0 | if (r >= 0) |
7322 | 0 | r = luks2_keyslot_add_by_verified_volume_key(cd, keyslot_new, new_passphrase, new_passphrase_size, vk); |
7323 | |
|
7324 | 0 | return r < 0 ? r : keyslot_new; |
7325 | 0 | } |
7326 | | |
7327 | | int crypt_keyslot_add_by_key(struct crypt_device *cd, |
7328 | | int keyslot, |
7329 | | const char *volume_key, |
7330 | | size_t volume_key_size, |
7331 | | const char *passphrase, |
7332 | | size_t passphrase_size, |
7333 | | uint32_t flags) |
7334 | 0 | { |
7335 | 0 | int r; |
7336 | 0 | struct crypt_keyslot_context kc = {}, new_kc = {}; |
7337 | |
|
7338 | 0 | if (!passphrase || ((flags & CRYPT_VOLUME_KEY_NO_SEGMENT) && |
7339 | 0 | (flags & CRYPT_VOLUME_KEY_SET))) |
7340 | 0 | return -EINVAL; |
7341 | | |
7342 | 0 | if ((r = onlyLUKS(cd)) < 0) |
7343 | 0 | return r; |
7344 | | |
7345 | 0 | if ((flags & CRYPT_VOLUME_KEY_SET) && crypt_keyslot_status(cd, keyslot) > CRYPT_SLOT_INACTIVE && |
7346 | 0 | isLUKS2(cd->type)) { |
7347 | 0 | if (volume_key) |
7348 | 0 | crypt_keyslot_context_init_by_key_internal(&kc, volume_key, volume_key_size); |
7349 | 0 | else |
7350 | 0 | crypt_keyslot_context_init_by_passphrase_internal(&kc, passphrase, passphrase_size); |
7351 | |
|
7352 | 0 | r = verify_and_update_segment_digest(cd, &cd->u.luks2.hdr, keyslot, &kc); |
7353 | |
|
7354 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
7355 | |
|
7356 | 0 | return r; |
7357 | 0 | } |
7358 | | |
7359 | 0 | crypt_keyslot_context_init_by_key_internal(&kc, volume_key, volume_key_size); |
7360 | 0 | crypt_keyslot_context_init_by_passphrase_internal(&new_kc, passphrase, passphrase_size); |
7361 | |
|
7362 | 0 | r = crypt_keyslot_add_by_keyslot_context(cd, CRYPT_ANY_SLOT, &kc, keyslot, &new_kc, flags); |
7363 | |
|
7364 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
7365 | 0 | crypt_keyslot_context_destroy_internal(&new_kc); |
7366 | |
|
7367 | 0 | return r; |
7368 | 0 | } |
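/*
 * Usage sketch (illustrative): adding an unbound LUKS2 keyslot that is not
 * assigned to the data segment (CRYPT_VOLUME_KEY_NO_SEGMENT). With a NULL
 * volume key and this flag, a fresh random 32-byte key is generated for the
 * slot (see the -ENOENT branch in crypt_keyslot_add_by_keyslot_context()
 * below).
 */
static int example_add_unbound_keyslot(struct crypt_device *cd,
				       const char *passphrase, size_t passphrase_size)
{
	return crypt_keyslot_add_by_key(cd, CRYPT_ANY_SLOT, NULL, 32,
					passphrase, passphrase_size,
					CRYPT_VOLUME_KEY_NO_SEGMENT);
}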
7369 | | |
7370 | | int crypt_keyslot_add_by_keyslot_context(struct crypt_device *cd, |
7371 | | int keyslot_existing, |
7372 | | struct crypt_keyslot_context *kc, |
7373 | | int keyslot_new, |
7374 | | struct crypt_keyslot_context *new_kc, |
7375 | | uint32_t flags) |
7376 | 0 | { |
7377 | 0 | bool is_luks1; |
7378 | 0 | int active_slots, r; |
7379 | 0 | const char *new_passphrase; |
7380 | 0 | size_t new_passphrase_size; |
7381 | 0 | struct volume_key *vk = NULL; |
7382 | |
|
7383 | 0 | if (!kc || ((flags & CRYPT_VOLUME_KEY_NO_SEGMENT) && |
7384 | 0 | (flags & CRYPT_VOLUME_KEY_SET))) |
7385 | 0 | return -EINVAL; |
7386 | | |
7387 | 0 | r = flags ? onlyLUKS2(cd) : onlyLUKS(cd); |
7388 | 0 | if (r) |
7389 | 0 | return r; |
7390 | | |
7391 | 0 | if ((flags & CRYPT_VOLUME_KEY_SET) && crypt_keyslot_status(cd, keyslot_existing) > CRYPT_SLOT_INACTIVE) |
7392 | 0 | return verify_and_update_segment_digest(cd, &cd->u.luks2.hdr, keyslot_existing, kc); |
7393 | | |
7394 | 0 | if (!new_kc || !new_kc->get_passphrase) |
7395 | 0 | return -EINVAL; |
7396 | | |
7397 | 0 | log_dbg(cd, "Adding new keyslot %d by %s%s, volume key provided by %s (%d).", |
7398 | 0 | keyslot_new, keyslot_context_type_string(new_kc), |
7399 | 0 | (flags & CRYPT_VOLUME_KEY_NO_SEGMENT) ? " unassigned to a crypt segment" : "", |
7400 | 0 | keyslot_context_type_string(kc), keyslot_existing); |
7401 | |
|
7402 | 0 | r = keyslot_verify_or_find_empty(cd, &keyslot_new); |
7403 | 0 | if (r < 0) |
7404 | 0 | return r; |
7405 | | |
7406 | 0 | is_luks1 = isLUKS1(cd->type); |
7407 | 0 | if (is_luks1) |
7408 | 0 | active_slots = LUKS_keyslot_active_count(&cd->u.luks1.hdr); |
7409 | 0 | else |
7410 | 0 | active_slots = LUKS2_keyslot_active_count(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT); |
7411 | |
|
7412 | 0 | if (active_slots < 0) |
7413 | 0 | return -EINVAL; |
7414 | | |
7415 | 0 | if (active_slots == 0 && kc->type != CRYPT_KC_TYPE_KEY) |
7416 | 0 | r = -ENOENT; |
7417 | 0 | else if (is_luks1 && kc->get_luks1_volume_key) |
7418 | 0 | r = kc->get_luks1_volume_key(cd, kc, keyslot_existing, &vk); |
7419 | 0 | else if (!is_luks1 && kc->get_luks2_volume_key) |
7420 | 0 | r = kc->get_luks2_volume_key(cd, kc, keyslot_existing, &vk); |
7421 | 0 | else |
7422 | 0 | return -EINVAL; |
7423 | | |
7424 | 0 | if (r == -ENOENT) { |
7425 | 0 | if ((flags & CRYPT_VOLUME_KEY_NO_SEGMENT) && kc->type == CRYPT_KC_TYPE_KEY) { |
7426 | 0 | if (!(vk = crypt_generate_volume_key(cd, kc->u.k.volume_key_size, KEY_QUALITY_KEY))) |
7427 | 0 | return -ENOMEM; |
7428 | 0 | r = 0; |
7429 | 0 | } else if (cd->volume_key) { |
7430 | 0 | if (!(vk = crypt_alloc_volume_key(crypt_volume_key_length(cd->volume_key), |
7431 | 0 | crypt_volume_key_get_key(cd->volume_key)))) |
7432 | 0 | return -ENOMEM; |
7433 | 0 | r = 0; |
7434 | 0 | } else if (active_slots == 0) { |
7435 | 0 | log_err(cd, _("Cannot add key slot, all slots disabled and no volume key provided.")); |
7436 | 0 | r = -EINVAL; |
7437 | 0 | } |
7438 | 0 | } |
7439 | | |
7440 | 0 | if (r < 0) |
7441 | 0 | return r; |
7442 | | |
7443 | 0 | r = new_kc->get_passphrase(cd, new_kc, &new_passphrase, &new_passphrase_size); |
7444 | | /* If the new keyslot context is a token, just assign it to the new keyslot */ |
7445 | 0 | if (r >= 0 && new_kc->type == CRYPT_KC_TYPE_TOKEN && !is_luks1) |
7446 | 0 | r = LUKS2_token_assign(cd, &cd->u.luks2.hdr, keyslot_new, new_kc->u.t.id, 1, 0); |
7447 | 0 | if (r >= 0) |
7448 | 0 | r = keyslot_add_by_key(cd, is_luks1, keyslot_new, new_passphrase, new_passphrase_size, vk, flags); |
7449 | |
|
7450 | 0 | crypt_free_volume_key(vk); |
7451 | |
|
7452 | 0 | if (r < 0) { |
7453 | 0 | _luks2_rollback(cd); |
7454 | 0 | return r; |
7455 | 0 | } |
7456 | | |
7457 | 0 | return keyslot_new; |
7458 | 0 | } |
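/*
 * Usage sketch (illustrative): the keyslot-context variant lets the existing
 * secret and the new secret come from different sources. Here an existing
 * passphrase unlocks the volume key and the content of a keyfile becomes the
 * new keyslot secret. Assumes the public crypt_keyslot_context_init_by_*()
 * and crypt_keyslot_context_free() calls from recent libcryptsetup releases;
 * the keyfile path is a placeholder.
 */
static int example_add_slot_from_keyfile(struct crypt_device *cd,
					 const char *passphrase, size_t passphrase_size)
{
	struct crypt_keyslot_context *kc = NULL, *new_kc = NULL;
	int r;

	r = crypt_keyslot_context_init_by_passphrase(cd, passphrase, passphrase_size, &kc);
	if (r < 0)
		return r;

	r = crypt_keyslot_context_init_by_keyfile(cd, "/etc/keys/volume.key", 0, 0, &new_kc);
	if (r >= 0)
		r = crypt_keyslot_add_by_keyslot_context(cd, CRYPT_ANY_SLOT, kc,
							 CRYPT_ANY_SLOT, new_kc, 0);

	if (new_kc)
		crypt_keyslot_context_free(new_kc);
	crypt_keyslot_context_free(kc);
	return r;
}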
7459 | | |
7460 | | /* |
7461 | | * Keyring handling |
7462 | | */ |
7463 | | int crypt_use_keyring_for_vk(struct crypt_device *cd) |
7464 | 0 | { |
7465 | 0 | uint64_t dmc_flags; |
7466 | | |
7467 | | /* dm backend must be initialized */ |
7468 | 0 | if (!cd) |
7469 | 0 | return 0; |
7470 | | |
7471 | 0 | if (!isPLAIN(cd->type) && !isLUKS2(cd->type)) |
7472 | 0 | return 0; |
7473 | | |
7474 | 0 | if (!_vk_via_keyring || !kernel_keyring_support()) |
7475 | 0 | return 0; |
7476 | | |
7477 | 0 | if (dm_flags(cd, DM_CRYPT, &dmc_flags)) |
7478 | 0 | return dmcrypt_keyring_bug() ? 0 : 1; |
7479 | | |
7480 | 0 | return (dmc_flags & DM_KERNEL_KEYRING_SUPPORTED); |
7481 | 0 | } |
7482 | | |
7483 | | int crypt_volume_key_keyring(struct crypt_device *cd __attribute__((unused)), int enable) |
7484 | 0 | { |
7485 | 0 | _vk_via_keyring = enable ? 1 : 0; |
7486 | 0 | return 0; |
7487 | 0 | } |
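
A minimal sketch of how a caller might use crypt_volume_key_keyring() to opt out of keyring-based volume key passing for this context before activation; the device-mapper name and passphrase handling are placeholders:

    #include <string.h>
    #include <libcryptsetup.h>

    /* Activate with the volume key passed in the dm-crypt table directly
     * rather than through the kernel keyring. */
    static int activate_without_keyring_sketch(struct crypt_device *cd, const char *pass)
    {
        crypt_volume_key_keyring(cd, 0);
        return crypt_activate_by_passphrase(cd, "luks-data", CRYPT_ANY_SLOT,
                                            pass, strlen(pass), 0);
    }
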
7488 | | |
7489 | | /* internal only */ |
7490 | | int crypt_volume_key_load_in_keyring(struct crypt_device *cd, struct volume_key *vk) |
7491 | 0 | { |
7492 | 0 | if (!vk || !cd) |
7493 | 0 | return -EINVAL; |
7494 | | |
7495 | 0 | if (!crypt_volume_key_description(vk)) { |
7496 | 0 | log_dbg(cd, "Invalid key description"); |
7497 | 0 | return -EINVAL; |
7498 | 0 | } |
7499 | | |
7500 | 0 | log_dbg(cd, "Loading key (type logon, name %s) in thread keyring.", |
7501 | 0 | crypt_volume_key_description(vk)); |
7502 | |
7503 | 0 | if (crypt_volume_key_upload_kernel_key(vk)) { |
7504 | 0 | crypt_set_key_in_keyring(cd, 1); |
7505 | 0 | return 0; |
7506 | 0 | } else { |
7507 | 0 | log_dbg(cd, "keyring_add_key_in_thread_keyring failed (error %d)", errno); |
7508 | 0 | log_err(cd, _("Failed to load key in kernel keyring.")); |
7509 | 0 | return -EINVAL; |
7510 | 0 | } |
7511 | 0 | } |
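
For context, the upload above ends up as a kernel key of type "logon", which user space can create but never read back. A rough stand-alone illustration using the keyutils library (not libcryptsetup; the description string is a placeholder):

    #include <errno.h>
    #include <stddef.h>
    #include <keyutils.h>

    /* Place a write-only "logon" key in the thread keyring; logon keys
     * require a "prefix:name" style description. */
    static int add_logon_key_sketch(const void *key, size_t key_len)
    {
        key_serial_t kid = add_key("logon", "cryptsetup:example-vk",
                                   key, key_len, KEY_SPEC_THREAD_KEYRING);
        return kid < 0 ? -errno : 0;
    }
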
7512 | | |
7513 | | /* internal only */ |
7514 | | int crypt_keyring_get_user_key(struct crypt_device *cd, |
7515 | | const char *key_description, |
7516 | | char **key, |
7517 | | size_t *key_size) |
7518 | 0 | { |
7519 | 0 | int r; |
7520 | 0 | key_serial_t kid; |
7521 | |
7522 | 0 | if (!key_description || !key || !key_size) |
7523 | 0 | return -EINVAL; |
7524 | | |
7525 | 0 | log_dbg(cd, "Requesting key %s (user type)", key_description); |
7526 | |
7527 | 0 | kid = keyring_request_key_id(USER_KEY, key_description); |
7528 | 0 | if (kid == -ENOTSUP) { |
7529 | 0 | log_dbg(cd, "Kernel keyring features disabled."); |
7530 | 0 | return -ENOTSUP; |
7531 | 0 | } else if (kid < 0) { |
7532 | 0 | log_dbg(cd, "keyring_request_key_id failed with errno %d.", errno); |
7533 | 0 | return -EINVAL; |
7534 | 0 | } |
7535 | | |
7536 | 0 | log_dbg(cd, "Reading content of kernel key (id %" PRIi32 ").", kid); |
7537 | |
7538 | 0 | r = keyring_read_key(kid, key, key_size); |
7539 | 0 | if (r < 0) |
7540 | 0 | log_dbg(cd, "keyring_read_key failed with errno %d.", errno); |
7541 | |
7542 | 0 | return r; |
7543 | 0 | } |
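
The user-space counterpart of this lookup is simply placing a "user" key under an agreed description, for example with the keyutils library; the description below is a placeholder and the key lands in the calling user's keyring:

    #include <errno.h>
    #include <string.h>
    #include <keyutils.h>

    /* Stash a passphrase as a "user" key so it can later be requested
     * by description. */
    static int stash_passphrase_sketch(const char *desc, const char *pass)
    {
        key_serial_t kid = add_key("user", desc, pass, strlen(pass),
                                   KEY_SPEC_USER_KEYRING);
        return kid < 0 ? -errno : 0;
    }
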
7544 | | |
7545 | | /* internal only */ |
7546 | | int crypt_keyring_get_key_by_name(struct crypt_device *cd, |
7547 | | const char *key_description, |
7548 | | char **key, |
7549 | | size_t *key_size) |
7550 | 0 | { |
7551 | 0 | int r; |
7552 | 0 | key_serial_t kid; |
7553 | |
7554 | 0 | if (!key_description || !key || !key_size) |
7555 | 0 | return -EINVAL; |
7556 | | |
7557 | 0 | log_dbg(cd, "Searching for kernel key by name %s.", key_description); |
7558 | |
7559 | 0 | kid = keyring_find_key_id_by_name(key_description); |
7560 | 0 | if (kid == 0) { |
7561 | 0 | log_dbg(cd, "keyring_find_key_id_by_name failed with errno %d.", errno); |
7562 | 0 | return -ENOENT; |
7563 | 0 | } |
7564 | | |
7565 | 0 | log_dbg(cd, "Reading content of kernel key (id %" PRIi32 ").", kid); |
7566 | |
7567 | 0 | r = keyring_read_key(kid, key, key_size); |
7568 | 0 | if (r < 0) |
7569 | 0 | log_dbg(cd, "keyring_read_key failed with errno %d.", errno); |
7570 | |
7571 | 0 | return r; |
7572 | 0 | } |
7573 | | |
7574 | | int crypt_keyring_get_keysize_by_name(struct crypt_device *cd, |
7575 | | const char *key_description, |
7576 | | size_t *r_key_size) |
7577 | 0 | { |
7578 | 0 | int r; |
7579 | 0 | key_serial_t kid; |
7580 | |
7581 | 0 | if (!key_description || !r_key_size) |
7582 | 0 | return -EINVAL; |
7583 | | |
7584 | 0 | log_dbg(cd, "Searching for kernel key by name %s.", key_description); |
7585 | |
7586 | 0 | kid = keyring_find_key_id_by_name(key_description); |
7587 | 0 | if (kid == -ENOTSUP) { |
7588 | 0 | log_dbg(cd, "Kernel keyring features disabled."); |
7589 | 0 | return -ENOTSUP; |
7590 | 0 | } else if (kid < 0) { |
7591 | 0 | log_dbg(cd, "keyring_find_key_id_by_name failed with errno %d.", errno); |
7592 | 0 | return -EINVAL; |
7593 | 0 | } |
7594 | 0 | else if (kid == 0) { |
7595 | 0 | log_dbg(cd, "keyring_find_key_id_by_name failed with errno %d.", ENOENT); |
7596 | 0 | return -ENOENT; |
7597 | 0 | } |
7598 | | |
7599 | 0 | log_dbg(cd, "Reading content of kernel key (id %" PRIi32 ").", kid); |
7600 | |
7601 | 0 | r = keyring_read_keysize(kid, r_key_size); |
7602 | 0 | if (r < 0) |
7603 | 0 | log_dbg(cd, "keyring_read_keysize failed with errno %d.", errno); |
7604 | |
7605 | 0 | return r; |
7606 | 0 | } |
7607 | | |
7608 | | /* internal only */ |
7609 | | int crypt_key_in_keyring(struct crypt_device *cd) |
7610 | 0 | { |
7611 | 0 | return cd ? cd->key_in_keyring : 0; |
7612 | 0 | } |
7613 | | |
7614 | | /* internal only */ |
7615 | | void crypt_set_key_in_keyring(struct crypt_device *cd, unsigned key_in_keyring) |
7616 | 0 | { |
7617 | 0 | if (!cd) |
7618 | 0 | return; |
7619 | | |
7620 | 0 | cd->key_in_keyring = key_in_keyring; |
7621 | 0 | } |
7622 | | |
7623 | | /* internal only */ |
7624 | | void crypt_unlink_key_from_thread_keyring(struct crypt_device *cd, |
7625 | | key_serial_t key_id) |
7626 | 0 | { |
7627 | 0 | log_dbg(cd, "Unlinking volume key (id: %" PRIi32 ") from thread keyring.", key_id); |
7628 | |
7629 | 0 | if (keyring_unlink_key_from_thread_keyring(key_id)) |
7630 | 0 | log_dbg(cd, "keyring_unlink_key_from_thread_keyring failed with errno %d.", errno); |
7631 | 0 | } |
7632 | | |
7633 | | void crypt_unlink_key_by_description_from_thread_keyring(struct crypt_device *cd, |
7634 | | const char *key_description, |
7635 | | key_type_t ktype) |
7636 | 0 | { |
7637 | 0 | key_serial_t kid; |
7638 | 0 | const char *type_name = key_type_name(ktype); |
7639 | |
7640 | 0 | if (!key_description || !type_name) |
7641 | 0 | return; |
7642 | | |
7643 | 0 | log_dbg(cd, "Requesting kernel key %s (type %s).", key_description, type_name); |
7644 | |
7645 | 0 | crypt_set_key_in_keyring(cd, 0); |
7646 | |
7647 | 0 | kid = keyring_request_key_id(ktype, key_description); |
7648 | 0 | if (kid == -ENOTSUP) { |
7649 | 0 | log_dbg(cd, "Kernel keyring features disabled."); |
7650 | 0 | return; |
7651 | 0 | } else if (kid < 0) { |
7652 | 0 | log_dbg(cd, "keyring_request_key_id failed with errno %d.", errno); |
7653 | 0 | return; |
7654 | 0 | } |
7655 | | |
7656 | 0 | crypt_unlink_key_from_thread_keyring(cd, kid); |
7657 | 0 | } |
7658 | | |
7659 | | int crypt_set_keyring_to_link(struct crypt_device *cd, const char *key_description, |
7660 | | const char *old_key_description, |
7661 | | const char *key_type_desc, const char *keyring_to_link_vk) |
7662 | 0 | { |
7663 | 0 | key_type_t key_type = USER_KEY; |
7664 | 0 | const char *name1 = NULL, *name2 = NULL; |
7665 | 0 | int32_t id = 0; |
7666 | 0 | int r, ri; |
7667 | 0 | struct luks2_hdr *hdr; |
7668 | 0 | unsigned user_descriptions_count, vks_count = 1; |
7669 | |
7670 | 0 | if (!cd || ((!key_description && !old_key_description) && (keyring_to_link_vk || key_type_desc)) || |
7671 | 0 | ((key_description || old_key_description) && !keyring_to_link_vk)) |
7672 | 0 | return -EINVAL; |
7673 | | |
7674 | 0 | hdr = crypt_get_hdr(cd, CRYPT_LUKS2); |
7675 | | |
7676 | | /* if only one key description is supplied, force it to be the first one */ |
7677 | 0 | if (!key_description && old_key_description) |
7678 | 0 | return -EINVAL; |
7679 | | |
7680 | 0 | if ((r = _onlyLUKS2(cd, 0, CRYPT_REQUIREMENT_OPAL | CRYPT_REQUIREMENT_ONLINE_REENCRYPT))) |
7681 | 0 | return r; |
7682 | | |
7683 | 0 | if (key_type_desc) |
7684 | 0 | key_type = key_type_by_name(key_type_desc); |
7685 | 0 | if (key_type != LOGON_KEY && key_type != USER_KEY) |
7686 | 0 | return -EINVAL; |
7687 | | |
7688 | 0 | ri = crypt_reencrypt_status(cd, NULL); |
7689 | 0 | if (ri > CRYPT_REENCRYPT_NONE && ri < CRYPT_REENCRYPT_INVALID) |
7690 | 0 | vks_count = LUKS2_reencrypt_vks_count(hdr); |
7691 | |
7692 | 0 | user_descriptions_count = (key_description ? 1 : 0) + (old_key_description ? 1 : 0); |
7693 | 0 | if (user_descriptions_count != 0 && vks_count > user_descriptions_count) |
7694 | 0 | return -ESRCH; |
7695 | | |
7696 | 0 | if (keyring_to_link_vk) { |
7697 | 0 | id = keyring_find_keyring_id_by_name(keyring_to_link_vk); |
7698 | 0 | if (id == 0) { |
7699 | 0 | log_err(cd, _("Could not find keyring described by \"%s\"."), keyring_to_link_vk); |
7700 | 0 | return -EINVAL; |
7701 | 0 | } |
7702 | 0 | if (key_description && !(name1 = strdup(key_description))) |
7703 | 0 | return -ENOMEM; |
7704 | 0 | if (old_key_description && !(name2 = strdup(old_key_description))) { |
7705 | 0 | free(CONST_CAST(void*)name1); |
7706 | 0 | return -ENOMEM; |
7707 | 0 | } |
7708 | 0 | } |
7709 | | |
7710 | 0 | cd->keyring_key_type = key_type; |
7711 | |
7712 | 0 | free(CONST_CAST(void*)cd->user_key_name1); |
7713 | 0 | free(CONST_CAST(void*)cd->user_key_name2); |
7714 | 0 | cd->user_key_name1 = name1; |
7715 | 0 | cd->user_key_name2 = name2; |
7716 | 0 | cd->keyring_to_link_vk = id; |
7717 | 0 | cd->link_vk_to_keyring = id != 0; |
7718 | |
7719 | 0 | return 0; |
7720 | 0 | } |
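
A usage sketch of crypt_set_keyring_to_link() (available since libcryptsetup 2.7): ask that the volume key unlocked by the next activation also be linked, as a "user" key, into a keyring chosen by the caller. The key name and the "@u" (user keyring) target are illustrative assumptions:

    #include <string.h>
    #include <libcryptsetup.h>

    /* Link the unlocked volume key into the user keyring under a custom name. */
    static int activate_and_link_vk_sketch(struct crypt_device *cd, const char *pass)
    {
        int r = crypt_set_keyring_to_link(cd, "cryptsetup:my-vk", NULL, "user", "@u");
        if (r < 0)
            return r;
        return crypt_activate_by_passphrase(cd, "luks-data", CRYPT_ANY_SLOT,
                                            pass, strlen(pass), 0);
    }
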
7721 | | |
7722 | | /* internal only */ |
7723 | | void crypt_drop_uploaded_keyring_key(struct crypt_device *cd, struct volume_key *vks) |
7724 | 0 | { |
7725 | 0 | struct volume_key *vk = vks; |
7726 | |
7727 | 0 | while (vk) { |
7728 | 0 | crypt_volume_key_drop_uploaded_kernel_key(cd, vk); |
7729 | 0 | vk = crypt_volume_key_next(vk); |
7730 | 0 | } |
7731 | 0 | } |
7732 | | |
7733 | | int crypt_activate_by_keyring(struct crypt_device *cd, |
7734 | | const char *name, |
7735 | | const char *key_description, |
7736 | | int keyslot, |
7737 | | uint32_t flags) |
7738 | 0 | { |
7739 | 0 | int r; |
7740 | 0 | struct crypt_keyslot_context kc = {}; |
7741 | |
7742 | 0 | if (!cd || !key_description) |
7743 | 0 | return -EINVAL; |
7744 | | |
7745 | 0 | crypt_keyslot_context_init_by_keyring_internal(&kc, key_description); |
7746 | 0 | r = crypt_activate_by_keyslot_context(cd, name, keyslot, &kc, CRYPT_ANY_SLOT, &kc, flags); |
7747 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
7748 | |
7749 | 0 | return r; |
7750 | 0 | } |
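
A usage sketch of crypt_activate_by_keyring(): if a "user" key with a known description is already present in a keyring the process can search (see the add_key() sketch earlier), activation by description is a single call; the device-mapper name and description are placeholders:

    #include <libcryptsetup.h>

    /* Activate "luks-data" with a passphrase read from the kernel keyring. */
    static int activate_by_keyring_sketch(struct crypt_device *cd)
    {
        return crypt_activate_by_keyring(cd, "luks-data", "cryptsetup:pass",
                                         CRYPT_ANY_SLOT, 0);
    }
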
7751 | | |
7752 | | /*
7753 | | * Workaround to serialize parallel activations that use a memory-hard PBKDF.
7754 | | * In specific situations (e.g. systemd activation) parallel unlocking can trigger the OOM killer.
7755 | | * For now, let's provide this ugly way to serialize unlocking of devices.
7756 | | */
7757 | | int crypt_serialize_lock(struct crypt_device *cd) |
7758 | 0 | { |
7759 | 0 | if (!cd->memory_hard_pbkdf_lock_enabled) |
7760 | 0 | return 0; |
7761 | | |
7762 | 0 | log_dbg(cd, "Taking global memory-hard access serialization lock."); |
7763 | 0 | if (crypt_write_lock(cd, "memory-hard-access", true, &cd->pbkdf_memory_hard_lock)) { |
7764 | 0 | log_err(cd, _("Failed to acquire global memory-hard access serialization lock.")); |
7765 | 0 | cd->pbkdf_memory_hard_lock = NULL; |
7766 | 0 | return -EINVAL; |
7767 | 0 | } |
7768 | | |
7769 | 0 | return 0; |
7770 | 0 | } |
7771 | | |
7772 | | void crypt_serialize_unlock(struct crypt_device *cd) |
7773 | 0 | { |
7774 | 0 | if (!cd->memory_hard_pbkdf_lock_enabled) |
7775 | 0 | return; |
7776 | | |
7777 | 0 | crypt_unlock_internal(cd, cd->pbkdf_memory_hard_lock); |
7778 | 0 | cd->pbkdf_memory_hard_lock = NULL; |
7779 | 0 | } |
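
Library-internal callers are expected to bracket the expensive, memory-hard unlock step with this pair; a schematic of the intended pattern, with the unlock step itself elided (these symbols are internal, not part of the public API):

    /* Schematic bracketing of a memory-hard unlock (library-internal callers). */
    static int unlock_serialized_sketch(struct crypt_device *cd)
    {
        int r = crypt_serialize_lock(cd);
        if (r < 0)
            return r;
        /* ... run the memory-hard PBKDF / keyslot unlock here ... */
        crypt_serialize_unlock(cd);
        return r;
    }
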
7780 | | |
7781 | | crypt_reencrypt_info crypt_reencrypt_status(struct crypt_device *cd, |
7782 | | struct crypt_params_reencrypt *params) |
7783 | 0 | { |
7784 | 0 | if (params) |
7785 | 0 | memset(params, 0, sizeof(*params)); |
7786 | |
7787 | 0 | if (!cd || !isLUKS(cd->type)) |
7788 | 0 | return CRYPT_REENCRYPT_INVALID; |
7789 | | |
7790 | 0 | if (isLUKS1(cd->type)) |
7791 | 0 | return CRYPT_REENCRYPT_NONE; |
7792 | | |
7793 | 0 | if (_onlyLUKS2(cd, CRYPT_CD_QUIET, CRYPT_REQUIREMENT_ONLINE_REENCRYPT)) |
7794 | 0 | return CRYPT_REENCRYPT_INVALID; |
7795 | | |
7796 | 0 | return LUKS2_reencrypt_get_params(&cd->u.luks2.hdr, params); |
7797 | 0 | } |
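
A caller-side sketch of crypt_reencrypt_status(): probe whether LUKS2 online reencryption is pending or was interrupted before performing other metadata operations; the helper name is illustrative:

    #include <errno.h>
    #include <libcryptsetup.h>

    /* Return 1 if reencryption metadata is present (clean or crashed),
     * 0 if none, negative error otherwise. */
    static int reencrypt_pending_sketch(struct crypt_device *cd)
    {
        struct crypt_params_reencrypt rparams;
        crypt_reencrypt_info ri = crypt_reencrypt_status(cd, &rparams);

        if (ri == CRYPT_REENCRYPT_INVALID)
            return -EINVAL;
        return (ri == CRYPT_REENCRYPT_CLEAN || ri == CRYPT_REENCRYPT_CRASH) ? 1 : 0;
    }
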
7798 | | |
7799 | | static void __attribute__((destructor)) libcryptsetup_exit(void) |
7800 | 0 | { |
7801 | 0 | crypt_token_unload_external_all(NULL); |
7802 | |
7803 | 0 | crypt_backend_destroy(); |
7804 | 0 | crypt_random_exit(); |
7805 | 0 | } |