/src/cryptsetup/lib/luks2/luks2_luks1_convert.c
Line | Count | Source |
1 | | /* |
2 | | * LUKS - Linux Unified Key Setup v2, LUKS1 conversion code |
3 | | * |
4 | | * Copyright (C) 2015-2024 Red Hat, Inc. All rights reserved. |
5 | | * Copyright (C) 2015-2024 Ondrej Kozina |
6 | | * Copyright (C) 2015-2024 Milan Broz |
7 | | * |
8 | | * This program is free software; you can redistribute it and/or |
9 | | * modify it under the terms of the GNU General Public License |
10 | | * as published by the Free Software Foundation; either version 2 |
11 | | * of the License, or (at your option) any later version. |
12 | | * |
13 | | * This program is distributed in the hope that it will be useful, |
14 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | | * GNU General Public License for more details. |
17 | | * |
18 | | * You should have received a copy of the GNU General Public License |
19 | | * along with this program; if not, write to the Free Software |
20 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. |
21 | | */ |
22 | | |
23 | | #include "luks2_internal.h" |
24 | | #include "../luks1/luks.h" |
25 | | #include "../luks1/af.h" |
26 | | |
27 | | /* This differs from LUKS_check_cipher() in that it does not check the dm-crypt fallback. */ |
28 | | int LUKS2_check_cipher(struct crypt_device *cd, |
29 | | size_t keylength, |
30 | | const char *cipher, |
31 | | const char *cipher_mode) |
32 | 0 | { |
33 | 0 | int r; |
34 | 0 | struct crypt_storage *s; |
35 | 0 | char buf[SECTOR_SIZE], *empty_key; |
36 | |
37 | 0 | log_dbg(cd, "Checking if cipher %s-%s is usable (storage wrapper).", cipher, cipher_mode); |
38 | |
39 | 0 | empty_key = crypt_safe_alloc(keylength); |
40 | 0 | if (!empty_key) |
41 | 0 | return -ENOMEM; |
42 | | |
43 | | /* No need for KEY-quality randomness here, but it must avoid known weak keys. */ |
44 | 0 | r = crypt_random_get(cd, empty_key, keylength, CRYPT_RND_NORMAL); |
45 | 0 | if (r < 0) |
46 | 0 | goto out; |
47 | | |
48 | 0 | r = crypt_storage_init(&s, SECTOR_SIZE, cipher, cipher_mode, empty_key, keylength, false); |
49 | 0 | if (r < 0) |
50 | 0 | goto out; |
51 | | |
52 | 0 | memset(buf, 0, sizeof(buf)); |
53 | 0 | r = crypt_storage_decrypt(s, 0, sizeof(buf), buf); |
54 | 0 | crypt_storage_destroy(s); |
55 | 0 | out: |
56 | 0 | crypt_safe_free(empty_key); |
57 | 0 | crypt_safe_memzero(buf, sizeof(buf)); |
58 | 0 | return r; |
59 | 0 | } |
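LUKS2_check_cipher() above only proves that the userspace crypto backend can instantiate the cipher specification: it draws a throwaway random key, sets up a crypt_storage wrapper over a single 512-byte sector and decrypts one zeroed block. Applications cannot call it directly; a comparable availability probe through the public API is crypt_benchmark(). The snippet below is a minimal sketch under that assumption; the cipher name, key size and buffer size are illustrative and not taken from this file.

    #include <libcryptsetup.h>

    /* Returns 0 when the userspace backend can drive the cipher at all,
     * a negative errno-style value otherwise. */
    static int cipher_usable(struct crypt_device *cd)
    {
            double enc_mbs, dec_mbs;

            return crypt_benchmark(cd, "aes", "xts-plain64",
                                   64,   /* volume key bytes */
                                   16,   /* IV size */
                                   4096, /* benchmark buffer */
                                   &enc_mbs, &dec_mbs);
    }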
60 | | |
61 | | static int json_luks1_keyslot(const struct luks_phdr *hdr_v1, int keyslot, struct json_object **keyslot_object) |
62 | 0 | { |
63 | 0 | char *base64_str, cipher[LUKS_CIPHERNAME_L+LUKS_CIPHERMODE_L]; |
64 | 0 | size_t base64_len; |
65 | 0 | struct json_object *keyslot_obj, *field, *jobj_kdf, *jobj_af, *jobj_area; |
66 | 0 | uint64_t offset, area_size, length; |
67 | 0 | int r; |
68 | |
69 | 0 | keyslot_obj = json_object_new_object(); |
70 | 0 | if (!keyslot_obj) { |
71 | 0 | r = -ENOMEM; |
72 | 0 | goto err; |
73 | 0 | } |
74 | | |
75 | 0 | json_object_object_add(keyslot_obj, "type", json_object_new_string("luks2")); |
76 | 0 | json_object_object_add(keyslot_obj, "key_size", json_object_new_int64(hdr_v1->keyBytes)); |
77 | | |
78 | | /* KDF */ |
79 | 0 | jobj_kdf = json_object_new_object(); |
80 | 0 | if (!jobj_kdf) { |
81 | 0 | r = -ENOMEM; |
82 | 0 | goto err; |
83 | 0 | } |
84 | | |
85 | 0 | json_object_object_add(jobj_kdf, "type", json_object_new_string(CRYPT_KDF_PBKDF2)); |
86 | 0 | json_object_object_add(jobj_kdf, "hash", json_object_new_string(hdr_v1->hashSpec)); |
87 | 0 | json_object_object_add(jobj_kdf, "iterations", json_object_new_int64(hdr_v1->keyblock[keyslot].passwordIterations)); |
88 | | /* salt field */ |
89 | 0 | r = crypt_base64_encode(&base64_str, &base64_len, hdr_v1->keyblock[keyslot].passwordSalt, LUKS_SALTSIZE); |
90 | 0 | if (r < 0) { |
91 | 0 | json_object_put(keyslot_obj); |
92 | 0 | json_object_put(jobj_kdf); |
93 | 0 | return r; |
94 | 0 | } |
95 | 0 | field = json_object_new_string_len(base64_str, base64_len); |
96 | 0 | free(base64_str); |
97 | 0 | json_object_object_add(jobj_kdf, "salt", field); |
98 | 0 | json_object_object_add(keyslot_obj, "kdf", jobj_kdf); |
99 | | |
100 | | /* AF */ |
101 | 0 | jobj_af = json_object_new_object(); |
102 | 0 | if (!jobj_af) { |
103 | 0 | r = -ENOMEM; |
104 | 0 | goto err; |
105 | 0 | } |
106 | | |
107 | 0 | json_object_object_add(jobj_af, "type", json_object_new_string("luks1")); |
108 | 0 | json_object_object_add(jobj_af, "hash", json_object_new_string(hdr_v1->hashSpec)); |
109 | | /* stripes field ignored, fixed to LUKS_STRIPES (4000) */ |
110 | 0 | json_object_object_add(jobj_af, "stripes", json_object_new_int(LUKS_STRIPES)); |
111 | 0 | json_object_object_add(keyslot_obj, "af", jobj_af); |
112 | | |
113 | | /* Area */ |
114 | 0 | jobj_area = json_object_new_object(); |
115 | 0 | if (!jobj_area) { |
116 | 0 | r = -ENOMEM; |
117 | 0 | goto err; |
118 | 0 | } |
119 | | |
120 | 0 | json_object_object_add(jobj_area, "type", json_object_new_string("raw")); |
121 | | |
122 | | /* encryption algorithm field */ |
123 | 0 | if (*hdr_v1->cipherMode != '\0') { |
124 | 0 | if (snprintf(cipher, sizeof(cipher), "%s-%s", hdr_v1->cipherName, hdr_v1->cipherMode) < 0) { |
125 | 0 | json_object_put(keyslot_obj); |
126 | 0 | json_object_put(jobj_area); |
127 | 0 | return -EINVAL; |
128 | 0 | } |
129 | 0 | json_object_object_add(jobj_area, "encryption", json_object_new_string(cipher)); |
130 | 0 | } else |
131 | 0 | json_object_object_add(jobj_area, "encryption", json_object_new_string(hdr_v1->cipherName)); |
132 | | |
133 | | /* area */ |
134 | 0 | if (LUKS_keyslot_area(hdr_v1, keyslot, &offset, &length)) { |
135 | 0 | json_object_put(keyslot_obj); |
136 | 0 | json_object_put(jobj_area); |
137 | 0 | return -EINVAL; |
138 | 0 | } |
139 | 0 | area_size = size_round_up(length, 4096); |
140 | 0 | json_object_object_add(jobj_area, "key_size", json_object_new_int(hdr_v1->keyBytes)); |
141 | 0 | json_object_object_add(jobj_area, "offset", crypt_jobj_new_uint64(offset)); |
142 | 0 | json_object_object_add(jobj_area, "size", crypt_jobj_new_uint64(area_size)); |
143 | 0 | json_object_object_add(keyslot_obj, "area", jobj_area); |
144 | |
145 | 0 | *keyslot_object = keyslot_obj; |
146 | 0 | return 0; |
147 | 0 | err: |
148 | 0 | json_object_put(keyslot_obj); |
149 | 0 | return r; |
150 | 0 | } |
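For every active LUKS1 keyslot, json_luks1_keyslot() above emits a LUKS2 keyslot object with the KDF pinned to PBKDF2 and the anti-forensic splitter pinned to the LUKS1 scheme (4000 stripes). The result has roughly the following shape; the concrete values are illustrative, the salt is the base64-encoded LUKS1 passwordSalt, and offset/size appear as decimal strings because crypt_jobj_new_uint64() serializes 64-bit numbers that way.

    {
      "type": "luks2",
      "key_size": 64,
      "kdf":  { "type": "pbkdf2", "hash": "sha256", "iterations": 1000000, "salt": "<base64>" },
      "af":   { "type": "luks1", "hash": "sha256", "stripes": 4000 },
      "area": { "type": "raw", "encryption": "aes-xts-plain64", "key_size": 64,
                "offset": "32768", "size": "258048" }
    }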
151 | | |
152 | | static int json_luks1_keyslots(const struct luks_phdr *hdr_v1, struct json_object **keyslots_object) |
153 | 0 | { |
154 | 0 | int keyslot, r; |
155 | 0 | struct json_object *keyslot_obj, *field; |
156 | |
157 | 0 | keyslot_obj = json_object_new_object(); |
158 | 0 | if (!keyslot_obj) |
159 | 0 | return -ENOMEM; |
160 | | |
161 | 0 | for (keyslot = 0; keyslot < LUKS_NUMKEYS; keyslot++) { |
162 | 0 | if (hdr_v1->keyblock[keyslot].active != LUKS_KEY_ENABLED) |
163 | 0 | continue; |
164 | 0 | r = json_luks1_keyslot(hdr_v1, keyslot, &field); |
165 | 0 | if (r) { |
166 | 0 | json_object_put(keyslot_obj); |
167 | 0 | return r; |
168 | 0 | } |
169 | 0 | r = json_object_object_add_by_uint(keyslot_obj, keyslot, field); |
170 | 0 | if (r) { |
171 | 0 | json_object_put(field); |
172 | 0 | json_object_put(keyslot_obj); |
173 | 0 | return r; |
174 | 0 | } |
175 | 0 | } |
176 | | |
177 | 0 | *keyslots_object = keyslot_obj; |
178 | 0 | return 0; |
179 | 0 | } |
180 | | |
181 | | static int json_luks1_segment(const struct luks_phdr *hdr_v1, struct json_object **segment_object) |
182 | 0 | { |
183 | 0 | const char *c; |
184 | 0 | char cipher[LUKS_CIPHERNAME_L+LUKS_CIPHERMODE_L]; |
185 | 0 | struct json_object *segment_obj, *field; |
186 | 0 | uint64_t number; |
187 | |
188 | 0 | segment_obj = json_object_new_object(); |
189 | 0 | if (!segment_obj) |
190 | 0 | return -ENOMEM; |
191 | | |
192 | | /* type field */ |
193 | 0 | field = json_object_new_string("crypt"); |
194 | 0 | if (!field) { |
195 | 0 | json_object_put(segment_obj); |
196 | 0 | return -ENOMEM; |
197 | 0 | } |
198 | 0 | json_object_object_add(segment_obj, "type", field); |
199 | | |
200 | | /* offset field */ |
201 | 0 | number = (uint64_t)hdr_v1->payloadOffset * SECTOR_SIZE; |
202 | |
203 | 0 | field = crypt_jobj_new_uint64(number); |
204 | 0 | if (!field) { |
205 | 0 | json_object_put(segment_obj); |
206 | 0 | return -ENOMEM; |
207 | 0 | } |
208 | 0 | json_object_object_add(segment_obj, "offset", field); |
209 | | |
210 | | /* iv_tweak field */ |
211 | 0 | field = json_object_new_string("0"); |
212 | 0 | if (!field) { |
213 | 0 | json_object_put(segment_obj); |
214 | 0 | return -ENOMEM; |
215 | 0 | } |
216 | 0 | json_object_object_add(segment_obj, "iv_tweak", field); |
217 | | |
218 | | /* length field */ |
219 | 0 | field = json_object_new_string("dynamic"); |
220 | 0 | if (!field) { |
221 | 0 | json_object_put(segment_obj); |
222 | 0 | return -ENOMEM; |
223 | 0 | } |
224 | 0 | json_object_object_add(segment_obj, "size", field); |
225 | | |
226 | | /* cipher field */ |
227 | 0 | if (*hdr_v1->cipherMode != '\0') { |
228 | 0 | if (snprintf(cipher, sizeof(cipher), "%s-%s", hdr_v1->cipherName, hdr_v1->cipherMode) < 0) { |
229 | 0 | json_object_put(segment_obj); |
230 | 0 | return -EINVAL; |
231 | 0 | } |
232 | 0 | c = cipher; |
233 | 0 | } else |
234 | 0 | c = hdr_v1->cipherName; |
235 | | |
236 | 0 | field = json_object_new_string(c); |
237 | 0 | if (!field) { |
238 | 0 | json_object_put(segment_obj); |
239 | 0 | return -ENOMEM; |
240 | 0 | } |
241 | 0 | json_object_object_add(segment_obj, "encryption", field); |
242 | | |
243 | | /* block field */ |
244 | 0 | field = json_object_new_int(SECTOR_SIZE); |
245 | 0 | if (!field) { |
246 | 0 | json_object_put(segment_obj); |
247 | 0 | return -ENOMEM; |
248 | 0 | } |
249 | 0 | json_object_object_add(segment_obj, "sector_size", field); |
250 | |
251 | 0 | *segment_object = segment_obj; |
252 | 0 | return 0; |
253 | 0 | } |
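json_luks1_segment() maps the single LUKS1 data area onto one LUKS2 "crypt" segment: the offset is payloadOffset converted from sectors to bytes, the size stays "dynamic" (the rest of the device), and the encryption sector size is fixed at 512. For a typical payloadOffset of 4096 sectors the object would look roughly like this (cipher name illustrative):

    {
      "type": "crypt",
      "offset": "2097152",
      "iv_tweak": "0",
      "size": "dynamic",
      "encryption": "aes-xts-plain64",
      "sector_size": 512
    }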
254 | | |
255 | | static int json_luks1_segments(const struct luks_phdr *hdr_v1, struct json_object **segments_object) |
256 | 0 | { |
257 | 0 | int r; |
258 | 0 | struct json_object *segments_obj, *field; |
259 | |
260 | 0 | segments_obj = json_object_new_object(); |
261 | 0 | if (!segments_obj) |
262 | 0 | return -ENOMEM; |
263 | | |
264 | 0 | r = json_luks1_segment(hdr_v1, &field); |
265 | 0 | if (r) { |
266 | 0 | json_object_put(segments_obj); |
267 | 0 | return r; |
268 | 0 | } |
269 | 0 | r = json_object_object_add_by_uint(segments_obj, 0, field); |
270 | 0 | if (r) { |
271 | 0 | json_object_put(field); |
272 | 0 | json_object_put(segments_obj); |
273 | 0 | return r; |
274 | 0 | } |
275 | | |
276 | 0 | *segments_object = segments_obj; |
277 | 0 | return 0; |
278 | 0 | } |
279 | | |
280 | | static int json_luks1_digest(const struct luks_phdr *hdr_v1, struct json_object **digest_object) |
281 | 0 | { |
282 | 0 | char keyslot_str[16], *base64_str; |
283 | 0 | int r, ks; |
284 | 0 | size_t base64_len; |
285 | 0 | struct json_object *digest_obj, *array, *field; |
286 | |
287 | 0 | digest_obj = json_object_new_object(); |
288 | 0 | if (!digest_obj) |
289 | 0 | return -ENOMEM; |
290 | | |
291 | | /* type field */ |
292 | 0 | field = json_object_new_string("pbkdf2"); |
293 | 0 | if (!field) { |
294 | 0 | json_object_put(digest_obj); |
295 | 0 | return -ENOMEM; |
296 | 0 | } |
297 | 0 | json_object_object_add(digest_obj, "type", field); |
298 | | |
299 | | /* keyslots array */ |
300 | 0 | array = json_object_new_array(); |
301 | 0 | if (!array) { |
302 | 0 | json_object_put(digest_obj); |
303 | 0 | return -ENOMEM; |
304 | 0 | } |
305 | 0 | json_object_object_add(digest_obj, "keyslots", json_object_get(array)); |
306 | |
307 | 0 | for (ks = 0; ks < LUKS_NUMKEYS; ks++) { |
308 | 0 | if (hdr_v1->keyblock[ks].active != LUKS_KEY_ENABLED) |
309 | 0 | continue; |
310 | 0 | if (snprintf(keyslot_str, sizeof(keyslot_str), "%d", ks) < 0) { |
311 | 0 | json_object_put(field); |
312 | 0 | json_object_put(array); |
313 | 0 | json_object_put(digest_obj); |
314 | 0 | return -EINVAL; |
315 | 0 | } |
316 | | |
317 | 0 | field = json_object_new_string(keyslot_str); |
318 | 0 | if (!field || json_object_array_add(array, field) < 0) { |
319 | 0 | json_object_put(field); |
320 | 0 | json_object_put(array); |
321 | 0 | json_object_put(digest_obj); |
322 | 0 | return -ENOMEM; |
323 | 0 | } |
324 | 0 | } |
325 | | |
326 | 0 | json_object_put(array); |
327 | | |
328 | | /* segments array */ |
329 | 0 | array = json_object_new_array(); |
330 | 0 | if (!array) { |
331 | 0 | json_object_put(digest_obj); |
332 | 0 | return -ENOMEM; |
333 | 0 | } |
334 | 0 | json_object_object_add(digest_obj, "segments", json_object_get(array)); |
335 | |
336 | 0 | field = json_object_new_string("0"); |
337 | 0 | if (!field || json_object_array_add(array, field) < 0) { |
338 | 0 | json_object_put(field); |
339 | 0 | json_object_put(array); |
340 | 0 | json_object_put(digest_obj); |
341 | 0 | return -ENOMEM; |
342 | 0 | } |
343 | | |
344 | 0 | json_object_put(array); |
345 | | |
346 | | /* hash field */ |
347 | 0 | field = json_object_new_string(hdr_v1->hashSpec); |
348 | 0 | if (!field) { |
349 | 0 | json_object_put(digest_obj); |
350 | 0 | return -ENOMEM; |
351 | 0 | } |
352 | 0 | json_object_object_add(digest_obj, "hash", field); |
353 | | |
354 | | /* salt field */ |
355 | 0 | r = crypt_base64_encode(&base64_str, &base64_len, hdr_v1->mkDigestSalt, LUKS_SALTSIZE); |
356 | 0 | if (r < 0) { |
357 | 0 | json_object_put(digest_obj); |
358 | 0 | return r; |
359 | 0 | } |
360 | | |
361 | 0 | field = json_object_new_string_len(base64_str, base64_len); |
362 | 0 | free(base64_str); |
363 | 0 | if (!field) { |
364 | 0 | json_object_put(digest_obj); |
365 | 0 | return -ENOMEM; |
366 | 0 | } |
367 | 0 | json_object_object_add(digest_obj, "salt", field); |
368 | | |
369 | | /* digest field */ |
370 | 0 | r = crypt_base64_encode(&base64_str, &base64_len, hdr_v1->mkDigest, LUKS_DIGESTSIZE); |
371 | 0 | if (r < 0) { |
372 | 0 | json_object_put(digest_obj); |
373 | 0 | return r; |
374 | 0 | } |
375 | | |
376 | 0 | field = json_object_new_string_len(base64_str, base64_len); |
377 | 0 | free(base64_str); |
378 | 0 | if (!field) { |
379 | 0 | json_object_put(digest_obj); |
380 | 0 | return -ENOMEM; |
381 | 0 | } |
382 | 0 | json_object_object_add(digest_obj, "digest", field); |
383 | | |
384 | | /* iterations field */ |
385 | 0 | field = json_object_new_int64(hdr_v1->mkDigestIterations); |
386 | 0 | if (!field) { |
387 | 0 | json_object_put(digest_obj); |
388 | 0 | return -ENOMEM; |
389 | 0 | } |
390 | 0 | json_object_object_add(digest_obj, "iterations", field); |
391 | |
392 | 0 | *digest_object = digest_obj; |
393 | 0 | return 0; |
394 | 0 | } |
395 | | |
396 | | static int json_luks1_digests(const struct luks_phdr *hdr_v1, struct json_object **digests_object) |
397 | 0 | { |
398 | 0 | int r; |
399 | 0 | struct json_object *digests_obj, *field; |
400 | |
401 | 0 | digests_obj = json_object_new_object(); |
402 | 0 | if (!digests_obj) |
403 | 0 | return -ENOMEM; |
404 | | |
405 | 0 | r = json_luks1_digest(hdr_v1, &field); |
406 | 0 | if (r) { |
407 | 0 | json_object_put(digests_obj); |
408 | 0 | return r; |
409 | 0 | } |
410 | 0 | json_object_object_add(digests_obj, "0", field); |
411 | |
412 | 0 | *digests_object = digests_obj; |
413 | 0 | return 0; |
414 | 0 | } |
415 | | |
416 | | static int json_luks1_object(struct luks_phdr *hdr_v1, struct json_object **luks1_object, uint64_t keyslots_size) |
417 | 0 | { |
418 | 0 | int r; |
419 | 0 | struct json_object *luks1_obj, *field; |
420 | 0 | uint64_t json_size; |
421 | |
422 | 0 | luks1_obj = json_object_new_object(); |
423 | 0 | if (!luks1_obj) |
424 | 0 | return -ENOMEM; |
425 | | |
426 | | /* keyslots field */ |
427 | 0 | r = json_luks1_keyslots(hdr_v1, &field); |
428 | 0 | if (r) { |
429 | 0 | json_object_put(luks1_obj); |
430 | 0 | return r; |
431 | 0 | } |
432 | 0 | json_object_object_add(luks1_obj, "keyslots", field); |
433 | | |
434 | | /* tokens field */ |
435 | 0 | field = json_object_new_object(); |
436 | 0 | if (!field) { |
437 | 0 | json_object_put(luks1_obj); |
438 | 0 | return -ENOMEM; |
439 | 0 | } |
440 | 0 | json_object_object_add(luks1_obj, "tokens", field); |
441 | | |
442 | | /* segments field */ |
443 | 0 | r = json_luks1_segments(hdr_v1, &field); |
444 | 0 | if (r) { |
445 | 0 | json_object_put(luks1_obj); |
446 | 0 | return r; |
447 | 0 | } |
448 | 0 | json_object_object_add(luks1_obj, "segments", field); |
449 | | |
450 | | /* digests field */ |
451 | 0 | r = json_luks1_digests(hdr_v1, &field); |
452 | 0 | if (r) { |
453 | 0 | json_object_put(luks1_obj); |
454 | 0 | return r; |
455 | 0 | } |
456 | 0 | json_object_object_add(luks1_obj, "digests", field); |
457 | | |
458 | | /* config field */ |
459 | | /* anything else? */ |
460 | 0 | field = json_object_new_object(); |
461 | 0 | if (!field) { |
462 | 0 | json_object_put(luks1_obj); |
463 | 0 | return -ENOMEM; |
464 | 0 | } |
465 | 0 | json_object_object_add(luks1_obj, "config", field); |
466 | |
467 | 0 | json_size = LUKS2_HDR_16K_LEN - LUKS2_HDR_BIN_LEN; |
468 | 0 | json_object_object_add(field, "json_size", crypt_jobj_new_uint64(json_size)); |
469 | 0 | keyslots_size -= (keyslots_size % 4096); |
470 | 0 | json_object_object_add(field, "keyslots_size", crypt_jobj_new_uint64(keyslots_size)); |
471 | |
472 | 0 | *luks1_object = luks1_obj; |
473 | 0 | return 0; |
474 | 0 | } |
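json_luks1_object() stitches the pieces into the complete top-level LUKS2 metadata: the converted keyslots, an empty tokens object, one segment, one pbkdf2 digest referencing every active keyslot and segment "0", and a config object. json_size is the JSON area of the header (header length minus the binary part; assuming the usual LUKS2_HDR_16K_LEN of 16384 and LUKS2_HDR_BIN_LEN of 4096 this is 12288 bytes), and keyslots_size is the remaining space rounded down to a 4096-byte boundary. A sketch of the skeleton, with placeholder values:

    {
      "keyslots": { "0": { ... }, "1": { ... } },
      "tokens":   { },
      "segments": { "0": { ... } },
      "digests":  { "0": { "type": "pbkdf2", "keyslots": ["0", "1"], "segments": ["0"],
                           "hash": "sha256", "salt": "<base64>", "digest": "<base64>",
                           "iterations": 100000 } },
      "config":   { "json_size": "12288", "keyslots_size": "<bytes>" }
    }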
475 | | |
476 | | static void move_keyslot_offset(json_object *jobj, int offset_add) |
477 | 0 | { |
478 | 0 | json_object *jobj1, *jobj2, *jobj_area; |
479 | 0 | uint64_t offset = 0; |
480 | |
481 | 0 | json_object_object_get_ex(jobj, "keyslots", &jobj1); |
482 | 0 | json_object_object_foreach(jobj1, key, val) { |
483 | 0 | UNUSED(key); |
484 | 0 | json_object_object_get_ex(val, "area", &jobj_area); |
485 | 0 | json_object_object_get_ex(jobj_area, "offset", &jobj2); |
486 | 0 | offset = crypt_jobj_get_uint64(jobj2) + offset_add; |
487 | 0 | json_object_object_add(jobj_area, "offset", crypt_jobj_new_uint64(offset)); |
488 | 0 | } |
489 | 0 | } |
490 | | |
491 | | static int move_keyslot_areas(struct crypt_device *cd, off_t offset_from, |
492 | | off_t offset_to, size_t buf_size) |
493 | 0 | { |
494 | 0 | int devfd, r = -EIO; |
495 | 0 | struct device *device = crypt_metadata_device(cd); |
496 | 0 | void *buf = NULL; |
497 | |
498 | 0 | log_dbg(cd, "Moving keyslot areas of size %zu from %jd to %jd.", |
499 | 0 | buf_size, (intmax_t)offset_from, (intmax_t)offset_to); |
500 | |
501 | 0 | if (posix_memalign(&buf, crypt_getpagesize(), buf_size)) |
502 | 0 | return -ENOMEM; |
503 | | |
504 | 0 | devfd = device_open(cd, device, O_RDWR); |
505 | 0 | if (devfd < 0) { |
506 | 0 | free(buf); |
507 | 0 | return -EIO; |
508 | 0 | } |
509 | | |
510 | | /* This can safely fail (for block devices). It only allocates space if it is possible. */ |
511 | 0 | if (posix_fallocate(devfd, offset_to, buf_size)) |
512 | 0 | log_dbg(cd, "Preallocation (fallocate) of new keyslot area not available."); |
513 | | |
514 | | /* Try to read *new* area to check that area is there (trimmed backup). */ |
515 | 0 | if (read_lseek_blockwise(devfd, device_block_size(cd, device), |
516 | 0 | device_alignment(device), buf, buf_size, |
517 | 0 | offset_to) != (ssize_t)buf_size) |
518 | 0 | goto out; |
519 | | |
520 | 0 | if (read_lseek_blockwise(devfd, device_block_size(cd, device), |
521 | 0 | device_alignment(device), buf, buf_size, |
522 | 0 | offset_from) != (ssize_t)buf_size) |
523 | 0 | goto out; |
524 | | |
525 | 0 | if (write_lseek_blockwise(devfd, device_block_size(cd, device), |
526 | 0 | device_alignment(device), buf, buf_size, |
527 | 0 | offset_to) != (ssize_t)buf_size) |
528 | 0 | goto out; |
529 | | |
530 | 0 | r = 0; |
531 | 0 | out: |
532 | 0 | device_sync(cd, device); |
533 | 0 | crypt_safe_memzero(buf, buf_size); |
534 | 0 | free(buf); |
535 | |
536 | 0 | return r; |
537 | 0 | } |
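move_keyslot_areas() is the low-level copy used by both conversion directions. The LUKS1 keyslot material starts right after the 4 KiB-aligned header (8 sectors in), while LUKS2 reserves two header copies in front of its keyslot area, so the binary keyslot data must be relocated and every keyslot offset in the JSON shifted by the same amount. Assuming the usual constants (LUKS2_HDR_16K_LEN = 16384, LUKS_ALIGN_KEYSLOTS = 4096, SECTOR_SIZE = 512), the numbers used by the callers below work out to:

    old start   = 8 * 512           =  4096 bytes  (LUKS1 keyslot area)
    buf_offset  = 2 * 16384         = 32768 bytes  (new start of keyslot data)
    luks1_shift = 2 * 16384 - 4096  = 28672 bytes  (added to every keyslot offset)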
538 | | |
539 | | static int luks_header_in_use(struct crypt_device *cd) |
540 | 0 | { |
541 | 0 | int r; |
542 | |
543 | 0 | r = lookup_dm_dev_by_uuid(cd, crypt_get_uuid(cd), crypt_get_type(cd)); |
544 | 0 | if (r < 0) |
545 | 0 | log_err(cd, _("Cannot check status of device with uuid: %s."), crypt_get_uuid(cd)); |
546 | |
547 | 0 | return r; |
548 | 0 | } |
549 | | |
550 | | /* Check if there is a luksmeta area (foreign metadata created by the luksmeta package) */ |
551 | | static int luksmeta_header_present(struct crypt_device *cd, off_t luks1_size) |
552 | 0 | { |
553 | 0 | int devfd, r = 0; |
554 | 0 | static const uint8_t LM_MAGIC[] = { 'L', 'U', 'K', 'S', 'M', 'E', 'T', 'A' }; |
555 | 0 | struct device *device = crypt_metadata_device(cd); |
556 | 0 | void *buf = NULL; |
557 | |
558 | 0 | if (posix_memalign(&buf, crypt_getpagesize(), sizeof(LM_MAGIC))) |
559 | 0 | return -ENOMEM; |
560 | | |
561 | 0 | devfd = device_open(cd, device, O_RDONLY); |
562 | 0 | if (devfd < 0) { |
563 | 0 | free(buf); |
564 | 0 | return -EIO; |
565 | 0 | } |
566 | | |
567 | | /* Note: we must not treat a read failure as a problem here; the header can be trimmed. */ |
568 | 0 | if (read_lseek_blockwise(devfd, device_block_size(cd, device), device_alignment(device), |
569 | 0 | buf, sizeof(LM_MAGIC), luks1_size) == (ssize_t)sizeof(LM_MAGIC) && |
570 | 0 | !memcmp(LM_MAGIC, buf, sizeof(LM_MAGIC))) { |
571 | 0 | log_err(cd, _("Unable to convert header with LUKSMETA additional metadata.")); |
572 | 0 | r = -EBUSY; |
573 | 0 | } |
574 | |
575 | 0 | free(buf); |
576 | 0 | return r; |
577 | 0 | } |
578 | | |
579 | | /* Convert LUKS1 -> LUKS2 */ |
580 | | int LUKS2_luks1_to_luks2(struct crypt_device *cd, struct luks_phdr *hdr1, struct luks2_hdr *hdr2) |
581 | 0 | { |
582 | 0 | int r; |
583 | 0 | json_object *jobj = NULL; |
584 | 0 | size_t buf_size, buf_offset, luks1_size, luks1_shift = 2 * LUKS2_HDR_16K_LEN - LUKS_ALIGN_KEYSLOTS; |
585 | 0 | uint64_t required_size, max_size = crypt_get_data_offset(cd) * SECTOR_SIZE; |
586 | | |
587 | | /* for detached headers max size == device size */ |
588 | 0 | if (!max_size && (r = device_size(crypt_metadata_device(cd), &max_size))) |
589 | 0 | return r; |
590 | | |
591 | 0 | luks1_size = LUKS_device_sectors(hdr1) << SECTOR_SHIFT; |
592 | 0 | luks1_size = size_round_up(luks1_size, LUKS_ALIGN_KEYSLOTS); |
593 | 0 | if (!luks1_size) |
594 | 0 | return -EINVAL; |
595 | | |
596 | 0 | if (LUKS_keyslots_offset(hdr1) != (LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE)) { |
597 | 0 | log_dbg(cd, "Unsupported keyslots material offset: %zu.", LUKS_keyslots_offset(hdr1)); |
598 | 0 | return -EINVAL; |
599 | 0 | } |
600 | | |
601 | 0 | if (LUKS2_check_cipher(cd, hdr1->keyBytes, hdr1->cipherName, hdr1->cipherMode)) { |
602 | 0 | log_err(cd, _("Unable to use cipher specification %s-%s for LUKS2."), |
603 | 0 | hdr1->cipherName, hdr1->cipherMode); |
604 | 0 | return -EINVAL; |
605 | 0 | } |
606 | | |
607 | 0 | if (luksmeta_header_present(cd, luks1_size)) |
608 | 0 | return -EINVAL; |
609 | | |
610 | 0 | log_dbg(cd, "Max size: %" PRIu64 ", LUKS1 (full) header size %zu , required shift: %zu", |
611 | 0 | max_size, luks1_size, luks1_shift); |
612 | |
613 | 0 | required_size = luks1_size + luks1_shift; |
614 | |
615 | 0 | if ((max_size < required_size) && |
616 | 0 | device_fallocate(crypt_metadata_device(cd), required_size)) { |
617 | 0 | log_err(cd, _("Unable to move keyslot area. Not enough space.")); |
618 | 0 | return -EINVAL; |
619 | 0 | } |
620 | | |
621 | 0 | if (max_size < required_size) |
622 | 0 | max_size = required_size; |
623 | | |
624 | | /* fix coverity false positive integer underflow */ |
625 | 0 | if (max_size < 2 * LUKS2_HDR_16K_LEN) |
626 | 0 | return -EINVAL; |
627 | | |
628 | 0 | r = json_luks1_object(hdr1, &jobj, max_size - 2 * LUKS2_HDR_16K_LEN); |
629 | 0 | if (r < 0) |
630 | 0 | return r; |
631 | | |
632 | 0 | move_keyslot_offset(jobj, luks1_shift); |
633 | | |
634 | | /* Create and fill LUKS2 hdr */ |
635 | 0 | memset(hdr2, 0, sizeof(*hdr2)); |
636 | 0 | hdr2->hdr_size = LUKS2_HDR_16K_LEN; |
637 | 0 | hdr2->seqid = 1; |
638 | 0 | hdr2->version = 2; |
639 | 0 | strncpy(hdr2->checksum_alg, "sha256", LUKS2_CHECKSUM_ALG_L); |
640 | 0 | crypt_random_get(cd, (char*)hdr2->salt1, sizeof(hdr2->salt1), CRYPT_RND_SALT); |
641 | 0 | crypt_random_get(cd, (char*)hdr2->salt2, sizeof(hdr2->salt2), CRYPT_RND_SALT); |
642 | 0 | strncpy(hdr2->uuid, crypt_get_uuid(cd), LUKS2_UUID_L-1); /* UUID should be max 36 chars */ |
643 | 0 | hdr2->jobj = jobj; |
644 | | |
645 | | /* |
646 | | * It duplicates a check in LUKS2_hdr_write(), but we don't want to move |
647 | | * keyslot areas in case it would fail later |
648 | | */ |
649 | 0 | if (max_size < LUKS2_hdr_and_areas_size(hdr2)) { |
650 | 0 | r = -EINVAL; |
651 | 0 | goto out; |
652 | 0 | } |
653 | | |
654 | | /* check future LUKS2 metadata before moving keyslots area */ |
655 | 0 | if (LUKS2_hdr_validate(cd, hdr2->jobj, hdr2->hdr_size - LUKS2_HDR_BIN_LEN)) { |
656 | 0 | log_err(cd, _("Cannot convert to LUKS2 format - invalid metadata.")); |
657 | 0 | r = -EINVAL; |
658 | 0 | goto out; |
659 | 0 | } |
660 | | |
661 | 0 | if ((r = luks_header_in_use(cd))) { |
662 | 0 | if (r > 0) |
663 | 0 | r = -EBUSY; |
664 | 0 | goto out; |
665 | 0 | } |
666 | | |
667 | | /* move keyslots 4k -> 32k offset */ |
668 | 0 | buf_offset = 2 * LUKS2_HDR_16K_LEN; |
669 | 0 | buf_size = luks1_size - LUKS_ALIGN_KEYSLOTS; |
670 | | |
671 | | /* check future LUKS2 keyslots area is at least as large as LUKS1 keyslots area */ |
672 | 0 | if (buf_size > LUKS2_keyslots_size(hdr2)) { |
673 | 0 | log_err(cd, _("Unable to move keyslot area. LUKS2 keyslots area too small.")); |
674 | 0 | r = -EINVAL; |
675 | 0 | goto out; |
676 | 0 | } |
677 | | |
678 | 0 | if ((r = move_keyslot_areas(cd, 8 * SECTOR_SIZE, buf_offset, buf_size)) < 0) { |
679 | 0 | log_err(cd, _("Unable to move keyslot area.")); |
680 | 0 | goto out; |
681 | 0 | } |
682 | | |
683 | | /* Write new LUKS2 JSON */ |
684 | 0 | r = LUKS2_hdr_write(cd, hdr2); |
685 | 0 | out: |
686 | 0 | LUKS2_hdr_free(cd, hdr2); |
687 | |
688 | 0 | return r; |
689 | 0 | } |
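LUKS2_luks1_to_luks2() is not invoked by applications directly; it sits behind crypt_convert() once a LUKS1 header has been loaded. A minimal caller could look like the sketch below (public libcryptsetup API only; dev_path is a placeholder and error handling is shortened):

    #include <libcryptsetup.h>

    int convert_to_luks2(const char *dev_path)
    {
            struct crypt_device *cd = NULL;
            int r;

            r = crypt_init(&cd, dev_path);
            if (r < 0)
                    return r;

            /* Read and verify the existing LUKS1 header. */
            r = crypt_load(cd, CRYPT_LUKS1, NULL);
            if (!r)
                    /* In-place conversion; fails if the header is in use
                     * (active mapping) or carries LUKSMETA metadata. */
                    r = crypt_convert(cd, CRYPT_LUKS2, NULL);

            crypt_free(cd);
            return r;
    }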
690 | | |
691 | | static int keyslot_LUKS1_compatible(struct crypt_device *cd, struct luks2_hdr *hdr, |
692 | | int keyslot, uint32_t key_size, const char *hash) |
693 | 0 | { |
694 | 0 | json_object *jobj_keyslot, *jobj, *jobj_kdf, *jobj_af; |
695 | 0 | uint64_t l2_offset, l2_length; |
696 | 0 | size_t ks_key_size; |
697 | 0 | const char *ks_cipher, *data_cipher; |
698 | |
699 | 0 | jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot); |
700 | 0 | if (!jobj_keyslot) |
701 | 0 | return 1; |
702 | | |
703 | 0 | if (!json_object_object_get_ex(jobj_keyslot, "type", &jobj) || |
704 | 0 | strcmp(json_object_get_string(jobj), "luks2")) |
705 | 0 | return 0; |
706 | | |
707 | | /* Using PBKDF2, this implies memory and parallel is not used. */ |
708 | 0 | jobj = NULL; |
709 | 0 | if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf) || |
710 | 0 | !json_object_object_get_ex(jobj_kdf, "type", &jobj) || |
711 | 0 | strcmp(json_object_get_string(jobj), CRYPT_KDF_PBKDF2) || |
712 | 0 | !json_object_object_get_ex(jobj_kdf, "hash", &jobj) || |
713 | 0 | strcmp(json_object_get_string(jobj), hash)) |
714 | 0 | return 0; |
715 | | |
716 | 0 | jobj = NULL; |
717 | 0 | if (!json_object_object_get_ex(jobj_keyslot, "af", &jobj_af) || |
718 | 0 | !json_object_object_get_ex(jobj_af, "stripes", &jobj) || |
719 | 0 | json_object_get_int(jobj) != LUKS_STRIPES) |
720 | 0 | return 0; |
721 | | |
722 | 0 | jobj = NULL; |
723 | 0 | if (!json_object_object_get_ex(jobj_af, "hash", &jobj) || |
724 | 0 | (crypt_hash_size(json_object_get_string(jobj)) < 0) || |
725 | 0 | strcmp(json_object_get_string(jobj), hash)) |
726 | 0 | return 0; |
727 | | |
728 | 0 | ks_cipher = LUKS2_get_keyslot_cipher(hdr, keyslot, &ks_key_size); |
729 | 0 | data_cipher = LUKS2_get_cipher(hdr, CRYPT_DEFAULT_SEGMENT); |
730 | 0 | if (!ks_cipher || !data_cipher || key_size != ks_key_size || strcmp(ks_cipher, data_cipher)) { |
731 | 0 | log_dbg(cd, "Cipher in keyslot %d is different from volume key encryption.", keyslot); |
732 | 0 | return 0; |
733 | 0 | } |
734 | | |
735 | 0 | if (LUKS2_keyslot_area(hdr, keyslot, &l2_offset, &l2_length)) |
736 | 0 | return 0; |
737 | | |
738 | 0 | if (l2_length != (size_round_up(AF_split_sectors(key_size, LUKS_STRIPES) * SECTOR_SIZE, 4096))) { |
739 | 0 | log_dbg(cd, "Area length in LUKS2 keyslot (%d) is not compatible with LUKS1", keyslot); |
740 | 0 | return 0; |
741 | 0 | } |
742 | | |
743 | 0 | return 1; |
744 | 0 | } |
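The last check in keyslot_LUKS1_compatible() requires the LUKS2 keyslot area to be exactly as long as a LUKS1 keyslot would be: the AF-split key material (key_size bytes times LUKS_STRIPES) rounded up to whole 512-byte sectors and then to a 4096-byte boundary. A worked example for a 64-byte (512-bit XTS) volume key, assuming AF_split_sectors() returns the sector count of key_size * stripes bytes:

    64 * 4000                   = 256000 bytes of split key material
    ceil(256000 / 512) * 512    = 256000 bytes (exactly 500 sectors)
    size_round_up(256000, 4096) = 258048 bytes expected area length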
745 | | |
746 | | /* Convert LUKS2 -> LUKS1 */ |
747 | | int LUKS2_luks2_to_luks1(struct crypt_device *cd, struct luks2_hdr *hdr2, struct luks_phdr *hdr1) |
748 | 0 | { |
749 | 0 | size_t buf_size, buf_offset; |
750 | 0 | char cipher[LUKS_CIPHERNAME_L], cipher_mode[LUKS_CIPHERMODE_L]; |
751 | 0 | char *digest, *digest_salt; |
752 | 0 | const char *hash; |
753 | 0 | size_t len; |
754 | 0 | json_object *jobj_keyslot, *jobj_digest, *jobj_segment, *jobj_kdf, *jobj_area, *jobj1, *jobj2; |
755 | 0 | uint32_t key_size; |
756 | 0 | int i, r, last_active = 0; |
757 | 0 | uint64_t offset, area_length; |
758 | 0 | char *buf, luksMagic[] = LUKS_MAGIC; |
759 | |
760 | 0 | jobj_digest = LUKS2_get_digest_jobj(hdr2, 0); |
761 | 0 | if (!jobj_digest) |
762 | 0 | return -EINVAL; |
763 | | |
764 | 0 | jobj_segment = LUKS2_get_segment_jobj(hdr2, CRYPT_DEFAULT_SEGMENT); |
765 | 0 | if (!jobj_segment) |
766 | 0 | return -EINVAL; |
767 | | |
768 | 0 | if (json_segment_get_sector_size(jobj_segment) != SECTOR_SIZE) { |
769 | 0 | log_err(cd, _("Cannot convert to LUKS1 format - default segment encryption sector size is not 512 bytes.")); |
770 | 0 | return -EINVAL; |
771 | 0 | } |
772 | | |
773 | 0 | json_object_object_get_ex(hdr2->jobj, "digests", &jobj1); |
774 | 0 | if (!json_object_object_get_ex(jobj_digest, "type", &jobj2) || |
775 | 0 | strcmp(json_object_get_string(jobj2), "pbkdf2") || |
776 | 0 | json_object_object_length(jobj1) != 1) { |
777 | 0 | log_err(cd, _("Cannot convert to LUKS1 format - key slot digests are not LUKS1 compatible.")); |
778 | 0 | return -EINVAL; |
779 | 0 | } |
780 | 0 | if (!json_object_object_get_ex(jobj_digest, "hash", &jobj2)) |
781 | 0 | return -EINVAL; |
782 | 0 | hash = json_object_get_string(jobj2); |
783 | |
784 | 0 | r = crypt_parse_name_and_mode(LUKS2_get_cipher(hdr2, CRYPT_DEFAULT_SEGMENT), cipher, NULL, cipher_mode); |
785 | 0 | if (r < 0) |
786 | 0 | return r; |
787 | | |
788 | 0 | if (crypt_cipher_wrapped_key(cipher, cipher_mode)) { |
789 | 0 | log_err(cd, _("Cannot convert to LUKS1 format - device uses wrapped key cipher %s."), cipher); |
790 | 0 | return -EINVAL; |
791 | 0 | } |
792 | | |
793 | 0 | if (json_segments_count(LUKS2_get_segments_jobj(hdr2)) != 1) { |
794 | 0 | log_err(cd, _("Cannot convert to LUKS1 format - device uses more segments.")); |
795 | 0 | return -EINVAL; |
796 | 0 | } |
797 | | |
798 | 0 | r = LUKS2_tokens_count(hdr2); |
799 | 0 | if (r < 0) |
800 | 0 | return r; |
801 | 0 | if (r > 0) { |
802 | 0 | log_err(cd, _("Cannot convert to LUKS1 format - LUKS2 header contains %u token(s)."), r); |
803 | 0 | return -EINVAL; |
804 | 0 | } |
805 | | |
806 | 0 | r = LUKS2_get_volume_key_size(hdr2, 0); |
807 | 0 | if (r < 0) |
808 | 0 | return -EINVAL; |
809 | 0 | key_size = r; |
810 | |
811 | 0 | for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++) { |
812 | 0 | if (LUKS2_keyslot_info(hdr2, i) == CRYPT_SLOT_INACTIVE) |
813 | 0 | continue; |
814 | | |
815 | 0 | if (LUKS2_keyslot_info(hdr2, i) == CRYPT_SLOT_INVALID) { |
816 | 0 | log_err(cd, _("Cannot convert to LUKS1 format - keyslot %u is in invalid state."), i); |
817 | 0 | return -EINVAL; |
818 | 0 | } |
819 | | |
820 | 0 | if (i >= LUKS_NUMKEYS) { |
821 | 0 | log_err(cd, _("Cannot convert to LUKS1 format - slot %u (over maximum slots) is still active."), i); |
822 | 0 | return -EINVAL; |
823 | 0 | } |
824 | | |
825 | 0 | if (!keyslot_LUKS1_compatible(cd, hdr2, i, key_size, hash)) { |
826 | 0 | log_err(cd, _("Cannot convert to LUKS1 format - keyslot %u is not LUKS1 compatible."), i); |
827 | 0 | return -EINVAL; |
828 | 0 | } |
829 | 0 | } |
830 | | |
831 | 0 | memset(hdr1, 0, sizeof(*hdr1)); |
832 | |
833 | 0 | for (i = 0; i < LUKS_NUMKEYS; i++) { |
834 | 0 | hdr1->keyblock[i].active = LUKS_KEY_DISABLED; |
835 | 0 | hdr1->keyblock[i].stripes = LUKS_STRIPES; |
836 | |
837 | 0 | jobj_keyslot = LUKS2_get_keyslot_jobj(hdr2, i); |
838 | |
839 | 0 | if (jobj_keyslot) { |
840 | 0 | if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area)) |
841 | 0 | return -EINVAL; |
842 | 0 | if (!json_object_object_get_ex(jobj_area, "offset", &jobj1)) |
843 | 0 | return -EINVAL; |
844 | 0 | offset = crypt_jobj_get_uint64(jobj1); |
845 | 0 | } else { |
846 | 0 | if (LUKS2_find_area_gap(cd, hdr2, key_size, &offset, &area_length)) |
847 | 0 | return -EINVAL; |
848 | | /* |
849 | | * We have to create placeholder luks2 keyslots in place of all |
850 | | * inactive keyslots. Otherwise we would allocate all |
851 | | * inactive luks1 keyslots over the same binary keyslot area. |
852 | | */ |
853 | 0 | if (placeholder_keyslot_alloc(cd, i, offset, area_length)) |
854 | 0 | return -EINVAL; |
855 | 0 | } |
856 | | |
857 | 0 | offset /= SECTOR_SIZE; |
858 | 0 | if (offset > UINT32_MAX) |
859 | 0 | return -EINVAL; |
860 | | |
861 | 0 | hdr1->keyblock[i].keyMaterialOffset = offset; |
862 | 0 | hdr1->keyblock[i].keyMaterialOffset -= |
863 | 0 | ((2 * LUKS2_HDR_16K_LEN - LUKS_ALIGN_KEYSLOTS) / SECTOR_SIZE); |
864 | |
865 | 0 | if (!jobj_keyslot) |
866 | 0 | continue; |
867 | | |
868 | 0 | hdr1->keyblock[i].active = LUKS_KEY_ENABLED; |
869 | 0 | last_active = i; |
870 | |
871 | 0 | if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf)) |
872 | 0 | continue; |
873 | | |
874 | 0 | if (!json_object_object_get_ex(jobj_kdf, "iterations", &jobj1)) |
875 | 0 | continue; |
876 | 0 | hdr1->keyblock[i].passwordIterations = crypt_jobj_get_uint32(jobj1); |
877 | |
878 | 0 | if (!json_object_object_get_ex(jobj_kdf, "salt", &jobj1)) |
879 | 0 | continue; |
880 | | |
881 | 0 | if (crypt_base64_decode(&buf, &len, json_object_get_string(jobj1), |
882 | 0 | json_object_get_string_len(jobj1))) |
883 | 0 | continue; |
884 | 0 | if (len > 0 && len != LUKS_SALTSIZE) { |
885 | 0 | free(buf); |
886 | 0 | continue; |
887 | 0 | } |
888 | 0 | memcpy(hdr1->keyblock[i].passwordSalt, buf, LUKS_SALTSIZE); |
889 | 0 | free(buf); |
890 | 0 | } |
891 | | |
892 | 0 | if (!jobj_keyslot) { |
893 | 0 | jobj_keyslot = LUKS2_get_keyslot_jobj(hdr2, last_active); |
894 | 0 | if (!jobj_keyslot) |
895 | 0 | return -EINVAL; |
896 | 0 | } |
897 | | |
898 | 0 | if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area)) |
899 | 0 | return -EINVAL; |
900 | 0 | if (!json_object_object_get_ex(jobj_area, "encryption", &jobj1)) |
901 | 0 | return -EINVAL; |
902 | 0 | r = crypt_parse_name_and_mode(json_object_get_string(jobj1), cipher, NULL, cipher_mode); |
903 | 0 | if (r < 0) |
904 | 0 | return r; |
905 | | |
906 | 0 | strncpy(hdr1->cipherName, cipher, LUKS_CIPHERNAME_L - 1); |
907 | 0 | hdr1->cipherName[LUKS_CIPHERNAME_L-1] = '\0'; |
908 | 0 | strncpy(hdr1->cipherMode, cipher_mode, LUKS_CIPHERMODE_L - 1); |
909 | 0 | hdr1->cipherMode[LUKS_CIPHERMODE_L-1] = '\0'; |
910 | |
911 | 0 | if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf)) |
912 | 0 | return -EINVAL; |
913 | 0 | if (!json_object_object_get_ex(jobj_kdf, "hash", &jobj1)) |
914 | 0 | return -EINVAL; |
915 | 0 | strncpy(hdr1->hashSpec, json_object_get_string(jobj1), sizeof(hdr1->hashSpec) - 1); |
916 | |
917 | 0 | hdr1->keyBytes = key_size; |
918 | |
919 | 0 | if (!json_object_object_get_ex(jobj_digest, "iterations", &jobj1)) |
920 | 0 | return -EINVAL; |
921 | 0 | hdr1->mkDigestIterations = crypt_jobj_get_uint32(jobj1); |
922 | |
923 | 0 | if (!json_object_object_get_ex(jobj_digest, "digest", &jobj1)) |
924 | 0 | return -EINVAL; |
925 | 0 | r = crypt_base64_decode(&digest, &len, json_object_get_string(jobj1), |
926 | 0 | json_object_get_string_len(jobj1)); |
927 | 0 | if (r < 0) |
928 | 0 | return r; |
929 | | /* We can store full digest here, not only sha1 length */ |
930 | 0 | if (len < LUKS_DIGESTSIZE) { |
931 | 0 | free(digest); |
932 | 0 | return -EINVAL; |
933 | 0 | } |
934 | 0 | memcpy(hdr1->mkDigest, digest, LUKS_DIGESTSIZE); |
935 | 0 | free(digest); |
936 | |
937 | 0 | if (!json_object_object_get_ex(jobj_digest, "salt", &jobj1)) |
938 | 0 | return -EINVAL; |
939 | 0 | r = crypt_base64_decode(&digest_salt, &len, json_object_get_string(jobj1), |
940 | 0 | json_object_get_string_len(jobj1)); |
941 | 0 | if (r < 0) |
942 | 0 | return r; |
943 | 0 | if (len != LUKS_SALTSIZE) { |
944 | 0 | free(digest_salt); |
945 | 0 | return -EINVAL; |
946 | 0 | } |
947 | 0 | memcpy(hdr1->mkDigestSalt, digest_salt, LUKS_SALTSIZE); |
948 | 0 | free(digest_salt); |
949 | |
950 | 0 | if (!json_object_object_get_ex(jobj_segment, "offset", &jobj1)) |
951 | 0 | return -EINVAL; |
952 | 0 | offset = crypt_jobj_get_uint64(jobj1) / SECTOR_SIZE; |
953 | 0 | if (offset > UINT32_MAX) |
954 | 0 | return -EINVAL; |
955 | 0 | hdr1->payloadOffset = offset; |
956 | |
957 | 0 | strncpy(hdr1->uuid, hdr2->uuid, UUID_STRING_L); /* max 36 chars */ |
958 | 0 | hdr1->uuid[UUID_STRING_L-1] = '\0'; |
959 | |
960 | 0 | memcpy(hdr1->magic, luksMagic, LUKS_MAGIC_L); |
961 | |
962 | 0 | hdr1->version = 1; |
963 | |
964 | 0 | r = luks_header_in_use(cd); |
965 | 0 | if (r) |
966 | 0 | return r > 0 ? -EBUSY : r; |
967 | | |
968 | | /* move keyslots 32k -> 4k offset */ |
969 | 0 | buf_offset = 2 * LUKS2_HDR_16K_LEN; |
970 | 0 | buf_size = LUKS2_keyslots_size(hdr2); |
971 | 0 | r = move_keyslot_areas(cd, buf_offset, 8 * SECTOR_SIZE, buf_size); |
972 | 0 | if (r < 0) { |
973 | 0 | log_err(cd, _("Unable to move keyslot area.")); |
974 | 0 | return r; |
975 | 0 | } |
976 | | |
977 | 0 | crypt_wipe_device(cd, crypt_metadata_device(cd), CRYPT_WIPE_ZERO, 0, |
978 | 0 | 8 * SECTOR_SIZE, 8 * SECTOR_SIZE, NULL, NULL); |
979 | | |
980 | | /* Write new LUKS1 hdr */ |
981 | 0 | return LUKS_write_phdr(hdr1, cd); |
982 | 0 | } |
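The downconversion above is reached the same way as the upconversion: load the header with crypt_load() and ask crypt_convert() for CRYPT_LUKS1. It succeeds only when everything the LUKS1 header cannot express has been ruled out first: PBKDF2-only keyslots with matching hash and cipher, a single segment with 512-byte sectors, a single pbkdf2 digest, no tokens, and no active keyslot numbered LUKS_NUMKEYS or higher. A short sketch, reusing the include from the earlier example:

    /* Downconvert an already-initialized crypt_device back to LUKS1.
     * Returns 0 on success or a negative errno-style value. */
    int convert_to_luks1(struct crypt_device *cd)
    {
            int r = crypt_load(cd, CRYPT_LUKS2, NULL);

            return r < 0 ? r : crypt_convert(cd, CRYPT_LUKS1, NULL);
    }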