/src/cryptsetup/lib/luks2/luks2_reencrypt.c
Line | Count | Source |
1 | | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | | /* |
3 | | * LUKS - Linux Unified Key Setup v2, reencryption helpers |
4 | | * |
5 | | * Copyright (C) 2015-2025 Red Hat, Inc. All rights reserved. |
6 | | * Copyright (C) 2015-2025 Ondrej Kozina |
7 | | */ |
8 | | |
9 | | #include "luks2_internal.h" |
10 | | #include "utils_device_locking.h" |
11 | | #include "keyslot_context.h" |
12 | | |
13 | | struct luks2_reencrypt { |
14 | | /* reencryption window attributes */ |
15 | | uint64_t offset; |
16 | | uint64_t progress; |
17 | | uint64_t length; |
18 | | uint64_t device_size; |
19 | | bool online; |
20 | | bool fixed_length; |
21 | | crypt_reencrypt_direction_info direction; |
22 | | crypt_reencrypt_mode_info mode; |
23 | | |
24 | | char *device_name; |
25 | | char *hotzone_name; |
26 | | char *overlay_name; |
27 | | uint32_t flags; |
28 | | |
29 | | /* reencryption window persistence attributes */ |
30 | | struct reenc_protection rp; |
31 | | struct reenc_protection rp_moved_segment; |
32 | | |
33 | | int reenc_keyslot; |
34 | | |
35 | | /* already running reencryption */ |
36 | | json_object *jobj_segs_hot; |
37 | | json_object *jobj_segs_post; |
38 | | |
39 | | /* backup segments */ |
40 | | json_object *jobj_segment_new; |
41 | | int digest_new; |
42 | | json_object *jobj_segment_old; |
43 | | int digest_old; |
44 | | json_object *jobj_segment_moved; |
45 | | |
46 | | struct volume_key *vks; |
47 | | |
48 | | void *reenc_buffer; |
49 | | ssize_t read; |
50 | | |
51 | | struct crypt_storage_wrapper *cw1; |
52 | | struct crypt_storage_wrapper *cw2; |
53 | | |
54 | | uint32_t wflags1; |
55 | | uint32_t wflags2; |
56 | | |
57 | | struct crypt_lock_handle *reenc_lock; |
58 | | }; |
59 | | #if USE_LUKS2_REENCRYPTION |
60 | | static uint64_t data_shift_value(struct reenc_protection *rp) |
61 | 0 | { |
62 | 0 | return rp->type == REENC_PROTECTION_DATASHIFT ? rp->p.ds.data_shift : 0; |
63 | 0 | } |
64 | | |
65 | | static json_object *reencrypt_segment(struct luks2_hdr *hdr, unsigned new) |
66 | 0 | { |
67 | 0 | return LUKS2_get_segment_by_flag(hdr, new ? "backup-final" : "backup-previous"); |
68 | 0 | } |
69 | | |
70 | | static json_object *reencrypt_segment_new(struct luks2_hdr *hdr) |
71 | 0 | { |
72 | 0 | return reencrypt_segment(hdr, 1); |
73 | 0 | } |
74 | | |
75 | | static json_object *reencrypt_segment_old(struct luks2_hdr *hdr) |
76 | 0 | { |
77 | 0 | return reencrypt_segment(hdr, 0); |
78 | 0 | } |
79 | | |
80 | | static json_object *reencrypt_segments_old(struct luks2_hdr *hdr) |
81 | 0 | { |
82 | 0 | json_object *jobj_segments, *jobj = NULL; |
83 | |
84 | 0 | if (json_object_copy(reencrypt_segment_old(hdr), &jobj)) |
85 | 0 | return NULL; |
86 | | |
87 | 0 | json_segment_remove_flag(jobj, "backup-previous"); |
88 | |
89 | 0 | jobj_segments = json_object_new_object(); |
90 | 0 | if (!jobj_segments) { |
91 | 0 | json_object_put(jobj); |
92 | 0 | return NULL; |
93 | 0 | } |
94 | | |
95 | 0 | if (json_object_object_add_by_uint(jobj_segments, 0, jobj)) { |
96 | 0 | json_object_put(jobj); |
97 | 0 | json_object_put(jobj_segments); |
98 | 0 | return NULL; |
99 | 0 | } |
100 | | |
101 | 0 | return jobj_segments; |
102 | 0 | } |
103 | | |
104 | | static const char *reencrypt_segment_cipher_new(struct luks2_hdr *hdr) |
105 | 0 | { |
106 | 0 | return json_segment_get_cipher(reencrypt_segment(hdr, 1)); |
107 | 0 | } |
108 | | |
109 | | static const char *reencrypt_segment_cipher_old(struct luks2_hdr *hdr) |
110 | 0 | { |
111 | 0 | return json_segment_get_cipher(reencrypt_segment(hdr, 0)); |
112 | 0 | } |
113 | | |
114 | | static uint32_t reencrypt_get_sector_size_new(struct luks2_hdr *hdr) |
115 | 0 | { |
116 | 0 | return json_segment_get_sector_size(reencrypt_segment(hdr, 1)); |
117 | 0 | } |
118 | | |
119 | | static uint32_t reencrypt_get_sector_size_old(struct luks2_hdr *hdr) |
120 | 0 | { |
121 | 0 | return json_segment_get_sector_size(reencrypt_segment(hdr, 0)); |
122 | 0 | } |
123 | | |
124 | | static uint64_t reencrypt_data_offset(struct luks2_hdr *hdr, unsigned new) |
125 | 0 | { |
126 | 0 | json_object *jobj = reencrypt_segment(hdr, new); |
127 | 0 | if (jobj) |
128 | 0 | return json_segment_get_offset(jobj, 0); |
129 | | |
130 | 0 | return LUKS2_get_data_offset(hdr) << SECTOR_SHIFT; |
131 | 0 | } |
132 | | |
133 | | static uint64_t LUKS2_reencrypt_get_data_offset_moved(struct luks2_hdr *hdr) |
134 | 0 | { |
135 | 0 | json_object *jobj_segment = LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"); |
136 | |
137 | 0 | if (!jobj_segment) |
138 | 0 | return 0; |
139 | | |
140 | 0 | return json_segment_get_offset(jobj_segment, 0); |
141 | 0 | } |
142 | | |
143 | | static uint64_t reencrypt_get_data_offset_new(struct luks2_hdr *hdr) |
144 | 0 | { |
145 | 0 | return reencrypt_data_offset(hdr, 1); |
146 | 0 | } |
147 | | |
148 | | static uint64_t reencrypt_get_data_offset_old(struct luks2_hdr *hdr) |
149 | 0 | { |
150 | 0 | return reencrypt_data_offset(hdr, 0); |
151 | 0 | } |
152 | | #endif |
153 | | |
154 | | static int reencrypt_digest(struct luks2_hdr *hdr, unsigned new) |
155 | 0 | { |
156 | 0 | int segment = LUKS2_get_segment_id_by_flag(hdr, new ? "backup-final" : "backup-previous"); |
157 | |
158 | 0 | if (segment < 0) |
159 | 0 | return segment; |
160 | | |
161 | 0 | return LUKS2_digest_by_segment(hdr, segment); |
162 | 0 | } |
163 | | |
164 | | int LUKS2_reencrypt_digest_new(struct luks2_hdr *hdr) |
165 | 0 | { |
166 | 0 | return reencrypt_digest(hdr, 1); |
167 | 0 | } |
168 | | |
169 | | int LUKS2_reencrypt_digest_old(struct luks2_hdr *hdr) |
170 | 0 | { |
171 | 0 | return reencrypt_digest(hdr, 0); |
172 | 0 | } |
173 | | |
174 | | int LUKS2_reencrypt_segment_new(struct luks2_hdr *hdr) |
175 | 0 | { |
176 | 0 | return LUKS2_get_segment_id_by_flag(hdr, "backup-final"); |
177 | 0 | } |
178 | | |
179 | | int LUKS2_reencrypt_segment_old(struct luks2_hdr *hdr) |
180 | 0 | { |
181 | 0 | return LUKS2_get_segment_id_by_flag(hdr, "backup-previous"); |
182 | 0 | } |
183 | | |
184 | | unsigned LUKS2_reencrypt_vks_count(struct luks2_hdr *hdr) |
185 | 0 | { |
186 | 0 | int digest_old, digest_new; |
187 | 0 | unsigned vks_count = 0; |
188 | |
189 | 0 | if ((digest_new = LUKS2_reencrypt_digest_new(hdr)) >= 0) |
190 | 0 | vks_count++; |
191 | 0 | if ((digest_old = LUKS2_reencrypt_digest_old(hdr)) >= 0) { |
192 | 0 | if (digest_old != digest_new) |
193 | 0 | vks_count++; |
194 | 0 | } |
195 | |
196 | 0 | return vks_count; |
197 | 0 | } |
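For illustration, a self-contained mirror of the counting logic above (the helper name count_keys and the sample digest ids are made up; in the real code a negative digest id means the digest is missing):

	#include <stdio.h>

	/* one volume key per distinct digest, as in LUKS2_reencrypt_vks_count() */
	static unsigned count_keys(int digest_old, int digest_new)
	{
		unsigned n = 0;

		if (digest_new >= 0)
			n++;
		if (digest_old >= 0 && digest_old != digest_new)
			n++;

		return n;
	}

	int main(void)
	{
		printf("%u\n", count_keys(0, 1));  /* reencrypt: two distinct keys */
		printf("%u\n", count_keys(-2, 1)); /* encrypt: only the new key */
		printf("%u\n", count_keys(0, 0));  /* shared digest: one key */
		return 0;
	}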
198 | | |
199 | | /* none, checksums, journal or shift */ |
200 | | static const char *reencrypt_resilience_type(struct luks2_hdr *hdr) |
201 | 0 | { |
202 | 0 | json_object *jobj_keyslot, *jobj_area, *jobj_type; |
203 | 0 | int ks = LUKS2_find_keyslot(hdr, "reencrypt"); |
204 | |
205 | 0 | if (ks < 0) |
206 | 0 | return NULL; |
207 | | |
208 | 0 | jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks); |
209 | |
210 | 0 | json_object_object_get_ex(jobj_keyslot, "area", &jobj_area); |
211 | 0 | if (!json_object_object_get_ex(jobj_area, "type", &jobj_type)) |
212 | 0 | return NULL; |
213 | | |
214 | 0 | return json_object_get_string(jobj_type); |
215 | 0 | } |
216 | | |
217 | | static const char *reencrypt_resilience_hash(struct luks2_hdr *hdr) |
218 | 0 | { |
219 | 0 | json_object *jobj_keyslot, *jobj_area, *jobj_type, *jobj_hash; |
220 | 0 | int ks = LUKS2_find_keyslot(hdr, "reencrypt"); |
221 | |
222 | 0 | if (ks < 0) |
223 | 0 | return NULL; |
224 | | |
225 | 0 | jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks); |
226 | |
227 | 0 | json_object_object_get_ex(jobj_keyslot, "area", &jobj_area); |
228 | 0 | if (!json_object_object_get_ex(jobj_area, "type", &jobj_type)) |
229 | 0 | return NULL; |
230 | 0 | if (strcmp(json_object_get_string(jobj_type), "checksum")) |
231 | 0 | return NULL; |
232 | 0 | if (!json_object_object_get_ex(jobj_area, "hash", &jobj_hash)) |
233 | 0 | return NULL; |
234 | | |
235 | 0 | return json_object_get_string(jobj_hash); |
236 | 0 | } |
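Both helpers above read the JSON metadata of the "reencrypt" keyslot. A sketch of the shape they expect — the key names ("area", "type", "hash", plus the "mode", "direction" and "shift_size" keys read by helpers elsewhere in this file) come from the lookups in the code, while the concrete values are illustrative only:

	"2": {
		"type": "reencrypt",
		"mode": "reencrypt",
		"direction": "forward",
		"area": {
			"type": "checksum",
			"hash": "sha256"
		}
	}

For the datashift resilience type, "area" carries a "shift_size" value instead of "hash" (see reencrypt_data_shift() below).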
237 | | #if USE_LUKS2_REENCRYPTION |
238 | | static json_object *_enc_create_segments_shift_after(struct luks2_reencrypt *rh, uint64_t data_offset) |
239 | 0 | { |
240 | 0 | int reenc_seg, i = 0; |
241 | 0 | json_object *jobj, *jobj_copy = NULL, *jobj_seg_new = NULL, *jobj_segs_post = json_object_new_object(); |
242 | 0 | uint64_t tmp; |
243 | |
244 | 0 | if (!rh->jobj_segs_hot || !jobj_segs_post) |
245 | 0 | goto err; |
246 | | |
247 | 0 | if (json_segments_count(rh->jobj_segs_hot) == 0) |
248 | 0 | return jobj_segs_post; |
249 | | |
250 | 0 | reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot); |
251 | 0 | if (reenc_seg < 0) |
252 | 0 | goto err; |
253 | | |
254 | 0 | while (i < reenc_seg) { |
255 | 0 | jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, i); |
256 | 0 | if (!jobj_copy || json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy))) |
257 | 0 | goto err; |
258 | 0 | } |
259 | 0 | jobj_copy = NULL; |
260 | |
261 | 0 | jobj = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1); |
262 | 0 | if (!jobj) { |
263 | 0 | jobj = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg); |
264 | 0 | if (!jobj || json_object_copy(jobj, &jobj_seg_new)) |
265 | 0 | goto err; |
266 | 0 | json_segment_remove_flag(jobj_seg_new, "in-reencryption"); |
267 | 0 | tmp = rh->length; |
268 | 0 | } else { |
269 | 0 | if (json_object_copy(jobj, &jobj_seg_new)) |
270 | 0 | goto err; |
271 | 0 | json_object_object_add(jobj_seg_new, "offset", crypt_jobj_new_uint64(rh->offset + data_offset)); |
272 | 0 | json_object_object_add(jobj_seg_new, "iv_tweak", crypt_jobj_new_uint64(rh->offset >> SECTOR_SHIFT)); |
273 | 0 | tmp = json_segment_get_size(jobj_seg_new, 0) + rh->length; |
274 | 0 | } |
275 | | |
276 | | /* alter the size of the new segment; reenc_seg == 0 means we're finished */ |
277 | 0 | json_object_object_add(jobj_seg_new, "size", reenc_seg > 0 ? crypt_jobj_new_uint64(tmp) : json_object_new_string("dynamic")); |
278 | 0 | if (!json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_seg_new)) |
279 | 0 | return jobj_segs_post; |
280 | | |
281 | 0 | err: |
282 | 0 | json_object_put(jobj_seg_new); |
283 | 0 | json_object_put(jobj_copy); |
284 | 0 | json_object_put(jobj_segs_post); |
285 | 0 | return NULL; |
286 | 0 | } |
287 | | |
288 | | static json_object *reencrypt_make_hot_segments_encrypt_shift(struct luks2_hdr *hdr, |
289 | | struct luks2_reencrypt *rh, |
290 | | uint64_t data_offset) |
291 | 0 | { |
292 | 0 | int sg, crypt_seg, i = 0; |
293 | 0 | uint64_t segment_size; |
294 | 0 | json_object *jobj_seg_shrunk = NULL, *jobj_seg_new = NULL, *jobj_copy = NULL, *jobj_enc_seg = NULL, |
295 | 0 | *jobj_segs_hot = json_object_new_object(); |
296 | |
297 | 0 | if (!jobj_segs_hot) |
298 | 0 | return NULL; |
299 | | |
300 | 0 | crypt_seg = LUKS2_segment_by_type(hdr, "crypt"); |
301 | | |
302 | | /* FIXME: This is a hack. Find a proper way to fix it. */ |
303 | 0 | sg = LUKS2_last_segment_by_type(hdr, "linear"); |
304 | 0 | if (rh->offset && sg < 0) |
305 | 0 | goto err; |
306 | 0 | if (sg < 0) |
307 | 0 | return jobj_segs_hot; |
308 | | |
309 | 0 | jobj_enc_seg = json_segment_create_crypt(data_offset + rh->offset, |
310 | 0 | rh->offset >> SECTOR_SHIFT, |
311 | 0 | &rh->length, |
312 | 0 | reencrypt_segment_cipher_new(hdr), |
313 | 0 | NULL, 0, /* integrity */ |
314 | 0 | reencrypt_get_sector_size_new(hdr), |
315 | 0 | 1); |
316 | |
317 | 0 | while (i < sg) { |
318 | 0 | jobj_copy = LUKS2_get_segment_jobj(hdr, i); |
319 | 0 | if (!jobj_copy || json_object_object_add_by_uint(jobj_segs_hot, i++, json_object_get(jobj_copy))) |
320 | 0 | goto err; |
321 | 0 | } |
322 | 0 | jobj_copy = NULL; |
323 | |
324 | 0 | segment_size = LUKS2_segment_size(hdr, sg, 0); |
325 | 0 | if (segment_size > rh->length) { |
326 | 0 | if (json_object_copy(LUKS2_get_segment_jobj(hdr, sg), &jobj_seg_shrunk)) |
327 | 0 | goto err; |
328 | 0 | json_object_object_add(jobj_seg_shrunk, "size", crypt_jobj_new_uint64(segment_size - rh->length)); |
329 | 0 | if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_seg_shrunk)) |
330 | 0 | goto err; |
331 | 0 | } |
332 | | |
333 | 0 | if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_enc_seg)) |
334 | 0 | goto err; |
335 | | |
336 | | /* first crypt segment after encryption? */ |
337 | 0 | if (crypt_seg >= 0) { |
338 | 0 | jobj_seg_new = LUKS2_get_segment_jobj(hdr, crypt_seg); |
339 | 0 | if (!jobj_seg_new || json_object_object_add_by_uint(jobj_segs_hot, sg, json_object_get(jobj_seg_new))) |
340 | 0 | goto err; |
341 | 0 | } |
342 | | |
343 | 0 | return jobj_segs_hot; |
344 | 0 | err: |
345 | 0 | json_object_put(jobj_copy); |
346 | 0 | json_object_put(jobj_seg_new); |
347 | 0 | json_object_put(jobj_seg_shrunk); |
348 | 0 | json_object_put(jobj_enc_seg); |
349 | 0 | json_object_put(jobj_segs_hot); |
350 | |
351 | 0 | return NULL; |
352 | 0 | } |
353 | | |
354 | | static json_object *reencrypt_make_segment_new(struct crypt_device *cd, |
355 | | struct luks2_hdr *hdr, |
356 | | const struct luks2_reencrypt *rh, |
357 | | uint64_t data_offset, |
358 | | uint64_t segment_offset, |
359 | | uint64_t iv_offset, |
360 | | const uint64_t *segment_length) |
361 | 0 | { |
362 | 0 | switch (rh->mode) { |
363 | 0 | case CRYPT_REENCRYPT_REENCRYPT: |
364 | 0 | case CRYPT_REENCRYPT_ENCRYPT: |
365 | 0 | return json_segment_create_crypt(data_offset + segment_offset, |
366 | 0 | crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT), |
367 | 0 | segment_length, |
368 | 0 | reencrypt_segment_cipher_new(hdr), |
369 | 0 | NULL, 0, /* integrity */ |
370 | 0 | reencrypt_get_sector_size_new(hdr), 0); |
371 | 0 | case CRYPT_REENCRYPT_DECRYPT: |
372 | 0 | return json_segment_create_linear(data_offset + segment_offset, segment_length, 0); |
373 | 0 | } |
374 | | |
375 | 0 | return NULL; |
376 | 0 | } |
377 | | |
378 | | static json_object *reencrypt_make_post_segments_forward(struct crypt_device *cd, |
379 | | struct luks2_hdr *hdr, |
380 | | struct luks2_reencrypt *rh, |
381 | | uint64_t data_offset) |
382 | 0 | { |
383 | 0 | int reenc_seg; |
384 | 0 | json_object *jobj_old_seg, *jobj_new_seg_after = NULL, *jobj_old_seg_copy = NULL, |
385 | 0 | *jobj_segs_post = json_object_new_object(); |
386 | 0 | uint64_t fixed_length = rh->offset + rh->length; |
387 | |
388 | 0 | if (!rh->jobj_segs_hot || !jobj_segs_post) |
389 | 0 | goto err; |
390 | | |
391 | 0 | reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot); |
392 | 0 | if (reenc_seg < 0) |
393 | 0 | goto err; |
394 | | |
395 | 0 | jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1); |
396 | | |
397 | | /* |
398 | | * If there's no old segment after reencryption, we're done. |
399 | | * Set the size to 'dynamic' again. |
400 | | */ |
401 | 0 | jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, jobj_old_seg ? &fixed_length : NULL); |
402 | 0 | if (!jobj_new_seg_after || json_object_object_add_by_uint_by_ref(jobj_segs_post, 0, &jobj_new_seg_after)) |
403 | 0 | goto err; |
404 | | |
405 | 0 | if (jobj_old_seg) { |
406 | 0 | if (rh->fixed_length) { |
407 | 0 | if (json_object_copy(jobj_old_seg, &jobj_old_seg_copy)) |
408 | 0 | goto err; |
409 | 0 | fixed_length = rh->device_size - fixed_length; |
410 | 0 | json_object_object_add(jobj_old_seg_copy, "size", crypt_jobj_new_uint64(fixed_length)); |
411 | 0 | } else |
412 | 0 | jobj_old_seg_copy = json_object_get(jobj_old_seg); |
413 | | |
414 | 0 | if (json_object_object_add_by_uint_by_ref(jobj_segs_post, 1, &jobj_old_seg_copy)) |
415 | 0 | goto err; |
416 | 0 | } |
417 | | |
418 | 0 | return jobj_segs_post; |
419 | 0 | err: |
420 | 0 | json_object_put(jobj_new_seg_after); |
421 | 0 | json_object_put(jobj_old_seg_copy); |
422 | 0 | json_object_put(jobj_segs_post); |
423 | 0 | return NULL; |
424 | 0 | } |
425 | | |
426 | | static json_object *reencrypt_make_post_segments_backward(struct crypt_device *cd, |
427 | | struct luks2_hdr *hdr, |
428 | | struct luks2_reencrypt *rh, |
429 | | uint64_t data_offset) |
430 | 0 | { |
431 | 0 | int reenc_seg; |
432 | 0 | uint64_t fixed_length; |
433 | |
434 | 0 | json_object *jobj_new_seg_after = NULL, *jobj_old_seg = NULL, |
435 | 0 | *jobj_segs_post = json_object_new_object(); |
436 | |
437 | 0 | if (!rh->jobj_segs_hot || !jobj_segs_post) |
438 | 0 | goto err; |
439 | | |
440 | 0 | reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot); |
441 | 0 | if (reenc_seg < 0) |
442 | 0 | goto err; |
443 | | |
444 | 0 | jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg - 1); |
445 | 0 | if (jobj_old_seg) { |
446 | 0 | json_object_get(jobj_old_seg); |
447 | 0 | if (json_object_object_add_by_uint_by_ref(jobj_segs_post, reenc_seg - 1, &jobj_old_seg)) |
448 | 0 | goto err; |
449 | 0 | } |
450 | | |
451 | 0 | if (rh->fixed_length && rh->offset) { |
452 | 0 | fixed_length = rh->device_size - rh->offset; |
453 | 0 | jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, &fixed_length); |
454 | 0 | } else |
455 | 0 | jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, NULL); |
456 | |
457 | 0 | if (jobj_new_seg_after && !json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_new_seg_after)) |
458 | 0 | return jobj_segs_post; |
459 | 0 | err: |
460 | 0 | json_object_put(jobj_new_seg_after); |
461 | 0 | json_object_put(jobj_old_seg); |
462 | 0 | json_object_put(jobj_segs_post); |
463 | 0 | return NULL; |
464 | 0 | } |
465 | | |
466 | | static json_object *reencrypt_make_segment_reencrypt(struct crypt_device *cd, |
467 | | struct luks2_hdr *hdr, |
468 | | const struct luks2_reencrypt *rh, |
469 | | uint64_t data_offset, |
470 | | uint64_t segment_offset, |
471 | | uint64_t iv_offset, |
472 | | const uint64_t *segment_length) |
473 | 0 | { |
474 | 0 | switch (rh->mode) { |
475 | 0 | case CRYPT_REENCRYPT_REENCRYPT: |
476 | 0 | case CRYPT_REENCRYPT_ENCRYPT: |
477 | 0 | return json_segment_create_crypt(data_offset + segment_offset, |
478 | 0 | crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT), |
479 | 0 | segment_length, |
480 | 0 | reencrypt_segment_cipher_new(hdr), |
481 | 0 | NULL, 0, /* integrity */ |
482 | 0 | reencrypt_get_sector_size_new(hdr), 1); |
483 | 0 | case CRYPT_REENCRYPT_DECRYPT: |
484 | 0 | return json_segment_create_linear(data_offset + segment_offset, segment_length, 1); |
485 | 0 | } |
486 | | |
487 | 0 | return NULL; |
488 | 0 | } |
489 | | |
490 | | static json_object *reencrypt_make_segment_old(struct crypt_device *cd, |
491 | | struct luks2_hdr *hdr, |
492 | | const struct luks2_reencrypt *rh, |
493 | | uint64_t data_offset, |
494 | | uint64_t segment_offset, |
495 | | const uint64_t *segment_length) |
496 | 0 | { |
497 | 0 | json_object *jobj_old_seg = NULL; |
498 | |
499 | 0 | switch (rh->mode) { |
500 | 0 | case CRYPT_REENCRYPT_REENCRYPT: |
501 | 0 | case CRYPT_REENCRYPT_DECRYPT: |
502 | 0 | jobj_old_seg = json_segment_create_crypt(data_offset + segment_offset, |
503 | 0 | crypt_get_iv_offset(cd) + (segment_offset >> SECTOR_SHIFT), |
504 | 0 | segment_length, |
505 | 0 | reencrypt_segment_cipher_old(hdr), |
506 | 0 | NULL, 0, /* integrity */ |
507 | 0 | reencrypt_get_sector_size_old(hdr), |
508 | 0 | 0); |
509 | 0 | break; |
510 | 0 | case CRYPT_REENCRYPT_ENCRYPT: |
511 | 0 | jobj_old_seg = json_segment_create_linear(data_offset + segment_offset, segment_length, 0); |
512 | 0 | } |
513 | | |
514 | 0 | return jobj_old_seg; |
515 | 0 | } |
516 | | |
517 | | static json_object *reencrypt_make_hot_segments_forward(struct crypt_device *cd, |
518 | | struct luks2_hdr *hdr, |
519 | | struct luks2_reencrypt *rh, |
520 | | uint64_t device_size, |
521 | | uint64_t data_offset) |
522 | 0 | { |
523 | 0 | uint64_t fixed_length, tmp = rh->offset + rh->length; |
524 | 0 | json_object *jobj_segs_hot = json_object_new_object(), *jobj_reenc_seg = NULL, |
525 | 0 | *jobj_old_seg = NULL, *jobj_new_seg = NULL; |
526 | 0 | unsigned int sg = 0; |
527 | |
528 | 0 | if (!jobj_segs_hot) |
529 | 0 | return NULL; |
530 | | |
531 | 0 | if (rh->offset) { |
532 | 0 | jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, &rh->offset); |
533 | 0 | if (!jobj_new_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_new_seg)) |
534 | 0 | goto err; |
535 | 0 | } |
536 | | |
537 | 0 | jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length); |
538 | 0 | if (!jobj_reenc_seg) |
539 | 0 | goto err; |
540 | | |
541 | 0 | if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_reenc_seg)) |
542 | 0 | goto err; |
543 | | |
544 | 0 | if (tmp < device_size) { |
545 | 0 | fixed_length = device_size - tmp; |
546 | 0 | jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh, data_offset + data_shift_value(&rh->rp), |
547 | 0 | rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL); |
548 | 0 | if (!jobj_old_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg, &jobj_old_seg)) |
549 | 0 | goto err; |
550 | 0 | } |
551 | | |
552 | 0 | return jobj_segs_hot; |
553 | 0 | err: |
554 | 0 | json_object_put(jobj_reenc_seg); |
555 | 0 | json_object_put(jobj_old_seg); |
556 | 0 | json_object_put(jobj_new_seg); |
557 | 0 | json_object_put(jobj_segs_hot); |
558 | 0 | return NULL; |
559 | 0 | } |
560 | | |
561 | | static json_object *reencrypt_make_hot_segments_decrypt_shift(struct crypt_device *cd, |
562 | | struct luks2_hdr *hdr, struct luks2_reencrypt *rh, |
563 | | uint64_t device_size, uint64_t data_offset) |
564 | 0 | { |
565 | 0 | uint64_t fixed_length, tmp = rh->offset + rh->length, linear_length = rh->progress; |
566 | 0 | json_object *jobj, *jobj_segs_hot = json_object_new_object(), *jobj_reenc_seg = NULL, |
567 | 0 | *jobj_old_seg = NULL, *jobj_new_seg = NULL; |
568 | 0 | unsigned int sg = 0; |
569 | |
570 | 0 | if (!jobj_segs_hot) |
571 | 0 | return NULL; |
572 | | |
573 | 0 | if (rh->offset) { |
574 | 0 | jobj = LUKS2_get_segment_jobj(hdr, 0); |
575 | 0 | if (!jobj) |
576 | 0 | goto err; |
577 | | |
578 | 0 | jobj_new_seg = json_object_get(jobj); |
579 | 0 | if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_new_seg)) |
580 | 0 | goto err; |
581 | | |
582 | 0 | if (linear_length) { |
583 | 0 | jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, |
584 | 0 | data_offset, |
585 | 0 | json_segment_get_size(jobj, 0), |
586 | 0 | 0, |
587 | 0 | &linear_length); |
588 | 0 | if (!jobj_new_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_new_seg)) |
589 | 0 | goto err; |
590 | 0 | } |
591 | 0 | } |
592 | | |
593 | 0 | jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, |
594 | 0 | rh->offset, |
595 | 0 | rh->offset, |
596 | 0 | &rh->length); |
597 | 0 | if (!jobj_reenc_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_reenc_seg)) |
598 | 0 | goto err; |
599 | | |
600 | 0 | if (!rh->offset && (jobj = LUKS2_get_segment_jobj(hdr, 1)) && |
601 | 0 | !json_segment_is_backup(jobj)) { |
602 | 0 | jobj_new_seg = json_object_get(jobj); |
603 | 0 | if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_new_seg)) |
604 | 0 | goto err; |
605 | 0 | } else if (tmp < device_size) { |
606 | 0 | fixed_length = device_size - tmp; |
607 | 0 | jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh, |
608 | 0 | data_offset + data_shift_value(&rh->rp), |
609 | 0 | rh->offset + rh->length, |
610 | 0 | rh->fixed_length ? &fixed_length : NULL); |
611 | 0 | if (!jobj_old_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg, &jobj_old_seg)) |
612 | 0 | goto err; |
613 | 0 | } |
614 | | |
615 | 0 | return jobj_segs_hot; |
616 | 0 | err: |
617 | 0 | json_object_put(jobj_reenc_seg); |
618 | 0 | json_object_put(jobj_old_seg); |
619 | 0 | json_object_put(jobj_new_seg); |
620 | 0 | json_object_put(jobj_segs_hot); |
621 | 0 | return NULL; |
622 | 0 | } |
623 | | |
624 | | static json_object *_dec_create_segments_shift_after(struct crypt_device *cd, |
625 | | struct luks2_hdr *hdr, |
626 | | struct luks2_reencrypt *rh, |
627 | | uint64_t data_offset) |
628 | 0 | { |
629 | 0 | int reenc_seg, i = 0; |
630 | 0 | json_object *jobj_seg_old, *jobj_copy = NULL, *jobj_seg_old_copy = NULL, *jobj_seg_new = NULL, |
631 | 0 | *jobj_segs_post = json_object_new_object(); |
632 | 0 | unsigned segs; |
633 | 0 | uint64_t tmp; |
634 | |
635 | 0 | if (!rh->jobj_segs_hot || !jobj_segs_post) |
636 | 0 | goto err; |
637 | | |
638 | 0 | segs = json_segments_count(rh->jobj_segs_hot); |
639 | 0 | if (segs == 0) |
640 | 0 | return jobj_segs_post; |
641 | | |
642 | 0 | reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot); |
643 | 0 | if (reenc_seg < 0) |
644 | 0 | goto err; |
645 | | |
646 | 0 | if (reenc_seg == 0) { |
647 | 0 | jobj_seg_new = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, NULL); |
648 | 0 | if (!jobj_seg_new || json_object_object_add_by_uint(jobj_segs_post, 0, jobj_seg_new)) |
649 | 0 | goto err; |
650 | | |
651 | 0 | return jobj_segs_post; |
652 | 0 | } |
653 | | |
654 | 0 | jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, 0); |
655 | 0 | if (!jobj_copy) |
656 | 0 | goto err; |
657 | 0 | json_object_get(jobj_copy); |
658 | 0 | if (json_object_object_add_by_uint_by_ref(jobj_segs_post, i++, &jobj_copy)) |
659 | 0 | goto err; |
660 | | |
661 | 0 | if ((jobj_seg_old = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1))) |
662 | 0 | jobj_seg_old_copy = json_object_get(jobj_seg_old); |
663 | |
664 | 0 | tmp = rh->length + rh->progress; |
665 | 0 | jobj_seg_new = reencrypt_make_segment_new(cd, hdr, rh, data_offset, |
666 | 0 | json_segment_get_size(rh->jobj_segment_moved, 0), |
667 | 0 | data_shift_value(&rh->rp), |
668 | 0 | jobj_seg_old ? &tmp : NULL); |
669 | 0 | if (!jobj_seg_new || json_object_object_add_by_uint_by_ref(jobj_segs_post, i++, &jobj_seg_new)) |
670 | 0 | goto err; |
671 | | |
672 | 0 | if (jobj_seg_old_copy && json_object_object_add_by_uint(jobj_segs_post, i, jobj_seg_old_copy)) |
673 | 0 | goto err; |
674 | | |
675 | 0 | return jobj_segs_post; |
676 | 0 | err: |
677 | 0 | json_object_put(jobj_copy); |
678 | 0 | json_object_put(jobj_seg_old_copy); |
679 | 0 | json_object_put(jobj_seg_new); |
680 | 0 | json_object_put(jobj_segs_post); |
681 | 0 | return NULL; |
682 | 0 | } |
683 | | |
684 | | static json_object *reencrypt_make_hot_segments_backward(struct crypt_device *cd, |
685 | | struct luks2_hdr *hdr, |
686 | | struct luks2_reencrypt *rh, |
687 | | uint64_t device_size, |
688 | | uint64_t data_offset) |
689 | 0 | { |
690 | 0 | uint64_t fixed_length, tmp = rh->offset + rh->length; |
691 | 0 | json_object *jobj_reenc_seg = NULL, *jobj_new_seg = NULL, *jobj_old_seg = NULL, |
692 | 0 | *jobj_segs_hot = json_object_new_object(); |
693 | 0 | int sg = 0; |
694 | |
695 | 0 | if (!jobj_segs_hot) |
696 | 0 | return NULL; |
697 | | |
698 | 0 | if (rh->offset) { |
699 | 0 | if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_old_seg)) |
700 | 0 | goto err; |
701 | 0 | json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(rh->offset)); |
702 | |
703 | 0 | if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_old_seg)) |
704 | 0 | goto err; |
705 | 0 | } |
706 | | |
707 | 0 | jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length); |
708 | 0 | if (!jobj_reenc_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_reenc_seg)) |
709 | 0 | goto err; |
710 | | |
711 | 0 | if (tmp < device_size) { |
712 | 0 | fixed_length = device_size - tmp; |
713 | 0 | jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset + rh->length, |
714 | 0 | rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL); |
715 | 0 | if (!jobj_new_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg, &jobj_new_seg)) |
716 | 0 | goto err; |
717 | 0 | } |
718 | | |
719 | 0 | return jobj_segs_hot; |
720 | 0 | err: |
721 | 0 | json_object_put(jobj_reenc_seg); |
722 | 0 | json_object_put(jobj_new_seg); |
723 | 0 | json_object_put(jobj_old_seg); |
724 | 0 | json_object_put(jobj_segs_hot); |
725 | 0 | return NULL; |
726 | 0 | } |
727 | | |
728 | | static int reencrypt_make_hot_segments(struct crypt_device *cd, |
729 | | struct luks2_hdr *hdr, |
730 | | struct luks2_reencrypt *rh, |
731 | | uint64_t device_size, |
732 | | uint64_t data_offset) |
733 | 0 | { |
734 | 0 | rh->jobj_segs_hot = NULL; |
735 | |
736 | 0 | if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD && |
737 | 0 | rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) { |
738 | 0 | log_dbg(cd, "Calculating hot segments for encryption with data move."); |
739 | 0 | rh->jobj_segs_hot = reencrypt_make_hot_segments_encrypt_shift(hdr, rh, data_offset); |
740 | 0 | } else if (rh->mode == CRYPT_REENCRYPT_DECRYPT && rh->direction == CRYPT_REENCRYPT_FORWARD && |
741 | 0 | rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) { |
742 | 0 | log_dbg(cd, "Calculating hot segments for decryption with data move."); |
743 | 0 | rh->jobj_segs_hot = reencrypt_make_hot_segments_decrypt_shift(cd, hdr, rh, device_size, data_offset); |
744 | 0 | } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) { |
745 | 0 | log_dbg(cd, "Calculating hot segments (forward direction)."); |
746 | 0 | rh->jobj_segs_hot = reencrypt_make_hot_segments_forward(cd, hdr, rh, device_size, data_offset); |
747 | 0 | } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) { |
748 | 0 | log_dbg(cd, "Calculating hot segments (backward direction)."); |
749 | 0 | rh->jobj_segs_hot = reencrypt_make_hot_segments_backward(cd, hdr, rh, device_size, data_offset); |
750 | 0 | } |
751 | |
752 | 0 | return rh->jobj_segs_hot ? 0 : -EINVAL; |
753 | 0 | } |
754 | | |
755 | | static int reencrypt_make_post_segments(struct crypt_device *cd, |
756 | | struct luks2_hdr *hdr, |
757 | | struct luks2_reencrypt *rh, |
758 | | uint64_t data_offset) |
759 | 0 | { |
760 | 0 | rh->jobj_segs_post = NULL; |
761 | |
762 | 0 | if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD && |
763 | 0 | rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) { |
764 | 0 | log_dbg(cd, "Calculating post segments for encryption with data move."); |
765 | 0 | rh->jobj_segs_post = _enc_create_segments_shift_after(rh, data_offset); |
766 | 0 | } else if (rh->mode == CRYPT_REENCRYPT_DECRYPT && rh->direction == CRYPT_REENCRYPT_FORWARD && |
767 | 0 | rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) { |
768 | 0 | log_dbg(cd, "Calculating post segments for decryption with data move."); |
769 | 0 | rh->jobj_segs_post = _dec_create_segments_shift_after(cd, hdr, rh, data_offset); |
770 | 0 | } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) { |
771 | 0 | log_dbg(cd, "Calculating post segments (forward direction)."); |
772 | 0 | rh->jobj_segs_post = reencrypt_make_post_segments_forward(cd, hdr, rh, data_offset); |
773 | 0 | } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) { |
774 | 0 | log_dbg(cd, "Calculating segments (backward direction)."); |
775 | 0 | rh->jobj_segs_post = reencrypt_make_post_segments_backward(cd, hdr, rh, data_offset); |
776 | 0 | } |
777 | |
778 | 0 | return rh->jobj_segs_post ? 0 : -EINVAL; |
779 | 0 | } |
780 | | #endif |
781 | | |
782 | | static uint64_t reencrypt_data_shift(struct luks2_hdr *hdr) |
783 | 0 | { |
784 | 0 | json_object *jobj_keyslot, *jobj_area, *jobj_data_shift; |
785 | 0 | int ks = LUKS2_find_keyslot(hdr, "reencrypt"); |
786 | |
787 | 0 | if (ks < 0) |
788 | 0 | return 0; |
789 | | |
790 | 0 | jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks); |
791 | |
792 | 0 | json_object_object_get_ex(jobj_keyslot, "area", &jobj_area); |
793 | 0 | if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj_data_shift)) |
794 | 0 | return 0; |
795 | | |
796 | 0 | return crypt_jobj_get_uint64(jobj_data_shift); |
797 | 0 | } |
798 | | |
799 | | static crypt_reencrypt_mode_info reencrypt_mode(struct luks2_hdr *hdr) |
800 | 0 | { |
801 | 0 | const char *mode; |
802 | 0 | crypt_reencrypt_mode_info mi = CRYPT_REENCRYPT_REENCRYPT; |
803 | 0 | json_object *jobj_keyslot, *jobj_mode; |
804 | |
805 | 0 | jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt")); |
806 | 0 | if (!jobj_keyslot) |
807 | 0 | return mi; |
808 | | |
809 | 0 | json_object_object_get_ex(jobj_keyslot, "mode", &jobj_mode); |
810 | 0 | mode = json_object_get_string(jobj_mode); |
811 | | |
812 | | /* validation enforces allowed values */ |
813 | 0 | if (!strcmp(mode, "encrypt")) |
814 | 0 | mi = CRYPT_REENCRYPT_ENCRYPT; |
815 | 0 | else if (!strcmp(mode, "decrypt")) |
816 | 0 | mi = CRYPT_REENCRYPT_DECRYPT; |
817 | |
818 | 0 | return mi; |
819 | 0 | } |
820 | | |
821 | | static crypt_reencrypt_direction_info reencrypt_direction(struct luks2_hdr *hdr) |
822 | 0 | { |
823 | 0 | const char *value; |
824 | 0 | json_object *jobj_keyslot, *jobj_mode; |
825 | 0 | crypt_reencrypt_direction_info di = CRYPT_REENCRYPT_FORWARD; |
826 | |
827 | 0 | jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt")); |
828 | 0 | if (!jobj_keyslot) |
829 | 0 | return di; |
830 | | |
831 | 0 | json_object_object_get_ex(jobj_keyslot, "direction", &jobj_mode); |
832 | 0 | value = json_object_get_string(jobj_mode); |
833 | | |
834 | | /* validation enforces allowed values */ |
835 | 0 | if (strcmp(value, "forward")) |
836 | 0 | di = CRYPT_REENCRYPT_BACKWARD; |
837 | |
838 | 0 | return di; |
839 | 0 | } |
840 | | |
841 | | typedef enum { REENC_OK = 0, REENC_ERR, REENC_ROLLBACK, REENC_FATAL } reenc_status_t; |
842 | | |
843 | | void LUKS2_reencrypt_protection_erase(struct reenc_protection *rp) |
844 | 0 | { |
845 | 0 | if (!rp || rp->type != REENC_PROTECTION_CHECKSUM) |
846 | 0 | return; |
847 | | |
848 | 0 | if (rp->p.csum.ch) { |
849 | 0 | crypt_hash_destroy(rp->p.csum.ch); |
850 | 0 | rp->p.csum.ch = NULL; |
851 | 0 | } |
852 | |
853 | 0 | if (rp->p.csum.checksums) { |
854 | 0 | crypt_safe_memzero(rp->p.csum.checksums, rp->p.csum.checksums_len); |
855 | 0 | free(rp->p.csum.checksums); |
856 | 0 | rp->p.csum.checksums = NULL; |
857 | 0 | } |
858 | 0 | } |
859 | | |
860 | | void LUKS2_reencrypt_free(struct crypt_device *cd, struct luks2_reencrypt *rh) |
861 | 2.17k | { |
862 | 2.17k | if (!rh) |
863 | 2.17k | return; |
864 | | |
865 | 0 | LUKS2_reencrypt_protection_erase(&rh->rp); |
866 | 0 | LUKS2_reencrypt_protection_erase(&rh->rp_moved_segment); |
867 | |
868 | 0 | json_object_put(rh->jobj_segs_hot); |
869 | 0 | rh->jobj_segs_hot = NULL; |
870 | 0 | json_object_put(rh->jobj_segs_post); |
871 | 0 | rh->jobj_segs_post = NULL; |
872 | 0 | json_object_put(rh->jobj_segment_old); |
873 | 0 | rh->jobj_segment_old = NULL; |
874 | 0 | json_object_put(rh->jobj_segment_new); |
875 | 0 | rh->jobj_segment_new = NULL; |
876 | 0 | json_object_put(rh->jobj_segment_moved); |
877 | 0 | rh->jobj_segment_moved = NULL; |
878 | |
879 | 0 | free(rh->reenc_buffer); |
880 | 0 | rh->reenc_buffer = NULL; |
881 | 0 | crypt_storage_wrapper_destroy(rh->cw1); |
882 | 0 | rh->cw1 = NULL; |
883 | 0 | crypt_storage_wrapper_destroy(rh->cw2); |
884 | 0 | rh->cw2 = NULL; |
885 | |
886 | 0 | free(rh->device_name); |
887 | 0 | free(rh->overlay_name); |
888 | 0 | free(rh->hotzone_name); |
889 | 0 | crypt_drop_uploaded_keyring_key(cd, rh->vks); |
890 | 0 | crypt_free_volume_key(rh->vks); |
891 | 0 | device_release_excl(cd, crypt_data_device(cd)); |
892 | 0 | crypt_unlock_internal(cd, rh->reenc_lock); |
893 | 0 | free(rh); |
894 | 0 | } |
895 | | |
896 | | #if USE_LUKS2_REENCRYPTION |
897 | | int LUKS2_reencrypt_max_hotzone_size(struct crypt_device *cd __attribute__((unused)), |
898 | | struct luks2_hdr *hdr, |
899 | | const struct reenc_protection *rp, |
900 | | int reencrypt_keyslot, |
901 | | uint64_t *r_length) |
902 | 0 | { |
903 | 0 | int r; |
904 | 0 | uint64_t dummy, area_length; |
905 | |
906 | 0 | assert(hdr); |
907 | 0 | assert(rp); |
908 | 0 | assert(r_length); |
909 | |
910 | 0 | if (rp->type <= REENC_PROTECTION_NONE) { |
911 | 0 | *r_length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH; |
912 | 0 | return 0; |
913 | 0 | } |
914 | | |
915 | 0 | if (rp->type == REENC_PROTECTION_DATASHIFT) { |
916 | 0 | *r_length = rp->p.ds.data_shift; |
917 | 0 | return 0; |
918 | 0 | } |
919 | | |
920 | 0 | r = LUKS2_keyslot_area(hdr, reencrypt_keyslot, &dummy, &area_length); |
921 | 0 | if (r < 0) |
922 | 0 | return -EINVAL; |
923 | | |
924 | 0 | if (rp->type == REENC_PROTECTION_JOURNAL) { |
925 | 0 | *r_length = area_length; |
926 | 0 | return 0; |
927 | 0 | } |
928 | | |
929 | 0 | if (rp->type == REENC_PROTECTION_CHECKSUM) { |
930 | 0 | *r_length = (area_length / rp->p.csum.hash_size) * rp->p.csum.block_size; |
931 | 0 | return 0; |
932 | 0 | } |
933 | | |
934 | 0 | return -EINVAL; |
935 | 0 | } |
936 | | |
937 | | static size_t reencrypt_get_alignment(struct crypt_device *cd, |
938 | | struct luks2_hdr *hdr) |
939 | 0 | { |
940 | 0 | size_t ss, alignment = device_block_size(cd, crypt_data_device(cd)); |
941 | |
942 | 0 | ss = reencrypt_get_sector_size_old(hdr); |
943 | 0 | if (ss > alignment) |
944 | 0 | alignment = ss; |
945 | 0 | ss = reencrypt_get_sector_size_new(hdr); |
946 | 0 | if (ss > alignment) |
947 | 0 | alignment = ss; |
948 | |
949 | 0 | return alignment; |
950 | 0 | } |
951 | | |
952 | | /* returns void because it must not fail on a valid LUKS2 header */ |
953 | | static void _load_backup_segments(struct luks2_hdr *hdr, |
954 | | struct luks2_reencrypt *rh) |
955 | 0 | { |
956 | 0 | int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final"); |
957 | |
958 | 0 | if (segment >= 0) { |
959 | 0 | rh->jobj_segment_new = json_object_get(LUKS2_get_segment_jobj(hdr, segment)); |
960 | 0 | rh->digest_new = LUKS2_digest_by_segment(hdr, segment); |
961 | 0 | } else { |
962 | 0 | rh->jobj_segment_new = NULL; |
963 | 0 | rh->digest_new = -ENOENT; |
964 | 0 | } |
965 | |
966 | 0 | segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous"); |
967 | 0 | if (segment >= 0) { |
968 | 0 | rh->jobj_segment_old = json_object_get(LUKS2_get_segment_jobj(hdr, segment)); |
969 | 0 | rh->digest_old = LUKS2_digest_by_segment(hdr, segment); |
970 | 0 | } else { |
971 | 0 | rh->jobj_segment_old = NULL; |
972 | 0 | rh->digest_old = -ENOENT; |
973 | 0 | } |
974 | |
975 | 0 | segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment"); |
976 | 0 | if (segment >= 0) |
977 | 0 | rh->jobj_segment_moved = json_object_get(LUKS2_get_segment_jobj(hdr, segment)); |
978 | 0 | else |
979 | 0 | rh->jobj_segment_moved = NULL; |
980 | 0 | } |
981 | | |
982 | | static int reencrypt_offset_backward_moved(struct luks2_hdr *hdr, json_object *jobj_segments, |
983 | | uint64_t *reencrypt_length, uint64_t data_shift, uint64_t *offset) |
984 | 0 | { |
985 | 0 | uint64_t tmp, linear_length = 0; |
986 | 0 | int sg, segs = json_segments_count(jobj_segments); |
987 | | |
988 | | /* find reencrypt offset with data shift */ |
989 | 0 | for (sg = 0; sg < segs; sg++) |
990 | 0 | if (LUKS2_segment_is_type(hdr, sg, "linear")) |
991 | 0 | linear_length += LUKS2_segment_size(hdr, sg, 0); |
992 | | |
993 | | /* total length of all active linear segments */ |
994 | 0 | if (linear_length && segs > 1) { |
995 | 0 | if (linear_length < data_shift) |
996 | 0 | return -EINVAL; |
997 | 0 | tmp = linear_length - data_shift; |
998 | 0 | if (tmp && tmp < data_shift) { |
999 | 0 | *offset = data_shift; |
1000 | 0 | *reencrypt_length = tmp; |
1001 | 0 | } else |
1002 | 0 | *offset = tmp; |
1003 | 0 | return 0; |
1004 | 0 | } |
1005 | | |
1006 | 0 | if (segs == 1) { |
1007 | 0 | *offset = 0; |
1008 | 0 | return 0; |
1009 | 0 | } |
1010 | | |
1011 | | /* should be unreachable */ |
1012 | | |
1013 | 0 | return -EINVAL; |
1014 | 0 | } |
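A self-contained check of the multi-segment branch above, with made-up sizes (24 MiB of linear segments, an 8 MiB data shift): the leftover after subtracting the shift becomes the next hotzone offset, unless it is shorter than the shift itself, in which case the zone is shrunk:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t linear_length = 24ULL << 20; /* sum of linear segments */
		uint64_t data_shift = 8ULL << 20;
		uint64_t offset, length = data_shift, tmp;

		assert(linear_length >= data_shift);  /* otherwise -EINVAL above */
		tmp = linear_length - data_shift;
		if (tmp && tmp < data_shift) {
			offset = data_shift;          /* short tail: shrink the zone */
			length = tmp;
		} else
			offset = tmp;

		assert(offset == 16ULL << 20 && length == 8ULL << 20);
		return 0;
	}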
1015 | | |
1016 | | static int reencrypt_offset_forward_moved(struct luks2_hdr *hdr, |
1017 | | uint64_t data_shift, |
1018 | | uint64_t *offset) |
1019 | 0 | { |
1020 | 0 | int last_crypt = LUKS2_last_segment_by_type(hdr, "crypt"); |
1021 | | |
1022 | | /* if the last crypt segment exists and it's the first one, just return offset = 0 */ |
1023 | 0 | if (last_crypt <= 0) { |
1024 | 0 | *offset = 0; |
1025 | 0 | return 0; |
1026 | 0 | } |
1027 | | |
1028 | 0 | *offset = LUKS2_segment_offset(hdr, last_crypt, 0) - data_shift; |
1029 | 0 | return 0; |
1030 | 0 | } |
1031 | | |
1032 | | static int _offset_forward(json_object *jobj_segments, uint64_t *offset) |
1033 | 0 | { |
1034 | 0 | int segs = json_segments_count(jobj_segments); |
1035 | |
1036 | 0 | if (segs == 1) |
1037 | 0 | *offset = 0; |
1038 | 0 | else if (segs == 2) { |
1039 | 0 | *offset = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0); |
1040 | 0 | if (!*offset) |
1041 | 0 | return -EINVAL; |
1042 | 0 | } else |
1043 | 0 | return -EINVAL; |
1044 | | |
1045 | 0 | return 0; |
1046 | 0 | } |
1047 | | |
1048 | | static int _offset_backward(json_object *jobj_segments, uint64_t device_size, uint64_t *length, uint64_t *offset) |
1049 | 0 | { |
1050 | 0 | int segs = json_segments_count(jobj_segments); |
1051 | 0 | uint64_t tmp; |
1052 | |
1053 | 0 | if (segs == 1) { |
1054 | 0 | if (device_size < *length) |
1055 | 0 | *length = device_size; |
1056 | 0 | *offset = device_size - *length; |
1057 | 0 | } else if (segs == 2) { |
1058 | 0 | tmp = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0); |
1059 | 0 | if (tmp < *length) |
1060 | 0 | *length = tmp; |
1061 | 0 | *offset = tmp - *length; |
1062 | 0 | } else |
1063 | 0 | return -EINVAL; |
1064 | | |
1065 | 0 | return 0; |
1066 | 0 | } |
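The two-segment cases above reduce to simple arithmetic. A self-contained mirror with illustrative sizes (in the forward case segment 0 is the already re-encrypted area; in the backward case it is the part still to be processed):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t seg0_size = 8ULL << 20; /* size of segment 0 */
		uint64_t length = 1ULL << 20;    /* requested hotzone length */
		uint64_t offset;

		/* forward: the next hotzone starts where segment 0 ends */
		offset = seg0_size;
		assert(offset == 8ULL << 20);

		/* backward: clip the length to segment 0 and take its tail */
		if (seg0_size < length)
			length = seg0_size;
		offset = seg0_size - length;
		assert(offset == 7ULL << 20);

		return 0;
	}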
1067 | | |
1068 | | /* must always be relative to the data offset */ |
1069 | | /* the LUKS2 header MUST be valid */ |
1070 | | static int reencrypt_offset(struct luks2_hdr *hdr, |
1071 | | crypt_reencrypt_direction_info di, |
1072 | | uint64_t device_size, |
1073 | | uint64_t *reencrypt_length, |
1074 | | uint64_t *offset) |
1075 | 0 | { |
1076 | 0 | int r, sg; |
1077 | 0 | json_object *jobj_segments; |
1078 | 0 | uint64_t data_shift = reencrypt_data_shift(hdr); |
1079 | |
1080 | 0 | if (!offset) |
1081 | 0 | return -EINVAL; |
1082 | | |
1083 | | /* if there's a segment in reencryption, return its offset directly */ |
1084 | 0 | json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments); |
1085 | 0 | sg = json_segments_segment_in_reencrypt(jobj_segments); |
1086 | 0 | if (sg >= 0) { |
1087 | 0 | *offset = LUKS2_segment_offset(hdr, sg, 0) - (reencrypt_get_data_offset_new(hdr)); |
1088 | 0 | return 0; |
1089 | 0 | } |
1090 | | |
1091 | 0 | if (di == CRYPT_REENCRYPT_FORWARD) { |
1092 | 0 | if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_DECRYPT && |
1093 | 0 | LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0) { |
1094 | 0 | r = reencrypt_offset_forward_moved(hdr, data_shift, offset); |
1095 | 0 | if (!r && *offset > device_size) |
1096 | 0 | *offset = device_size; |
1097 | 0 | return r; |
1098 | 0 | } |
1099 | 0 | return _offset_forward(jobj_segments, offset); |
1100 | 0 | } else if (di == CRYPT_REENCRYPT_BACKWARD) { |
1101 | 0 | if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_ENCRYPT && |
1102 | 0 | LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0) |
1103 | 0 | return reencrypt_offset_backward_moved(hdr, jobj_segments, reencrypt_length, data_shift, offset); |
1104 | 0 | return _offset_backward(jobj_segments, device_size, reencrypt_length, offset); |
1105 | 0 | } |
1106 | | |
1107 | 0 | return -EINVAL; |
1108 | 0 | } |
1109 | | |
1110 | | static uint64_t reencrypt_length(struct crypt_device *cd, |
1111 | | struct reenc_protection *rp, |
1112 | | uint64_t keyslot_area_length, |
1113 | | uint64_t length_max, |
1114 | | size_t alignment) |
1115 | 0 | { |
1116 | 0 | unsigned long dummy, optimal_alignment; |
1117 | 0 | uint64_t length, soft_mem_limit; |
1118 | |
1118 | |
1119 | 0 | if (rp->type == REENC_PROTECTION_NONE) |
1120 | 0 | length = length_max ?: LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH; |
1121 | 0 | else if (rp->type == REENC_PROTECTION_CHECKSUM) |
1122 | 0 | length = (keyslot_area_length / rp->p.csum.hash_size) * rp->p.csum.block_size; |
1123 | 0 | else if (rp->type == REENC_PROTECTION_DATASHIFT) |
1124 | 0 | return rp->p.ds.data_shift; |
1125 | 0 | else |
1126 | 0 | length = keyslot_area_length; |
1127 | | |
1128 | | /* hard limit */ |
1129 | 0 | if (length > LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH) |
1130 | 0 | length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH; |
1131 | | |
1132 | | /* soft limit is 1/4 of system memory */ |
1133 | 0 | soft_mem_limit = crypt_getphysmemory_kb() << 8; /* multiply by (1024/4) */ |
1134 | |
1135 | 0 | if (soft_mem_limit && length > soft_mem_limit) |
1136 | 0 | length = soft_mem_limit; |
1137 | |
1138 | 0 | if (length_max && length > length_max) |
1139 | 0 | length = length_max; |
1140 | |
1141 | 0 | length -= (length % alignment); |
1142 | | |
1143 | | /* Emits error later */ |
1144 | 0 | if (!length) |
1145 | 0 | return length; |
1146 | | |
1147 | 0 | device_topology_alignment(cd, crypt_data_device(cd), &optimal_alignment, &dummy, length); |
1148 | | |
1149 | | /* we have to stick with encryption sector size alignment */ |
1150 | 0 | if (optimal_alignment % alignment) |
1151 | 0 | return length; |
1152 | | |
1153 | | /* align to opt-io size only if remaining size allows it */ |
1154 | 0 | if (length > optimal_alignment) |
1155 | 0 | length -= (length % optimal_alignment); |
1156 | |
1157 | 0 | return length; |
1158 | 0 | } |
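For the checksum branch above, a self-contained arithmetic check with example parameters (the area, hash and block sizes are illustrative; 32 bytes matches a sha256 digest): a 256 KiB keyslot area indexing 4096-byte blocks bounds one pass at 32 MiB before the alignment trim:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t area_length = 256 * 1024; /* reencrypt keyslot area */
		uint64_t hash_size = 32;           /* sha256 checksum size */
		uint64_t block_size = 4096;        /* checksummed block size */
		uint64_t alignment = 4096;
		uint64_t length;

		length = (area_length / hash_size) * block_size;
		assert(length == 32ULL << 20);

		length -= length % alignment;      /* same trim as above */
		assert(length && length % alignment == 0);
		return 0;
	}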
1159 | | |
1160 | | static int reencrypt_context_init(struct crypt_device *cd, |
1161 | | struct luks2_hdr *hdr, |
1162 | | struct luks2_reencrypt *rh, |
1163 | | uint64_t device_size, |
1164 | | uint64_t max_hotzone_size, |
1165 | | uint64_t fixed_device_size) |
1166 | 0 | { |
1167 | 0 | int r; |
1168 | 0 | size_t alignment; |
1169 | 0 | uint64_t dummy, area_length; |
1170 | |
1171 | 0 | rh->reenc_keyslot = LUKS2_find_keyslot(hdr, "reencrypt"); |
1172 | 0 | if (rh->reenc_keyslot < 0) |
1173 | 0 | return -EINVAL; |
1174 | 0 | if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &dummy, &area_length) < 0) |
1175 | 0 | return -EINVAL; |
1176 | | |
1177 | 0 | rh->mode = reencrypt_mode(hdr); |
1178 | |
1179 | 0 | rh->direction = reencrypt_direction(hdr); |
1180 | |
1181 | 0 | r = LUKS2_keyslot_reencrypt_load(cd, hdr, rh->reenc_keyslot, &rh->rp, true); |
1182 | 0 | if (r < 0) |
1183 | 0 | return r; |
1184 | | |
1185 | 0 | if (rh->rp.type == REENC_PROTECTION_CHECKSUM) |
1186 | 0 | alignment = rh->rp.p.csum.block_size; |
1187 | 0 | else |
1188 | 0 | alignment = reencrypt_get_alignment(cd, hdr); |
1189 | |
1190 | 0 | if (!alignment) |
1191 | 0 | return -EINVAL; |
1192 | | |
1193 | 0 | if ((max_hotzone_size << SECTOR_SHIFT) % alignment) { |
1194 | 0 | log_err(cd, _("Hotzone size must be multiple of calculated zone alignment (%zu bytes)."), alignment); |
1195 | 0 | return -EINVAL; |
1196 | 0 | } |
1197 | | |
1198 | 0 | if ((fixed_device_size << SECTOR_SHIFT) % alignment) { |
1199 | 0 | log_err(cd, _("Device size must be multiple of calculated zone alignment (%zu bytes)."), alignment); |
1200 | 0 | return -EINVAL; |
1201 | 0 | } |
1202 | | |
1203 | 0 | if (fixed_device_size) { |
1204 | 0 | log_dbg(cd, "Switching reencryption to fixed size mode."); |
1205 | 0 | device_size = fixed_device_size << SECTOR_SHIFT; |
1206 | 0 | rh->fixed_length = true; |
1207 | 0 | } else |
1208 | 0 | rh->fixed_length = false; |
1209 | |
1210 | 0 | rh->length = reencrypt_length(cd, &rh->rp, area_length, max_hotzone_size << SECTOR_SHIFT, alignment); |
1211 | 0 | if (!rh->length) { |
1212 | 0 | log_dbg(cd, "Invalid reencryption length."); |
1213 | 0 | return -EINVAL; |
1214 | 0 | } |
1215 | | |
1216 | 0 | if (reencrypt_offset(hdr, rh->direction, device_size, &rh->length, &rh->offset)) { |
1217 | 0 | log_dbg(cd, "Failed to get reencryption offset."); |
1218 | 0 | return -EINVAL; |
1219 | 0 | } |
1220 | | |
1221 | 0 | if (rh->offset > device_size) |
1222 | 0 | return -EINVAL; |
1223 | 0 | if (rh->length > device_size - rh->offset) |
1224 | 0 | rh->length = device_size - rh->offset; |
1225 | |
1226 | 0 | _load_backup_segments(hdr, rh); |
1227 | |
1228 | 0 | r = LUKS2_keyslot_reencrypt_load(cd, hdr, rh->reenc_keyslot, &rh->rp_moved_segment, false); |
1229 | 0 | if (r < 0) |
1230 | 0 | return r; |
1231 | | |
1232 | 0 | if (rh->rp_moved_segment.type == REENC_PROTECTION_NOT_SET) |
1233 | 0 | log_dbg(cd, "No moved segment resilience configured."); |
1234 | |
1235 | 0 | if (rh->direction == CRYPT_REENCRYPT_BACKWARD) |
1236 | 0 | rh->progress = device_size - rh->offset - rh->length; |
1237 | 0 | else if (rh->jobj_segment_moved && rh->direction == CRYPT_REENCRYPT_FORWARD) { |
1238 | 0 | if (rh->offset == json_segment_get_offset(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"), false)) |
1239 | 0 | rh->progress = device_size - json_segment_get_size(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"), false); |
1240 | 0 | else |
1241 | 0 | rh->progress = rh->offset - json_segment_get_size(rh->jobj_segment_moved, 0); |
1242 | 0 | } else |
1243 | 0 | rh->progress = rh->offset; |
1244 | |
1245 | 0 | log_dbg(cd, "reencrypt-direction: %s", rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward"); |
1246 | 0 | log_dbg(cd, "backup-previous digest id: %d", rh->digest_old); |
1247 | 0 | log_dbg(cd, "backup-final digest id: %d", rh->digest_new); |
1248 | 0 | log_dbg(cd, "reencrypt length: %" PRIu64, rh->length); |
1249 | 0 | log_dbg(cd, "reencrypt offset: %" PRIu64, rh->offset); |
1250 | 0 | log_dbg(cd, "reencrypt shift: %s%" PRIu64, |
1251 | 0 | (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->direction == CRYPT_REENCRYPT_BACKWARD ? "-" : ""), |
1252 | 0 | data_shift_value(&rh->rp)); |
1253 | 0 | log_dbg(cd, "reencrypt alignment: %zu", alignment); |
1254 | 0 | log_dbg(cd, "reencrypt progress: %" PRIu64, rh->progress); |
1255 | |
1256 | 0 | rh->device_size = device_size; |
1257 | |
1258 | 0 | return rh->length < 512 ? -EINVAL : 0; |
1259 | 0 | } |
1260 | | |
1261 | | static size_t reencrypt_buffer_length(struct luks2_reencrypt *rh) |
1262 | 0 | { |
1263 | 0 | if (rh->rp.type == REENC_PROTECTION_DATASHIFT) |
1264 | 0 | return data_shift_value(&rh->rp); |
1265 | 0 | return rh->length; |
1266 | 0 | } |
1267 | | |
1268 | | static int reencrypt_load_clean(struct crypt_device *cd, |
1269 | | struct luks2_hdr *hdr, |
1270 | | uint64_t device_size, |
1271 | | uint64_t max_hotzone_size, |
1272 | | uint64_t fixed_device_size, |
1273 | | struct luks2_reencrypt **rh) |
1274 | 0 | { |
1275 | 0 | int r; |
1276 | 0 | struct luks2_reencrypt *tmp = crypt_zalloc(sizeof (*tmp)); |
1277 | |
1278 | 0 | if (!tmp) |
1279 | 0 | return -ENOMEM; |
1280 | | |
1281 | 0 | log_dbg(cd, "Loading stored reencryption context."); |
1282 | |
1283 | 0 | r = reencrypt_context_init(cd, hdr, tmp, device_size, max_hotzone_size, fixed_device_size); |
1284 | 0 | if (r) |
1285 | 0 | goto err; |
1286 | | |
1287 | 0 | if (posix_memalign(&tmp->reenc_buffer, device_alignment(crypt_data_device(cd)), |
1288 | 0 | reencrypt_buffer_length(tmp))) { |
1289 | 0 | r = -ENOMEM; |
1290 | 0 | goto err; |
1291 | 0 | } |
1292 | | |
1293 | 0 | *rh = tmp; |
1294 | |
1295 | 0 | return 0; |
1296 | 0 | err: |
1297 | 0 | LUKS2_reencrypt_free(cd, tmp); |
1298 | |
1299 | 0 | return r; |
1300 | 0 | } |
1301 | | |
1302 | | static int reencrypt_make_segments(struct crypt_device *cd, |
1303 | | struct luks2_hdr *hdr, |
1304 | | struct luks2_reencrypt *rh, |
1305 | | uint64_t device_size) |
1306 | 0 | { |
1307 | 0 | int r; |
1308 | 0 | uint64_t data_offset = reencrypt_get_data_offset_new(hdr); |
1309 | |
1310 | 0 | log_dbg(cd, "Calculating segments."); |
1311 | |
1312 | 0 | r = reencrypt_make_hot_segments(cd, hdr, rh, device_size, data_offset); |
1313 | 0 | if (!r) { |
1314 | 0 | r = reencrypt_make_post_segments(cd, hdr, rh, data_offset); |
1315 | 0 | if (r) |
1316 | 0 | json_object_put(rh->jobj_segs_hot); |
1317 | 0 | } |
1318 | |
1319 | 0 | if (r) |
1320 | 0 | log_dbg(cd, "Failed to make reencryption segments."); |
1321 | |
1322 | 0 | return r; |
1323 | 0 | } |
1324 | | |
1325 | | static int reencrypt_make_segments_crashed(struct crypt_device *cd, |
1326 | | struct luks2_hdr *hdr, |
1327 | | struct luks2_reencrypt *rh) |
1328 | 0 | { |
1329 | 0 | int r; |
1330 | 0 | uint64_t data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT; |
1331 | |
1332 | 0 | if (!rh) |
1333 | 0 | return -EINVAL; |
1334 | | |
1335 | 0 | rh->jobj_segs_hot = json_object_new_object(); |
1336 | 0 | if (!rh->jobj_segs_hot) |
1337 | 0 | return -ENOMEM; |
1338 | | |
1339 | 0 | json_object_object_foreach(LUKS2_get_segments_jobj(hdr), key, val) { |
1340 | 0 | if (json_segment_is_backup(val)) |
1341 | 0 | continue; |
1342 | 0 | json_object_object_add(rh->jobj_segs_hot, key, json_object_get(val)); |
1343 | 0 | } |
1344 | |
1345 | 0 | r = reencrypt_make_post_segments(cd, hdr, rh, data_offset); |
1346 | 0 | if (r) { |
1347 | 0 | json_object_put(rh->jobj_segs_hot); |
1348 | 0 | rh->jobj_segs_hot = NULL; |
1349 | 0 | } |
1350 | |
1351 | 0 | return r; |
1352 | 0 | } |
1353 | | |
1354 | | static int reencrypt_load_crashed(struct crypt_device *cd, |
1355 | | struct luks2_hdr *hdr, uint64_t device_size, struct luks2_reencrypt **rh) |
1356 | 0 | { |
1357 | 0 | bool dynamic; |
1358 | 0 | uint64_t required_device_size; |
1359 | 0 | int r, reenc_seg; |
1360 | |
1361 | 0 | if (LUKS2_get_data_size(hdr, &required_device_size, &dynamic)) |
1362 | 0 | return -EINVAL; |
1363 | | |
1364 | 0 | if (dynamic) |
1365 | 0 | required_device_size = 0; |
1366 | 0 | else |
1367 | 0 | required_device_size >>= SECTOR_SHIFT; |
1368 | |
1369 | 0 | r = reencrypt_load_clean(cd, hdr, device_size, 0, required_device_size, rh); |
1370 | |
1371 | 0 | if (!r) { |
1372 | 0 | reenc_seg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr)); |
1373 | 0 | if (reenc_seg < 0) |
1374 | 0 | r = -EINVAL; |
1375 | 0 | else |
1376 | 0 | (*rh)->length = LUKS2_segment_size(hdr, reenc_seg, 0); |
1377 | 0 | } |
1378 | |
1379 | 0 | if (!r) |
1380 | 0 | r = reencrypt_make_segments_crashed(cd, hdr, *rh); |
1381 | |
1382 | 0 | if (r) { |
1383 | 0 | LUKS2_reencrypt_free(cd, *rh); |
1384 | 0 | *rh = NULL; |
1385 | 0 | } |
1386 | 0 | return r; |
1387 | 0 | } |
1388 | | |
1389 | | static int reencrypt_init_storage_wrappers(struct crypt_device *cd, |
1390 | | struct luks2_hdr *hdr, |
1391 | | struct luks2_reencrypt *rh, |
1392 | | struct volume_key *vks) |
1393 | 0 | { |
1394 | 0 | int r; |
1395 | 0 | struct volume_key *vk; |
1396 | 0 | uint32_t wrapper_flags = (getuid() || geteuid()) ? 0 : DISABLE_KCAPI; |
1397 | |
1398 | 0 | vk = crypt_volume_key_by_id(vks, rh->digest_old); |
1399 | 0 | r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd), |
1400 | 0 | reencrypt_get_data_offset_old(hdr), |
1401 | 0 | crypt_get_iv_offset(cd), |
1402 | 0 | reencrypt_get_sector_size_old(hdr), |
1403 | 0 | reencrypt_segment_cipher_old(hdr), |
1404 | 0 | vk, wrapper_flags | OPEN_READONLY); |
1405 | 0 | if (r) { |
1406 | 0 | log_err(cd, _("Failed to initialize old segment storage wrapper.")); |
1407 | 0 | return r; |
1408 | 0 | } |
1409 | 0 | rh->wflags1 = wrapper_flags | OPEN_READONLY; |
1410 | 0 | log_dbg(cd, "Old cipher storage wrapper type: %d.", crypt_storage_wrapper_get_type(rh->cw1)); |
1411 | |
1412 | 0 | vk = crypt_volume_key_by_id(vks, rh->digest_new); |
1413 | 0 | r = crypt_storage_wrapper_init(cd, &rh->cw2, crypt_data_device(cd), |
1414 | 0 | reencrypt_get_data_offset_new(hdr), |
1415 | 0 | crypt_get_iv_offset(cd), |
1416 | 0 | reencrypt_get_sector_size_new(hdr), |
1417 | 0 | reencrypt_segment_cipher_new(hdr), |
1418 | 0 | vk, wrapper_flags); |
1419 | 0 | if (r) { |
1420 | 0 | log_err(cd, _("Failed to initialize new segment storage wrapper.")); |
1421 | 0 | return r; |
1422 | 0 | } |
1423 | 0 | rh->wflags2 = wrapper_flags; |
1424 | 0 | log_dbg(cd, "New cipher storage wrapper type: %d", crypt_storage_wrapper_get_type(rh->cw2)); |
1425 | |
1426 | 0 | return 0; |
1427 | 0 | } |
1428 | | |
1429 | | static int reencrypt_context_set_names(struct luks2_reencrypt *rh, const char *name) |
1430 | 0 | { |
1431 | 0 | if (!rh || !name) |
1432 | 0 | return -EINVAL; |
1433 | | |
1434 | 0 | if (*name == '/') { |
1435 | 0 | if (!(rh->device_name = dm_device_name(name))) |
1436 | 0 | return -EINVAL; |
1437 | 0 | } else if (!(rh->device_name = strdup(name))) |
1438 | 0 | return -ENOMEM; |
1439 | | |
1440 | 0 | if (asprintf(&rh->hotzone_name, "%s-hotzone-%s", rh->device_name, |
1441 | 0 | rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward") < 0) { |
1442 | 0 | rh->hotzone_name = NULL; |
1443 | 0 | return -ENOMEM; |
1444 | 0 | } |
1445 | 0 | if (asprintf(&rh->overlay_name, "%s-overlay", rh->device_name) < 0) { |
1446 | 0 | rh->overlay_name = NULL; |
1447 | 0 | return -ENOMEM; |
1448 | 0 | } |
1449 | | |
1450 | 0 | rh->online = true; |
1451 | 0 | return 0; |
1452 | 0 | } |
1453 | | |
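 | | /* Shift *offset by data_shift: forward subtracts (must not underflow), backward adds. */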
1454 | | static int modify_offset(uint64_t *offset, uint64_t data_shift, crypt_reencrypt_direction_info di) |
1455 | 0 | { |
1456 | 0 | int r = -EINVAL; |
1457 | |
|
1458 | 0 | if (!offset) |
1459 | 0 | return r; |
1460 | | |
1461 | 0 | if (di == CRYPT_REENCRYPT_FORWARD) { |
1462 | 0 | if (*offset >= data_shift) { |
1463 | 0 | *offset -= data_shift; |
1464 | 0 | r = 0; |
1465 | 0 | } |
1466 | 0 | } else if (di == CRYPT_REENCRYPT_BACKWARD) { |
1467 | 0 | *offset += data_shift; |
1468 | 0 | r = 0; |
1469 | 0 | } |
1470 | |
|
1471 | 0 | return r; |
1472 | 0 | } |
1473 | | |
1474 | | static int reencrypt_update_flag(struct crypt_device *cd, uint8_t version, |
1475 | | bool enable, bool commit) |
1476 | 0 | { |
1477 | 0 | uint32_t reqs; |
1478 | 0 | struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2); |
1479 | |
|
1480 | 0 | if (enable) { |
1481 | 0 | log_dbg(cd, "Going to store reencryption requirement flag (version: %u).", version); |
1482 | 0 | return LUKS2_config_set_requirement_version(cd, hdr, CRYPT_REQUIREMENT_ONLINE_REENCRYPT, version, commit); |
1483 | 0 | } |
1484 | | |
1485 | 0 | LUKS2_config_get_requirements(cd, hdr, &reqs); |
1486 | |
|
1487 | 0 | reqs &= ~CRYPT_REQUIREMENT_ONLINE_REENCRYPT; |
1488 | |
|
1489 | 0 | log_dbg(cd, "Going to wipe reencryption requirement flag."); |
1490 | |
|
1491 | 0 | return LUKS2_config_set_requirements(cd, hdr, reqs, commit); |
1492 | 0 | } |
1493 | | |
1494 | | static int reencrypt_hotzone_protect_ready(struct crypt_device *cd, |
1495 | | struct reenc_protection *rp) |
1496 | 0 | { |
1497 | 0 | assert(rp); |
1498 | |
|
1499 | 0 | if (rp->type == REENC_PROTECTION_NOT_SET) |
1500 | 0 | return -EINVAL; |
1501 | | |
1502 | 0 | if (rp->type != REENC_PROTECTION_CHECKSUM) |
1503 | 0 | return 0; |
1504 | | |
1505 | 0 | if (!rp->p.csum.checksums) { |
1506 | 0 | log_dbg(cd, "Allocating buffer for storing resilience checksums."); |
1507 | 0 | if (posix_memalign(&rp->p.csum.checksums, device_alignment(crypt_metadata_device(cd)), |
1508 | 0 | rp->p.csum.checksums_len)) |
1509 | 0 | return -ENOMEM; |
1510 | 0 | } |
1511 | | |
1512 | 0 | return 0; |
1513 | 0 | } |
1514 | | |
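 | | /*
 | |  * Recover a single hotzone after a crash. The active resilience protection
 | |  * selects the strategy below: checksum (reprocess only blocks whose on-disk
 | |  * data still matches the stored old-data checksums), journal (replay the
 | |  * data copy kept in the keyslot area) or datashift (re-read the data from
 | |  * its pre-shift offset and write it to the new location).
 | |  */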
1515 | | static int reencrypt_recover_segment(struct crypt_device *cd, |
1516 | | struct luks2_hdr *hdr, |
1517 | | struct luks2_reencrypt *rh, |
1518 | | struct volume_key *vks) |
1519 | 0 | { |
1520 | 0 | struct volume_key *vk_old, *vk_new; |
1521 | 0 | size_t count, s; |
1522 | 0 | ssize_t read, w; |
1523 | 0 | struct reenc_protection *rp; |
1524 | 0 | int devfd, r, new_sector_size, old_sector_size, rseg; |
1525 | 0 | uint64_t area_offset, area_length, area_length_read, crash_iv_offset, |
1526 | 0 | data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT; |
1527 | 0 | char *checksum_tmp = NULL, *data_buffer = NULL; |
1528 | 0 | struct crypt_storage_wrapper *cw1 = NULL, *cw2 = NULL; |
1529 | |
|
1530 | 0 | assert(hdr); |
1531 | 0 | assert(rh); |
1532 | 0 | assert(vks); |
1533 | |
|
1534 | 0 | rseg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot); |
1535 | 0 | if (rh->offset == 0 && rh->rp_moved_segment.type > REENC_PROTECTION_NOT_SET) { |
1536 | 0 | log_dbg(cd, "Recovery using moved segment protection."); |
1537 | 0 | rp = &rh->rp_moved_segment; |
1538 | 0 | } else |
1539 | 0 | rp = &rh->rp; |
1540 | |
|
1541 | 0 | if (rseg < 0 || rh->length < 512) |
1542 | 0 | return -EINVAL; |
1543 | | |
1544 | 0 | r = reencrypt_hotzone_protect_ready(cd, rp); |
1545 | 0 | if (r) { |
1546 | 0 | log_err(cd, _("Failed to initialize hotzone protection.")); |
1547 | 0 | return -EINVAL; |
1548 | 0 | } |
1549 | | |
1550 | 0 | vk_new = crypt_volume_key_by_id(vks, rh->digest_new); |
1551 | 0 | if (!vk_new && rh->mode != CRYPT_REENCRYPT_DECRYPT) |
1552 | 0 | return -EINVAL; |
1553 | 0 | vk_old = crypt_volume_key_by_id(vks, rh->digest_old); |
1554 | 0 | if (!vk_old && rh->mode != CRYPT_REENCRYPT_ENCRYPT) |
1555 | 0 | return -EINVAL; |
1556 | 0 | old_sector_size = json_segment_get_sector_size(reencrypt_segment_old(hdr)); |
1557 | 0 | new_sector_size = json_segment_get_sector_size(reencrypt_segment_new(hdr)); |
1558 | 0 | if (rh->mode == CRYPT_REENCRYPT_DECRYPT) |
1559 | 0 | crash_iv_offset = rh->offset >> SECTOR_SHIFT; /* TODO: + old iv_tweak */ |
1560 | 0 | else |
1561 | 0 | crash_iv_offset = json_segment_get_iv_offset(json_segments_get_segment(rh->jobj_segs_hot, rseg)); |
1562 | |
|
1563 | 0 | log_dbg(cd, "crash_offset: %" PRIu64 ", crash_length: %" PRIu64 ", crash_iv_offset: %" PRIu64, |
1564 | 0 | data_offset + rh->offset, rh->length, crash_iv_offset); |
1565 | |
|
1566 | 0 | r = crypt_storage_wrapper_init(cd, &cw2, crypt_data_device(cd), |
1567 | 0 | data_offset + rh->offset, crash_iv_offset, new_sector_size, |
1568 | 0 | reencrypt_segment_cipher_new(hdr), vk_new, 0); |
1569 | 0 | if (r) { |
1570 | 0 | log_err(cd, _("Failed to initialize new segment storage wrapper.")); |
1571 | 0 | return r; |
1572 | 0 | } |
1573 | | |
1574 | 0 | if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &area_offset, &area_length)) { |
1575 | 0 | r = -EINVAL; |
1576 | 0 | goto out; |
1577 | 0 | } |
1578 | | |
1579 | 0 | if (posix_memalign((void**)&data_buffer, device_alignment(crypt_data_device(cd)), rh->length)) { |
1580 | 0 | r = -ENOMEM; |
1581 | 0 | goto out; |
1582 | 0 | } |
1583 | | |
1584 | 0 | switch (rp->type) { |
1585 | 0 | case REENC_PROTECTION_CHECKSUM: |
1586 | 0 | log_dbg(cd, "Checksums based recovery."); |
1587 | |
|
1588 | 0 | r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd), |
1589 | 0 | data_offset + rh->offset, crash_iv_offset, old_sector_size, |
1590 | 0 | reencrypt_segment_cipher_old(hdr), vk_old, 0); |
1591 | 0 | if (r) { |
1592 | 0 | log_err(cd, _("Failed to initialize old segment storage wrapper.")); |
1593 | 0 | goto out; |
1594 | 0 | } |
1595 | | |
1596 | 0 | count = rh->length / rp->p.csum.block_size; |
1597 | 0 | area_length_read = count * rp->p.csum.hash_size; |
1598 | 0 | if (area_length_read > area_length) { |
1599 | 0 | log_dbg(cd, "Internal error in calculated area_length."); |
1600 | 0 | r = -EINVAL; |
1601 | 0 | goto out; |
1602 | 0 | } |
1603 | | |
1604 | 0 | checksum_tmp = malloc(rp->p.csum.hash_size); |
1605 | 0 | if (!checksum_tmp) { |
1606 | 0 | r = -ENOMEM; |
1607 | 0 | goto out; |
1608 | 0 | } |
1609 | | |
1610 | | /* TODO: lock for read */ |
1611 | 0 | devfd = device_open(cd, crypt_metadata_device(cd), O_RDONLY); |
1612 | 0 | if (devfd < 0) |
1613 | 0 | goto out; |
1614 | | |
1615 | | /* read old data checksums */ |
1616 | 0 | read = read_lseek_blockwise(devfd, device_block_size(cd, crypt_metadata_device(cd)), |
1617 | 0 | device_alignment(crypt_metadata_device(cd)), rp->p.csum.checksums, area_length_read, area_offset); |
1618 | 0 | if (read < 0 || (size_t)read != area_length_read) { |
1619 | 0 | log_err(cd, _("Failed to read checksums for current hotzone.")); |
1620 | 0 | r = -EINVAL; |
1621 | 0 | goto out; |
1622 | 0 | } |
1623 | | |
1624 | 0 | read = crypt_storage_wrapper_read(cw2, 0, data_buffer, rh->length); |
1625 | 0 | if (read < 0 || (size_t)read != rh->length) { |
1626 | 0 | log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset + data_offset); |
1627 | 0 | r = -EINVAL; |
1628 | 0 | goto out; |
1629 | 0 | } |
1630 | | |
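 | | /*
 | |  * The stored checksums describe the hotzone data before reencryption. A
 | |  * block whose current on-disk hash still matches was not rewritten before
 | |  * the crash: decrypt it via the old wrapper, write it back via the new one.
 | |  */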
1631 | 0 | for (s = 0; s < count; s++) { |
1632 | 0 | if (crypt_hash_write(rp->p.csum.ch, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size)) { |
1633 | 0 | log_dbg(cd, "Failed to write hash."); |
1634 | 0 | r = -EINVAL; 
1635 | 0 | goto out; |
1636 | 0 | } |
1637 | 0 | if (crypt_hash_final(rp->p.csum.ch, checksum_tmp, rp->p.csum.hash_size)) { |
1638 | 0 | log_dbg(cd, "Failed to finalize hash."); |
1639 | 0 | r = -EINVAL; 
1640 | 0 | goto out; |
1641 | 0 | } |
1642 | 0 | if (!memcmp(checksum_tmp, (char *)rp->p.csum.checksums + (s * rp->p.csum.hash_size), rp->p.csum.hash_size)) { |
1643 | 0 | log_dbg(cd, "Sector %zu (size %zu, offset %zu) needs recovery", s, rp->p.csum.block_size, s * rp->p.csum.block_size); |
1644 | 0 | if (crypt_storage_wrapper_decrypt(cw1, s * rp->p.csum.block_size, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size)) { |
1645 | 0 | log_err(cd, _("Failed to decrypt sector %zu."), s); |
1646 | 0 | r = -EINVAL; |
1647 | 0 | goto out; |
1648 | 0 | } |
1649 | 0 | w = crypt_storage_wrapper_encrypt_write(cw2, s * rp->p.csum.block_size, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size); |
1650 | 0 | if (w < 0 || (size_t)w != rp->p.csum.block_size) { |
1651 | 0 | log_err(cd, _("Failed to recover sector %zu."), s); |
1652 | 0 | r = -EINVAL; |
1653 | 0 | goto out; |
1654 | 0 | } |
1655 | 0 | } |
1656 | 0 | } |
1657 | | |
1658 | 0 | r = 0; |
1659 | 0 | break; |
1660 | 0 | case REENC_PROTECTION_JOURNAL: |
1661 | 0 | log_dbg(cd, "Journal based recovery."); |
1662 | | |
1663 | | /* FIXME: validation candidate */ |
1664 | 0 | if (rh->length > area_length) { |
1665 | 0 | r = -EINVAL; |
1666 | 0 | log_dbg(cd, "Invalid journal size."); |
1667 | 0 | goto out; |
1668 | 0 | } |
1669 | | |
1670 | | /* TODO locking */ |
1671 | 0 | r = crypt_storage_wrapper_init(cd, &cw1, crypt_metadata_device(cd), |
1672 | 0 | area_offset, crash_iv_offset, old_sector_size, |
1673 | 0 | reencrypt_segment_cipher_old(hdr), vk_old, 0); |
1674 | 0 | if (r) { |
1675 | 0 | log_err(cd, _("Failed to initialize old segment storage wrapper.")); |
1676 | 0 | goto out; |
1677 | 0 | } |
1678 | 0 | read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length); |
1679 | 0 | if (read < 0 || (size_t)read != rh->length) { |
1680 | 0 | log_dbg(cd, "Failed to read journaled data."); |
1681 | 0 | r = -EIO; |
1682 | | /* may contain plaintext */
1683 | 0 | crypt_safe_memzero(data_buffer, rh->length); |
1684 | 0 | goto out; |
1685 | 0 | } |
1686 | 0 | read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length); |
1687 | | /* may contain plaintext */
1688 | 0 | crypt_safe_memzero(data_buffer, rh->length); |
1689 | 0 | if (read < 0 || (size_t)read != rh->length) { |
1690 | 0 | log_dbg(cd, "recovery write failed."); |
1691 | 0 | r = -EINVAL; |
1692 | 0 | goto out; |
1693 | 0 | } |
1694 | | |
1695 | 0 | r = 0; |
1696 | 0 | break; |
1697 | 0 | case REENC_PROTECTION_DATASHIFT: |
1698 | 0 | log_dbg(cd, "Data shift based recovery."); |
1699 | |
|
1700 | 0 | if (rseg == 0) { |
1701 | 0 | r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd), |
1702 | 0 | json_segment_get_offset(rh->jobj_segment_moved, 0), 0, |
1703 | 0 | reencrypt_get_sector_size_old(hdr), |
1704 | 0 | reencrypt_segment_cipher_old(hdr), vk_old, 0); |
1705 | 0 | } else { |
1706 | 0 | if (rh->direction == CRYPT_REENCRYPT_FORWARD) |
1707 | 0 | data_offset = data_offset + rh->offset + data_shift_value(rp); |
1708 | 0 | else |
1709 | 0 | data_offset = data_offset + rh->offset - data_shift_value(rp); |
1710 | 0 | r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd), |
1711 | 0 | data_offset, |
1712 | 0 | crash_iv_offset, |
1713 | 0 | reencrypt_get_sector_size_old(hdr), |
1714 | 0 | reencrypt_segment_cipher_old(hdr), vk_old, 0); |
1715 | 0 | } |
1716 | 0 | if (r) { |
1717 | 0 | log_err(cd, _("Failed to initialize old segment storage wrapper.")); |
1718 | 0 | goto out; |
1719 | 0 | } |
1720 | | |
1721 | 0 | read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length); |
1722 | 0 | if (read < 0 || (size_t)read != rh->length) { |
1723 | 0 | log_dbg(cd, "Failed to read data."); |
1724 | 0 | r = -EIO; |
1725 | | /* may contain plaintext */
1726 | 0 | crypt_safe_memzero(data_buffer, rh->length); |
1727 | 0 | goto out; |
1728 | 0 | } |
1729 | | |
1730 | 0 | read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length); |
1731 | | /* may contain plaintext */
1732 | 0 | crypt_safe_memzero(data_buffer, rh->length); |
1733 | 0 | if (read < 0 || (size_t)read != rh->length) { |
1734 | 0 | log_dbg(cd, "recovery write failed."); |
1735 | 0 | r = -EINVAL; |
1736 | 0 | goto out; |
1737 | 0 | } |
1738 | 0 | r = 0; |
1739 | 0 | break; |
1740 | 0 | default: |
1741 | 0 | r = -EINVAL; |
1742 | 0 | } |
1743 | | |
1744 | 0 | if (!r) |
1745 | 0 | rh->read = rh->length; |
1746 | 0 | out: |
1747 | 0 | free(data_buffer); |
1748 | 0 | free(checksum_tmp); |
1749 | 0 | crypt_storage_wrapper_destroy(cw1); |
1750 | 0 | crypt_storage_wrapper_destroy(cw2); |
1751 | |
|
1752 | 0 | return r; |
1753 | 0 | } |
1754 | | |
1755 | | static int reencrypt_add_moved_segment(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh) |
1756 | 0 | { |
1757 | 0 | int digest = rh->digest_old, s = LUKS2_segment_first_unused_id(hdr); |
1758 | |
|
1759 | 0 | if (!rh->jobj_segment_moved) |
1760 | 0 | return 0; |
1761 | | |
1762 | 0 | if (s < 0) |
1763 | 0 | return s; |
1764 | | |
1765 | 0 | if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(rh->jobj_segment_moved))) { |
1766 | 0 | json_object_put(rh->jobj_segment_moved); |
1767 | 0 | return -EINVAL; |
1768 | 0 | } |
1769 | | |
1770 | 0 | if (!strcmp(json_segment_type(rh->jobj_segment_moved), "crypt")) |
1771 | 0 | return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0); |
1772 | | |
1773 | 0 | return 0; |
1774 | 0 | } |
1775 | | |
1776 | | static int reencrypt_add_backup_segment(struct crypt_device *cd, |
1777 | | struct luks2_hdr *hdr, |
1778 | | struct luks2_reencrypt *rh, |
1779 | | unsigned final) |
1780 | 0 | { |
1781 | 0 | int digest, s = LUKS2_segment_first_unused_id(hdr); |
1782 | 0 | json_object *jobj; |
1783 | |
|
1784 | 0 | if (s < 0) |
1785 | 0 | return s; |
1786 | | |
1787 | 0 | digest = final ? rh->digest_new : rh->digest_old; |
1788 | 0 | jobj = final ? rh->jobj_segment_new : rh->jobj_segment_old; |
1789 | |
|
1790 | 0 | if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(jobj))) { |
1791 | 0 | json_object_put(jobj); |
1792 | 0 | return -EINVAL; |
1793 | 0 | } |
1794 | | |
1795 | 0 | if (strcmp(json_segment_type(jobj), "crypt")) |
1796 | 0 | return 0; |
1797 | | |
1798 | 0 | return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0); |
1799 | 0 | } |
1800 | | |
1801 | | static int reencrypt_assign_segments_simple(struct crypt_device *cd, |
1802 | | struct luks2_hdr *hdr, |
1803 | | struct luks2_reencrypt *rh, |
1804 | | unsigned hot, |
1805 | | unsigned commit) |
1806 | 0 | { |
1807 | 0 | int r, sg; |
1808 | |
|
1809 | 0 | if (hot && json_segments_count(rh->jobj_segs_hot) > 0) { |
1810 | 0 | log_dbg(cd, "Setting 'hot' segments."); |
1811 | |
|
1812 | 0 | r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0); |
1813 | 0 | if (!r) |
1814 | 0 | rh->jobj_segs_hot = NULL; |
1815 | 0 | } else if (!hot && json_segments_count(rh->jobj_segs_post) > 0) { |
1816 | 0 | log_dbg(cd, "Setting 'post' segments."); |
1817 | 0 | r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0); |
1818 | 0 | if (!r) |
1819 | 0 | rh->jobj_segs_post = NULL; |
1820 | 0 | } else { |
1821 | 0 | log_dbg(cd, "No segments to set."); |
1822 | 0 | return -EINVAL; |
1823 | 0 | } |
1824 | | |
1825 | 0 | if (r) { |
1826 | 0 | log_dbg(cd, "Failed to assign new enc segments."); |
1827 | 0 | return r; |
1828 | 0 | } |
1829 | | |
1830 | 0 | r = reencrypt_add_backup_segment(cd, hdr, rh, 0); |
1831 | 0 | if (r) { |
1832 | 0 | log_dbg(cd, "Failed to assign reencryption previous backup segment."); |
1833 | 0 | return r; |
1834 | 0 | } |
1835 | | |
1836 | 0 | r = reencrypt_add_backup_segment(cd, hdr, rh, 1); |
1837 | 0 | if (r) { |
1838 | 0 | log_dbg(cd, "Failed to assign reencryption final backup segment."); |
1839 | 0 | return r; |
1840 | 0 | } |
1841 | | |
1842 | 0 | r = reencrypt_add_moved_segment(cd, hdr, rh); |
1843 | 0 | if (r) { |
1844 | 0 | log_dbg(cd, "Failed to assign reencryption moved backup segment."); |
1845 | 0 | return r; |
1846 | 0 | } |
1847 | | |
1848 | 0 | for (sg = 0; sg < LUKS2_segments_count(hdr); sg++) { |
1849 | 0 | if (LUKS2_segment_is_type(hdr, sg, "crypt") && |
1850 | 0 | LUKS2_digest_segment_assign(cd, hdr, sg, rh->mode == CRYPT_REENCRYPT_ENCRYPT ? rh->digest_new : rh->digest_old, 1, 0)) { |
1851 | 0 | log_dbg(cd, "Failed to assign digest %u to segment %u.", rh->digest_new, sg); |
1852 | 0 | return -EINVAL; |
1853 | 0 | } |
1854 | 0 | } |
1855 | | |
1856 | 0 | return commit ? LUKS2_hdr_write(cd, hdr) : 0; |
1857 | 0 | } |
1858 | | |
1859 | | static int reencrypt_assign_segments(struct crypt_device *cd, |
1860 | | struct luks2_hdr *hdr, |
1861 | | struct luks2_reencrypt *rh, |
1862 | | unsigned hot, |
1863 | | unsigned commit) |
1864 | 0 | { |
1865 | 0 | bool forward; |
1866 | 0 | int rseg, scount, r = -EINVAL; |
1867 | | |
1868 | | /* FIXME: validate in reencrypt context load */ |
1869 | 0 | if (rh->digest_new < 0 && rh->mode != CRYPT_REENCRYPT_DECRYPT) |
1870 | 0 | return -EINVAL; |
1871 | | |
1872 | 0 | if (LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0)) |
1873 | 0 | return -EINVAL; |
1874 | | |
1875 | 0 | if (rh->mode == CRYPT_REENCRYPT_ENCRYPT || rh->mode == CRYPT_REENCRYPT_DECRYPT) |
1876 | 0 | return reencrypt_assign_segments_simple(cd, hdr, rh, hot, commit); |
1877 | | |
1878 | 0 | if (hot && rh->jobj_segs_hot) { |
1879 | 0 | log_dbg(cd, "Setting 'hot' segments."); |
1880 | |
|
1881 | 0 | r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0); |
1882 | 0 | if (!r) |
1883 | 0 | rh->jobj_segs_hot = NULL; |
1884 | 0 | } else if (!hot && rh->jobj_segs_post) { |
1885 | 0 | log_dbg(cd, "Setting 'post' segments."); |
1886 | 0 | r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0); |
1887 | 0 | if (!r) |
1888 | 0 | rh->jobj_segs_post = NULL; |
1889 | 0 | } |
1890 | |
|
1891 | 0 | if (r) |
1892 | 0 | return r; |
1893 | | |
1894 | 0 | scount = LUKS2_segments_count(hdr); |
1895 | | |
1896 | | /* segment in reencryption has to hold reference on both digests */ |
1897 | 0 | rseg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr)); |
1898 | 0 | if (rseg < 0 && hot) |
1899 | 0 | return -EINVAL; |
1900 | | |
1901 | 0 | if (rseg >= 0) { |
1902 | 0 | LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_new, 1, 0); |
1903 | 0 | LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_old, 1, 0); |
1904 | 0 | } |
1905 | |
|
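 | | /*
 | |  * Assign digests around the hotzone: in forward direction the segment
 | |  * before it already uses the new digest and the segment after it still
 | |  * uses the old one; backward reencryption mirrors the assignment.
 | |  */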
1906 | 0 | forward = (rh->direction == CRYPT_REENCRYPT_FORWARD); |
1907 | 0 | if (hot) { |
1908 | 0 | if (rseg > 0) |
1909 | 0 | LUKS2_digest_segment_assign(cd, hdr, 0, forward ? rh->digest_new : rh->digest_old, 1, 0); |
1910 | 0 | if (scount > rseg + 1) |
1911 | 0 | LUKS2_digest_segment_assign(cd, hdr, rseg + 1, forward ? rh->digest_old : rh->digest_new, 1, 0); |
1912 | 0 | } else { |
1913 | 0 | LUKS2_digest_segment_assign(cd, hdr, 0, forward || scount == 1 ? rh->digest_new : rh->digest_old, 1, 0); |
1914 | 0 | if (scount > 1) |
1915 | 0 | LUKS2_digest_segment_assign(cd, hdr, 1, forward ? rh->digest_old : rh->digest_new, 1, 0); |
1916 | 0 | } |
1917 | |
|
1918 | 0 | r = reencrypt_add_backup_segment(cd, hdr, rh, 0); |
1919 | 0 | if (r) { |
1920 | 0 | log_dbg(cd, "Failed to assign hot reencryption backup segment."); |
1921 | 0 | return r; |
1922 | 0 | } |
1923 | 0 | r = reencrypt_add_backup_segment(cd, hdr, rh, 1); |
1924 | 0 | if (r) { |
1925 | 0 | log_dbg(cd, "Failed to assign post reencryption backup segment."); |
1926 | 0 | return r; |
1927 | 0 | } |
1928 | | |
1929 | 0 | return commit ? LUKS2_hdr_write(cd, hdr) : 0; |
1930 | 0 | } |
1931 | | |
1932 | | static int reencrypt_set_encrypt_segments(struct crypt_device *cd, struct luks2_hdr *hdr, |
1933 | | uint64_t dev_size, uint64_t data_size, uint64_t data_shift, bool move_first_segment, |
1934 | | crypt_reencrypt_direction_info di) |
1935 | 0 | { |
1936 | 0 | int r; |
1937 | 0 | uint64_t first_segment_offset, first_segment_length, |
1938 | 0 | second_segment_offset, second_segment_length, |
1939 | 0 | data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT; |
1940 | 0 | json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments; |
1941 | |
|
1942 | 0 | if (dev_size < data_shift) |
1943 | 0 | return -EINVAL; |
1944 | | |
1945 | 0 | if (data_shift && (di == CRYPT_REENCRYPT_FORWARD)) |
1946 | 0 | return -ENOTSUP; |
1947 | | |
1948 | 0 | if (move_first_segment) { |
1949 | | /* |
1950 | | * future data_device layout: |
1951 | | * [future LUKS2 header (data shift size)][second data segment][gap (data shift size)][first data segment (data shift size)] |
1952 | | */ |
1953 | 0 | first_segment_offset = dev_size; |
1954 | 0 | if (data_size < data_shift) { |
1955 | 0 | first_segment_length = data_size; |
1956 | 0 | second_segment_length = second_segment_offset = 0; |
1957 | 0 | } else { |
1958 | 0 | first_segment_length = data_shift; |
1959 | 0 | second_segment_offset = data_shift; |
1960 | 0 | second_segment_length = data_size - data_shift; |
1961 | 0 | } |
1962 | 0 | } else if (data_shift) { |
1963 | 0 | first_segment_offset = data_offset; |
1964 | 0 | first_segment_length = dev_size; |
1965 | 0 | } else { |
1966 | | /* future data_device layout with detached header: [first data segment] */ |
1967 | 0 | first_segment_offset = data_offset; |
1968 | 0 | first_segment_length = 0; /* dynamic */ |
1969 | 0 | } |
1970 | |
|
1971 | 0 | jobj_segments = json_object_new_object(); |
1972 | 0 | if (!jobj_segments) |
1973 | 0 | return -ENOMEM; |
1974 | | |
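 | | /* FIXME: jobj_segments (and a created 1st segment) leak on the error returns below. */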
1975 | 0 | r = -EINVAL; |
1976 | 0 | if (move_first_segment) { |
1977 | 0 | jobj_segment_first = json_segment_create_linear(first_segment_offset, &first_segment_length, 0); |
1978 | 0 | if (second_segment_length && |
1979 | 0 | !(jobj_segment_second = json_segment_create_linear(second_segment_offset, &second_segment_length, 0))) { |
1980 | 0 | log_dbg(cd, "Failed generate 2nd segment."); |
1981 | 0 | return r; |
1982 | 0 | } |
1983 | 0 | } else |
1984 | 0 | jobj_segment_first = json_segment_create_linear(first_segment_offset, first_segment_length ? &first_segment_length : NULL, 0); |
1985 | | |
1986 | 0 | if (!jobj_segment_first) { |
1987 | 0 | log_dbg(cd, "Failed generate 1st segment."); |
1988 | 0 | return r; |
1989 | 0 | } |
1990 | | |
1991 | 0 | json_object_object_add(jobj_segments, "0", jobj_segment_first); |
1992 | 0 | if (jobj_segment_second) |
1993 | 0 | json_object_object_add(jobj_segments, "1", jobj_segment_second); |
1994 | |
|
1995 | 0 | r = LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0); |
1996 | |
|
1997 | 0 | return r ?: LUKS2_segments_set(cd, hdr, jobj_segments, 0); |
1998 | 0 | } |
1999 | | |
2000 | | static int reencrypt_set_decrypt_shift_segments(struct crypt_device *cd, |
2001 | | struct luks2_hdr *hdr, |
2002 | | uint64_t dev_size, |
2003 | | uint64_t moved_segment_length, |
2004 | | crypt_reencrypt_direction_info di) |
2005 | 0 | { |
2006 | 0 | int digest, r; |
2007 | 0 | uint64_t data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT; |
2008 | 0 | json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments; |
2009 | |
|
2010 | 0 | if (di == CRYPT_REENCRYPT_BACKWARD) |
2011 | 0 | return -ENOTSUP; |
2012 | | |
2013 | 0 | digest = LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT); |
2014 | 0 | if (digest < 0) |
2015 | 0 | return -EINVAL; |
2016 | | |
2017 | | /* |
2018 | | * future data_device layout: |
2019 | | * [encrypted first segment (max data shift size)][gap (data shift size)][second encrypted data segment] |
2020 | | */ |
2021 | 0 | jobj_segments = json_object_new_object(); |
2022 | 0 | if (!jobj_segments) |
2023 | 0 | return -ENOMEM; |
2024 | | |
2025 | 0 | r = -EINVAL; |
2026 | 0 | jobj_segment_first = json_segment_create_crypt(0, crypt_get_iv_offset(cd), |
2027 | 0 | &moved_segment_length, crypt_get_cipher_spec(cd), |
2028 | 0 | NULL, 0, crypt_get_sector_size(cd), 0); |
2029 | |
|
2030 | 0 | if (!jobj_segment_first) { |
2031 | 0 | log_dbg(cd, "Failed generate 1st segment."); |
2032 | 0 | goto err; |
2033 | 0 | } |
2034 | | |
2035 | 0 | r = json_object_object_add_by_uint_by_ref(jobj_segments, 0, &jobj_segment_first); |
2036 | 0 | if (r) |
2037 | 0 | goto err; |
2038 | | |
2039 | 0 | if (dev_size > moved_segment_length) { |
2040 | 0 | jobj_segment_second = json_segment_create_crypt(data_offset + moved_segment_length, |
2041 | 0 | crypt_get_iv_offset(cd) + (moved_segment_length >> SECTOR_SHIFT), |
2042 | 0 | NULL, |
2043 | 0 | crypt_get_cipher_spec(cd), |
2044 | 0 | NULL, 0, /* integrity */ |
2045 | 0 | crypt_get_sector_size(cd), 0); |
2046 | 0 | if (!jobj_segment_second) { |
2047 | 0 | r = -EINVAL; |
2048 | 0 | log_dbg(cd, "Failed generate 2nd segment."); |
2049 | 0 | goto err; |
2050 | 0 | } |
2051 | | |
2052 | 0 | r = json_object_object_add_by_uint_by_ref(jobj_segments, 1, &jobj_segment_second); |
2053 | 0 | if (r) |
2054 | 0 | goto err; |
2055 | 0 | } |
2056 | | |
2057 | 0 | if (!(r = LUKS2_segments_set(cd, hdr, jobj_segments, 0))) |
2058 | 0 | return LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, digest, 1, 0); |
2059 | 0 | err: |
2060 | 0 | json_object_put(jobj_segment_first); |
2061 | 0 | json_object_put(jobj_segment_second); |
2062 | 0 | json_object_put(jobj_segments); |
2063 | 0 | return r; |
2064 | 0 | } |
2065 | | |
2066 | | static int reencrypt_make_targets(struct crypt_device *cd, |
2067 | | struct luks2_hdr *hdr, |
2068 | | struct device *hz_device, |
2069 | | struct volume_key *vks, |
2070 | | struct dm_target *result, |
2071 | | uint64_t size) |
2072 | 0 | { |
2073 | 0 | bool reenc_seg; |
2074 | 0 | struct volume_key *vk; |
2075 | 0 | uint64_t segment_size, segment_offset, segment_start = 0; |
2076 | 0 | int r; |
2077 | 0 | int s = 0; |
2078 | 0 | json_object *jobj, *jobj_segments = LUKS2_get_segments_jobj(hdr); |
2079 | |
|
2080 | 0 | while (result) { |
2081 | 0 | jobj = json_segments_get_segment(jobj_segments, s); |
2082 | 0 | if (!jobj) { |
2083 | 0 | log_dbg(cd, "Internal error. Segment %u is null.", s); |
2084 | 0 | return -EINVAL; |
2085 | 0 | } |
2086 | | |
2087 | 0 | reenc_seg = (s == json_segments_segment_in_reencrypt(jobj_segments)); |
2088 | |
|
2089 | 0 | segment_offset = json_segment_get_offset(jobj, 1); |
2090 | 0 | segment_size = json_segment_get_size(jobj, 1); |
2091 | | /* 'dynamic' length allowed in last segment only */ |
2092 | 0 | if (!segment_size && !result->next) |
2093 | 0 | segment_size = (size >> SECTOR_SHIFT) - segment_start; |
2094 | 0 | if (!segment_size) { |
2095 | 0 | log_dbg(cd, "Internal error. Wrong segment size %u", s); |
2096 | 0 | return -EINVAL; |
2097 | 0 | } |
2098 | | |
2099 | 0 | if (reenc_seg) |
2100 | 0 | segment_offset -= crypt_get_data_offset(cd); |
2101 | |
|
2102 | 0 | if (!strcmp(json_segment_type(jobj), "crypt")) { |
2103 | 0 | vk = crypt_volume_key_by_id(vks, reenc_seg ? LUKS2_reencrypt_digest_new(hdr) : LUKS2_digest_by_segment(hdr, s)); |
2104 | 0 | if (!vk) { |
2105 | 0 | log_err(cd, _("Missing key for dm-crypt segment %u"), s); |
2106 | 0 | return -EINVAL; |
2107 | 0 | } |
2108 | | |
2109 | 0 | r = dm_crypt_target_set(result, segment_start, segment_size, |
2110 | 0 | reenc_seg ? hz_device : crypt_data_device(cd), |
2111 | 0 | vk, |
2112 | 0 | json_segment_get_cipher(jobj), |
2113 | 0 | json_segment_get_iv_offset(jobj), |
2114 | 0 | segment_offset, |
2115 | 0 | "none", 0, 0, |
2116 | 0 | json_segment_get_sector_size(jobj)); |
2117 | 0 | if (r) { |
2118 | 0 | log_err(cd, _("Failed to set dm-crypt segment.")); |
2119 | 0 | return r; |
2120 | 0 | } |
2121 | 0 | } else if (!strcmp(json_segment_type(jobj), "linear")) { |
2122 | 0 | r = dm_linear_target_set(result, segment_start, segment_size, reenc_seg ? hz_device : crypt_data_device(cd), segment_offset); |
2123 | 0 | if (r) { |
2124 | 0 | log_err(cd, _("Failed to set dm-linear segment.")); |
2125 | 0 | return r; |
2126 | 0 | } |
2127 | 0 | } else |
2128 | 0 | return -EINVAL; 
2129 | | |
2130 | 0 | segment_start += segment_size; |
2131 | 0 | s++; |
2132 | 0 | result = result->next; |
2133 | 0 | } |
2134 | | |
2135 | 0 | return s; |
2136 | 0 | } |
2137 | | |
2138 | | /* GLOBAL FIXME: audit function names and parameters names */ |
2139 | | |
2140 | | /* FIXME: |
2141 | | * 1) audit log routines |
2142 | | * 2) can't we derive hotzone device name from crypt context? (unlocked name, device uuid, etc?) |
2143 | | */ |
2144 | | static int reencrypt_load_overlay_device(struct crypt_device *cd, struct luks2_hdr *hdr, |
2145 | | const char *overlay, const char *hotzone, struct volume_key *vks, uint64_t size, |
2146 | | uint32_t flags) |
2147 | 0 | { |
2148 | 0 | char hz_path[PATH_MAX]; |
2149 | 0 | int r; |
2150 | |
|
2151 | 0 | struct device *hz_dev = NULL; |
2152 | 0 | struct crypt_dm_active_device dmd = { |
2153 | 0 | .flags = flags, |
2154 | 0 | }; |
2155 | |
|
2156 | 0 | log_dbg(cd, "Loading new table for overlay device %s.", overlay); |
2157 | |
|
2158 | 0 | r = snprintf(hz_path, PATH_MAX, "%s/%s", dm_get_dir(), hotzone); |
2159 | 0 | if (r < 0 || r >= PATH_MAX) { |
2160 | 0 | r = -EINVAL; |
2161 | 0 | goto out; |
2162 | 0 | } |
2163 | | |
2164 | 0 | r = device_alloc(cd, &hz_dev, hz_path); |
2165 | 0 | if (r) |
2166 | 0 | goto out; |
2167 | | |
2168 | 0 | r = dm_targets_allocate(&dmd.segment, LUKS2_segments_count(hdr)); |
2169 | 0 | if (r) |
2170 | 0 | goto out; |
2171 | | |
2172 | 0 | r = reencrypt_make_targets(cd, hdr, hz_dev, vks, &dmd.segment, size); |
2173 | 0 | if (r < 0) |
2174 | 0 | goto out; |
2175 | | |
2176 | 0 | r = dm_reload_device(cd, overlay, &dmd, 0, 0); |
2177 | | |
2178 | | /* what else on error here? */
2179 | 0 | out: |
2180 | 0 | dm_targets_free(cd, &dmd); |
2181 | 0 | device_free(cd, hz_dev); |
2182 | |
|
2183 | 0 | return r; |
2184 | 0 | } |
2185 | | |
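 | | /*
 | |  * Clone the live table of 'source' into 'target': when the target already
 | |  * exists its size must match and the table is reloaded and resumed,
 | |  * otherwise the target is created as a CRYPT_SUBDEV mapping.
 | |  */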
2186 | | static int reencrypt_replace_device(struct crypt_device *cd, const char *target, const char *source, uint32_t flags) |
2187 | 0 | { |
2188 | 0 | int r, exists = 1; |
2189 | 0 | struct crypt_dm_active_device dmd_source, dmd_target = {}; |
2190 | 0 | uint64_t dmflags = DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH; |
2191 | |
|
2192 | 0 | log_dbg(cd, "Replacing table in device %s with table from device %s.", target, source); |
2193 | | |
2194 | | /* check only whether target device exists */ |
2195 | 0 | r = dm_status_device(cd, target); |
2196 | 0 | if (r < 0) { |
2197 | 0 | if (r == -ENODEV) |
2198 | 0 | exists = 0; |
2199 | 0 | else |
2200 | 0 | return r; |
2201 | 0 | } |
2202 | | |
2203 | 0 | r = dm_query_device(cd, source, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER | |
2204 | 0 | DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY, &dmd_source); |
2205 | |
|
2206 | 0 | if (r < 0) |
2207 | 0 | return r; |
2208 | | |
2209 | 0 | if (exists && ((r = dm_query_device(cd, target, 0, &dmd_target)) < 0)) |
2210 | 0 | goto out; |
2211 | | |
2212 | 0 | dmd_source.flags |= flags; |
2213 | 0 | dmd_source.uuid = crypt_get_uuid(cd); |
2214 | |
|
2215 | 0 | if (exists) { |
2216 | 0 | if (dmd_target.size != dmd_source.size) { |
2217 | 0 | log_err(cd, _("Source and target device sizes don't match. Source %" PRIu64 ", target: %" PRIu64 "."), |
2218 | 0 | dmd_source.size, dmd_target.size); |
2219 | 0 | r = -EINVAL; |
2220 | 0 | goto out; |
2221 | 0 | } |
2222 | 0 | r = dm_reload_device(cd, target, &dmd_source, 0, 0); |
2223 | 0 | if (!r) { |
2224 | 0 | log_dbg(cd, "Resuming device %s", target); |
2225 | 0 | r = dm_resume_device(cd, target, dmflags | act2dmflags(dmd_source.flags)); |
2226 | 0 | } |
2227 | 0 | } else |
2228 | 0 | r = dm_create_device(cd, target, CRYPT_SUBDEV, &dmd_source); |
2229 | 0 | out: |
2230 | 0 | dm_targets_free(cd, &dmd_source); |
2231 | 0 | dm_targets_free(cd, &dmd_target); |
2232 | |
|
2233 | 0 | return r; |
2234 | 0 | } |
2235 | | |
2236 | | static int reencrypt_swap_backing_device(struct crypt_device *cd, const char *name, |
2237 | | const char *new_backend_name) |
2238 | 0 | { |
2239 | 0 | int r; |
2240 | 0 | struct device *overlay_dev = NULL; |
2241 | 0 | char overlay_path[PATH_MAX] = { 0 }; |
2242 | 0 | struct crypt_dm_active_device dmd = {}; |
2243 | |
|
2244 | 0 | log_dbg(cd, "Redirecting %s mapping to new backing device: %s.", name, new_backend_name); |
2245 | |
|
2246 | 0 | r = snprintf(overlay_path, PATH_MAX, "%s/%s", dm_get_dir(), new_backend_name); |
2247 | 0 | if (r < 0 || r >= PATH_MAX) { |
2248 | 0 | r = -EINVAL; |
2249 | 0 | goto out; |
2250 | 0 | } |
2251 | | |
2252 | 0 | r = device_alloc(cd, &overlay_dev, overlay_path); |
2253 | 0 | if (r) |
2254 | 0 | goto out; |
2255 | | |
2256 | 0 | r = device_block_adjust(cd, overlay_dev, DEV_OK, |
2257 | 0 | 0, &dmd.size, &dmd.flags); |
2258 | 0 | if (r) |
2259 | 0 | goto out; |
2260 | | |
2261 | 0 | r = dm_linear_target_set(&dmd.segment, 0, dmd.size, overlay_dev, 0); |
2262 | 0 | if (r) |
2263 | 0 | goto out; |
2264 | | |
2265 | 0 | r = dm_reload_device(cd, name, &dmd, 0, 0); |
2266 | 0 | if (!r) { |
2267 | 0 | log_dbg(cd, "Resuming device %s", name); |
2268 | 0 | r = dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH); |
2269 | 0 | } |
2270 | |
|
2271 | 0 | out: |
2272 | 0 | dm_targets_free(cd, &dmd); |
2273 | 0 | device_free(cd, overlay_dev); |
2274 | |
|
2275 | 0 | return r; |
2276 | 0 | } |
2277 | | |
2278 | | static int reencrypt_activate_hotzone_device(struct crypt_device *cd, const char *name, uint64_t device_size, uint32_t flags) |
2279 | 0 | { |
2280 | 0 | int r; |
2281 | 0 | uint64_t new_offset = reencrypt_get_data_offset_new(crypt_get_hdr(cd, CRYPT_LUKS2)) >> SECTOR_SHIFT; |
2282 | |
|
2283 | 0 | struct crypt_dm_active_device dmd = { |
2284 | 0 | .flags = flags, |
2285 | 0 | .uuid = crypt_get_uuid(cd), |
2286 | 0 | .size = device_size >> SECTOR_SHIFT |
2287 | 0 | }; |
2288 | |
|
2289 | 0 | log_dbg(cd, "Activating hotzone device %s.", name); |
2290 | |
|
2291 | 0 | r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK, |
2292 | 0 | new_offset, &dmd.size, &dmd.flags); |
2293 | 0 | if (r) |
2294 | 0 | goto out; |
2295 | | |
2296 | 0 | r = dm_linear_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd), new_offset); |
2297 | 0 | if (r) |
2298 | 0 | goto out; |
2299 | | |
2300 | 0 | r = dm_create_device(cd, name, CRYPT_SUBDEV, &dmd); |
2301 | 0 | out: |
2302 | 0 | dm_targets_free(cd, &dmd); |
2303 | |
|
2304 | 0 | return r; |
2305 | 0 | } |
2306 | | |
2307 | | static int reencrypt_init_device_stack(struct crypt_device *cd, |
2308 | | const struct luks2_reencrypt *rh) |
2309 | 0 | { |
2310 | 0 | int r; |
2311 | | |
2312 | | /* Activate hotzone device 1:1 linear mapping to data_device */ |
2313 | 0 | r = reencrypt_activate_hotzone_device(cd, rh->hotzone_name, rh->device_size, CRYPT_ACTIVATE_PRIVATE); |
2314 | 0 | if (r) { |
2315 | 0 | log_err(cd, _("Failed to activate hotzone device %s."), rh->hotzone_name); |
2316 | 0 | return r; |
2317 | 0 | } |
2318 | | |
2319 | | /* |
2320 | | * Activate the overlay device with exactly the same table as the original 'name' mapping.
2321 | | * Note that within this step the 'name' device may already include a table
2322 | | * constructed from more than a single dm-crypt segment. Therefore transfer
2323 | | * the mapping as is.
2324 | | *
2325 | | * If we're about to resume reencryption, the original mapping has to be already
2326 | | * validated after an abrupt shutdown and rchunk_offset has to point to the next chunk to reencrypt!
2327 | | * |
2328 | | * TODO: in crypt_activate_by* |
2329 | | */ |
2330 | 0 | r = reencrypt_replace_device(cd, rh->overlay_name, rh->device_name, CRYPT_ACTIVATE_PRIVATE); |
2331 | 0 | if (r) { |
2332 | 0 | log_err(cd, _("Failed to activate overlay device %s with actual origin table."), rh->overlay_name); |
2333 | 0 | goto err; |
2334 | 0 | } |
2335 | | |
2336 | | /* swap origin mapping to overlay device */ |
2337 | 0 | r = reencrypt_swap_backing_device(cd, rh->device_name, rh->overlay_name); |
2338 | 0 | if (r) { |
2339 | 0 | log_err(cd, _("Failed to load new mapping for device %s."), rh->device_name); |
2340 | 0 | goto err; |
2341 | 0 | } |
2342 | | |
2343 | | /* |
2344 | | * Now the 'name' (unlocked luks) device is mapped via dm-linear to an overlay dev. |
2345 | | * The overlay device holds the original live table of the 'name' device from before the swap.
2346 | | */ |
2347 | | |
2348 | 0 | return 0; |
2349 | 0 | err: |
2350 | | /* TODO: force error helper devices on error path */ |
2351 | 0 | dm_remove_device(cd, rh->overlay_name, 0); |
2352 | 0 | dm_remove_device(cd, rh->hotzone_name, 0); |
2353 | |
|
2354 | 0 | return r; |
2355 | 0 | } |
2356 | | |
2357 | | /* TODO: |
2358 | | * 1) audit error path. Any error in this routine is fatal and should be unlikely;
2359 | | * usually it would hint at a collision with another userspace process touching
2360 | | * dm devices directly.
2361 | | */ |
2362 | | static int reenc_refresh_helper_devices(struct crypt_device *cd, const char *overlay, const char *hotzone) |
2363 | 0 | { |
2364 | 0 | int r; |
2365 | | |
2366 | | /* |
2367 | | * we have to explicitly suspend the overlay device before suspending |
2368 | | * the hotzone one. Resuming the overlay device (aka switching tables) only
2369 | | * after suspending the hotzone may lead to deadlock. |
2370 | | * |
2371 | | * In other words: always suspend the stack from top to bottom! |
2372 | | */ |
2373 | 0 | r = dm_suspend_device(cd, overlay, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH); |
2374 | 0 | if (r) { |
2375 | 0 | log_err(cd, _("Failed to suspend device %s."), overlay); |
2376 | 0 | return r; |
2377 | 0 | } |
2378 | | |
2379 | | /* suspend HZ device */ |
2380 | 0 | r = dm_suspend_device(cd, hotzone, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH); |
2381 | 0 | if (r) { |
2382 | 0 | log_err(cd, _("Failed to suspend device %s."), hotzone); |
2383 | 0 | return r; |
2384 | 0 | } |
2385 | | |
2386 | | /* resume overlay device: inactive table (with hotzone) -> live */
2387 | 0 | r = dm_resume_device(cd, overlay, DM_RESUME_PRIVATE); |
2388 | 0 | if (r) |
2389 | 0 | log_err(cd, _("Failed to resume device %s."), overlay); |
2390 | |
|
2391 | 0 | return r; |
2392 | 0 | } |
2393 | | |
2394 | | static int reencrypt_refresh_overlay_devices(struct crypt_device *cd, |
2395 | | struct luks2_hdr *hdr, |
2396 | | const char *overlay, |
2397 | | const char *hotzone, |
2398 | | struct volume_key *vks, |
2399 | | uint64_t device_size, |
2400 | | uint32_t flags) |
2401 | 0 | { |
2402 | 0 | int r = reencrypt_load_overlay_device(cd, hdr, overlay, hotzone, vks, device_size, flags); |
2403 | 0 | if (r) { |
2404 | 0 | log_err(cd, _("Failed to reload device %s."), overlay); |
2405 | 0 | return REENC_ERR; |
2406 | 0 | } |
2407 | | |
2408 | 0 | r = reenc_refresh_helper_devices(cd, overlay, hotzone); |
2409 | 0 | if (r) { |
2410 | 0 | log_err(cd, _("Failed to refresh reencryption devices stack.")); |
2411 | 0 | return REENC_ROLLBACK; |
2412 | 0 | } |
2413 | | |
2414 | 0 | return REENC_OK; |
2415 | 0 | } |
2416 | | |
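 | | /*
 | |  * Move the data covered by the first (moved) segment in one blockwise
 | |  * read/write: for encryption the payload is read from the device start,
 | |  * for decryption from data_shift, and written to the first segment's
 | |  * offset taken from the LUKS2 header.
 | |  */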
2417 | | static int reencrypt_move_data(struct crypt_device *cd, |
2418 | | int devfd, |
2419 | | uint64_t data_shift, |
2420 | | crypt_reencrypt_mode_info mode) |
2421 | 0 | { |
2422 | 0 | void *buffer; |
2423 | 0 | int r; |
2424 | 0 | ssize_t ret; |
2425 | 0 | uint64_t buffer_len, offset, |
2426 | 0 | read_offset = (mode == CRYPT_REENCRYPT_ENCRYPT ? 0 : data_shift); |
2427 | 0 | struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2); |
2428 | |
|
2429 | 0 | offset = json_segment_get_offset(LUKS2_get_segment_jobj(hdr, 0), 0); |
2430 | 0 | buffer_len = json_segment_get_size(LUKS2_get_segment_jobj(hdr, 0), 0); |
2431 | 0 | if (!buffer_len || buffer_len > data_shift) |
2432 | 0 | return -EINVAL; |
2433 | | |
2434 | 0 | if (posix_memalign(&buffer, device_alignment(crypt_data_device(cd)), buffer_len)) |
2435 | 0 | return -ENOMEM; |
2436 | | |
2437 | 0 | ret = read_lseek_blockwise(devfd, |
2438 | 0 | device_block_size(cd, crypt_data_device(cd)), |
2439 | 0 | device_alignment(crypt_data_device(cd)), |
2440 | 0 | buffer, buffer_len, read_offset); |
2441 | 0 | if (ret < 0 || (uint64_t)ret != buffer_len) { |
2442 | 0 | log_dbg(cd, "Failed to read data at offset %" PRIu64 " (size: %zu)", |
2443 | 0 | read_offset, buffer_len); |
2444 | 0 | r = -EIO; |
2445 | 0 | goto out; |
2446 | 0 | } |
2447 | | |
2448 | 0 | log_dbg(cd, "Going to write %" PRIu64 " bytes read at offset %" PRIu64 " to new offset %" PRIu64, |
2449 | 0 | buffer_len, read_offset, offset); |
2450 | 0 | ret = write_lseek_blockwise(devfd, |
2451 | 0 | device_block_size(cd, crypt_data_device(cd)), |
2452 | 0 | device_alignment(crypt_data_device(cd)), |
2453 | 0 | buffer, buffer_len, offset); |
2454 | 0 | if (ret < 0 || (uint64_t)ret != buffer_len) { |
2455 | 0 | log_dbg(cd, "Failed to write data at offset %" PRIu64 " (size: %zu)", |
2456 | 0 | offset, buffer_len); |
2457 | 0 | r = -EIO; |
2458 | 0 | goto out; |
2459 | 0 | } |
2460 | | |
2461 | 0 | r = 0; |
2462 | 0 | out: |
2463 | 0 | crypt_safe_memzero(buffer, buffer_len); |
2464 | 0 | free(buffer); |
2465 | 0 | return r; |
2466 | 0 | } |
2467 | | |
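 | | /*
 | |  * Create the backup segments describing the device layout before
 | |  * ("backup-previous") and after ("backup-final") reencryption, plus an
 | |  * optional "backup-moved-segment" copy when the first segment is moved.
 | |  * A later reencryption context load rebuilds the layout from these.
 | |  */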
2468 | | static int reencrypt_make_backup_segments(struct crypt_device *cd, |
2469 | | struct luks2_hdr *hdr, |
2470 | | int digest_new, |
2471 | | const char *cipher, |
2472 | | uint64_t data_offset, |
2473 | | const struct crypt_params_reencrypt *params) |
2474 | 0 | { |
2475 | 0 | const char *type; |
2476 | 0 | int r, segment, moved_segment = -1, digest_old = -1; |
2477 | 0 | json_object *jobj_tmp, *jobj_segment_new = NULL, *jobj_segment_old = NULL, *jobj_segment_bcp = NULL; |
2478 | 0 | uint32_t sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE; |
2479 | 0 | uint64_t segment_offset, tmp, data_shift = params->data_shift << SECTOR_SHIFT, |
2480 | 0 | device_size = params->device_size << SECTOR_SHIFT; |
2481 | |
|
2482 | 0 | if (params->mode != CRYPT_REENCRYPT_DECRYPT && digest_new < 0) |
2483 | 0 | return -EINVAL; |
2484 | | |
2485 | 0 | if (params->mode != CRYPT_REENCRYPT_ENCRYPT) { |
2486 | 0 | digest_old = LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT); |
2487 | 0 | if (digest_old < 0) |
2488 | 0 | return -EINVAL; |
2489 | 0 | } |
2490 | | |
2491 | 0 | segment = LUKS2_segment_first_unused_id(hdr); |
2492 | 0 | if (segment < 0) |
2493 | 0 | return -EINVAL; |
2494 | | |
2495 | 0 | if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT) { |
2496 | 0 | if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_segment_bcp)) { |
2497 | 0 | r = -EINVAL; |
2498 | 0 | goto err; |
2499 | 0 | } |
2500 | 0 | r = LUKS2_segment_set_flag(jobj_segment_bcp, "backup-moved-segment"); |
2501 | 0 | if (r) |
2502 | 0 | goto err; |
2503 | 0 | moved_segment = segment++; |
2504 | 0 | r = json_object_object_add_by_uint_by_ref(LUKS2_get_segments_jobj(hdr), moved_segment, &jobj_segment_bcp); |
2505 | 0 | if (r) |
2506 | 0 | goto err; |
2507 | | |
2508 | 0 | if (!(type = json_segment_type(LUKS2_get_segment_jobj(hdr, moved_segment)))) { |
2509 | 0 | r = -EINVAL; |
2510 | 0 | goto err; |
2511 | 0 | } |
2512 | | |
2513 | 0 | if (!strcmp(type, "crypt") && ((r = LUKS2_digest_segment_assign(cd, hdr, moved_segment, digest_old, 1, 0)))) |
2514 | 0 | goto err; |
2515 | 0 | } |
2516 | | |
2517 | | /* FIXME: Add detection for case (digest old == digest new && old segment == new segment) */ |
2518 | 0 | if (digest_old >= 0) { |
2519 | 0 | if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT) { |
2520 | 0 | jobj_tmp = LUKS2_get_segment_jobj(hdr, 0); |
2521 | 0 | if (!jobj_tmp) { |
2522 | 0 | r = -EINVAL; |
2523 | 0 | goto err; |
2524 | 0 | } |
2525 | | |
2526 | 0 | jobj_segment_old = json_segment_create_crypt(data_offset, |
2527 | 0 | json_segment_get_iv_offset(jobj_tmp), |
2528 | 0 | device_size ? &device_size : NULL, |
2529 | 0 | json_segment_get_cipher(jobj_tmp), |
2530 | 0 | NULL, 0, /* integrity */ |
2531 | 0 | json_segment_get_sector_size(jobj_tmp), |
2532 | 0 | 0); |
2533 | 0 | } else { |
2534 | 0 | if (json_object_copy(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT), &jobj_segment_old)) { |
2535 | 0 | r = -EINVAL; |
2536 | 0 | goto err; |
2537 | 0 | } |
2538 | 0 | } |
2539 | 0 | } else if (params->mode == CRYPT_REENCRYPT_ENCRYPT) { |
2540 | 0 | r = LUKS2_get_data_size(hdr, &tmp, NULL); |
2541 | 0 | if (r) |
2542 | 0 | goto err; |
2543 | | |
2544 | 0 | if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT) |
2545 | 0 | jobj_segment_old = json_segment_create_linear(0, tmp ? &tmp : NULL, 0); |
2546 | 0 | else |
2547 | 0 | jobj_segment_old = json_segment_create_linear(data_offset, tmp ? &tmp : NULL, 0); |
2548 | 0 | } |
2549 | | |
2550 | 0 | if (!jobj_segment_old) { |
2551 | 0 | r = -EINVAL; |
2552 | 0 | goto err; |
2553 | 0 | } |
2554 | | |
2555 | 0 | r = LUKS2_segment_set_flag(jobj_segment_old, "backup-previous"); |
2556 | 0 | if (r) |
2557 | 0 | goto err; |
2558 | | |
2559 | 0 | r = json_object_object_add_by_uint_by_ref(LUKS2_get_segments_jobj(hdr), segment, &jobj_segment_old); |
2560 | 0 | if (r) |
2561 | 0 | goto err; |
2562 | | |
2563 | 0 | if (digest_old >= 0 && (r = LUKS2_digest_segment_assign(cd, hdr, segment, digest_old, 1, 0))) |
2564 | 0 | goto err; |
2565 | | |
2566 | 0 | segment++; |
2567 | |
|
2568 | 0 | if (digest_new >= 0) { |
2569 | 0 | segment_offset = data_offset; |
2570 | 0 | if (params->mode != CRYPT_REENCRYPT_ENCRYPT && |
2571 | 0 | modify_offset(&segment_offset, data_shift, params->direction)) { |
2572 | 0 | r = -EINVAL; |
2573 | 0 | goto err; |
2574 | 0 | } |
2575 | 0 | jobj_segment_new = json_segment_create_crypt(segment_offset, |
2576 | 0 | crypt_get_iv_offset(cd), |
2577 | 0 | NULL, cipher, NULL, 0, sector_size, 0); |
2578 | 0 | } else if (params->mode == CRYPT_REENCRYPT_DECRYPT) { |
2579 | 0 | segment_offset = data_offset; |
2580 | 0 | if (modify_offset(&segment_offset, data_shift, params->direction)) { |
2581 | 0 | r = -EINVAL; |
2582 | 0 | goto err; |
2583 | 0 | } |
2584 | 0 | jobj_segment_new = json_segment_create_linear(segment_offset, NULL, 0); |
2585 | 0 | } |
2586 | | |
2587 | 0 | if (!jobj_segment_new) { |
2588 | 0 | r = -EINVAL; |
2589 | 0 | goto err; |
2590 | 0 | } |
2591 | | |
2592 | 0 | r = LUKS2_segment_set_flag(jobj_segment_new, "backup-final"); |
2593 | 0 | if (r) |
2594 | 0 | goto err; |
2595 | | |
2596 | 0 | r = json_object_object_add_by_uint_by_ref(LUKS2_get_segments_jobj(hdr), segment, &jobj_segment_new); |
2597 | 0 | if (r) |
2598 | 0 | goto err; |
2599 | | |
2600 | 0 | if (digest_new >= 0 && (r = LUKS2_digest_segment_assign(cd, hdr, segment, digest_new, 1, 0))) |
2601 | 0 | goto err; |
2602 | | |
2603 | | /* FIXME: also check occupied space by keyslot in shrunk area */ |
2604 | 0 | if (params->direction == CRYPT_REENCRYPT_FORWARD && data_shift && |
2605 | 0 | crypt_metadata_device(cd) == crypt_data_device(cd) && |
2606 | 0 | LUKS2_set_keyslots_size(hdr, json_segment_get_offset(reencrypt_segment_new(hdr), 0))) { |
2607 | 0 | log_err(cd, _("Failed to set new keyslots area size.")); |
2608 | 0 | r = -EINVAL; |
2609 | 0 | goto err; |
2610 | 0 | } |
2611 | | |
2612 | 0 | return 0; |
2613 | 0 | err: |
2614 | 0 | json_object_put(jobj_segment_new); |
2615 | 0 | json_object_put(jobj_segment_old); |
2616 | 0 | json_object_put(jobj_segment_bcp); |
2617 | 0 | return r; |
2618 | 0 | } |
2619 | | |
2620 | | static int reencrypt_verify_single_key(struct crypt_device *cd, int digest, struct volume_key *vks) |
2621 | 0 | { |
2622 | 0 | struct volume_key *vk; |
2623 | |
|
2624 | 0 | vk = crypt_volume_key_by_id(vks, digest); |
2625 | 0 | if (!vk) |
2626 | 0 | return -ENOENT; |
2627 | | |
2628 | 0 | if (LUKS2_digest_verify_by_digest(cd, digest, vk) != digest) |
2629 | 0 | return -EINVAL; |
2630 | | |
2631 | 0 | return 0; |
2632 | 0 | } |
2633 | | |
2634 | | static int reencrypt_verify_keys(struct crypt_device *cd, |
2635 | | int digest_old, |
2636 | | int digest_new, |
2637 | | struct volume_key *vks) |
2638 | 0 | { |
2639 | 0 | int r; |
2640 | |
|
2641 | 0 | if (digest_new >= 0 && (r = reencrypt_verify_single_key(cd, digest_new, vks))) |
2642 | 0 | return r; |
2643 | | |
2644 | 0 | if (digest_old >= 0 && (r = reencrypt_verify_single_key(cd, digest_old, vks))) |
2645 | 0 | return r; |
2646 | | |
2647 | 0 | return 0; |
2648 | 0 | } |
2649 | | |
2650 | | static int reencrypt_upload_single_key(struct crypt_device *cd, |
2651 | | int digest, |
2652 | | struct volume_key *vks) |
2653 | 0 | { |
2654 | 0 | struct volume_key *vk; |
2655 | |
|
2656 | 0 | vk = crypt_volume_key_by_id(vks, digest); |
2657 | 0 | if (!vk) |
2658 | 0 | return -EINVAL; |
2659 | | |
2660 | 0 | return LUKS2_volume_key_load_in_keyring_by_digest(cd, vk, digest); |
2661 | 0 | } |
2662 | | |
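 | | /*
 | |  * Load the volume keys into the kernel keyring when keyring use is
 | |  * enabled; cipher_null segments need no key upload. If the second upload
 | |  * fails, the already uploaded key is dropped again.
 | |  */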
2663 | | static int reencrypt_upload_keys(struct crypt_device *cd, |
2664 | | struct luks2_hdr *hdr, |
2665 | | int digest_old, |
2666 | | int digest_new, |
2667 | | struct volume_key *vks) |
2668 | 0 | { |
2669 | 0 | int r; |
2670 | |
|
2671 | 0 | if (!crypt_use_keyring_for_vk(cd)) |
2672 | 0 | return 0; |
2673 | | |
2674 | 0 | if (digest_new >= 0 && !crypt_is_cipher_null(reencrypt_segment_cipher_new(hdr)) && |
2675 | 0 | (r = reencrypt_upload_single_key(cd, digest_new, vks))) |
2676 | 0 | return r; |
2677 | | |
2678 | 0 | if (digest_old >= 0 && !crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr)) && |
2679 | 0 | (r = reencrypt_upload_single_key(cd, digest_old, vks))) { |
2680 | 0 | crypt_drop_uploaded_keyring_key(cd, vks); |
2681 | 0 | return r; |
2682 | 0 | } |
2683 | | |
2684 | 0 | return 0; |
2685 | 0 | } |
2686 | | |
2687 | | static int reencrypt_verify_and_upload_keys(struct crypt_device *cd, |
2688 | | struct luks2_hdr *hdr, |
2689 | | int digest_old, |
2690 | | int digest_new, |
2691 | | struct volume_key *vks) |
2692 | 0 | { |
2693 | 0 | int r; |
2694 | |
|
2695 | 0 | r = reencrypt_verify_keys(cd, digest_old, digest_new, vks); |
2696 | 0 | if (r) |
2697 | 0 | return r; |
2698 | | |
2699 | 0 | r = reencrypt_upload_keys(cd, hdr, digest_old, digest_new, vks); |
2700 | 0 | if (r) |
2701 | 0 | return r; |
2702 | | |
2703 | 0 | return 0; |
2704 | 0 | } |
2705 | | |
2706 | | static int reencrypt_verify_checksum_params(struct crypt_device *cd, |
2707 | | const struct crypt_params_reencrypt *params) |
2708 | 0 | { |
2709 | 0 | size_t len; |
2710 | 0 | struct crypt_hash *ch; |
2711 | |
|
2712 | 0 | assert(params); |
2713 | |
|
2714 | 0 | if (!params->hash) |
2715 | 0 | return -EINVAL; |
2716 | | |
2717 | 0 | len = strlen(params->hash); |
2718 | 0 | if (!len || len > (LUKS2_CHECKSUM_ALG_L - 1)) |
2719 | 0 | return -EINVAL; |
2720 | | |
2721 | 0 | if (crypt_hash_size(params->hash) <= 0) |
2722 | 0 | return -EINVAL; |
2723 | | |
2724 | 0 | if (crypt_hash_init(&ch, params->hash)) { |
2725 | 0 | log_err(cd, _("Hash algorithm %s is not available."), params->hash); |
2726 | 0 | return -EINVAL; |
2727 | 0 | } |
2728 | | /* We just check for alg availability */ |
2729 | 0 | crypt_hash_destroy(ch); |
2730 | |
|
2731 | 0 | return 0; |
2732 | 0 | } |
2733 | | |
2734 | | static int reencrypt_verify_datashift_params(struct crypt_device *cd, |
2735 | | const struct crypt_params_reencrypt *params, |
2736 | | uint32_t sector_size) |
2737 | 0 | { |
2738 | 0 | assert(params); |
2739 | |
|
2740 | 0 | if (!params->data_shift) |
2741 | 0 | return -EINVAL; |
2742 | 0 | if (MISALIGNED(params->data_shift, sector_size >> SECTOR_SHIFT)) { |
2743 | 0 | log_err(cd, _("Data shift value is not aligned to encryption sector size (%" PRIu32 " bytes)."), |
2744 | 0 | sector_size); |
2745 | 0 | return -EINVAL; |
2746 | 0 | } |
2747 | | |
2748 | 0 | return 0; |
2749 | 0 | } |
2750 | | |
2751 | | static int reencrypt_verify_resilience_params(struct crypt_device *cd, |
2752 | | const struct crypt_params_reencrypt *params, |
2753 | | uint32_t sector_size, bool move_first_segment) |
2754 | 0 | { |
2755 | | /* no change requested */ |
2756 | 0 | if (!params || !params->resilience) |
2757 | 0 | return 0; |
2758 | | |
2759 | 0 | if (!strcmp(params->resilience, "journal")) |
2760 | 0 | return (params->data_shift || move_first_segment) ? -EINVAL : 0; |
2761 | 0 | else if (!strcmp(params->resilience, "none")) |
2762 | 0 | return (params->data_shift || move_first_segment) ? -EINVAL : 0; |
2763 | 0 | else if (!strcmp(params->resilience, "datashift")) |
2764 | 0 | return reencrypt_verify_datashift_params(cd, params, sector_size); |
2765 | 0 | else if (!strcmp(params->resilience, "checksum")) { |
2766 | 0 | if (params->data_shift || move_first_segment) |
2767 | 0 | return -EINVAL; |
2768 | 0 | return reencrypt_verify_checksum_params(cd, params); |
2769 | 0 | } else if (!strcmp(params->resilience, "datashift-checksum")) { |
2770 | 0 | if (!move_first_segment || |
2771 | 0 | reencrypt_verify_datashift_params(cd, params, sector_size)) |
2772 | 0 | return -EINVAL; |
2773 | 0 | return reencrypt_verify_checksum_params(cd, params); |
2774 | 0 | } else if (!strcmp(params->resilience, "datashift-journal")) { |
2775 | 0 | if (!move_first_segment) |
2776 | 0 | return -EINVAL; |
2777 | 0 | return reencrypt_verify_datashift_params(cd, params, sector_size); |
2778 | 0 | } |
2779 | | |
2780 | 0 | log_err(cd, _("Unsupported resilience mode %s"), params->resilience); |
2781 | 0 | return -EINVAL; |
2782 | 0 | } |
2783 | | |
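 | | /*
 | |  * Initialize LUKS2 decryption with data shift: set up in-memory segments
 | |  * and backup segments, allocate the reencrypt keyslot, unlock and verify
 | |  * the volume keys, physically move the first segment, and only then
 | |  * commit the requirement flag as the single metadata write of the init.
 | |  */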
2784 | | static int reencrypt_decrypt_with_datashift_init(struct crypt_device *cd, |
2785 | | const char *name, |
2786 | | struct luks2_hdr *hdr, |
2787 | | int reencrypt_keyslot, |
2788 | | uint32_t sector_size, |
2789 | | uint64_t data_size, |
2790 | | uint64_t data_offset, |
2791 | | struct crypt_keyslot_context *kc_old, |
2792 | | int keyslot_old, |
2793 | | const struct crypt_params_reencrypt *params, |
2794 | | struct volume_key **vks) |
2795 | 0 | { |
2796 | 0 | bool clear_table = false; |
2797 | 0 | int r, devfd = -1; |
2798 | 0 | uint64_t data_shift, max_moved_segment_length, moved_segment_length; |
2799 | 0 | struct reenc_protection check_rp = {}; |
2800 | 0 | struct crypt_dm_active_device dmd_target, dmd_source = { |
2801 | 0 | .uuid = crypt_get_uuid(cd), |
2802 | 0 | .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */ |
2803 | 0 | }; |
2804 | 0 | json_object *jobj_segments_old; |
2805 | |
|
2806 | 0 | assert(hdr); |
2807 | 0 | assert(params); |
2808 | 0 | assert(params->resilience); |
2809 | 0 | assert(params->data_shift); |
2810 | 0 | assert(vks); |
2811 | |
|
2812 | 0 | if (!data_offset) |
2813 | 0 | return -EINVAL; |
2814 | | |
2815 | 0 | if (params->max_hotzone_size > params->data_shift) { |
2816 | 0 | log_err(cd, _("Moved segment size can not be greater than data shift value.")); |
2817 | 0 | return -EINVAL; |
2818 | 0 | } |
2819 | | |
2820 | 0 | log_dbg(cd, "Initializing decryption with datashift."); |
2821 | |
|
2822 | 0 | data_shift = params->data_shift << SECTOR_SHIFT; |
2823 | | |
2824 | | /* |
2825 | | * In offline mode we must perform data move with exclusively opened data |
2826 | | * device in order to exclude LUKS2 decryption process and filesystem mount. |
2827 | | */ |
2828 | 0 | if (name) |
2829 | 0 | devfd = device_open(cd, crypt_data_device(cd), O_RDWR); |
2830 | 0 | else |
2831 | 0 | devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR); |
2832 | 0 | if (devfd < 0) |
2833 | 0 | return -EINVAL; |
2834 | | |
2835 | | /* in-memory only */ |
2836 | 0 | moved_segment_length = params->max_hotzone_size << SECTOR_SHIFT; |
2837 | 0 | if (!moved_segment_length) |
2838 | 0 | moved_segment_length = data_shift < LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH ? |
2839 | 0 | data_shift : LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH; |
2840 | |
|
2841 | 0 | if (moved_segment_length > data_size) |
2842 | 0 | moved_segment_length = data_size; |
2843 | |
|
2844 | 0 | r = reencrypt_set_decrypt_shift_segments(cd, hdr, data_size, |
2845 | 0 | moved_segment_length, |
2846 | 0 | params->direction); |
2847 | 0 | if (r) |
2848 | 0 | goto out; |
2849 | | |
2850 | 0 | r = reencrypt_make_backup_segments(cd, hdr, CRYPT_ANY_DIGEST, NULL, data_offset, params); |
2851 | 0 | if (r) { |
2852 | 0 | log_dbg(cd, "Failed to create reencryption backup device segments."); |
2853 | 0 | goto out; |
2854 | 0 | } |
2855 | | |
2856 | 0 | r = reencrypt_verify_resilience_params(cd, params, sector_size, true); |
2857 | 0 | if (r < 0) { |
2858 | 0 | log_err(cd, _("Invalid reencryption resilience parameters.")); |
2859 | 0 | goto out; |
2860 | 0 | } |
2861 | | |
2862 | 0 | r = LUKS2_keyslot_reencrypt_allocate(cd, hdr, reencrypt_keyslot, |
2863 | 0 | params, reencrypt_get_alignment(cd, hdr)); |
2864 | 0 | if (r < 0) |
2865 | 0 | goto out; |
2866 | | |
2867 | 0 | r = LUKS2_keyslot_reencrypt_load(cd, hdr, reencrypt_keyslot, &check_rp, false); |
2868 | 0 | if (r < 0) |
2869 | 0 | goto out; |
2870 | | |
2871 | 0 | r = LUKS2_reencrypt_max_hotzone_size(cd, hdr, &check_rp, |
2872 | 0 | reencrypt_keyslot, |
2873 | 0 | &max_moved_segment_length); |
2874 | 0 | if (r < 0) |
2875 | 0 | goto out; |
2876 | | |
2877 | 0 | LUKS2_reencrypt_protection_erase(&check_rp); |
2878 | |
|
2879 | 0 | if (moved_segment_length > max_moved_segment_length) { |
2880 | 0 | log_err(cd, _("Moved segment too large. Requested size %" PRIu64 ", available space for: %" PRIu64 "."), |
2881 | 0 | moved_segment_length, max_moved_segment_length); |
2882 | 0 | r = -EINVAL; |
2883 | 0 | goto out; |
2884 | 0 | } |
2885 | | |
2886 | 0 | r = LUKS2_keyslot_context_open_all_segments(cd, keyslot_old, CRYPT_ANY_SLOT, |
2887 | 0 | kc_old, NULL, vks); |
2888 | 0 | if (r < 0) |
2889 | 0 | goto out; |
2890 | | |
2891 | 0 | r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, LUKS2_DECRYPT_DATASHIFT_REQ_VERSION, *vks); |
2892 | 0 | if (r < 0) |
2893 | 0 | goto out; |
2894 | | |
2895 | 0 | if (name) { |
2896 | 0 | r = reencrypt_verify_and_upload_keys(cd, hdr, |
2897 | 0 | LUKS2_reencrypt_digest_old(hdr), |
2898 | 0 | LUKS2_reencrypt_digest_new(hdr), |
2899 | 0 | *vks); |
2900 | 0 | if (r) |
2901 | 0 | goto out; |
2902 | | |
2903 | 0 | r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE | |
2904 | 0 | DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY | |
2905 | 0 | DM_ACTIVE_CRYPT_CIPHER, &dmd_target); |
2906 | 0 | if (r < 0) |
2907 | 0 | goto out; |
2908 | | |
2909 | 0 | jobj_segments_old = reencrypt_segments_old(hdr); |
2910 | 0 | if (!jobj_segments_old) { |
2911 | 0 | dm_targets_free(cd, &dmd_target); |
2912 | 0 | free(CONST_CAST(void*)dmd_target.uuid); |
2913 | 0 | r = -EINVAL; |
2914 | 0 | goto out; |
2915 | 0 | } |
2916 | 0 | r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, jobj_segments_old, &dmd_source); |
2917 | 0 | if (!r) { |
2918 | 0 | r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target); |
2919 | 0 | if (r) |
2920 | 0 | log_err(cd, _("Mismatching parameters on device %s."), name); |
2921 | 0 | } |
2922 | 0 | json_object_put(jobj_segments_old); |
2923 | |
2924 | 0 | dm_targets_free(cd, &dmd_source); |
2925 | 0 | dm_targets_free(cd, &dmd_target); |
2926 | 0 | free(CONST_CAST(void*)dmd_target.uuid); |
2927 | |
2928 | 0 | if (r) |
2929 | 0 | goto out; |
2930 | | |
2931 | 0 | dmd_source.size = dmd_target.size; |
2932 | 0 | r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source); |
2933 | 0 | if (!r) { |
2934 | 0 | r = dm_reload_device(cd, name, &dmd_source, dmd_target.flags, 0); |
2935 | 0 | if (r) |
2936 | 0 | log_err(cd, _("Failed to reload device %s."), name); |
2937 | 0 | else |
2938 | 0 | clear_table = true; |
2939 | 0 | } |
2940 | |
2941 | 0 | dm_targets_free(cd, &dmd_source); |
2942 | |
2943 | 0 | if (r) |
2944 | 0 | goto out; |
2945 | 0 | } |
2946 | | |
2947 | 0 | if (name) { |
2948 | 0 | r = dm_suspend_device(cd, name, DM_SUSPEND_SKIP_LOCKFS); |
2949 | 0 | if (r) { |
2950 | 0 | log_err(cd, _("Failed to suspend device %s."), name); |
2951 | 0 | goto out; |
2952 | 0 | } |
2953 | 0 | } |
2954 | | |
2955 | 0 | if (reencrypt_move_data(cd, devfd, data_shift, params->mode)) { |
2956 | 0 | r = -EIO; |
2957 | 0 | goto out; |
2958 | 0 | } |
2959 | | |
2960 | | /* This must be the first and only write to LUKS2 metadata during _reencrypt_init */
2961 | 0 | r = reencrypt_update_flag(cd, LUKS2_DECRYPT_DATASHIFT_REQ_VERSION, true, true); |
2962 | 0 | if (r) { |
2963 | 0 | log_dbg(cd, "Failed to set online-reencryption requirement."); |
2964 | 0 | r = -EINVAL; |
2965 | 0 | } else |
2966 | 0 | r = reencrypt_keyslot; |
2967 | 0 | out: |
2968 | 0 | if (r < 0 && clear_table && dm_clear_device(cd, name)) |
2969 | 0 | log_err(cd, _("Failed to clear table.")); |
2970 | 0 | else if (clear_table && dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS)) |
2971 | 0 | log_err(cd, _("Failed to resume device %s."), name); |
2972 | |
2973 | 0 | device_release_excl(cd, crypt_data_device(cd)); |
2974 | 0 | if (r < 0 && LUKS2_hdr_rollback(cd, hdr) < 0) |
2975 | 0 | log_dbg(cd, "Failed to rollback LUKS2 metadata after failure."); |
2976 | |
2977 | 0 | return r; |
2978 | 0 | } |
2979 | | |
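 | | /*
 | |  * A minimal caller sketch for the decrypt-with-datashift path above,
 | |  * assuming only the public libcryptsetup API; the concrete values,
 | |  * keyslots and resilience choice are illustrative, not taken from
 | |  * this file:
 | |  *
 | |  *     struct crypt_params_reencrypt rparams = {
 | |  *         .mode = CRYPT_REENCRYPT_DECRYPT,
 | |  *         .direction = CRYPT_REENCRYPT_FORWARD,
 | |  *         .resilience = "datashift-checksum",
 | |  *         .hash = "sha256",
 | |  *         .data_shift = 8192, // in sectors; must cover the header area
 | |  *         .flags = CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT,
 | |  *     };
 | |  *     // r = crypt_reencrypt_init_by_passphrase(cd, NULL, pass, pass_len,
 | |  *     //         CRYPT_ANY_SLOT, CRYPT_ANY_SLOT, NULL, NULL, &rparams);
 | |  */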
2980 | | /* This function must be called with metadata lock held */ |
2981 | | static int reencrypt_init(struct crypt_device *cd, |
2982 | | const char *name, |
2983 | | struct luks2_hdr *hdr, |
2984 | | struct crypt_keyslot_context *kc_old, |
2985 | | struct crypt_keyslot_context *kc_new, |
2986 | | int keyslot_old, |
2987 | | int keyslot_new, |
2988 | | const char *cipher, |
2989 | | const char *cipher_mode, |
2990 | | const struct crypt_params_reencrypt *params, |
2991 | | struct volume_key **vks) |
2992 | 0 | { |
2993 | 0 | bool move_first_segment; |
2994 | 0 | char _cipher[128]; |
2995 | 0 | uint32_t check_sector_size, new_sector_size, old_sector_size; |
2996 | 0 | int digest_new, r, reencrypt_keyslot, devfd = -1; |
2997 | 0 | uint64_t data_offset_bytes, data_size_bytes, data_shift_bytes, device_size_bytes; |
2998 | 0 | struct volume_key *vk; |
2999 | 0 | struct crypt_dm_active_device dmd_target, dmd_source = { |
3000 | 0 | .uuid = crypt_get_uuid(cd), |
3001 | 0 | .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */ |
3002 | 0 | }; |
3003 | |
3004 | 0 | assert(cd); |
3005 | 0 | assert(hdr); |
3006 | |
3007 | 0 | if (!params || !params->resilience || params->mode > CRYPT_REENCRYPT_DECRYPT) |
3008 | 0 | return -EINVAL; |
3009 | | |
3010 | 0 | if (params->mode != CRYPT_REENCRYPT_DECRYPT && |
3011 | 0 | (!params->luks2 || !(cipher && cipher_mode) || |
3012 | 0 | (keyslot_new < 0 && !(params->flags & CRYPT_REENCRYPT_CREATE_NEW_DIGEST)))) |
3013 | 0 | return -EINVAL; |
3014 | | |
3015 | 0 | log_dbg(cd, "Initializing reencryption (mode: %s) in LUKS2 metadata.", |
3016 | 0 | crypt_reencrypt_mode_to_str(params->mode)); |
3017 | |
3018 | 0 | move_first_segment = (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT); |
3019 | |
3020 | 0 | old_sector_size = LUKS2_get_sector_size(hdr); |
3021 | | |
3022 | | /* implicit sector size 512 for decryption */ |
3023 | 0 | new_sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE; |
3024 | 0 | if (new_sector_size < SECTOR_SIZE || new_sector_size > MAX_SECTOR_SIZE || |
3025 | 0 | NOTPOW2(new_sector_size)) { |
3026 | 0 | log_err(cd, _("Unsupported encryption sector size.")); |
3027 | 0 | return -EINVAL; |
3028 | 0 | } |
3029 | | /* check the larger encryption sector size only */ |
3030 | 0 | check_sector_size = new_sector_size > old_sector_size ? new_sector_size : old_sector_size; |
3031 | |
3032 | 0 | if (!cipher_mode || *cipher_mode == '\0') |
3033 | 0 | r = snprintf(_cipher, sizeof(_cipher), "%s", cipher); |
3034 | 0 | else |
3035 | 0 | r = snprintf(_cipher, sizeof(_cipher), "%s-%s", cipher, cipher_mode); |
3036 | 0 | if (r < 0 || (size_t)r >= sizeof(_cipher)) |
3037 | 0 | return -EINVAL; |
3038 | | |
3039 | 0 | data_offset_bytes = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT; |
3040 | |
3041 | 0 | r = device_check_access(cd, crypt_data_device(cd), DEV_OK); |
3042 | 0 | if (r) |
3043 | 0 | return r; |
3044 | | |
3045 | 0 | r = device_check_size(cd, crypt_data_device(cd), data_offset_bytes, 1); |
3046 | 0 | if (r) |
3047 | 0 | return r; |
3048 | | |
3049 | 0 | r = device_size(crypt_data_device(cd), &device_size_bytes); |
3050 | 0 | if (r) |
3051 | 0 | return r; |
3052 | | |
3053 | 0 | if (move_first_segment && params->mode == CRYPT_REENCRYPT_ENCRYPT && |
3054 | 0 | params->data_shift < LUKS2_get_data_offset(hdr)) { |
3055 | 0 | log_err(cd, _("Data shift (%" PRIu64 " sectors) is less than future data offset (%" PRIu64 " sectors)."), |
3056 | 0 | params->data_shift, LUKS2_get_data_offset(hdr)); |
3057 | 0 | return -EINVAL; |
3058 | 0 | } |
3059 | | |
3060 | 0 | device_size_bytes -= data_offset_bytes; |
3061 | 0 | data_shift_bytes = params->data_shift << SECTOR_SHIFT; |
3062 | 0 | data_size_bytes = params->device_size << SECTOR_SHIFT; |
3063 | |
3064 | 0 | if (device_size_bytes < data_shift_bytes && params->direction == CRYPT_REENCRYPT_BACKWARD) { |
3065 | 0 | log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd))); |
3066 | 0 | return -EINVAL; |
3067 | 0 | } |
3068 | | |
3069 | 0 | if (data_size_bytes > device_size_bytes) { |
3070 | 0 | log_err(cd, _("Reduced data size is larger than real device size.")); |
3071 | 0 | return -EINVAL; |
3072 | 0 | } |
3073 | | |
3074 | 0 | if (data_size_bytes && params->mode == CRYPT_REENCRYPT_ENCRYPT && |
3075 | 0 | move_first_segment && data_shift_bytes) { |
3076 | 0 | if (data_size_bytes > device_size_bytes - data_shift_bytes) { |
3077 | 0 | log_err(cd, _("Reduced data size is larger than real device size.")); |
3078 | 0 | return -EINVAL; |
3079 | 0 | } |
3080 | 0 | } else if (!data_size_bytes && params->mode == CRYPT_REENCRYPT_ENCRYPT && |
3081 | 0 | move_first_segment && data_shift_bytes) |
3082 | 0 | data_size_bytes = device_size_bytes - data_shift_bytes; |
3083 | 0 | else if (!data_size_bytes) |
3084 | 0 | data_size_bytes = device_size_bytes; |
3085 | | |
3086 | 0 | if (MISALIGNED(data_size_bytes, check_sector_size)) { |
3087 | 0 | log_err(cd, _("Data device is not aligned to encryption sector size (%" PRIu32 " bytes)."), check_sector_size); |
3088 | 0 | return -EINVAL; |
3089 | 0 | } |
3090 | | |
3091 | 0 | reencrypt_keyslot = LUKS2_keyslot_find_empty(cd, hdr, 0); |
3092 | 0 | if (reencrypt_keyslot < 0) { |
3093 | 0 | log_err(cd, _("All key slots full.")); |
3094 | 0 | return -EINVAL; |
3095 | 0 | } |
3096 | | |
3097 | 0 | if (params->mode == CRYPT_REENCRYPT_DECRYPT && data_shift_bytes && move_first_segment) |
3098 | 0 | return reencrypt_decrypt_with_datashift_init(cd, name, hdr, |
3099 | 0 | reencrypt_keyslot, |
3100 | 0 | check_sector_size, |
3101 | 0 | data_size_bytes, |
3102 | 0 | data_offset_bytes, |
3103 | 0 | kc_old, |
3104 | 0 | keyslot_old, |
3105 | 0 | params, |
3106 | 0 | vks); |
3107 | | |
3108 | | /*
3109 | | * We must perform the data move with the data device opened exclusively,
3110 | | * to prevent another cryptsetup process from colliding with
3111 | | * encryption initialization (or a mount).
3112 | | */
3113 | 0 | if (move_first_segment) { |
3114 | 0 | devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR); |
3115 | 0 | if (devfd < 0) { |
3116 | 0 | if (devfd == -EBUSY) |
3117 | 0 | log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), |
3118 | 0 | device_path(crypt_data_device(cd))); |
3119 | 0 | return -EINVAL; |
3120 | 0 | } |
3121 | 0 | } |
3122 | | |
3123 | 0 | if (params->mode == CRYPT_REENCRYPT_ENCRYPT) { |
3124 | | /* in-memory only */ |
3125 | 0 | r = reencrypt_set_encrypt_segments(cd, hdr, device_size_bytes, data_size_bytes, |
3126 | 0 | data_shift_bytes, |
3127 | 0 | move_first_segment, |
3128 | 0 | params->direction); |
3129 | 0 | if (r) |
3130 | 0 | goto out; |
3131 | 0 | } |
3132 | | |
3133 | 0 | if (params->flags & CRYPT_REENCRYPT_CREATE_NEW_DIGEST) { |
3134 | 0 | assert(kc_new->get_luks2_key); |
3135 | 0 | r = kc_new->get_luks2_key(cd, kc_new, CRYPT_ANY_SLOT, CRYPT_ANY_SEGMENT, &vk); |
3136 | 0 | if (r < 0) |
3137 | 0 | goto out; |
3138 | | |
3139 | | /* do not create a new digest if it matches the current one */
3140 | 0 | r = LUKS2_digest_verify_by_segment(cd, hdr, CRYPT_DEFAULT_SEGMENT, vk); |
3141 | 0 | if (r == -EPERM || r == -ENOENT) |
3142 | 0 | r = LUKS2_digest_create(cd, "pbkdf2", hdr, vk); |
3143 | |
3144 | 0 | crypt_free_volume_key(vk); |
3145 | 0 | if (r < 0) |
3146 | 0 | goto out; |
3147 | 0 | digest_new = r; |
3148 | 0 | } else |
3149 | 0 | digest_new = LUKS2_digest_by_keyslot(hdr, keyslot_new); |
3150 | | |
3151 | 0 | r = reencrypt_make_backup_segments(cd, hdr, digest_new, _cipher, data_offset_bytes, params); |
3152 | 0 | if (r) { |
3153 | 0 | log_dbg(cd, "Failed to create reencryption backup device segments."); |
3154 | 0 | goto out; |
3155 | 0 | } |
3156 | | |
3157 | 0 | r = reencrypt_verify_resilience_params(cd, params, check_sector_size, move_first_segment); |
3158 | 0 | if (r < 0) |
3159 | 0 | goto out; |
3160 | | |
3161 | 0 | r = LUKS2_keyslot_reencrypt_allocate(cd, hdr, reencrypt_keyslot, params, |
3162 | 0 | reencrypt_get_alignment(cd, hdr)); |
3163 | 0 | if (r < 0) |
3164 | 0 | goto out; |
3165 | | |
3166 | 0 | r = LUKS2_keyslot_context_open_all_segments(cd, keyslot_old, keyslot_new, kc_old, kc_new, vks); |
3167 | 0 | if (r < 0) |
3168 | 0 | goto out; |
3169 | | |
3170 | 0 | r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, LUKS2_REENCRYPT_REQ_VERSION, *vks); |
3171 | 0 | if (r < 0) |
3172 | 0 | goto out; |
3173 | | |
3174 | 0 | if (name && params->mode != CRYPT_REENCRYPT_ENCRYPT) { |
3175 | 0 | r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks); |
3176 | 0 | if (r) |
3177 | 0 | goto out; |
3178 | | |
3179 | 0 | r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE | |
3180 | 0 | DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY | |
3181 | 0 | DM_ACTIVE_CRYPT_CIPHER, &dmd_target); |
3182 | 0 | if (r < 0) |
3183 | 0 | goto out; |
3184 | | |
3185 | 0 | r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source); |
3186 | 0 | if (!r) { |
3187 | 0 | r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target); |
3188 | 0 | if (r) |
3189 | 0 | log_err(cd, _("Mismatching parameters on device %s."), name); |
3190 | 0 | } |
3191 | |
3192 | 0 | dm_targets_free(cd, &dmd_source); |
3193 | 0 | dm_targets_free(cd, &dmd_target); |
3194 | 0 | free(CONST_CAST(void*)dmd_target.uuid); |
3195 | |
3196 | 0 | if (r) |
3197 | 0 | goto out; |
3198 | 0 | } |
3199 | | |
3200 | 0 | if (move_first_segment && reencrypt_move_data(cd, devfd, data_shift_bytes, params->mode)) { |
3201 | 0 | r = -EIO; |
3202 | 0 | goto out; |
3203 | 0 | } |
3204 | | |
3205 | | /* This must be the first and only write to LUKS2 metadata during reencrypt_init */
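 | | /*
 | |  * All changes above touched only the in-memory header copy. The single
 | |  * commit below publishes the initialized reencryption state atomically;
 | |  * on any failure afterwards, LUKS2_hdr_rollback() below restores the
 | |  * on-disk metadata, so the device is never left half-initialized.
 | |  */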
3206 | 0 | r = reencrypt_update_flag(cd, LUKS2_REENCRYPT_REQ_VERSION, true, true); |
3207 | 0 | if (r) { |
3208 | 0 | log_dbg(cd, "Failed to set online-reencryption requirement."); |
3209 | 0 | r = -EINVAL; |
3210 | 0 | } else |
3211 | 0 | r = reencrypt_keyslot; |
3212 | 0 | out: |
3213 | 0 | device_release_excl(cd, crypt_data_device(cd)); |
3214 | 0 | if (r < 0 && LUKS2_hdr_rollback(cd, hdr) < 0) |
3215 | 0 | log_dbg(cd, "Failed to rollback LUKS2 metadata after failure."); |
3216 | |
|
3217 | 0 | return r; |
3218 | 0 | } |
3219 | | |
3220 | | static int reencrypt_hotzone_protect_final(struct crypt_device *cd, |
3221 | | struct luks2_hdr *hdr, int reencrypt_keyslot, |
3222 | | const struct reenc_protection *rp, |
3223 | | const void *buffer, size_t buffer_len) |
3224 | 0 | { |
3225 | 0 | const void *pbuffer; |
3226 | 0 | size_t data_offset, len; |
3227 | 0 | int r; |
3228 | |
3229 | 0 | assert(hdr); |
3230 | 0 | assert(rp); |
3231 | |
3232 | 0 | if (rp->type == REENC_PROTECTION_NONE) |
3233 | 0 | return 0; |
3234 | | |
3235 | 0 | if (rp->type == REENC_PROTECTION_CHECKSUM) { |
3236 | 0 | log_dbg(cd, "Checksums hotzone resilience."); |
3237 | |
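 | | /*
 | |  * One digest of hash_size bytes is stored per block_size chunk of the
 | |  * hotzone. Worked example with illustrative values: a 4096-byte block
 | |  * and a 32-byte sha256 digest turn an 8 MiB hotzone into 2048
 | |  * checksums, i.e. 64 KiB written into the reencrypt keyslot area below.
 | |  */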
3238 | 0 | for (data_offset = 0, len = 0; data_offset < buffer_len; data_offset += rp->p.csum.block_size, len += rp->p.csum.hash_size) { |
3239 | 0 | if (crypt_hash_write(rp->p.csum.ch, (const char *)buffer + data_offset, rp->p.csum.block_size)) { |
3240 | 0 | log_dbg(cd, "Failed to hash sector at offset %zu.", data_offset); |
3241 | 0 | return -EINVAL; |
3242 | 0 | } |
3243 | 0 | if (crypt_hash_final(rp->p.csum.ch, (char *)rp->p.csum.checksums + len, rp->p.csum.hash_size)) { |
3244 | 0 | log_dbg(cd, "Failed to finalize hash."); |
3245 | 0 | return -EINVAL; |
3246 | 0 | } |
3247 | 0 | } |
3248 | 0 | pbuffer = rp->p.csum.checksums; |
3249 | 0 | } else if (rp->type == REENC_PROTECTION_JOURNAL) { |
3250 | 0 | log_dbg(cd, "Journal hotzone resilience."); |
3251 | 0 | len = buffer_len; |
3252 | 0 | pbuffer = buffer; |
3253 | 0 | } else if (rp->type == REENC_PROTECTION_DATASHIFT) { |
3254 | 0 | log_dbg(cd, "Data shift hotzone resilience."); |
3255 | 0 | return LUKS2_hdr_write(cd, hdr); |
3256 | 0 | } else |
3257 | 0 | return -EINVAL; |
3258 | | |
3259 | 0 | log_dbg(cd, "Going to store %zu bytes in reencrypt keyslot.", len); |
3260 | |
3261 | 0 | r = LUKS2_keyslot_reencrypt_store(cd, hdr, reencrypt_keyslot, pbuffer, len); |
3262 | |
3263 | 0 | return r > 0 ? 0 : r; |
3264 | 0 | } |
3265 | | |
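 | | /*
 | |  * reencrypt_context_update() below advances the hotzone window after a
 | |  * completed step. Worked example for the forward direction, with
 | |  * illustrative numbers: device_size = 100 MiB and length = 4 MiB move
 | |  * the offset 0 -> 4 MiB after 4 MiB were read; once the remainder
 | |  * device_size - offset drops under 4 MiB, length is clamped to it.
 | |  * Backward datashift instead steps the offset down by the shift value
 | |  * until it reaches zero.
 | |  */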
3266 | | static int reencrypt_context_update(struct crypt_device *cd, |
3267 | | struct luks2_reencrypt *rh) |
3268 | 0 | { |
3269 | 0 | if (rh->read < 0) |
3270 | 0 | return -EINVAL; |
3271 | | |
3272 | 0 | if (rh->direction == CRYPT_REENCRYPT_BACKWARD) { |
3273 | 0 | if (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->mode == CRYPT_REENCRYPT_ENCRYPT) { |
3274 | 0 | if (rh->offset) |
3275 | 0 | rh->offset -= data_shift_value(&rh->rp); |
3276 | 0 | if (rh->offset && (rh->offset < data_shift_value(&rh->rp))) { |
3277 | 0 | rh->length = rh->offset; |
3278 | 0 | rh->offset = data_shift_value(&rh->rp); |
3279 | 0 | } |
3280 | 0 | if (!rh->offset) |
3281 | 0 | rh->length = data_shift_value(&rh->rp); |
3282 | 0 | } else { |
3283 | 0 | if (rh->offset < rh->length) |
3284 | 0 | rh->length = rh->offset; |
3285 | 0 | rh->offset -= rh->length; |
3286 | 0 | } |
3287 | 0 | } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) { |
3288 | 0 | rh->offset += (uint64_t)rh->read; |
3289 | 0 | if (rh->device_size == rh->offset && |
3290 | 0 | rh->jobj_segment_moved && |
3291 | 0 | rh->mode == CRYPT_REENCRYPT_DECRYPT && |
3292 | 0 | rh->rp.type == REENC_PROTECTION_DATASHIFT) { |
3293 | 0 | rh->offset = 0; |
3294 | 0 | rh->length = json_segment_get_size(rh->jobj_segment_moved, 0); |
3295 | 0 | } |
3296 | | /* fails later in case device_size < rh->offset */
3297 | 0 | else if (rh->device_size - rh->offset < rh->length) |
3298 | 0 | rh->length = rh->device_size - rh->offset; |
3299 | 0 | } else |
3300 | 0 | return -EINVAL; |
3301 | | |
3302 | 0 | if (rh->device_size < rh->offset) { |
3303 | 0 | log_dbg(cd, "Calculated reencryption offset %" PRIu64 " is beyond device size %" PRIu64 ".", rh->offset, rh->device_size); |
3304 | 0 | return -EINVAL; |
3305 | 0 | } |
3306 | | |
3307 | 0 | rh->progress += (uint64_t)rh->read; |
3308 | |
3309 | 0 | return 0; |
3310 | 0 | } |
3311 | | |
3312 | | static int reencrypt_load(struct crypt_device *cd, struct luks2_hdr *hdr, |
3313 | | uint64_t device_size, |
3314 | | uint64_t max_hotzone_size, |
3315 | | uint64_t required_device_size, |
3316 | | struct volume_key *vks, |
3317 | | struct luks2_reencrypt **rh) |
3318 | 0 | { |
3319 | 0 | int r; |
3320 | 0 | struct luks2_reencrypt *tmp = NULL; |
3321 | 0 | crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr); |
3322 | |
3323 | 0 | if (ri == CRYPT_REENCRYPT_NONE) { |
3324 | 0 | log_err(cd, _("Device not marked for LUKS2 reencryption.")); |
3325 | 0 | return -EINVAL; |
3326 | 0 | } else if (ri == CRYPT_REENCRYPT_INVALID) |
3327 | 0 | return -EINVAL; |
3328 | | |
3329 | 0 | r = LUKS2_reencrypt_digest_verify(cd, hdr, vks); |
3330 | 0 | if (r < 0) |
3331 | 0 | return r; |
3332 | | |
3333 | 0 | if (ri == CRYPT_REENCRYPT_CLEAN) |
3334 | 0 | r = reencrypt_load_clean(cd, hdr, device_size, max_hotzone_size, required_device_size, &tmp); |
3335 | 0 | else if (ri == CRYPT_REENCRYPT_CRASH) |
3336 | 0 | r = reencrypt_load_crashed(cd, hdr, device_size, &tmp); |
3337 | 0 | else |
3338 | 0 | r = -EINVAL; |
3339 | |
3340 | 0 | if (r < 0 || !tmp) { |
3341 | 0 | log_err(cd, _("Failed to load LUKS2 reencryption context.")); |
3342 | 0 | return r < 0 ? r : -EINVAL; |
3343 | 0 | } |
3344 | | |
3345 | 0 | *rh = tmp; |
3346 | |
3347 | 0 | return 0; |
3348 | 0 | } |
3349 | | #else |
3350 | | int LUKS2_reencrypt_max_hotzone_size(struct crypt_device *cd __attribute__((unused)), |
3351 | | struct luks2_hdr *hdr __attribute__((unused)), |
3352 | | const struct reenc_protection *rp __attribute__((unused)), |
3353 | | int reencrypt_keyslot __attribute__((unused)), |
3354 | | uint64_t *r_length __attribute__((unused))) |
3355 | | { |
3356 | | return -ENOTSUP; |
3357 | | } |
3358 | | #endif |
3359 | | |
3360 | | static int reencrypt_lock_internal(struct crypt_device *cd, const char *uuid, struct crypt_lock_handle **reencrypt_lock) |
3361 | 0 | { |
3362 | 0 | int r; |
3363 | 0 | char *lock_resource; |
3364 | |
3365 | 0 | if (!crypt_metadata_locking_enabled()) { |
3366 | 0 | *reencrypt_lock = NULL; |
3367 | 0 | return 0; |
3368 | 0 | } |
3369 | | |
3370 | 0 | r = asprintf(&lock_resource, "LUKS2-reencryption-%s", uuid); |
3371 | 0 | if (r < 0) |
3372 | 0 | return -ENOMEM; |
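 | | /* "LUKS2-reencryption-" is 19 characters, so r < 20 means the UUID part was empty. */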
3373 | 0 | if (r < 20) { |
3374 | 0 | free(lock_resource); |
3375 | 0 | return -EINVAL; |
3376 | 0 | } |
3377 | | |
3378 | 0 | r = crypt_write_lock(cd, lock_resource, false, reencrypt_lock); |
3379 | |
3380 | 0 | free(lock_resource); |
3381 | |
3382 | 0 | return r; |
3383 | 0 | } |
3384 | | |
3385 | | /* internal only */ |
3386 | | int LUKS2_reencrypt_lock_by_dm_uuid(struct crypt_device *cd, const char *dm_uuid, |
3387 | | struct crypt_lock_handle **reencrypt_lock) |
3388 | 0 | { |
3389 | 0 | int r; |
3390 | 0 | char hdr_uuid[37]; |
3391 | 0 | const char *uuid = crypt_get_uuid(cd); |
3392 | |
3393 | 0 | if (!dm_uuid) |
3394 | 0 | return -EINVAL; |
3395 | | |
3396 | 0 | if (!uuid) { |
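 | | /* Rebuild the canonical 8-4-4-4-12 textual UUID from the dashless UUID embedded in the dm UUID. */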
3397 | 0 | r = snprintf(hdr_uuid, sizeof(hdr_uuid), "%.8s-%.4s-%.4s-%.4s-%.12s", |
3398 | 0 | dm_uuid + 6, dm_uuid + 14, dm_uuid + 18, dm_uuid + 22, dm_uuid + 26); |
3399 | 0 | if (r < 0 || (size_t)r != (sizeof(hdr_uuid) - 1)) |
3400 | 0 | return -EINVAL; |
3401 | 0 | } else if (dm_uuid_cmp(dm_uuid, uuid)) |
3402 | 0 | return -EINVAL; |
3403 | | |
3404 | 0 | return reencrypt_lock_internal(cd, uuid, reencrypt_lock); |
3405 | 0 | } |
3406 | | |
3407 | | /* internal only */ |
3408 | | int LUKS2_reencrypt_lock(struct crypt_device *cd, struct crypt_lock_handle **reencrypt_lock) |
3409 | 0 | { |
3410 | 0 | if (!cd || !crypt_get_type(cd) || strcmp(crypt_get_type(cd), CRYPT_LUKS2)) |
3411 | 0 | return -EINVAL; |
3412 | | |
3413 | 0 | return reencrypt_lock_internal(cd, crypt_get_uuid(cd), reencrypt_lock); |
3414 | 0 | } |
3415 | | |
3416 | | /* internal only */ |
3417 | | void LUKS2_reencrypt_unlock(struct crypt_device *cd, struct crypt_lock_handle *reencrypt_lock) |
3418 | 0 | { |
3419 | 0 | crypt_unlock_internal(cd, reencrypt_lock); |
3420 | 0 | } |
3421 | | #if USE_LUKS2_REENCRYPTION |
3422 | | static int reencrypt_lock_and_verify(struct crypt_device *cd, struct luks2_hdr *hdr, |
3423 | | struct crypt_lock_handle **reencrypt_lock) |
3424 | 0 | { |
3425 | 0 | int r; |
3426 | 0 | crypt_reencrypt_info ri; |
3427 | 0 | struct crypt_lock_handle *h; |
3428 | |
|
3429 | 0 | ri = LUKS2_reencrypt_status(hdr); |
3430 | 0 | if (ri == CRYPT_REENCRYPT_INVALID) |
3431 | 0 | return -EINVAL; |
3432 | 0 | if (ri < CRYPT_REENCRYPT_CLEAN) { |
3433 | 0 | log_err(cd, _("Device is not in reencryption.")); |
3434 | 0 | return -EINVAL; |
3435 | 0 | } |
3436 | | |
3437 | 0 | r = LUKS2_reencrypt_lock(cd, &h); |
3438 | 0 | if (r < 0) { |
3439 | 0 | if (r == -EBUSY) |
3440 | 0 | log_err(cd, _("Reencryption process is already running.")); |
3441 | 0 | else |
3442 | 0 | log_err(cd, _("Failed to acquire reencryption lock.")); |
3443 | 0 | return r; |
3444 | 0 | } |
3445 | | |
3446 | | /* With reencryption lock held, reload device context and verify metadata state */ |
3447 | 0 | r = crypt_load(cd, CRYPT_LUKS2, NULL); |
3448 | 0 | if (r) { |
3449 | 0 | LUKS2_reencrypt_unlock(cd, h); |
3450 | 0 | return r; |
3451 | 0 | } |
3452 | | |
3453 | 0 | ri = LUKS2_reencrypt_status(hdr); |
3454 | 0 | if (ri == CRYPT_REENCRYPT_CLEAN) { |
3455 | 0 | *reencrypt_lock = h; |
3456 | 0 | return 0; |
3457 | 0 | } |
3458 | | |
3459 | 0 | LUKS2_reencrypt_unlock(cd, h); |
3460 | 0 | log_err(cd, _("Cannot proceed with reencryption. Run reencryption recovery first.")); |
3461 | 0 | return -EINVAL; |
3462 | 0 | } |
3463 | | |
3464 | | static int reencrypt_load_by_keyslot_context(struct crypt_device *cd, |
3465 | | const char *name, |
3466 | | struct crypt_keyslot_context *kc_old, |
3467 | | struct crypt_keyslot_context *kc_new, |
3468 | | int keyslot_old, |
3469 | | int keyslot_new, |
3470 | | struct volume_key **vks, |
3471 | | const struct crypt_params_reencrypt *params) |
3472 | 0 | { |
3473 | 0 | int r, reencrypt_slot; |
3474 | 0 | struct luks2_hdr *hdr; |
3475 | 0 | struct crypt_lock_handle *reencrypt_lock; |
3476 | 0 | struct luks2_reencrypt *rh; |
3477 | 0 | const struct volume_key *vk; |
3478 | 0 | size_t alignment; |
3479 | 0 | uint32_t old_sector_size, new_sector_size, sector_size; |
3480 | 0 | struct crypt_dm_active_device dmd_target, dmd_source = { |
3481 | 0 | .uuid = crypt_get_uuid(cd), |
3482 | 0 | .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */ |
3483 | 0 | }; |
3484 | 0 | uint64_t minimal_size, device_size, mapping_size = 0, required_size = 0, |
3485 | 0 | max_hotzone_size = 0; |
3486 | 0 | bool dynamic; |
3487 | 0 | uint32_t flags = 0; |
3488 | |
|
3489 | 0 | assert(cd); |
3490 | |
|
3491 | 0 | hdr = crypt_get_hdr(cd, CRYPT_LUKS2); |
3492 | 0 | if (!hdr) |
3493 | 0 | return -EINVAL; |
3494 | | |
3495 | 0 | log_dbg(cd, "Loading LUKS2 reencryption context."); |
3496 | |
3497 | 0 | old_sector_size = reencrypt_get_sector_size_old(hdr); |
3498 | 0 | new_sector_size = reencrypt_get_sector_size_new(hdr); |
3499 | 0 | sector_size = new_sector_size > old_sector_size ? new_sector_size : old_sector_size; |
3500 | |
3501 | 0 | r = reencrypt_verify_resilience_params(cd, params, sector_size, |
3502 | 0 | LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0); |
3503 | 0 | if (r < 0) |
3504 | 0 | return r; |
3505 | | |
3506 | 0 | if (params) { |
3507 | 0 | required_size = params->device_size; |
3508 | 0 | max_hotzone_size = params->max_hotzone_size; |
3509 | 0 | } |
3510 | |
3511 | 0 | rh = crypt_get_luks2_reencrypt(cd); |
3512 | 0 | if (rh) { |
3513 | 0 | LUKS2_reencrypt_free(cd, rh); |
3514 | 0 | crypt_set_luks2_reencrypt(cd, NULL); |
3515 | 0 | rh = NULL; |
3516 | 0 | } |
3517 | |
3518 | 0 | r = reencrypt_lock_and_verify(cd, hdr, &reencrypt_lock); |
3519 | 0 | if (r) |
3520 | 0 | return r; |
3521 | | |
3522 | 0 | reencrypt_slot = LUKS2_find_keyslot(hdr, "reencrypt"); |
3523 | 0 | if (reencrypt_slot < 0) { |
3524 | 0 | r = -EINVAL; |
3525 | 0 | goto err; |
3526 | 0 | } |
3527 | | |
3528 | | /* From now on we hold the reencryption lock */
3529 | | |
3530 | 0 | if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic)) { |
3531 | 0 | r = -EINVAL; |
3532 | 0 | goto err; |
3533 | 0 | } |
3534 | | |
3535 | | /* some configurations provide a fixed device size */
3536 | 0 | r = LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, false, dynamic); |
3537 | 0 | if (r) { |
3538 | 0 | r = -EINVAL; |
3539 | 0 | goto err; |
3540 | 0 | } |
3541 | | |
3542 | 0 | minimal_size >>= SECTOR_SHIFT; |
3543 | |
3544 | 0 | r = reencrypt_verify_keys(cd, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks); |
3545 | 0 | if (r == -ENOENT) { |
3546 | 0 | log_dbg(cd, "Keys are not ready. Unlocking all volume keys."); |
3547 | 0 | r = LUKS2_keyslot_context_open_all_segments(cd, keyslot_old, keyslot_new, |
3548 | 0 | kc_old, kc_new, vks); |
3549 | 0 | } |
3550 | |
3551 | 0 | if (r < 0) |
3552 | 0 | goto err; |
3553 | | |
3554 | 0 | if (name) { |
3555 | 0 | r = reencrypt_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks); |
3556 | 0 | if (r < 0) |
3557 | 0 | goto err; |
3558 | | |
3559 | 0 | r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE | |
3560 | 0 | DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY | |
3561 | 0 | DM_ACTIVE_CRYPT_CIPHER, &dmd_target); |
3562 | 0 | if (r < 0) |
3563 | 0 | goto err; |
3564 | 0 | flags = dmd_target.flags; |
3565 | | |
3566 | | /*
3567 | | * By default the reencryption code aims to retain flags from the existing dm device.
3568 | | * The keyring activation flag cannot be inherited if the original cipher is null.
3569 | | *
3570 | | * In that case override the flag based on the decision made in reencrypt_upload_keys
3571 | | * above, which checks whether the new volume key is eligible for the keyring.
3572 | | */
3573 | 0 | vk = crypt_volume_key_by_id(*vks, LUKS2_reencrypt_digest_new(hdr)); |
3574 | 0 | if (vk && crypt_volume_key_description(vk) && crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr))) { |
3575 | 0 | flags |= CRYPT_ACTIVATE_KEYRING_KEY; |
3576 | 0 | dmd_source.flags |= CRYPT_ACTIVATE_KEYRING_KEY; |
3577 | 0 | } |
3578 | |
3579 | 0 | r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source); |
3580 | 0 | if (!r) { |
3581 | 0 | r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target); |
3582 | 0 | if (r) |
3583 | 0 | log_err(cd, _("Mismatching parameters on device %s."), name); |
3584 | 0 | } |
3585 | |
3586 | 0 | dm_targets_free(cd, &dmd_source); |
3587 | 0 | dm_targets_free(cd, &dmd_target); |
3588 | 0 | free(CONST_CAST(void*)dmd_target.uuid); |
3589 | 0 | if (r) |
3590 | 0 | goto err; |
3591 | 0 | mapping_size = dmd_target.size; |
3592 | 0 | } |
3593 | | |
3594 | 0 | r = -EINVAL; |
3595 | 0 | if (required_size && mapping_size && (required_size != mapping_size)) { |
3596 | 0 | log_err(cd, _("Active device size and requested reencryption size don't match.")); |
3597 | 0 | goto err; |
3598 | 0 | } |
3599 | | |
3600 | 0 | if (mapping_size) |
3601 | 0 | required_size = mapping_size; |
3602 | |
3603 | 0 | if (required_size) { |
3604 | | /* TODO: Add support for changing fixed minimal size in reencryption mda where possible */ |
3605 | 0 | if ((minimal_size && (required_size < minimal_size)) || |
3606 | 0 | (required_size > (device_size >> SECTOR_SHIFT)) || |
3607 | 0 | (!dynamic && (required_size != minimal_size)) || |
3608 | 0 | (old_sector_size > 0 && MISALIGNED(required_size, old_sector_size >> SECTOR_SHIFT)) || |
3609 | 0 | (new_sector_size > 0 && MISALIGNED(required_size, new_sector_size >> SECTOR_SHIFT))) { |
3610 | 0 | log_err(cd, _("Illegal device size requested in reencryption parameters.")); |
3611 | 0 | goto err; |
3612 | 0 | } |
3613 | 0 | } |
3614 | | |
3615 | 0 | alignment = reencrypt_get_alignment(cd, hdr); |
3616 | |
3617 | 0 | r = LUKS2_keyslot_reencrypt_update_needed(cd, hdr, reencrypt_slot, params, alignment); |
3618 | 0 | if (r > 0) /* metadata update needed */ |
3619 | 0 | r = LUKS2_keyslot_reencrypt_update(cd, hdr, reencrypt_slot, params, alignment, *vks); |
3620 | 0 | if (r < 0) |
3621 | 0 | goto err; |
3622 | | |
3623 | 0 | r = reencrypt_load(cd, hdr, device_size, max_hotzone_size, required_size, *vks, &rh); |
3624 | 0 | if (r < 0 || !rh) |
3625 | 0 | goto err; |
3626 | | |
3627 | 0 | if (name && (r = reencrypt_context_set_names(rh, name))) |
3628 | 0 | goto err; |
3629 | | |
3630 | | /* Ensure the device is not mounted and no dm mapping is active */
3631 | 0 | if (!name && (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0)) { |
3632 | 0 | log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd))); |
3633 | 0 | r = -EBUSY; |
3634 | 0 | goto err; |
3635 | 0 | } |
3636 | 0 | device_release_excl(cd, crypt_data_device(cd)); |
3637 | | |
3638 | | /* There's a race with dm device activations not managed by cryptsetup:
3639 | | *
3640 | | * 1) exclusive close
3641 | | * 2) rogue dm device activation
3642 | | * 3) one or more dm-crypt based wrapper activations
3643 | | * 4) the next exclusive open is skipped due to 3), so the device from 2) remains undetected.
3644 | | */
3645 | 0 | r = reencrypt_init_storage_wrappers(cd, hdr, rh, *vks); |
3646 | 0 | if (r) |
3647 | 0 | goto err; |
3648 | | |
3649 | | /* If one of the wrappers is based on the dm-crypt fallback, it has already blocked mounts */
3650 | 0 | if (!name && crypt_storage_wrapper_get_type(rh->cw1) != DMCRYPT && |
3651 | 0 | crypt_storage_wrapper_get_type(rh->cw2) != DMCRYPT) { |
3652 | 0 | if (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0) { |
3653 | 0 | log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd))); |
3654 | 0 | r = -EBUSY; |
3655 | 0 | goto err; |
3656 | 0 | } |
3657 | 0 | } |
3658 | | |
3659 | 0 | rh->flags = flags; |
3660 | |
3661 | 0 | MOVE_REF(rh->vks, *vks); |
3662 | 0 | MOVE_REF(rh->reenc_lock, reencrypt_lock); |
3663 | |
3664 | 0 | crypt_set_luks2_reencrypt(cd, rh); |
3665 | |
3666 | 0 | return 0; |
3667 | 0 | err: |
3668 | 0 | LUKS2_reencrypt_unlock(cd, reencrypt_lock); |
3669 | 0 | LUKS2_reencrypt_free(cd, rh); |
3670 | 0 | return r; |
3671 | 0 | } |
3672 | | |
3673 | | static int reencrypt_locked_recovery(struct crypt_device *cd, |
3674 | | int keyslot_old, |
3675 | | int keyslot_new, |
3676 | | struct crypt_keyslot_context *kc_old, |
3677 | | struct crypt_keyslot_context *kc_new, |
3678 | | struct volume_key **r_vks) |
3679 | 0 | { |
3680 | 0 | int keyslot, r = -EINVAL; |
3681 | 0 | struct volume_key *_vks = NULL; |
3682 | |
|
3683 | 0 | r = LUKS2_keyslot_context_open_all_segments(cd, keyslot_old, keyslot_new, |
3684 | 0 | kc_old, kc_new, &_vks); |
3685 | 0 | if (r < 0) |
3686 | 0 | return r; |
3687 | 0 | keyslot = r; |
3688 | |
3689 | 0 | r = LUKS2_reencrypt_locked_recovery_by_vks(cd, _vks); |
3690 | 0 | if (!r && r_vks) |
3691 | 0 | MOVE_REF(*r_vks, _vks); |
3692 | |
3693 | 0 | crypt_free_volume_key(_vks); |
3694 | |
3695 | 0 | return r < 0 ? r : keyslot; |
3696 | 0 | } |
3697 | | |
3698 | | static int reencrypt_recovery_by_keyslot_context(struct crypt_device *cd, |
3699 | | struct luks2_hdr *hdr, |
3700 | | int keyslot_old, |
3701 | | int keyslot_new, |
3702 | | struct crypt_keyslot_context *kc_old, |
3703 | | struct crypt_keyslot_context *kc_new) |
3704 | 0 | { |
3705 | 0 | int r; |
3706 | 0 | crypt_reencrypt_info ri; |
3707 | 0 | struct crypt_lock_handle *reencrypt_lock; |
3708 | |
3709 | 0 | r = LUKS2_reencrypt_lock(cd, &reencrypt_lock); |
3710 | 0 | if (r) { |
3711 | 0 | if (r == -EBUSY) |
3712 | 0 | log_err(cd, _("Reencryption in progress. Cannot perform recovery."));
3713 | 0 | else |
3714 | 0 | log_err(cd, _("Failed to get reencryption lock.")); |
3715 | 0 | return r; |
3716 | 0 | } |
3717 | | |
3718 | 0 | if ((r = crypt_load(cd, CRYPT_LUKS2, NULL))) { |
3719 | 0 | LUKS2_reencrypt_unlock(cd, reencrypt_lock); |
3720 | 0 | return r; |
3721 | 0 | } |
3722 | | |
3723 | 0 | ri = LUKS2_reencrypt_status(hdr); |
3724 | 0 | if (ri == CRYPT_REENCRYPT_INVALID) { |
3725 | 0 | LUKS2_reencrypt_unlock(cd, reencrypt_lock); |
3726 | 0 | return -EINVAL; |
3727 | 0 | } |
3728 | | |
3729 | 0 | if (ri == CRYPT_REENCRYPT_CRASH) { |
3730 | 0 | r = reencrypt_locked_recovery(cd, keyslot_old, keyslot_new, |
3731 | 0 | kc_old, kc_new, NULL); |
3732 | 0 | if (r < 0) |
3733 | 0 | log_err(cd, _("LUKS2 reencryption recovery failed.")); |
3734 | 0 | } else { |
3735 | 0 | log_dbg(cd, "No LUKS2 reencryption recovery needed."); |
3736 | 0 | r = 0; |
3737 | 0 | } |
3738 | |
3739 | 0 | LUKS2_reencrypt_unlock(cd, reencrypt_lock); |
3740 | 0 | return r; |
3741 | 0 | } |
3742 | | |
3743 | | static int reencrypt_repair( |
3744 | | struct crypt_device *cd, |
3745 | | struct luks2_hdr *hdr, |
3746 | | int keyslot_old, |
3747 | | int keyslot_new, |
3748 | | struct crypt_keyslot_context *kc_old, |
3749 | | struct crypt_keyslot_context *kc_new) |
3750 | 0 | { |
3751 | 0 | int r; |
3752 | 0 | struct crypt_lock_handle *reencrypt_lock; |
3753 | 0 | struct luks2_reencrypt *rh; |
3754 | 0 | crypt_reencrypt_info ri; |
3755 | 0 | uint8_t requirement_version; |
3756 | 0 | const char *resilience; |
3757 | 0 | struct volume_key *vks = NULL; |
3758 | |
3759 | 0 | log_dbg(cd, "Loading LUKS2 reencryption context for metadata repair."); |
3760 | |
3761 | 0 | rh = crypt_get_luks2_reencrypt(cd); |
3762 | 0 | if (rh) { |
3763 | 0 | LUKS2_reencrypt_free(cd, rh); |
3764 | 0 | crypt_set_luks2_reencrypt(cd, NULL); |
3765 | 0 | rh = NULL; |
3766 | 0 | } |
3767 | |
3768 | 0 | ri = LUKS2_reencrypt_status(hdr); |
3769 | 0 | if (ri == CRYPT_REENCRYPT_INVALID) |
3770 | 0 | return -EINVAL; |
3771 | | |
3772 | 0 | if (ri < CRYPT_REENCRYPT_CLEAN) { |
3773 | 0 | log_err(cd, _("Device is not in reencryption.")); |
3774 | 0 | return -EINVAL; |
3775 | 0 | } |
3776 | | |
3777 | 0 | r = LUKS2_reencrypt_lock(cd, &reencrypt_lock); |
3778 | 0 | if (r < 0) { |
3779 | 0 | if (r == -EBUSY) |
3780 | 0 | log_err(cd, _("Reencryption process is already running.")); |
3781 | 0 | else |
3782 | 0 | log_err(cd, _("Failed to acquire reencryption lock.")); |
3783 | 0 | return r; |
3784 | 0 | } |
3785 | | |
3786 | | /* With reencryption lock held, reload device context and verify metadata state */ |
3787 | 0 | r = crypt_load(cd, CRYPT_LUKS2, NULL); |
3788 | 0 | if (r) |
3789 | 0 | goto out; |
3790 | | |
3791 | 0 | ri = LUKS2_reencrypt_status(hdr); |
3792 | 0 | if (ri == CRYPT_REENCRYPT_INVALID) { |
3793 | 0 | r = -EINVAL; |
3794 | 0 | goto out; |
3795 | 0 | } |
3796 | 0 | if (ri == CRYPT_REENCRYPT_NONE) { |
3797 | 0 | r = 0; |
3798 | 0 | goto out; |
3799 | 0 | } |
3800 | | |
3801 | 0 | resilience = reencrypt_resilience_type(hdr); |
3802 | 0 | if (!resilience) { |
3803 | 0 | r = -EINVAL; |
3804 | 0 | goto out; |
3805 | 0 | } |
3806 | | |
3807 | 0 | if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_DECRYPT && |
3808 | 0 | !strncmp(resilience, "datashift-", 10) && |
3809 | 0 | LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0) |
3810 | 0 | requirement_version = LUKS2_DECRYPT_DATASHIFT_REQ_VERSION; |
3811 | 0 | else |
3812 | 0 | requirement_version = LUKS2_REENCRYPT_REQ_VERSION; |
3813 | |
3814 | 0 | r = LUKS2_keyslot_context_open_all_segments(cd, keyslot_old, keyslot_new, kc_old, kc_new, &vks); |
3815 | 0 | if (r < 0) |
3816 | 0 | goto out; |
3817 | | |
3818 | 0 | r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, requirement_version, vks); |
3819 | 0 | crypt_free_volume_key(vks); |
3820 | 0 | vks = NULL; |
3821 | 0 | if (r < 0) |
3822 | 0 | goto out; |
3823 | | |
3824 | | /* replace the old online-reencrypt flag with the updated version and commit the metadata */
3825 | 0 | r = reencrypt_update_flag(cd, requirement_version, true, true); |
3826 | 0 | out: |
3827 | 0 | LUKS2_reencrypt_unlock(cd, reencrypt_lock); |
3828 | 0 | crypt_free_volume_key(vks); |
3829 | 0 | return r; |
3830 | |
3831 | 0 | } |
3832 | | |
3833 | | static int reencrypt_init_by_keyslot_context(struct crypt_device *cd, |
3834 | | const char *name, |
3835 | | struct crypt_keyslot_context *kc_old, |
3836 | | struct crypt_keyslot_context *kc_new, |
3837 | | int keyslot_old, |
3838 | | int keyslot_new, |
3839 | | const char *cipher, |
3840 | | const char *cipher_mode, |
3841 | | const struct crypt_params_reencrypt *params) |
3842 | 0 | { |
3843 | 0 | int r; |
3844 | 0 | crypt_reencrypt_info ri; |
3845 | 0 | size_t key_length; |
3846 | 0 | struct volume_key *vks = NULL; |
3847 | 0 | uint32_t flags = params ? params->flags : 0; |
3848 | 0 | struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2); |
3849 | |
|
3850 | 0 | if (params && (params->flags & CRYPT_REENCRYPT_CREATE_NEW_DIGEST) && |
3851 | 0 | (!kc_new || !kc_new->get_luks2_key || !kc_new->get_key_size || |
3852 | 0 | (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))) |
3853 | 0 | return -EINVAL; |
3854 | | |
3855 | | /* Short-circuit into reencryption metadata repair and finish immediately. */
3856 | 0 | if (flags & CRYPT_REENCRYPT_REPAIR_NEEDED) |
3857 | 0 | return reencrypt_repair(cd, hdr, keyslot_old, keyslot_new, kc_old, kc_new); |
3858 | | |
3859 | | /* Short-circuit into reencryption recovery and finish immediately. */
3860 | 0 | if (flags & CRYPT_REENCRYPT_RECOVERY) |
3861 | 0 | return reencrypt_recovery_by_keyslot_context(cd, hdr, keyslot_old, keyslot_new, kc_old, kc_new); |
3862 | | |
3863 | 0 | if (name && !device_direct_io(crypt_data_device(cd))) { |
3864 | 0 | log_dbg(cd, "Device %s does not support direct I/O.", device_path(crypt_data_device(cd))); |
3865 | | /* FIXME: Add a more specific error message for translation later. */
3866 | 0 | log_err(cd, _("Failed to initialize reencryption device stack.")); |
3867 | 0 | return -EINVAL; |
3868 | 0 | } |
3869 | | |
3870 | 0 | if (cipher && !crypt_cipher_wrapped_key(cipher, cipher_mode)) { |
3871 | 0 | if (keyslot_new == CRYPT_ANY_SLOT && kc_new && kc_new->get_key_size) |
3872 | 0 | r = kc_new->get_key_size(cd, kc_new, &key_length); |
3873 | 0 | else { |
3874 | 0 | r = crypt_keyslot_get_key_size(cd, keyslot_new); |
3875 | 0 | if (r >= 0) |
3876 | 0 | key_length = r; |
3877 | 0 | } |
3878 | 0 | if (r < 0) |
3879 | 0 | return r; |
3880 | 0 | r = LUKS2_check_cipher(cd, key_length, cipher, cipher_mode); |
3881 | 0 | if (r < 0) { |
3882 | 0 | log_err(cd, _("Unable to use cipher specification %s-%s for LUKS2."), cipher, cipher_mode); |
3883 | 0 | return r; |
3884 | 0 | } |
3885 | 0 | } |
3886 | | |
3887 | 0 | r = LUKS2_device_write_lock(cd, hdr, crypt_metadata_device(cd)); |
3888 | 0 | if (r) |
3889 | 0 | return r; |
3890 | | |
3891 | 0 | ri = LUKS2_reencrypt_status(hdr); |
3892 | 0 | if (ri == CRYPT_REENCRYPT_INVALID) { |
3893 | 0 | device_write_unlock(cd, crypt_metadata_device(cd)); |
3894 | 0 | return -EINVAL; |
3895 | 0 | } |
3896 | | |
3897 | 0 | if ((ri > CRYPT_REENCRYPT_NONE) && (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY)) { |
3898 | 0 | device_write_unlock(cd, crypt_metadata_device(cd)); |
3899 | 0 | log_err(cd, _("LUKS2 reencryption already initialized in metadata.")); |
3900 | 0 | return -EBUSY; |
3901 | 0 | } |
3902 | | |
3903 | 0 | if (ri == CRYPT_REENCRYPT_NONE && !(flags & CRYPT_REENCRYPT_RESUME_ONLY)) { |
3904 | 0 | r = reencrypt_init(cd, name, hdr, kc_old, kc_new, keyslot_old, |
3905 | 0 | keyslot_new, cipher, cipher_mode, params, &vks); |
3906 | 0 | if (r < 0) |
3907 | 0 | log_err(cd, _("Failed to initialize LUKS2 reencryption in metadata.")); |
3908 | 0 | } else if (ri > CRYPT_REENCRYPT_NONE) { |
3909 | 0 | log_dbg(cd, "LUKS2 reencryption already initialized."); |
3910 | 0 | r = 0; |
3911 | 0 | } |
3912 | |
3913 | 0 | device_write_unlock(cd, crypt_metadata_device(cd)); |
3914 | |
3915 | 0 | if (r < 0 || (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY)) |
3916 | 0 | goto out; |
3917 | | |
3918 | 0 | r = reencrypt_load_by_keyslot_context(cd, name, kc_old, kc_new, keyslot_old, |
3919 | 0 | keyslot_new, &vks, params); |
3920 | 0 | out: |
3921 | 0 | if (r < 0) |
3922 | 0 | crypt_drop_uploaded_keyring_key(cd, vks); |
3923 | 0 | crypt_free_volume_key(vks); |
3924 | 0 | return r < 0 ? r : LUKS2_find_keyslot(hdr, "reencrypt"); |
3925 | 0 | } |
3926 | | #else |
3927 | | static int reencrypt_init_by_keyslot_context(struct crypt_device *cd, |
3928 | | const char *name __attribute__((unused)), |
3929 | | struct crypt_keyslot_context *kc_old __attribute__((unused)), |
3930 | | struct crypt_keyslot_context *kc_new __attribute__((unused)), |
3931 | | int keyslot_old __attribute__((unused)), |
3932 | | int keyslot_new __attribute__((unused)), |
3933 | | const char *cipher __attribute__((unused)), |
3934 | | const char *cipher_mode __attribute__((unused)), |
3935 | | const struct crypt_params_reencrypt *params __attribute__((unused))) |
3936 | | { |
3937 | | log_err(cd, _("This operation is not supported for this device type.")); |
3938 | | return -ENOTSUP; |
3939 | | } |
3940 | | #endif |
3941 | | |
3942 | | int crypt_reencrypt_init_by_keyring(struct crypt_device *cd, |
3943 | | const char *name, |
3944 | | const char *passphrase_description, |
3945 | | int keyslot_old, |
3946 | | int keyslot_new, |
3947 | | const char *cipher, |
3948 | | const char *cipher_mode, |
3949 | | const struct crypt_params_reencrypt *params) |
3950 | 0 | { |
3951 | 0 | int r; |
3952 | 0 | struct crypt_keyslot_context kc = {0}; |
3953 | |
3954 | 0 | if (onlyLUKS2reencrypt(cd) || !passphrase_description) |
3955 | 0 | return -EINVAL; |
3956 | 0 | if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY)) |
3957 | 0 | return -EINVAL; |
3958 | | |
3959 | 0 | if (device_is_dax(crypt_data_device(cd)) > 0) { |
3960 | 0 | log_err(cd, _("Reencryption is not supported for DAX (persistent memory) devices.")); |
3961 | 0 | return -EINVAL; |
3962 | 0 | } |
3963 | | |
3964 | 0 | crypt_keyslot_context_init_by_keyring_internal(&kc, passphrase_description); |
3965 | 0 | r = reencrypt_init_by_keyslot_context(cd, name, &kc, &kc, keyslot_old, |
3966 | 0 | keyslot_new, cipher, cipher_mode, params); |
3967 | |
3968 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
3969 | |
3970 | 0 | return r; |
3971 | 0 | } |
3972 | | |
3973 | | int crypt_reencrypt_init_by_passphrase(struct crypt_device *cd, |
3974 | | const char *name, |
3975 | | const char *passphrase, |
3976 | | size_t passphrase_size, |
3977 | | int keyslot_old, |
3978 | | int keyslot_new, |
3979 | | const char *cipher, |
3980 | | const char *cipher_mode, |
3981 | | const struct crypt_params_reencrypt *params) |
3982 | 0 | { |
3983 | 0 | int r; |
3984 | 0 | struct crypt_keyslot_context kc = {0}; |
3985 | |
3986 | 0 | if (onlyLUKS2reencrypt(cd) || !passphrase) |
3987 | 0 | return -EINVAL; |
3988 | 0 | if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY)) |
3989 | 0 | return -EINVAL; |
3990 | | |
3991 | 0 | if (device_is_dax(crypt_data_device(cd)) > 0) { |
3992 | 0 | log_err(cd, _("Reencryption is not supported for DAX (persistent memory) devices.")); |
3993 | 0 | return -EINVAL; |
3994 | 0 | } |
3995 | | |
3996 | 0 | crypt_keyslot_context_init_by_passphrase_internal(&kc, passphrase, passphrase_size); |
3997 | |
3998 | 0 | r = reencrypt_init_by_keyslot_context(cd, name, &kc, &kc, keyslot_old, |
3999 | 0 | keyslot_new, cipher, cipher_mode, params); |
4000 | |
4001 | 0 | crypt_keyslot_context_destroy_internal(&kc); |
4002 | |
4003 | 0 | return r; |
4004 | 0 | } |
4005 | | |
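 | | /*
 | |  * Typical driver for these entry points; a sketch under assumptions
 | |  * (device path, passphrase, cipher and keyslots are placeholders,
 | |  * error handling shortened). The init call prepares the metadata and
 | |  * the in-memory context; crypt_reencrypt_run() then processes hotzones:
 | |  *
 | |  *     int r = -EINVAL;
 | |  *     struct crypt_device *cd = NULL;
 | |  *     struct crypt_params_reencrypt rparams = {
 | |  *         .mode = CRYPT_REENCRYPT_REENCRYPT,
 | |  *         .direction = CRYPT_REENCRYPT_FORWARD,
 | |  *         .resilience = "checksum",
 | |  *         .hash = "sha256",
 | |  *         .luks2 = &(struct crypt_params_luks2){ .sector_size = 512 },
 | |  *     };
 | |  *
 | |  *     if (crypt_init(&cd, "/dev/sdb1") || crypt_load(cd, CRYPT_LUKS2, NULL))
 | |  *         goto out;
 | |  *     if (crypt_reencrypt_init_by_passphrase(cd, NULL, "secret", 6,
 | |  *             CRYPT_ANY_SLOT, 0, "aes", "xts-plain64", &rparams) < 0)
 | |  *         goto out;
 | |  *     r = crypt_reencrypt_run(cd, NULL, NULL);
 | |  * out:
 | |  *     crypt_free(cd);
 | |  */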
4006 | | int crypt_reencrypt_init_by_keyslot_context(struct crypt_device *cd, |
4007 | | const char *name, |
4008 | | struct crypt_keyslot_context *kc_old, |
4009 | | struct crypt_keyslot_context *kc_new, |
4010 | | int keyslot_old, |
4011 | | int keyslot_new, |
4012 | | const char *cipher, |
4013 | | const char *cipher_mode, |
4014 | | const struct crypt_params_reencrypt *params) |
4015 | 0 | { |
4016 | 0 | if (onlyLUKS2reencrypt(cd) || (!kc_old && !kc_new)) |
4017 | 0 | return -EINVAL; |
4018 | 0 | if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY)) |
4019 | 0 | return -EINVAL; |
4020 | | |
4021 | 0 | if (device_is_dax(crypt_data_device(cd)) > 0) { |
4022 | 0 | log_err(cd, _("Reencryption is not supported for DAX (persistent memory) devices.")); |
4023 | 0 | return -EINVAL; |
4024 | 0 | } |
4025 | | |
4026 | 0 | return reencrypt_init_by_keyslot_context(cd, name, kc_old, kc_new, keyslot_old, keyslot_new, cipher, cipher_mode, params); |
4027 | 0 | } |
4028 | | |
4029 | | #if USE_LUKS2_REENCRYPTION |
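 | | /*
 | |  * Outline of one hotzone pass as implemented in reencrypt_step() below:
 | |  * build the hotzone segments in memory, arm the resilience protection,
 | |  * switch the online hotzone to an error target so no bio can pass,
 | |  * read the hotzone, persist the resilience data (first commit point),
 | |  * decrypt with the old key, write back with the new key, sync, commit
 | |  * the new segment layout (second commit point) and resume the hotzone.
 | |  */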
4030 | | static reenc_status_t reencrypt_step(struct crypt_device *cd, |
4031 | | struct luks2_hdr *hdr, |
4032 | | struct luks2_reencrypt *rh, |
4033 | | uint64_t device_size, |
4034 | | bool online) |
4035 | 0 | { |
4036 | 0 | int r; |
4037 | 0 | struct reenc_protection *rp; |
4038 | |
4039 | 0 | assert(hdr); |
4040 | 0 | assert(rh); |
4041 | |
|
4042 | 0 | rp = &rh->rp; |
4043 | | |
4044 | | /* in-memory only */
4045 | 0 | r = reencrypt_make_segments(cd, hdr, rh, device_size); |
4046 | 0 | if (r) |
4047 | 0 | return REENC_ERR; |
4048 | | |
4049 | 0 | r = reencrypt_assign_segments(cd, hdr, rh, 1, 0); |
4050 | 0 | if (r) { |
4051 | 0 | log_err(cd, _("Failed to set device segments for next reencryption hotzone.")); |
4052 | 0 | return REENC_ERR; |
4053 | 0 | } |
4054 | | |
4055 | 0 | log_dbg(cd, "Reencrypting chunk starting at offset: %" PRIu64 ", size: %" PRIu64 ".", rh->offset, rh->length);
4056 | 0 | log_dbg(cd, "data_offset: %" PRIu64, crypt_get_data_offset(cd) << SECTOR_SHIFT); |
4057 | |
4058 | 0 | if (!rh->offset && rp->type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) { |
4059 | 0 | crypt_storage_wrapper_destroy(rh->cw1); |
4060 | 0 | log_dbg(cd, "Reinitializing old segment storage wrapper for moved segment."); |
4061 | 0 | r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd), |
4062 | 0 | LUKS2_reencrypt_get_data_offset_moved(hdr), |
4063 | 0 | crypt_get_iv_offset(cd), |
4064 | 0 | reencrypt_get_sector_size_old(hdr), |
4065 | 0 | reencrypt_segment_cipher_old(hdr), |
4066 | 0 | crypt_volume_key_by_id(rh->vks, rh->digest_old), |
4067 | 0 | rh->wflags1); |
4068 | 0 | if (r) { |
4069 | 0 | log_err(cd, _("Failed to initialize old segment storage wrapper.")); |
4070 | 0 | return REENC_ROLLBACK; |
4071 | 0 | } |
4072 | | |
4073 | 0 | if (rh->rp_moved_segment.type != REENC_PROTECTION_NOT_SET) { |
4074 | 0 | log_dbg(cd, "Switching to moved segment resilience type."); |
4075 | 0 | rp = &rh->rp_moved_segment; |
4076 | 0 | } |
4077 | 0 | } |
4078 | | |
4079 | 0 | r = reencrypt_hotzone_protect_ready(cd, rp); |
4080 | 0 | if (r) { |
4081 | 0 | log_err(cd, _("Failed to initialize hotzone protection.")); |
4082 | 0 | return REENC_ROLLBACK; |
4083 | 0 | } |
4084 | | |
4085 | 0 | if (online) { |
4086 | 0 | r = reencrypt_refresh_overlay_devices(cd, hdr, rh->overlay_name, rh->hotzone_name, rh->vks, rh->device_size, rh->flags); |
4087 | | /* Tear down overlay devices with dm-error. No bio shall pass! */
4088 | 0 | if (r != REENC_OK) |
4089 | 0 | return r; |
4090 | 0 | } |
4091 | | |
4092 | 0 | rh->read = crypt_storage_wrapper_read(rh->cw1, rh->offset, rh->reenc_buffer, rh->length); |
4093 | 0 | if (rh->read < 0) { |
4094 | | /* severity normal */ |
4095 | 0 | log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset); |
4096 | 0 | return REENC_ROLLBACK; |
4097 | 0 | } |
4098 | | |
4099 | | /* metadata commit point */ |
4100 | 0 | r = reencrypt_hotzone_protect_final(cd, hdr, rh->reenc_keyslot, rp, rh->reenc_buffer, rh->read); |
4101 | 0 | if (r < 0) { |
4102 | | /* severity normal */ |
4103 | 0 | log_err(cd, _("Failed to write reencryption resilience metadata.")); |
4104 | 0 | return REENC_ROLLBACK; |
4105 | 0 | } |
4106 | | |
4107 | 0 | r = crypt_storage_wrapper_decrypt(rh->cw1, rh->offset, rh->reenc_buffer, rh->read); |
4108 | 0 | if (r) { |
4109 | | /* severity normal */ |
4110 | 0 | log_err(cd, _("Decryption failed.")); |
4111 | 0 | return REENC_ROLLBACK; |
4112 | 0 | } |
4113 | 0 | if (rh->read != crypt_storage_wrapper_encrypt_write(rh->cw2, rh->offset, rh->reenc_buffer, rh->read)) { |
4114 | | /* severity fatal */ |
4115 | 0 | log_err(cd, _("Failed to write hotzone area starting at %" PRIu64 "."), rh->offset); |
4116 | 0 | return REENC_FATAL; |
4117 | 0 | } |
4118 | | |
4119 | 0 | if (rp->type != REENC_PROTECTION_NONE && crypt_storage_wrapper_datasync(rh->cw2)) { |
4120 | 0 | log_err(cd, _("Failed to sync data.")); |
4121 | 0 | return REENC_FATAL; |
4122 | 0 | } |
4123 | | |
4124 | | /* metadata commit safe point */ |
4125 | 0 | r = reencrypt_assign_segments(cd, hdr, rh, 0, rp->type != REENC_PROTECTION_NONE); |
4126 | 0 | if (r) { |
4127 | | /* severity fatal */ |
4128 | 0 | log_err(cd, _("Failed to update metadata after current reencryption hotzone completed.")); |
4129 | 0 | return REENC_FATAL; |
4130 | 0 | } |
4131 | | |
4132 | 0 | if (online) { |
4133 | | /* severity normal */ |
4134 | 0 | log_dbg(cd, "Resuming device %s", rh->hotzone_name); |
4135 | 0 | r = dm_resume_device(cd, rh->hotzone_name, DM_RESUME_PRIVATE); |
4136 | 0 | if (r) { |
4137 | 0 | log_err(cd, _("Failed to resume device %s."), rh->hotzone_name); |
4138 | 0 | return REENC_ERR; |
4139 | 0 | } |
4140 | 0 | } |
4141 | | |
4142 | 0 | return REENC_OK; |
4143 | 0 | } |
4144 | | |
4145 | | static int reencrypt_erase_backup_segments(struct crypt_device *cd, |
4146 | | struct luks2_hdr *hdr) |
4147 | 0 | { |
4148 | 0 | int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous"); |
4149 | 0 | if (segment >= 0) { |
4150 | 0 | if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0)) |
4151 | 0 | return -EINVAL; |
4152 | 0 | json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment); |
4153 | 0 | } |
4154 | 0 | segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final"); |
4155 | 0 | if (segment >= 0) { |
4156 | 0 | if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0)) |
4157 | 0 | return -EINVAL; |
4158 | 0 | json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment); |
4159 | 0 | } |
4160 | 0 | segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment"); |
4161 | 0 | if (segment >= 0) { |
4162 | 0 | if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0)) |
4163 | 0 | return -EINVAL; |
4164 | 0 | json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment); |
4165 | 0 | } |
4166 | | |
4167 | 0 | return 0; |
4168 | 0 | } |
4169 | | |
4170 | | static int reencrypt_wipe_unused_device_area(struct crypt_device *cd, struct luks2_reencrypt *rh) |
4171 | 0 | { |
4172 | 0 | uint64_t offset, length, dev_size; |
4173 | 0 | int r = 0; |
4174 | |
|
4175 | 0 | assert(cd); |
4176 | 0 | assert(rh); |
4177 | |
|
4178 | 0 | if (rh->jobj_segment_moved && rh->mode == CRYPT_REENCRYPT_ENCRYPT) { |
4179 | 0 | offset = json_segment_get_offset(rh->jobj_segment_moved, 0); |
4180 | 0 | length = json_segment_get_size(rh->jobj_segment_moved, 0); |
4181 | 0 | log_dbg(cd, "Wiping %" PRIu64 " bytes of backup segment data at offset %" PRIu64, |
4182 | 0 | length, offset); |
4183 | 0 | r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM, |
4184 | 0 | offset, length, 1024 * 1024, NULL, NULL); |
4185 | 0 | } |
4186 | |
|
4187 | 0 | if (r < 0) |
4188 | 0 | return r; |
4189 | | |
4190 | 0 | if (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->direction == CRYPT_REENCRYPT_FORWARD) { |
4191 | 0 | r = device_size(crypt_data_device(cd), &dev_size); |
4192 | 0 | if (r < 0) |
4193 | 0 | return r; |
4194 | | |
4195 | 0 | if (dev_size < data_shift_value(&rh->rp)) |
4196 | 0 | return -EINVAL; |
4197 | | |
4198 | 0 | offset = dev_size - data_shift_value(&rh->rp); |
4199 | 0 | length = data_shift_value(&rh->rp); |
4200 | 0 | log_dbg(cd, "Wiping %" PRIu64 " bytes of data at offset %" PRIu64, |
4201 | 0 | length, offset); |
4202 | 0 | r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM, |
4203 | 0 | offset, length, 1024 * 1024, NULL, NULL); |
4204 | 0 | } |
4205 | | |
4206 | 0 | return r; |
4207 | 0 | } |
4208 | | |
4209 | | static int reencrypt_teardown_ok(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh) |
4210 | 0 | { |
4211 | 0 | int i, r; |
4212 | 0 | uint64_t dmt_flags; |
4213 | 0 | bool finished = !(rh->device_size > rh->progress); |
4214 | |
|
4215 | 0 | if (rh->rp.type == REENC_PROTECTION_NONE && |
4216 | 0 | LUKS2_hdr_write(cd, hdr)) { |
4217 | 0 | log_err(cd, _("Failed to write LUKS2 metadata.")); |
4218 | 0 | return -EINVAL; |
4219 | 0 | } |
4220 | | |
4221 | 0 | if (rh->online) { |
4222 | 0 | r = LUKS2_reload(cd, rh->device_name, rh->vks, rh->device_size, rh->flags); |
4223 | 0 | if (r) |
4224 | 0 | log_err(cd, _("Failed to reload device %s."), rh->device_name); |
4225 | 0 | if (!r) { |
4226 | 0 | r = dm_resume_device(cd, rh->device_name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH); |
4227 | 0 | if (r) |
4228 | 0 | log_err(cd, _("Failed to resume device %s."), rh->device_name); |
4229 | 0 | } |
4230 | 0 | dm_remove_device(cd, rh->overlay_name, 0); |
4231 | 0 | dm_remove_device(cd, rh->hotzone_name, 0); |
4232 | |
4233 | 0 | if (!r && finished && rh->mode == CRYPT_REENCRYPT_DECRYPT && |
4234 | 0 | !dm_flags(cd, DM_LINEAR, &dmt_flags) && (dmt_flags & DM_DEFERRED_SUPPORTED)) |
4235 | 0 | dm_remove_device(cd, rh->device_name, CRYPT_DEACTIVATE_DEFERRED); |
4236 | 0 | } |
4237 | |
4238 | 0 | if (finished) { |
4239 | 0 | if (reencrypt_wipe_unused_device_area(cd, rh)) |
4240 | 0 | log_err(cd, _("Failed to wipe unused data device area.")); |
4241 | 0 | if (reencrypt_get_data_offset_new(hdr) && LUKS2_set_keyslots_size(hdr, reencrypt_get_data_offset_new(hdr))) |
4242 | 0 | log_dbg(cd, "Failed to set new keyslots area size."); |
4243 | 0 | if (rh->digest_old >= 0 && rh->digest_new != rh->digest_old) |
4244 | 0 | for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++) |
4245 | 0 | if (LUKS2_digest_by_keyslot(hdr, i) == rh->digest_old && crypt_keyslot_destroy(cd, i)) |
4246 | 0 | log_err(cd, _("Failed to remove unused (unbound) keyslot %d."), i); |
4247 | |
4248 | 0 | if (reencrypt_erase_backup_segments(cd, hdr)) |
4249 | 0 | log_dbg(cd, "Failed to erase backup segments.");
4250 | |
4251 | 0 | if (reencrypt_update_flag(cd, 0, false, false)) |
4252 | 0 | log_dbg(cd, "Failed to disable reencryption requirement flag."); |
4253 | | |
4254 | | /* metadata commit point also removing reencryption flag on-disk */ |
4255 | 0 | if (crypt_keyslot_destroy(cd, rh->reenc_keyslot)) { |
4256 | 0 | log_err(cd, _("Failed to remove reencryption keyslot.")); |
4257 | 0 | return -EINVAL; |
4258 | 0 | } |
4259 | 0 | } |
4260 | | |
4261 | 0 | return 0; |
4262 | 0 | } |
4263 | | |
4264 | | static void reencrypt_teardown_fatal(struct crypt_device *cd, struct luks2_reencrypt *rh) |
4265 | 0 | { |
4266 | 0 | log_err(cd, _("Fatal error while reencrypting chunk starting at %" PRIu64 ", %" PRIu64 " sectors long."), |
4267 | 0 | (rh->offset >> SECTOR_SHIFT) + crypt_get_data_offset(cd), rh->length >> SECTOR_SHIFT); |
4268 | |
4269 | 0 | if (rh->online) { |
4270 | 0 | log_err(cd, _("Online reencryption failed.")); |
4271 | 0 | if (dm_status_suspended(cd, rh->hotzone_name) > 0) { |
4272 | 0 | log_dbg(cd, "Hotzone device %s suspended, replacing with dm-error.", rh->hotzone_name); |
4273 | 0 | if (dm_error_device(cd, rh->hotzone_name)) { |
4274 | 0 | log_err(cd, _("Failed to replace suspended device %s with dm-error target."), rh->hotzone_name); |
4275 | 0 | log_err(cd, _("Do not resume the device unless replaced with error target manually.")); |
4276 | 0 | } |
4277 | 0 | } |
4278 | 0 | } |
4279 | 0 | } |
4280 | | |
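| | /* Common teardown dispatcher: reports final progress and finalizes
| |  * metadata on success, handles the fatal path otherwise, and always
| |  * releases the reencryption context (and with it the reencryption lock). */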
4281 | | static int reencrypt_teardown(struct crypt_device *cd, struct luks2_hdr *hdr, |
4282 | | struct luks2_reencrypt *rh, reenc_status_t rs, bool interrupted, |
4283 | | int (*progress)(uint64_t size, uint64_t offset, void *usrptr), |
4284 | | void *usrptr) |
4285 | 0 | { |
4286 | 0 | int r; |
4287 | |
4288 | 0 | switch (rs) { |
4289 | 0 | case REENC_OK: |
4290 | 0 | if (progress && !interrupted) |
4291 | 0 | progress(rh->device_size, rh->progress, usrptr); |
4292 | 0 | r = reencrypt_teardown_ok(cd, hdr, rh); |
4293 | 0 | break; |
4294 | 0 | case REENC_FATAL: |
4295 | 0 | reencrypt_teardown_fatal(cd, rh); |
4296 | | /* fall-through */ |
4297 | 0 | default: |
4298 | 0 | r = -EIO; |
4299 | 0 | } |
4300 | | |
4301 | | /* this also frees the reencryption lock */
4302 | 0 | LUKS2_reencrypt_free(cd, rh); |
4303 | 0 | crypt_set_luks2_reencrypt(cd, NULL); |
4304 | |
4305 | 0 | return r; |
4306 | 0 | } |
4307 | | |
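| | /* Public entry point: resumes a previously initialized LUKS2 reencryption
| |  * and loops over reencrypt_step() until the whole device is processed.
| |  * A progress callback returning non-zero interrupts the loop cleanly.
| |  *
| |  * Minimal usage sketch (assumes a context prepared earlier, e.g. via
| |  * crypt_reencrypt_init_by_passphrase(); print_progress is hypothetical):
| |  *
| |  *   static int print_progress(uint64_t size, uint64_t offset, void *usrptr)
| |  *   {
| |  *           printf("\r%" PRIu64 " / %" PRIu64 " bytes", offset, size);
| |  *           return 0; // non-zero return interrupts reencryption
| |  *   }
| |  *
| |  *   r = crypt_reencrypt_run(cd, print_progress, NULL);
| |  */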
4308 | | int crypt_reencrypt_run( |
4309 | | struct crypt_device *cd, |
4310 | | int (*progress)(uint64_t size, uint64_t offset, void *usrptr), |
4311 | | void *usrptr) |
4312 | 0 | { |
4313 | 0 | int r; |
4314 | 0 | crypt_reencrypt_info ri; |
4315 | 0 | struct luks2_hdr *hdr; |
4316 | 0 | struct luks2_reencrypt *rh; |
4317 | 0 | reenc_status_t rs; |
4318 | 0 | bool quit = false; |
4319 | |
4320 | 0 | if (onlyLUKS2reencrypt(cd)) |
4321 | 0 | return -EINVAL; |
4322 | | |
4323 | 0 | hdr = crypt_get_hdr(cd, CRYPT_LUKS2); |
4324 | |
4325 | 0 | ri = LUKS2_reencrypt_status(hdr); |
4326 | 0 | if (ri > CRYPT_REENCRYPT_CLEAN) { |
4327 | 0 | log_err(cd, _("Cannot proceed with reencryption. Unexpected reencryption status.")); |
4328 | 0 | return -EINVAL; |
4329 | 0 | } |
4330 | | |
4331 | 0 | rh = crypt_get_luks2_reencrypt(cd); |
4332 | 0 | if (!rh || (!rh->reenc_lock && crypt_metadata_locking_enabled())) { |
4333 | 0 | log_err(cd, _("Missing or invalid reencrypt context.")); |
4334 | 0 | return -EINVAL; |
4335 | 0 | } |
4336 | | |
4337 | 0 | log_dbg(cd, "Resuming LUKS2 reencryption."); |
4338 | |
4339 | 0 | if (rh->online) { |
4340 | | /* This is the last resort to avoid data corruption. Aborting is justified here. */
4341 | 0 | assert(device_direct_io(crypt_data_device(cd))); |
4342 | |
4343 | 0 | if (reencrypt_init_device_stack(cd, rh)) { |
4344 | 0 | log_err(cd, _("Failed to initialize reencryption device stack.")); |
4345 | 0 | return -EINVAL; |
4346 | 0 | } |
4347 | 0 | } |
4348 | | |
4349 | 0 | log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size); |
4350 | |
4351 | 0 | rs = REENC_OK; |
4352 | |
4353 | 0 | if (progress && progress(rh->device_size, rh->progress, usrptr)) |
4354 | 0 | quit = true; |
4355 | |
4356 | 0 | while (!quit && (rh->device_size > rh->progress)) { |
4357 | 0 | rs = reencrypt_step(cd, hdr, rh, rh->device_size, rh->online); |
4358 | 0 | if (rs != REENC_OK) |
4359 | 0 | break; |
4360 | | |
4361 | 0 | log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size); |
4362 | 0 | if (progress && progress(rh->device_size, rh->progress, usrptr)) |
4363 | 0 | quit = true; |
4364 | |
4365 | 0 | r = reencrypt_context_update(cd, rh); |
4366 | 0 | if (r) { |
4367 | 0 | log_err(cd, _("Failed to update reencryption context.")); |
4368 | 0 | rs = REENC_ERR; |
4369 | 0 | break; |
4370 | 0 | } |
4371 | | |
4372 | 0 | log_dbg(cd, "Next reencryption offset will be %" PRIu64 " sectors.", rh->offset); |
4373 | 0 | log_dbg(cd, "Next reencryption chunk size will be %" PRIu64 " sectors.", rh->length);
4374 | 0 | } |
4375 | |
4376 | 0 | r = reencrypt_teardown(cd, hdr, rh, rs, quit, progress, usrptr); |
4377 | 0 | return r; |
4378 | 0 | } |
4379 | | |
4380 | | |
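| | /* Crash recovery: loads the reencryption context from metadata, recovers
| |  * the interrupted hotzone using the stored resilience data, reassigns
| |  * segments and finalizes via the regular teardown path. */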
4381 | | static int reencrypt_recovery(struct crypt_device *cd, |
4382 | | struct luks2_hdr *hdr, |
4383 | | uint64_t device_size, |
4384 | | struct volume_key *vks) |
4385 | 0 | { |
4386 | 0 | int r; |
4387 | 0 | struct luks2_reencrypt *rh = NULL; |
4388 | |
4389 | 0 | r = reencrypt_load(cd, hdr, device_size, 0, 0, vks, &rh); |
4390 | 0 | if (r < 0) { |
4391 | 0 | log_err(cd, _("Failed to load LUKS2 reencryption context.")); |
4392 | 0 | return r; |
4393 | 0 | } |
4394 | | |
4395 | 0 | r = reencrypt_recover_segment(cd, hdr, rh, vks); |
4396 | 0 | if (r < 0) |
4397 | 0 | goto out; |
4398 | | |
4399 | 0 | if ((r = reencrypt_assign_segments(cd, hdr, rh, 0, 0))) |
4400 | 0 | goto out; |
4401 | | |
4402 | 0 | r = reencrypt_context_update(cd, rh); |
4403 | 0 | if (r) { |
4404 | 0 | log_err(cd, _("Failed to update reencryption context.")); |
4405 | 0 | goto out; |
4406 | 0 | } |
4407 | | |
4408 | 0 | r = reencrypt_teardown_ok(cd, hdr, rh); |
4409 | 0 | if (!r) |
4410 | 0 | r = LUKS2_hdr_write(cd, hdr); |
4411 | 0 | out: |
4412 | 0 | LUKS2_reencrypt_free(cd, rh); |
4413 | |
4414 | 0 | return r; |
4415 | 0 | } |
4416 | | #else /* USE_LUKS2_REENCRYPTION */ |
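| | /* Stub returning -ENOTSUP when compiled without LUKS2 reencryption support. */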
4417 | | int crypt_reencrypt_run( |
4418 | | struct crypt_device *cd, |
4419 | | int (*progress)(uint64_t size, uint64_t offset, void *usrptr), |
4420 | | void *usrptr) |
4421 | | { |
4422 | | UNUSED(progress); |
4423 | | UNUSED(usrptr); |
4424 | | |
4425 | | log_err(cd, _("This operation is not supported for this device type.")); |
4426 | | return -ENOTSUP; |
4427 | | } |
4428 | | #endif |
4429 | | |
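| | /* Backward-compatible wrapper; identical to crypt_reencrypt_run() with a
| |  * NULL user pointer for the progress callback. */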
4430 | | int crypt_reencrypt( |
4431 | | struct crypt_device *cd, |
4432 | | int (*progress)(uint64_t size, uint64_t offset, void *usrptr)) |
4433 | 0 | { |
4434 | 0 | return crypt_reencrypt_run(cd, progress, NULL); |
4435 | 0 | } |
4436 | | |
4437 | | /* |
4438 | | * Use only for calculation of the minimal data device size.
4439 | | * The real data offset is taken directly from the segments!
4440 | | */ |
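| | /* Example (illustrative numbers only): with the data segment at 32768
| |  * sectors and a pending 16 MiB forward data shift, the minimal size
| |  * calculation must use 32768 + (16 MiB >> SECTOR_SHIFT) = 65536 sectors. */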
4441 | | int LUKS2_reencrypt_data_offset(struct luks2_hdr *hdr, bool blockwise) |
4442 | 0 | { |
4443 | 0 | crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr); |
4444 | 0 | uint64_t data_offset = LUKS2_get_data_offset(hdr); |
4445 | |
4446 | 0 | if (ri == CRYPT_REENCRYPT_CLEAN && reencrypt_direction(hdr) == CRYPT_REENCRYPT_FORWARD) |
4447 | 0 | data_offset += reencrypt_data_shift(hdr) >> SECTOR_SHIFT; |
4448 | |
4449 | 0 | return blockwise ? data_offset : data_offset << SECTOR_SHIFT; |
4450 | 0 | } |
4451 | | |
4452 | | /* internal only */ |
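| | /* Checks that the data device is large enough for reencryption (including
| |  * any pending data shift for backward reencryption) and stores the usable
| |  * data area size in bytes in *dev_size. */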
4453 | | int LUKS2_reencrypt_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr, |
4454 | | uint64_t check_size, uint64_t *dev_size, bool device_exclusive_check, bool dynamic) |
4455 | 0 | { |
4456 | 0 | int r; |
4457 | 0 | uint64_t data_offset, real_size = 0; |
4458 | |
4459 | 0 | if (reencrypt_direction(hdr) == CRYPT_REENCRYPT_BACKWARD && |
4460 | 0 | (LUKS2_get_segment_by_flag(hdr, "backup-moved-segment") || dynamic)) |
4461 | 0 | check_size += reencrypt_data_shift(hdr); |
4462 | |
4463 | 0 | r = device_check_access(cd, crypt_data_device(cd), |
4464 | 0 | device_exclusive_check ? DEV_EXCL : DEV_OK); |
4465 | 0 | if (r) |
4466 | 0 | return r; |
4467 | | |
4468 | 0 | data_offset = LUKS2_reencrypt_data_offset(hdr, false); |
4469 | |
4470 | 0 | r = device_check_size(cd, crypt_data_device(cd), data_offset, 1); |
4471 | 0 | if (r) |
4472 | 0 | return r; |
4473 | | |
4474 | 0 | r = device_size(crypt_data_device(cd), &real_size); |
4475 | 0 | if (r) |
4476 | 0 | return r; |
4477 | | |
4478 | 0 | log_dbg(cd, "Required minimal device size: %" PRIu64 " (%" PRIu64 " sectors)" |
4479 | 0 | ", real device size: %" PRIu64 " (%" PRIu64 " sectors) " |
4480 | 0 | "calculated device size: %" PRIu64 " (%" PRIu64 " sectors)", |
4481 | 0 | check_size, check_size >> SECTOR_SHIFT, real_size, real_size >> SECTOR_SHIFT, |
4482 | 0 | real_size - data_offset, (real_size - data_offset) >> SECTOR_SHIFT); |
4483 | |
4484 | 0 | if (real_size < data_offset || (check_size && real_size < check_size)) { |
4485 | 0 | log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd))); |
4486 | 0 | return -EINVAL; |
4487 | 0 | } |
4488 | | |
4489 | 0 | *dev_size = real_size - data_offset; |
4490 | |
4491 | 0 | return 0; |
4492 | 0 | } |
4493 | | #if USE_LUKS2_REENCRYPTION |
4494 | | /* returns keyslot number on success (>= 0) or negative errno otherwise */
4495 | | int LUKS2_reencrypt_locked_recovery_by_vks(struct crypt_device *cd, |
4496 | | struct volume_key *vks) |
4497 | 0 | { |
4498 | 0 | uint64_t minimal_size, device_size; |
4499 | 0 | int r = -EINVAL; |
4500 | 0 | struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2); |
4501 | |
4502 | 0 | log_dbg(cd, "Entering reencryption crash recovery."); |
4503 | |
4504 | 0 | if (LUKS2_get_data_size(hdr, &minimal_size, NULL)) |
4505 | 0 | return r; |
4506 | 0 | if (LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, true, false)) |
4507 | 0 | goto out; |
4508 | | |
4509 | 0 | r = reencrypt_recovery(cd, hdr, device_size, vks); |
4510 | |
4511 | 0 | out: |
4512 | 0 | if (r < 0) |
4513 | 0 | crypt_drop_uploaded_keyring_key(cd, vks); |
4514 | 0 | return r; |
4515 | 0 | } |
4516 | | #endif |
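| | /* Reports the current reencryption status and, if requested, fills params
| |  * from the on-disk metadata; sets CRYPT_REENCRYPT_REPAIR_NEEDED for pre-v2
| |  * metadata or a missing reencryption keyslot digest. */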
4517 | | crypt_reencrypt_info LUKS2_reencrypt_get_params(struct luks2_hdr *hdr, |
4518 | | struct crypt_params_reencrypt *params) |
4519 | 0 | { |
4520 | 0 | crypt_reencrypt_info ri; |
4521 | 0 | int digest; |
4522 | 0 | uint8_t version; |
4523 | |
4524 | 0 | if (params) |
4525 | 0 | memset(params, 0, sizeof(*params)); |
4526 | |
4527 | 0 | ri = LUKS2_reencrypt_status(hdr); |
4528 | 0 | if (ri == CRYPT_REENCRYPT_NONE || ri == CRYPT_REENCRYPT_INVALID || !params) |
4529 | 0 | return ri; |
4530 | | |
4531 | 0 | digest = LUKS2_digest_by_keyslot(hdr, LUKS2_find_keyslot(hdr, "reencrypt")); |
4532 | 0 | if (digest < 0 && digest != -ENOENT) |
4533 | 0 | return CRYPT_REENCRYPT_INVALID; |
4534 | | |
4535 | | /* |
4536 | | * If there is an old "online-reencrypt" requirement, or the reencryption keyslot
4537 | | * digest is missing, inform the caller that the reencryption metadata requires repair.
4538 | | */ |
4539 | 0 | if (!LUKS2_config_get_reencrypt_version(hdr, &version) && |
4540 | 0 | (version < 2 || digest == -ENOENT)) { |
4541 | 0 | params->flags |= CRYPT_REENCRYPT_REPAIR_NEEDED; |
4542 | 0 | return ri; |
4543 | 0 | } |
4544 | | |
4545 | 0 | params->mode = reencrypt_mode(hdr); |
4546 | 0 | params->direction = reencrypt_direction(hdr); |
4547 | 0 | params->resilience = reencrypt_resilience_type(hdr); |
4548 | 0 | params->hash = reencrypt_resilience_hash(hdr); |
4549 | 0 | params->data_shift = reencrypt_data_shift(hdr) >> SECTOR_SHIFT; |
4550 | 0 | params->max_hotzone_size = 0; |
4551 | 0 | if (LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0) |
4552 | 0 | params->flags |= CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT; |
4553 | |
4554 | 0 | return ri; |
4555 | 0 | } |