Coverage Report

Created: 2025-11-25 07:00

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/cryptsetup/lib/luks2/luks2_reencrypt.c
Line
Count
Source
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
 * LUKS - Linux Unified Key Setup v2, reencryption helpers
4
 *
5
 * Copyright (C) 2015-2025 Red Hat, Inc. All rights reserved.
6
 * Copyright (C) 2015-2025 Ondrej Kozina
7
 */
8
9
#include "luks2_internal.h"
10
#include "utils_device_locking.h"
11
#include "keyslot_context.h"
12
13
struct luks2_reencrypt {
14
  /* reencryption window attributes */
15
  uint64_t offset;
16
  uint64_t progress;
17
  uint64_t length;
18
  uint64_t device_size;
19
  bool online;
20
  bool fixed_length;
21
  crypt_reencrypt_direction_info direction;
22
  crypt_reencrypt_mode_info mode;
23
24
  char *device_name;
25
  char *hotzone_name;
26
  char *overlay_name;
27
  uint32_t flags;
28
29
  /* reencryption window persistence attributes */
30
  struct reenc_protection rp;
31
  struct reenc_protection rp_moved_segment;
32
33
  int reenc_keyslot;
34
35
  /* already running reencryption */
36
  json_object *jobj_segs_hot;
37
  json_object *jobj_segs_post;
38
39
  /* backup segments */
40
  json_object *jobj_segment_new;
41
  int digest_new;
42
  json_object *jobj_segment_old;
43
  int digest_old;
44
  json_object *jobj_segment_moved;
45
46
  struct volume_key *vks;
47
48
  void *reenc_buffer;
49
  ssize_t read;
50
51
  struct crypt_storage_wrapper *cw1;
52
  struct crypt_storage_wrapper *cw2;
53
54
  uint32_t wflags1;
55
  uint32_t wflags2;
56
57
  struct device *hotzone_device;
58
59
  struct crypt_lock_handle *reenc_lock;
60
};
61
#if USE_LUKS2_REENCRYPTION
62
static uint64_t data_shift_value(struct reenc_protection *rp)
63
0
{
64
0
  return rp->type == REENC_PROTECTION_DATASHIFT ? rp->p.ds.data_shift : 0;
65
0
}
66
67
static json_object *reencrypt_segment(struct luks2_hdr *hdr, unsigned new)
68
0
{
69
0
  return LUKS2_get_segment_by_flag(hdr, new ? "backup-final" : "backup-previous");
70
0
}
71
72
static json_object *reencrypt_segment_new(struct luks2_hdr *hdr)
73
0
{
74
0
  return reencrypt_segment(hdr, 1);
75
0
}
76
77
static json_object *reencrypt_segment_old(struct luks2_hdr *hdr)
78
0
{
79
0
  return reencrypt_segment(hdr, 0);
80
0
}
81
82
static json_object *reencrypt_segments_old(struct luks2_hdr *hdr)
83
0
{
84
0
  json_object *jobj_segments, *jobj = NULL;
85
86
0
  if (json_object_copy(reencrypt_segment_old(hdr), &jobj))
87
0
    return NULL;
88
89
0
  json_segment_remove_flag(jobj, "backup-previous");
90
91
0
  jobj_segments = json_object_new_object();
92
0
  if (!jobj_segments) {
93
0
    json_object_put(jobj);
94
0
    return NULL;
95
0
  }
96
97
0
  if (json_object_object_add_by_uint(jobj_segments, 0, jobj)) {
98
0
    json_object_put(jobj);
99
0
    json_object_put(jobj_segments);
100
0
    return NULL;
101
0
  }
102
103
0
  return jobj_segments;
104
0
}
105
106
static const char *reencrypt_segment_cipher_new(struct luks2_hdr *hdr)
107
0
{
108
0
  return json_segment_get_cipher(reencrypt_segment(hdr, 1));
109
0
}
110
111
static const char *reencrypt_segment_cipher_old(struct luks2_hdr *hdr)
112
0
{
113
0
  return json_segment_get_cipher(reencrypt_segment(hdr, 0));
114
0
}
115
116
static uint32_t reencrypt_get_sector_size_new(struct luks2_hdr *hdr)
117
0
{
118
0
  return json_segment_get_sector_size(reencrypt_segment(hdr, 1));
119
0
}
120
121
static uint32_t reencrypt_get_sector_size_old(struct luks2_hdr *hdr)
122
0
{
123
0
  return json_segment_get_sector_size(reencrypt_segment(hdr, 0));
124
0
}
125
126
static uint64_t reencrypt_data_offset(struct luks2_hdr *hdr, unsigned new)
127
0
{
128
0
  json_object *jobj = reencrypt_segment(hdr, new);
129
0
  if (jobj)
130
0
    return json_segment_get_offset(jobj, 0);
131
132
0
  return LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
133
0
}
134
135
static uint64_t LUKS2_reencrypt_get_data_offset_moved(struct luks2_hdr *hdr)
136
0
{
137
0
  json_object *jobj_segment = LUKS2_get_segment_by_flag(hdr, "backup-moved-segment");
138
139
0
  if (!jobj_segment)
140
0
    return 0;
141
142
0
  return json_segment_get_offset(jobj_segment, 0);
143
0
}
144
145
static uint64_t reencrypt_get_data_offset_new(struct luks2_hdr *hdr)
146
0
{
147
0
  return reencrypt_data_offset(hdr, 1);
148
0
}
149
150
static uint64_t reencrypt_get_data_offset_old(struct luks2_hdr *hdr)
151
0
{
152
0
  return reencrypt_data_offset(hdr, 0);
153
0
}
154
#endif
155
156
static int reencrypt_digest(struct luks2_hdr *hdr, unsigned new)
157
0
{
158
0
  int segment = LUKS2_get_segment_id_by_flag(hdr, new ? "backup-final" : "backup-previous");
159
160
0
  if (segment < 0)
161
0
    return segment;
162
163
0
  return LUKS2_digest_by_segment(hdr, segment);
164
0
}
165
166
int LUKS2_reencrypt_digest_new(struct luks2_hdr *hdr)
167
0
{
168
0
  return reencrypt_digest(hdr, 1);
169
0
}
170
171
int LUKS2_reencrypt_digest_old(struct luks2_hdr *hdr)
172
0
{
173
0
  return reencrypt_digest(hdr, 0);
174
0
}
175
176
int LUKS2_reencrypt_segment_new(struct luks2_hdr *hdr)
177
0
{
178
0
  return LUKS2_get_segment_id_by_flag(hdr, "backup-final");
179
0
}
180
181
int LUKS2_reencrypt_segment_old(struct luks2_hdr *hdr)
182
0
{
183
0
  return LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
184
0
}
185
186
unsigned LUKS2_reencrypt_vks_count(struct luks2_hdr *hdr)
187
0
{
188
0
  int digest_old, digest_new;
189
0
  unsigned vks_count = 0;
190
191
0
  if ((digest_new = LUKS2_reencrypt_digest_new(hdr)) >= 0)
192
0
    vks_count++;
193
0
  if ((digest_old = LUKS2_reencrypt_digest_old(hdr)) >= 0) {
194
0
    if (digest_old != digest_new)
195
0
      vks_count++;
196
0
  }
197
198
0
  return vks_count;
199
0
}
200
201
/* none, checksums, journal or shift */
202
static const char *reencrypt_resilience_type(struct luks2_hdr *hdr)
203
0
{
204
0
  json_object *jobj_keyslot, *jobj_area, *jobj_type;
205
0
  int ks = LUKS2_find_keyslot(hdr, "reencrypt");
206
207
0
  if (ks < 0)
208
0
    return NULL;
209
210
0
  jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
211
212
0
  json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
213
0
  if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
214
0
    return NULL;
215
216
0
  return json_object_get_string(jobj_type);
217
0
}
218
219
static const char *reencrypt_resilience_hash(struct luks2_hdr *hdr)
220
0
{
221
0
  json_object *jobj_keyslot, *jobj_area, *jobj_type, *jobj_hash;
222
0
  int ks = LUKS2_find_keyslot(hdr, "reencrypt");
223
224
0
  if (ks < 0)
225
0
    return NULL;
226
227
0
  jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
228
229
0
  json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
230
0
  if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
231
0
    return NULL;
232
0
  if (strcmp(json_object_get_string(jobj_type), "checksum"))
233
0
    return NULL;
234
0
  if (!json_object_object_get_ex(jobj_area, "hash", &jobj_hash))
235
0
    return NULL;
236
237
0
  return json_object_get_string(jobj_hash);
238
0
}
239
#if USE_LUKS2_REENCRYPTION
240
static json_object *_enc_create_segments_shift_after(struct luks2_reencrypt *rh, uint64_t data_offset)
241
0
{
242
0
  int reenc_seg, i = 0;
243
0
  json_object *jobj, *jobj_copy = NULL, *jobj_seg_new = NULL, *jobj_segs_post = json_object_new_object();
244
0
  uint64_t tmp;
245
246
0
  if (!rh->jobj_segs_hot || !jobj_segs_post)
247
0
    goto err;
248
249
0
  if (json_segments_count(rh->jobj_segs_hot) == 0)
250
0
    return jobj_segs_post;
251
252
0
  reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
253
0
  if (reenc_seg < 0)
254
0
    goto err;
255
256
0
  while (i < reenc_seg) {
257
0
    jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, i);
258
0
    if (!jobj_copy || json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy)))
259
0
      goto err;
260
0
  }
261
0
  jobj_copy = NULL;
262
263
0
  jobj = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
264
0
  if (!jobj) {
265
0
    jobj = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg);
266
0
    if (!jobj || json_object_copy(jobj, &jobj_seg_new))
267
0
      goto err;
268
0
    json_segment_remove_flag(jobj_seg_new, "in-reencryption");
269
0
    tmp = rh->length;
270
0
  } else {
271
0
    if (json_object_copy(jobj, &jobj_seg_new))
272
0
      goto err;
273
0
    json_object_object_add(jobj_seg_new, "offset", crypt_jobj_new_uint64(rh->offset + data_offset));
274
0
    json_object_object_add(jobj_seg_new, "iv_tweak", crypt_jobj_new_uint64(rh->offset >> SECTOR_SHIFT));
275
0
    tmp = json_segment_get_size(jobj_seg_new, 0) + rh->length;
276
0
  }
277
278
  /* alter size of new segment, reenc_seg == 0 we're finished */
279
0
  json_object_object_add(jobj_seg_new, "size", reenc_seg > 0 ? crypt_jobj_new_uint64(tmp) : json_object_new_string("dynamic"));
280
0
  if (!json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_seg_new))
281
0
    return jobj_segs_post;
282
283
0
err:
284
0
  json_object_put(jobj_seg_new);
285
0
  json_object_put(jobj_copy);
286
0
  json_object_put(jobj_segs_post);
287
0
  return NULL;
288
0
}
289
290
static json_object *reencrypt_make_hot_segments_encrypt_shift(struct luks2_hdr *hdr,
291
  struct luks2_reencrypt *rh,
292
  uint64_t data_offset)
293
0
{
294
0
  int sg, crypt_seg, i = 0;
295
0
  uint64_t segment_size;
296
0
  json_object *jobj_seg_shrunk = NULL, *jobj_seg_new = NULL, *jobj_copy = NULL, *jobj_enc_seg = NULL,
297
0
         *jobj_segs_hot = json_object_new_object();
298
299
0
  if (!jobj_segs_hot)
300
0
    return NULL;
301
302
0
  crypt_seg = LUKS2_segment_by_type(hdr, "crypt");
303
304
  /* FIXME: This is hack. Find proper way to fix it. */
305
0
  sg = LUKS2_last_segment_by_type(hdr, "linear");
306
0
  if (rh->offset && sg < 0)
307
0
    goto err;
308
0
  if (sg < 0)
309
0
    return jobj_segs_hot;
310
311
0
  jobj_enc_seg = json_segment_create_crypt(data_offset + rh->offset,
312
0
                  rh->offset >> SECTOR_SHIFT,
313
0
                  &rh->length,
314
0
                  reencrypt_segment_cipher_new(hdr),
315
0
                  NULL, 0, /* integrity */
316
0
                  reencrypt_get_sector_size_new(hdr),
317
0
                  1);
318
319
0
  while (i < sg) {
320
0
    jobj_copy = LUKS2_get_segment_jobj(hdr, i);
321
0
    if (!jobj_copy || json_object_object_add_by_uint(jobj_segs_hot, i++, json_object_get(jobj_copy)))
322
0
      goto err;
323
0
  }
324
0
  jobj_copy = NULL;
325
326
0
  segment_size = LUKS2_segment_size(hdr, sg, 0);
327
0
  if (segment_size > rh->length) {
328
0
    if (json_object_copy(LUKS2_get_segment_jobj(hdr, sg), &jobj_seg_shrunk))
329
0
      goto err;
330
0
    json_object_object_add(jobj_seg_shrunk, "size", crypt_jobj_new_uint64(segment_size - rh->length));
331
0
    if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_seg_shrunk))
332
0
      goto err;
333
0
  }
334
335
0
  if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_enc_seg))
336
0
    goto err;
337
338
  /* first crypt segment after encryption ? */
339
0
  if (crypt_seg >= 0) {
340
0
    jobj_seg_new = LUKS2_get_segment_jobj(hdr, crypt_seg);
341
0
    if (!jobj_seg_new || json_object_object_add_by_uint(jobj_segs_hot, sg, json_object_get(jobj_seg_new)))
342
0
      goto err;
343
0
  }
344
345
0
  return jobj_segs_hot;
346
0
err:
347
0
  json_object_put(jobj_copy);
348
0
  json_object_put(jobj_seg_new);
349
0
  json_object_put(jobj_seg_shrunk);
350
0
  json_object_put(jobj_enc_seg);
351
0
  json_object_put(jobj_segs_hot);
352
353
0
  return NULL;
354
0
}
355
356
static json_object *reencrypt_make_segment_new(struct crypt_device *cd,
357
    struct luks2_hdr *hdr,
358
    const struct luks2_reencrypt *rh,
359
    uint64_t data_offset,
360
    uint64_t segment_offset,
361
    uint64_t iv_offset,
362
    const uint64_t *segment_length)
363
0
{
364
0
  switch (rh->mode) {
365
0
  case CRYPT_REENCRYPT_REENCRYPT:
366
0
  case CRYPT_REENCRYPT_ENCRYPT:
367
0
    return json_segment_create_crypt(data_offset + segment_offset,
368
0
              crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
369
0
              segment_length,
370
0
              reencrypt_segment_cipher_new(hdr),
371
0
              NULL, 0, /* integrity */
372
0
              reencrypt_get_sector_size_new(hdr), 0);
373
0
  case CRYPT_REENCRYPT_DECRYPT:
374
0
    return json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
375
0
  }
376
377
0
  return NULL;
378
0
}
379
380
static json_object *reencrypt_make_post_segments_forward(struct crypt_device *cd,
381
  struct luks2_hdr *hdr,
382
  struct luks2_reencrypt *rh,
383
  uint64_t data_offset)
384
0
{
385
0
  int reenc_seg;
386
0
  json_object *jobj_old_seg, *jobj_new_seg_after = NULL, *jobj_old_seg_copy = NULL,
387
0
        *jobj_segs_post = json_object_new_object();
388
0
  uint64_t fixed_length = rh->offset + rh->length;
389
390
0
  if (!rh->jobj_segs_hot || !jobj_segs_post)
391
0
    goto err;
392
393
0
  reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
394
0
  if (reenc_seg < 0)
395
0
    goto err;
396
397
0
  jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
398
399
  /*
400
   * if there's no old segment after reencryption, we're done.
401
   * Set size to 'dynamic' again.
402
   */
403
0
  jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, jobj_old_seg ? &fixed_length : NULL);
404
0
  if (!jobj_new_seg_after || json_object_object_add_by_uint_by_ref(jobj_segs_post, 0, &jobj_new_seg_after))
405
0
    goto err;
406
407
0
  if (jobj_old_seg) {
408
0
    if (rh->fixed_length) {
409
0
      if (json_object_copy(jobj_old_seg, &jobj_old_seg_copy))
410
0
        goto err;
411
0
      fixed_length = rh->device_size - fixed_length;
412
0
      json_object_object_add(jobj_old_seg_copy, "size", crypt_jobj_new_uint64(fixed_length));
413
0
    } else
414
0
      jobj_old_seg_copy = json_object_get(jobj_old_seg);
415
416
0
    if (json_object_object_add_by_uint_by_ref(jobj_segs_post, 1, &jobj_old_seg_copy))
417
0
      goto err;
418
0
  }
419
420
0
  return jobj_segs_post;
421
0
err:
422
0
  json_object_put(jobj_new_seg_after);
423
0
  json_object_put(jobj_old_seg_copy);
424
0
  json_object_put(jobj_segs_post);
425
0
  return NULL;
426
0
}
427
428
static json_object *reencrypt_make_post_segments_backward(struct crypt_device *cd,
429
  struct luks2_hdr *hdr,
430
  struct luks2_reencrypt *rh,
431
  uint64_t data_offset)
432
0
{
433
0
  int reenc_seg;
434
0
  uint64_t fixed_length;
435
436
0
  json_object *jobj_new_seg_after = NULL, *jobj_old_seg = NULL,
437
0
        *jobj_segs_post = json_object_new_object();
438
439
0
  if (!rh->jobj_segs_hot || !jobj_segs_post)
440
0
    goto err;
441
442
0
  reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
443
0
  if (reenc_seg < 0)
444
0
    goto err;
445
446
0
  jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg - 1);
447
0
  if (jobj_old_seg) {
448
0
    json_object_get(jobj_old_seg);
449
0
    if (json_object_object_add_by_uint_by_ref(jobj_segs_post, reenc_seg - 1, &jobj_old_seg))
450
0
      goto err;
451
0
  }
452
453
0
  if (rh->fixed_length && rh->offset) {
454
0
    fixed_length = rh->device_size - rh->offset;
455
0
    jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, &fixed_length);
456
0
  } else
457
0
    jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, NULL);
458
459
0
  if (jobj_new_seg_after && !json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_new_seg_after))
460
0
    return jobj_segs_post;
461
0
err:
462
0
  json_object_put(jobj_new_seg_after);
463
0
  json_object_put(jobj_old_seg);
464
0
  json_object_put(jobj_segs_post);
465
0
  return NULL;
466
0
}
467
468
static json_object *reencrypt_make_segment_reencrypt(struct crypt_device *cd,
469
    struct luks2_hdr *hdr,
470
    const struct luks2_reencrypt *rh,
471
    uint64_t data_offset,
472
    uint64_t segment_offset,
473
    uint64_t iv_offset,
474
    const uint64_t *segment_length)
475
0
{
476
0
  switch (rh->mode) {
477
0
  case CRYPT_REENCRYPT_REENCRYPT:
478
0
  case CRYPT_REENCRYPT_ENCRYPT:
479
0
    return json_segment_create_crypt(data_offset + segment_offset,
480
0
        crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
481
0
        segment_length,
482
0
        reencrypt_segment_cipher_new(hdr),
483
0
              NULL, 0, /* integrity */
484
0
        reencrypt_get_sector_size_new(hdr), 1);
485
0
  case CRYPT_REENCRYPT_DECRYPT:
486
0
    return json_segment_create_linear(data_offset + segment_offset, segment_length, 1);
487
0
  }
488
489
0
  return NULL;
490
0
}
491
492
static json_object *reencrypt_make_segment_old(struct crypt_device *cd,
493
    struct luks2_hdr *hdr,
494
    const struct luks2_reencrypt *rh,
495
    uint64_t data_offset,
496
    uint64_t segment_offset,
497
    const uint64_t *segment_length)
498
0
{
499
0
  json_object *jobj_old_seg = NULL;
500
501
0
  switch (rh->mode) {
502
0
  case CRYPT_REENCRYPT_REENCRYPT:
503
0
  case CRYPT_REENCRYPT_DECRYPT:
504
0
    jobj_old_seg = json_segment_create_crypt(data_offset + segment_offset,
505
0
                crypt_get_iv_offset(cd) + (segment_offset >> SECTOR_SHIFT),
506
0
                segment_length,
507
0
                reencrypt_segment_cipher_old(hdr),
508
0
                NULL, 0, /* integrity */
509
0
                reencrypt_get_sector_size_old(hdr),
510
0
                0);
511
0
    break;
512
0
  case CRYPT_REENCRYPT_ENCRYPT:
513
0
    jobj_old_seg = json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
514
0
  }
515
516
0
  return jobj_old_seg;
517
0
}
518
519
static json_object *reencrypt_make_hot_segments_forward(struct crypt_device *cd,
520
    struct luks2_hdr *hdr,
521
    struct luks2_reencrypt *rh,
522
    uint64_t device_size,
523
    uint64_t data_offset)
524
0
{
525
0
  uint64_t fixed_length, tmp = rh->offset + rh->length;
526
0
  json_object *jobj_segs_hot = json_object_new_object(), *jobj_reenc_seg = NULL,
527
0
        *jobj_old_seg = NULL, *jobj_new_seg = NULL;
528
0
  unsigned int sg = 0;
529
530
0
  if (!jobj_segs_hot)
531
0
    return NULL;
532
533
0
  if (rh->offset) {
534
0
    jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, &rh->offset);
535
0
    if (!jobj_new_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_new_seg))
536
0
      goto err;
537
0
  }
538
539
0
  jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
540
0
  if (!jobj_reenc_seg)
541
0
    goto err;
542
543
0
  if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_reenc_seg))
544
0
    goto err;
545
546
0
  if (tmp < device_size) {
547
0
    fixed_length = device_size - tmp;
548
0
    jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh, data_offset + data_shift_value(&rh->rp),
549
0
                rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
550
0
    if (!jobj_old_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg, &jobj_old_seg))
551
0
      goto err;
552
0
  }
553
554
0
  return jobj_segs_hot;
555
0
err:
556
0
  json_object_put(jobj_reenc_seg);
557
0
  json_object_put(jobj_old_seg);
558
0
  json_object_put(jobj_new_seg);
559
0
  json_object_put(jobj_segs_hot);
560
0
  return NULL;
561
0
}
562
563
static json_object *reencrypt_make_hot_segments_decrypt_shift(struct crypt_device *cd,
564
  struct luks2_hdr *hdr, struct luks2_reencrypt *rh,
565
  uint64_t device_size, uint64_t data_offset)
566
0
{
567
0
  uint64_t fixed_length, tmp = rh->offset + rh->length, linear_length = rh->progress;
568
0
  json_object *jobj, *jobj_segs_hot = json_object_new_object(), *jobj_reenc_seg = NULL,
569
0
        *jobj_old_seg = NULL, *jobj_new_seg = NULL;
570
0
  unsigned int sg = 0;
571
572
0
  if (!jobj_segs_hot)
573
0
    return NULL;
574
575
0
  if (rh->offset) {
576
0
    jobj = LUKS2_get_segment_jobj(hdr, 0);
577
0
    if (!jobj)
578
0
      goto err;
579
580
0
    jobj_new_seg = json_object_get(jobj);
581
0
    if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_new_seg))
582
0
      goto err;
583
584
0
    if (linear_length) {
585
0
      jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh,
586
0
                  data_offset,
587
0
                  json_segment_get_size(jobj, 0),
588
0
                  0,
589
0
                  &linear_length);
590
0
      if (!jobj_new_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_new_seg))
591
0
        goto err;
592
0
    }
593
0
  }
594
595
0
  jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset,
596
0
                rh->offset,
597
0
                rh->offset,
598
0
                &rh->length);
599
0
  if (!jobj_reenc_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_reenc_seg))
600
0
    goto err;
601
602
0
  if (!rh->offset && (jobj = LUKS2_get_segment_jobj(hdr, 1)) &&
603
0
      !json_segment_is_backup(jobj)) {
604
0
    jobj_new_seg = json_object_get(jobj);
605
0
    if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_new_seg))
606
0
      goto err;
607
0
  } else if (tmp < device_size) {
608
0
    fixed_length = device_size - tmp;
609
0
    jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh,
610
0
                data_offset + data_shift_value(&rh->rp),
611
0
                rh->offset + rh->length,
612
0
                rh->fixed_length ? &fixed_length : NULL);
613
0
    if (!jobj_old_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg, &jobj_old_seg))
614
0
      goto err;
615
0
  }
616
617
0
  return jobj_segs_hot;
618
0
err:
619
0
  json_object_put(jobj_reenc_seg);
620
0
  json_object_put(jobj_old_seg);
621
0
  json_object_put(jobj_new_seg);
622
0
  json_object_put(jobj_segs_hot);
623
0
  return NULL;
624
0
}
625
626
static json_object *_dec_create_segments_shift_after(struct crypt_device *cd,
627
  struct luks2_hdr *hdr,
628
  struct luks2_reencrypt *rh,
629
  uint64_t data_offset)
630
0
{
631
0
  int reenc_seg, i = 0;
632
0
  json_object *jobj_seg_old, *jobj_copy = NULL, *jobj_seg_old_copy = NULL, *jobj_seg_new = NULL,
633
0
        *jobj_segs_post = json_object_new_object();
634
0
  unsigned segs;
635
0
  uint64_t tmp;
636
637
0
  if (!rh->jobj_segs_hot || !jobj_segs_post)
638
0
    goto err;
639
640
0
  segs = json_segments_count(rh->jobj_segs_hot);
641
0
  if (segs == 0)
642
0
    return jobj_segs_post;
643
644
0
  reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
645
0
  if (reenc_seg < 0)
646
0
    goto err;
647
648
0
  if (reenc_seg == 0) {
649
0
    jobj_seg_new = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, NULL);
650
0
    if (!jobj_seg_new || json_object_object_add_by_uint(jobj_segs_post, 0, jobj_seg_new))
651
0
      goto err;
652
653
0
    return jobj_segs_post;
654
0
  }
655
656
0
  jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, 0);
657
0
  if (!jobj_copy)
658
0
    goto err;
659
0
  json_object_get(jobj_copy);
660
0
  if (json_object_object_add_by_uint_by_ref(jobj_segs_post, i++, &jobj_copy))
661
0
    goto err;
662
663
0
  if ((jobj_seg_old = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1)))
664
0
    jobj_seg_old_copy = json_object_get(jobj_seg_old);
665
666
0
  tmp = rh->length + rh->progress;
667
0
  jobj_seg_new = reencrypt_make_segment_new(cd, hdr, rh, data_offset,
668
0
              json_segment_get_size(rh->jobj_segment_moved, 0),
669
0
              data_shift_value(&rh->rp),
670
0
              jobj_seg_old ? &tmp : NULL);
671
0
  if (!jobj_seg_new || json_object_object_add_by_uint_by_ref(jobj_segs_post, i++, &jobj_seg_new))
672
0
    goto err;
673
674
0
  if (jobj_seg_old_copy && json_object_object_add_by_uint(jobj_segs_post, i, jobj_seg_old_copy))
675
0
    goto err;
676
677
0
  return jobj_segs_post;
678
0
err:
679
0
  json_object_put(jobj_copy);
680
0
  json_object_put(jobj_seg_old_copy);
681
0
  json_object_put(jobj_seg_new);
682
0
  json_object_put(jobj_segs_post);
683
0
  return NULL;
684
0
}
685
686
static json_object *reencrypt_make_hot_segments_backward(struct crypt_device *cd,
687
    struct luks2_hdr *hdr,
688
    struct luks2_reencrypt *rh,
689
    uint64_t device_size,
690
    uint64_t data_offset)
691
0
{
692
0
  uint64_t fixed_length, tmp = rh->offset + rh->length;
693
0
  json_object *jobj_reenc_seg = NULL, *jobj_new_seg = NULL, *jobj_old_seg = NULL,
694
0
        *jobj_segs_hot = json_object_new_object();
695
0
  int sg = 0;
696
697
0
  if (!jobj_segs_hot)
698
0
    return NULL;
699
700
0
  if (rh->offset) {
701
0
    if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_old_seg))
702
0
      goto err;
703
0
    json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(rh->offset));
704
705
0
    if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_old_seg))
706
0
      goto err;
707
0
  }
708
709
0
  jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
710
0
  if (!jobj_reenc_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_reenc_seg))
711
0
    goto err;
712
713
0
  if (tmp < device_size) {
714
0
    fixed_length = device_size - tmp;
715
0
    jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset + rh->length,
716
0
                rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
717
0
    if (!jobj_new_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg, &jobj_new_seg))
718
0
      goto err;
719
0
  }
720
721
0
  return jobj_segs_hot;
722
0
err:
723
0
  json_object_put(jobj_reenc_seg);
724
0
  json_object_put(jobj_new_seg);
725
0
  json_object_put(jobj_old_seg);
726
0
  json_object_put(jobj_segs_hot);
727
0
  return NULL;
728
0
}
729
730
static int reencrypt_make_hot_segments(struct crypt_device *cd,
731
    struct luks2_hdr *hdr,
732
    struct luks2_reencrypt *rh,
733
    uint64_t device_size,
734
    uint64_t data_offset)
735
0
{
736
0
  rh->jobj_segs_hot = NULL;
737
738
0
  if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
739
0
      rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
740
0
    log_dbg(cd, "Calculating hot segments for encryption with data move.");
741
0
    rh->jobj_segs_hot = reencrypt_make_hot_segments_encrypt_shift(hdr, rh, data_offset);
742
0
  } else if (rh->mode == CRYPT_REENCRYPT_DECRYPT && rh->direction == CRYPT_REENCRYPT_FORWARD &&
743
0
       rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
744
0
    log_dbg(cd, "Calculating hot segments for decryption with data move.");
745
0
    rh->jobj_segs_hot = reencrypt_make_hot_segments_decrypt_shift(cd, hdr, rh, device_size, data_offset);
746
0
  } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
747
0
    log_dbg(cd, "Calculating hot segments (forward direction).");
748
0
    rh->jobj_segs_hot = reencrypt_make_hot_segments_forward(cd, hdr, rh, device_size, data_offset);
749
0
  } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
750
0
    log_dbg(cd, "Calculating hot segments (backward direction).");
751
0
    rh->jobj_segs_hot = reencrypt_make_hot_segments_backward(cd, hdr, rh, device_size, data_offset);
752
0
  }
753
754
0
  return rh->jobj_segs_hot ? 0 : -EINVAL;
755
0
}
756
757
static int reencrypt_make_post_segments(struct crypt_device *cd,
758
    struct luks2_hdr *hdr,
759
    struct luks2_reencrypt *rh,
760
    uint64_t data_offset)
761
0
{
762
0
  rh->jobj_segs_post = NULL;
763
764
0
  if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
765
0
      rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
766
0
    log_dbg(cd, "Calculating post segments for encryption with data move.");
767
0
    rh->jobj_segs_post = _enc_create_segments_shift_after(rh, data_offset);
768
0
  } else if (rh->mode == CRYPT_REENCRYPT_DECRYPT && rh->direction == CRYPT_REENCRYPT_FORWARD &&
769
0
       rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
770
0
    log_dbg(cd, "Calculating post segments for decryption with data move.");
771
0
    rh->jobj_segs_post = _dec_create_segments_shift_after(cd, hdr, rh, data_offset);
772
0
  } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
773
0
    log_dbg(cd, "Calculating post segments (forward direction).");
774
0
    rh->jobj_segs_post = reencrypt_make_post_segments_forward(cd, hdr, rh, data_offset);
775
0
  } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
776
0
    log_dbg(cd, "Calculating segments (backward direction).");
777
0
    rh->jobj_segs_post = reencrypt_make_post_segments_backward(cd, hdr, rh, data_offset);
778
0
  }
779
780
0
  return rh->jobj_segs_post ? 0 : -EINVAL;
781
0
}
782
#endif
783
784
static uint64_t reencrypt_data_shift(struct luks2_hdr *hdr)
785
0
{
786
0
  json_object *jobj_keyslot, *jobj_area, *jobj_data_shift;
787
0
  int ks = LUKS2_find_keyslot(hdr, "reencrypt");
788
789
0
  if (ks < 0)
790
0
    return 0;
791
792
0
  jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
793
794
0
  json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
795
0
  if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj_data_shift))
796
0
    return 0;
797
798
0
  return crypt_jobj_get_uint64(jobj_data_shift);
799
0
}
800
801
static crypt_reencrypt_mode_info reencrypt_mode(struct luks2_hdr *hdr)
802
0
{
803
0
  const char *mode;
804
0
  crypt_reencrypt_mode_info mi = CRYPT_REENCRYPT_REENCRYPT;
805
0
  json_object *jobj_keyslot, *jobj_mode;
806
807
0
  jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
808
0
  if (!jobj_keyslot)
809
0
    return mi;
810
811
0
  json_object_object_get_ex(jobj_keyslot, "mode", &jobj_mode);
812
0
  mode = json_object_get_string(jobj_mode);
813
814
  /* validation enforces allowed values */
815
0
  if (!strcmp(mode, "encrypt"))
816
0
    mi = CRYPT_REENCRYPT_ENCRYPT;
817
0
  else if (!strcmp(mode, "decrypt"))
818
0
    mi = CRYPT_REENCRYPT_DECRYPT;
819
820
0
  return mi;
821
0
}
822
823
static crypt_reencrypt_direction_info reencrypt_direction(struct luks2_hdr *hdr)
824
0
{
825
0
  const char *value;
826
0
  json_object *jobj_keyslot, *jobj_mode;
827
0
  crypt_reencrypt_direction_info di = CRYPT_REENCRYPT_FORWARD;
828
829
0
  jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
830
0
  if (!jobj_keyslot)
831
0
    return di;
832
833
0
  json_object_object_get_ex(jobj_keyslot, "direction", &jobj_mode);
834
0
  value = json_object_get_string(jobj_mode);
835
836
  /* validation enforces allowed values */
837
0
  if (strcmp(value, "forward"))
838
0
    di = CRYPT_REENCRYPT_BACKWARD;
839
840
0
  return di;
841
0
}
842
843
typedef enum { REENC_OK = 0, REENC_ERR, REENC_ROLLBACK, REENC_FATAL } reenc_status_t;
844
845
void LUKS2_reencrypt_protection_erase(struct reenc_protection *rp)
846
0
{
847
0
  if (!rp || rp->type != REENC_PROTECTION_CHECKSUM)
848
0
    return;
849
850
0
  if (rp->p.csum.ch) {
851
0
    crypt_hash_destroy(rp->p.csum.ch);
852
0
    rp->p.csum.ch = NULL;
853
0
  }
854
855
0
  if (rp->p.csum.checksums) {
856
0
    crypt_safe_memzero(rp->p.csum.checksums, rp->p.csum.checksums_len);
857
0
    free(rp->p.csum.checksums);
858
0
    rp->p.csum.checksums = NULL;
859
0
  }
860
0
}
861
862
/*
 * Release a reencryption context and everything it owns: protection state,
 * JSON segment references, the data buffer, both storage wrappers, the
 * hotzone device handle, device-mapper names, volume keys, the exclusive
 * claim on the data device and the reencryption lock.
 * Safe to call with rh == NULL.
 */
void LUKS2_reencrypt_free(struct crypt_device *cd, struct luks2_reencrypt *rh)
{
	if (!rh)
		return;

	/* wipe checksum protection state (hash handle, checksum buffer) */
	LUKS2_reencrypt_protection_erase(&rh->rp);
	LUKS2_reencrypt_protection_erase(&rh->rp_moved_segment);

	/* drop JSON references taken when the context was built */
	json_object_put(rh->jobj_segs_hot);
	rh->jobj_segs_hot = NULL;
	json_object_put(rh->jobj_segs_post);
	rh->jobj_segs_post = NULL;
	json_object_put(rh->jobj_segment_old);
	rh->jobj_segment_old = NULL;
	json_object_put(rh->jobj_segment_new);
	rh->jobj_segment_new = NULL;
	json_object_put(rh->jobj_segment_moved);
	rh->jobj_segment_moved = NULL;

	free(rh->reenc_buffer);
	rh->reenc_buffer = NULL;
	crypt_storage_wrapper_destroy(rh->cw1);
	rh->cw1 = NULL;
	crypt_storage_wrapper_destroy(rh->cw2);
	rh->cw2 = NULL;
	device_free(cd, rh->hotzone_device);
	rh->hotzone_device = NULL;

	free(rh->device_name);
	free(rh->overlay_name);
	free(rh->hotzone_name);
	/* remove keys uploaded to the kernel keyring before freeing them */
	crypt_drop_uploaded_keyring_key(cd, rh->vks);
	crypt_free_volume_key(rh->vks);
	/* give up the exclusive data-device claim, then the reencrypt lock */
	device_release_excl(cd, crypt_data_device(cd));
	crypt_unlock_internal(cd, rh->reenc_lock);
	free(rh);
}
899
900
#if USE_LUKS2_REENCRYPTION
901
int LUKS2_reencrypt_max_hotzone_size(struct crypt_device *cd __attribute__((unused)),
902
  struct luks2_hdr *hdr,
903
  const struct reenc_protection *rp,
904
  int reencrypt_keyslot,
905
  uint64_t *r_length)
906
0
{
907
0
  int r;
908
0
  uint64_t dummy, area_length;
909
910
0
  assert(hdr);
911
0
  assert(rp);
912
0
  assert(r_length);
913
914
0
  if (rp->type <= REENC_PROTECTION_NONE) {
915
0
    *r_length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;
916
0
    return 0;
917
0
  }
918
919
0
  if (rp->type == REENC_PROTECTION_DATASHIFT) {
920
0
    *r_length = rp->p.ds.data_shift;
921
0
    return 0;
922
0
  }
923
924
0
  r = LUKS2_keyslot_area(hdr, reencrypt_keyslot, &dummy, &area_length);
925
0
  if (r < 0)
926
0
    return -EINVAL;
927
928
0
  if (rp->type == REENC_PROTECTION_JOURNAL) {
929
0
    *r_length = area_length;
930
0
    return 0;
931
0
  }
932
933
0
  if (rp->type == REENC_PROTECTION_CHECKSUM) {
934
0
    *r_length = (area_length / rp->p.csum.hash_size) * rp->p.csum.block_size;
935
0
    return 0;
936
0
  }
937
938
0
  return -EINVAL;
939
0
}
940
941
/*
 * Minimal I/O alignment for reencryption: the maximum of the data device
 * block size and both (old and new) segment encryption sector sizes.
 */
static size_t reencrypt_get_alignment(struct crypt_device *cd,
		struct luks2_hdr *hdr)
{
	size_t alignment = device_block_size(cd, crypt_data_device(cd));
	size_t old_ss = reencrypt_get_sector_size_old(hdr);
	size_t new_ss = reencrypt_get_sector_size_new(hdr);

	if (old_ss > alignment)
		alignment = old_ss;
	if (new_ss > alignment)
		alignment = new_ss;

	return alignment;
}
955
956
/* returns void because it must not fail on valid LUKS2 header */
957
static void _load_backup_segments(struct luks2_hdr *hdr,
958
    struct luks2_reencrypt *rh)
959
0
{
960
0
  int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
961
962
0
  if (segment >= 0) {
963
0
    rh->jobj_segment_new = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
964
0
    rh->digest_new = LUKS2_digest_by_segment(hdr, segment);
965
0
  } else {
966
0
    rh->jobj_segment_new = NULL;
967
0
    rh->digest_new = -ENOENT;
968
0
  }
969
970
0
  segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
971
0
  if (segment >= 0) {
972
0
    rh->jobj_segment_old = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
973
0
    rh->digest_old = LUKS2_digest_by_segment(hdr, segment);
974
0
  } else {
975
0
    rh->jobj_segment_old = NULL;
976
0
    rh->digest_old = -ENOENT;
977
0
  }
978
979
0
  segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
980
0
  if (segment >= 0)
981
0
    rh->jobj_segment_moved = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
982
0
  else
983
0
    rh->jobj_segment_moved = NULL;
984
0
}
985
986
/*
 * Compute hotzone offset for backward reencryption with a moved segment
 * (data-shift layout). May shrink *reencrypt_length when the leftover
 * plaintext is smaller than one full data shift.
 * Returns 0 on success, -EINVAL on inconsistent segment layout.
 */
static int reencrypt_offset_backward_moved(struct luks2_hdr *hdr, json_object *jobj_segments,
					   uint64_t *reencrypt_length, uint64_t data_shift, uint64_t *offset)
{
	uint64_t tmp, linear_length = 0;
	int sg, segs = json_segments_count(jobj_segments);

	/* find reencrypt offset with data shift */
	for (sg = 0; sg < segs; sg++)
		if (LUKS2_segment_is_type(hdr, sg, "linear"))
			linear_length += LUKS2_segment_size(hdr, sg, 0);

	/* all active linear segments length */
	if (linear_length && segs > 1) {
		if (linear_length < data_shift)
			return -EINVAL;
		tmp = linear_length - data_shift;
		/* leftover smaller than the shift: shorten the final hotzone */
		if (tmp && tmp < data_shift) {
			*offset = data_shift;
			*reencrypt_length = tmp;
		} else
			*offset = tmp;
		return 0;
	}

	/* single segment left: reencryption reached the device start */
	if (segs == 1) {
		*offset = 0;
		return 0;
	}

	/* should be unreachable */

	return -EINVAL;
}
1019
1020
/*
 * Hotzone offset for forward decryption with a moved segment: one data
 * shift before the last remaining crypt segment, or 0 when no crypt
 * segment follows the first. Always succeeds (returns 0).
 */
static int reencrypt_offset_forward_moved(struct luks2_hdr *hdr,
	uint64_t data_shift,
	uint64_t *offset)
{
	int last_crypt_seg = LUKS2_last_segment_by_type(hdr, "crypt");

	if (last_crypt_seg > 0)
		*offset = LUKS2_segment_offset(hdr, last_crypt_seg, 0) - data_shift;
	else
		*offset = 0;

	return 0;
}
1035
1036
/*
 * Hotzone offset for plain forward reencryption: 0 for a single segment,
 * the first segment's size when two segments exist. Any other layout is
 * invalid. Returns 0 or -EINVAL.
 */
static int _offset_forward(json_object *jobj_segments, uint64_t *offset)
{
	switch (json_segments_count(jobj_segments)) {
	case 1:
		*offset = 0;
		return 0;
	case 2:
		*offset = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
		/* zero-sized first segment means a malformed layout */
		return *offset ? 0 : -EINVAL;
	default:
		return -EINVAL;
	}
}
1051
1052
/*
 * Hotzone offset for plain backward reencryption. The upper boundary is
 * the whole device (one segment) or the first segment's end (two
 * segments); *length is clamped to it and *offset points length bytes
 * below the boundary. Returns 0 or -EINVAL for other layouts.
 */
static int _offset_backward(json_object *jobj_segments, uint64_t device_size, uint64_t *length, uint64_t *offset)
{
	uint64_t boundary;

	switch (json_segments_count(jobj_segments)) {
	case 1:
		boundary = device_size;
		break;
	case 2:
		boundary = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
		break;
	default:
		return -EINVAL;
	}

	if (boundary < *length)
		*length = boundary;
	*offset = boundary - *length;

	return 0;
}
1071
1072
/* must be always relative to data offset */
/* the LUKS2 header MUST be valid */
/*
 * Resolve the current hotzone offset for the given direction, possibly
 * clamping *reencrypt_length. When a segment is already marked as being
 * in reencryption its offset is returned directly. Moved-segment layouts
 * (data shift) use dedicated helpers. Returns 0 or -EINVAL.
 */
static int reencrypt_offset(struct luks2_hdr *hdr,
		crypt_reencrypt_direction_info di,
		uint64_t device_size,
		uint64_t *reencrypt_length,
		uint64_t *offset)
{
	int r, sg;
	json_object *jobj_segments;
	uint64_t data_shift = reencrypt_data_shift(hdr);

	if (!offset)
		return -EINVAL;

	/* if there's segment in reencryption return directly offset of it */
	json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments);
	sg = json_segments_segment_in_reencrypt(jobj_segments);
	if (sg >= 0) {
		*offset = LUKS2_segment_offset(hdr, sg, 0) - (reencrypt_get_data_offset_new(hdr));
		return 0;
	}

	if (di == CRYPT_REENCRYPT_FORWARD) {
		/* forward decryption with a moved segment (data shift) */
		if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_DECRYPT &&
		    LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0) {
			r = reencrypt_offset_forward_moved(hdr, data_shift, offset);
			/* never point past the usable data area */
			if (!r && *offset > device_size)
				*offset = device_size;
			return r;
		}
		return _offset_forward(jobj_segments, offset);
	} else if (di == CRYPT_REENCRYPT_BACKWARD) {
		/* backward encryption with a moved segment (data shift) */
		if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_ENCRYPT &&
		    LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
			return reencrypt_offset_backward_moved(hdr, jobj_segments, reencrypt_length, data_shift, offset);
		return _offset_backward(jobj_segments, device_size, reencrypt_length, offset);
	}

	return -EINVAL;
}
1113
1114
/*
 * Calculate the hotzone length (bytes) for one reencryption step,
 * constrained in order by: protection type, hard maximum, a soft limit of
 * 1/4 of physical memory, caller's maximum, encryption-sector alignment
 * and finally optimal-I/O alignment. Returns 0 when no aligned length
 * fits (caller emits the error).
 */
static uint64_t reencrypt_length(struct crypt_device *cd,
		struct reenc_protection *rp,
		uint64_t keyslot_area_length,
		uint64_t length_max,
		size_t alignment)
{
	unsigned long dummy, optimal_alignment;
	uint64_t length, soft_mem_limit;

	if (rp->type == REENC_PROTECTION_NONE)
		length = length_max ?: LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;
	else if (rp->type == REENC_PROTECTION_CHECKSUM)
		/* as many data blocks as checksums fit in the keyslot area */
		length = (keyslot_area_length / rp->p.csum.hash_size) * rp->p.csum.block_size;
	else if (rp->type == REENC_PROTECTION_DATASHIFT)
		/* datashift hotzone length is fixed; no clamping applies */
		return rp->p.ds.data_shift;
	else
		length = keyslot_area_length;

	/* hard limit */
	if (length > LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH)
		length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;

	/* soft limit is 1/4 of system memory */
	soft_mem_limit = crypt_getphysmemory_kb() << 8; /* multiply by (1024/4) */

	if (soft_mem_limit && length > soft_mem_limit)
		length = soft_mem_limit;

	if (length_max && length > length_max)
		length = length_max;

	length -= (length % alignment);

	/* Emits error later */
	if (!length)
		return length;

	device_topology_alignment(cd, crypt_data_device(cd), &optimal_alignment, &dummy, length);

	/* we have to stick with encryption sector size alignment */
	if (optimal_alignment % alignment)
		return length;

	/* align to opt-io size only if remaining size allows it */
	if (length > optimal_alignment)
		length -= (length % optimal_alignment);

	return length;
}
1163
1164
/*
 * Initialize a reencryption context from the on-disk LUKS2 metadata:
 * locate the "reencrypt" keyslot, load resilience parameters, derive
 * hotzone alignment/length/offset and the progress counter.
 * device_size, max_hotzone_size and fixed_device_size are in 512B sectors
 * except device_size which is in bytes (fixed_device_size overrides it).
 * Returns 0 on success or a negative errno.
 */
static int reencrypt_context_init(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	uint64_t device_size,
	uint64_t max_hotzone_size,
	uint64_t fixed_device_size)
{
	int r;
	size_t alignment;
	uint64_t dummy, area_length;

	rh->reenc_keyslot = LUKS2_find_keyslot(hdr, "reencrypt");
	if (rh->reenc_keyslot < 0)
		return -EINVAL;
	if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &dummy, &area_length) < 0)
		return -EINVAL;

	rh->mode = reencrypt_mode(hdr);

	rh->direction = reencrypt_direction(hdr);

	/* primary hotzone resilience parameters */
	r = LUKS2_keyslot_reencrypt_load(cd, hdr, rh->reenc_keyslot, &rh->rp, true);
	if (r < 0)
		return r;

	/* checksum protection dictates its own block alignment */
	if (rh->rp.type == REENC_PROTECTION_CHECKSUM)
		alignment = rh->rp.p.csum.block_size;
	else
		alignment = reencrypt_get_alignment(cd, hdr);

	if (!alignment)
		return -EINVAL;

	if ((max_hotzone_size << SECTOR_SHIFT) % alignment) {
		log_err(cd, _("Hotzone size must be multiple of calculated zone alignment (%zu bytes)."), alignment);
		return -EINVAL;
	}

	if ((fixed_device_size << SECTOR_SHIFT) % alignment) {
		log_err(cd, _("Device size must be multiple of calculated zone alignment (%zu bytes)."), alignment);
		return -EINVAL;
	}

	if (fixed_device_size) {
		log_dbg(cd, "Switching reencryption to fixed size mode.");
		device_size = fixed_device_size << SECTOR_SHIFT;
		rh->fixed_length = true;
	} else
		rh->fixed_length = false;

	rh->length = reencrypt_length(cd, &rh->rp, area_length, max_hotzone_size << SECTOR_SHIFT, alignment);
	if (!rh->length) {
		log_dbg(cd, "Invalid reencryption length.");
		return -EINVAL;
	}

	if (reencrypt_offset(hdr, rh->direction, device_size, &rh->length, &rh->offset)) {
		log_dbg(cd, "Failed to get reencryption offset.");
		return -EINVAL;
	}

	/* clamp the hotzone to the remaining device area */
	if (rh->offset > device_size)
		return -EINVAL;
	if (rh->length > device_size - rh->offset)
		rh->length = device_size - rh->offset;

	_load_backup_segments(hdr, rh);

	/* secondary (moved segment) resilience parameters, optional */
	r = LUKS2_keyslot_reencrypt_load(cd, hdr, rh->reenc_keyslot, &rh->rp_moved_segment, false);
	if (r < 0)
		return r;

	if (rh->rp_moved_segment.type == REENC_PROTECTION_NOT_SET)
		log_dbg(cd, "No moved segment resilience configured.");

	/* progress: bytes already processed, direction dependent */
	if (rh->direction == CRYPT_REENCRYPT_BACKWARD)
		rh->progress = device_size - rh->offset - rh->length;
	else if (rh->jobj_segment_moved && rh->direction == CRYPT_REENCRYPT_FORWARD) {
		if (rh->offset == json_segment_get_offset(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"), false))
			rh->progress = device_size - json_segment_get_size(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"), false);
		else
			rh->progress = rh->offset - json_segment_get_size(rh->jobj_segment_moved, 0);
	} else
		rh->progress = rh->offset;

	log_dbg(cd, "reencrypt-direction: %s", rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward");
	log_dbg(cd, "backup-previous digest id: %d", rh->digest_old);
	log_dbg(cd, "backup-final digest id: %d", rh->digest_new);
	log_dbg(cd, "reencrypt length: %" PRIu64, rh->length);
	log_dbg(cd, "reencrypt offset: %" PRIu64, rh->offset);
	log_dbg(cd, "reencrypt shift: %s%" PRIu64,
		(rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->direction == CRYPT_REENCRYPT_BACKWARD ? "-" : ""),
		data_shift_value(&rh->rp));
	log_dbg(cd, "reencrypt alignment: %zu", alignment);
	log_dbg(cd, "reencrypt progress: %" PRIu64, rh->progress);

	rh->device_size = device_size;

	/* hotzones below one sector are rejected */
	return rh->length < 512 ? -EINVAL : 0;
}
1264
1265
static size_t reencrypt_buffer_length(struct luks2_reencrypt *rh)
1266
0
{
1267
0
  if (rh->rp.type == REENC_PROTECTION_DATASHIFT)
1268
0
    return data_shift_value(&rh->rp);
1269
0
  return rh->length;
1270
0
}
1271
1272
static int reencrypt_load_clean(struct crypt_device *cd,
1273
  struct luks2_hdr *hdr,
1274
  uint64_t device_size,
1275
  uint64_t max_hotzone_size,
1276
  uint64_t fixed_device_size,
1277
  struct luks2_reencrypt **rh)
1278
0
{
1279
0
  int r;
1280
0
  struct luks2_reencrypt *tmp = crypt_zalloc(sizeof (*tmp));
1281
1282
0
  if (!tmp)
1283
0
    return -ENOMEM;
1284
1285
0
  log_dbg(cd, "Loading stored reencryption context.");
1286
1287
0
  r = reencrypt_context_init(cd, hdr, tmp, device_size, max_hotzone_size, fixed_device_size);
1288
0
  if (r)
1289
0
    goto err;
1290
1291
0
  if (posix_memalign(&tmp->reenc_buffer, device_alignment(crypt_data_device(cd)),
1292
0
         reencrypt_buffer_length(tmp))) {
1293
0
    r = -ENOMEM;
1294
0
    goto err;
1295
0
  }
1296
1297
0
  *rh = tmp;
1298
1299
0
  return 0;
1300
0
err:
1301
0
  LUKS2_reencrypt_free(cd, tmp);
1302
1303
0
  return r;
1304
0
}
1305
1306
static int reencrypt_make_segments(struct crypt_device *cd,
1307
  struct luks2_hdr *hdr,
1308
  struct luks2_reencrypt *rh,
1309
  uint64_t device_size)
1310
0
{
1311
0
  int r;
1312
0
  uint64_t data_offset = reencrypt_get_data_offset_new(hdr);
1313
1314
0
  log_dbg(cd, "Calculating segments.");
1315
1316
0
  r = reencrypt_make_hot_segments(cd, hdr, rh, device_size, data_offset);
1317
0
  if (!r) {
1318
0
    r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
1319
0
    if (r)
1320
0
      json_object_put(rh->jobj_segs_hot);
1321
0
  }
1322
1323
0
  if (r)
1324
0
    log_dbg(cd, "Failed to make reencryption segments.");
1325
1326
0
  return r;
1327
0
}
1328
1329
/*
 * Rebuild segment sets after a crash: the "hot" set is taken verbatim from
 * the on-disk header (minus backup segments), the "post" set is derived
 * from it. Returns 0 or a negative errno.
 */
static int reencrypt_make_segments_crashed(struct crypt_device *cd,
				struct luks2_hdr *hdr,
				struct luks2_reencrypt *rh)
{
	int r;
	uint64_t data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;

	if (!rh)
		return -EINVAL;

	rh->jobj_segs_hot = json_object_new_object();
	if (!rh->jobj_segs_hot)
		return -ENOMEM;

	/* reuse every non-backup segment from the on-disk header */
	json_object_object_foreach(LUKS2_get_segments_jobj(hdr), key, val) {
		if (json_segment_is_backup(val))
			continue;
		json_object_object_add(rh->jobj_segs_hot, key, json_object_get(val));
	}

	r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
	if (r) {
		json_object_put(rh->jobj_segs_hot);
		rh->jobj_segs_hot = NULL;
	}

	return r;
}
1357
1358
/*
 * Load a reencryption context for crash recovery: reuse the stored
 * parameters, take the hotzone length from the segment currently marked
 * in-reencryption and reconstruct the hot/post segment sets.
 * Returns 0 with *rh set, or a negative errno with *rh == NULL.
 */
static int reencrypt_load_crashed(struct crypt_device *cd,
	struct luks2_hdr *hdr, uint64_t device_size, struct luks2_reencrypt **rh)
{
	bool dynamic;
	uint64_t required_device_size;
	int r, reenc_seg;

	if (LUKS2_get_data_size(hdr, &required_device_size, &dynamic))
		return -EINVAL;

	/* dynamic data size means no fixed-size constraint */
	if (dynamic)
		required_device_size = 0;
	else
		required_device_size >>= SECTOR_SHIFT;

	r = reencrypt_load_clean(cd, hdr, device_size, 0, required_device_size, rh);

	if (!r) {
		/* hotzone length must match the interrupted segment exactly */
		reenc_seg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
		if (reenc_seg < 0)
			r = -EINVAL;
		else
			(*rh)->length = LUKS2_segment_size(hdr, reenc_seg, 0);
	}

	if (!r)
		r = reencrypt_make_segments_crashed(cd, hdr, *rh);

	if (r) {
		LUKS2_reencrypt_free(cd, *rh);
		*rh = NULL;
	}
	return r;
}
1392
1393
/*
 * Open the two crypto contexts used for data transfer: cw1 reads through
 * the old segment parameters (read-only), cw2 writes through the new ones.
 * The kernel crypto API backend is disabled for non-root users.
 * Returns 0 or a negative errno.
 */
static int reencrypt_init_storage_wrappers(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		struct volume_key *vks)
{
	int r;
	struct volume_key *vk;
	uint32_t wrapper_flags = (getuid() || geteuid()) ? 0 : DISABLE_KCAPI;

	/* old (source) segment wrapper, opened read-only */
	vk = crypt_volume_key_by_id(vks, rh->digest_old);
	r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
			reencrypt_get_data_offset_old(hdr),
			crypt_get_iv_offset(cd),
			reencrypt_get_sector_size_old(hdr),
			reencrypt_segment_cipher_old(hdr),
			vk, wrapper_flags | OPEN_READONLY);
	if (r) {
		log_err(cd, _("Failed to initialize old segment storage wrapper."));
		return r;
	}
	rh->wflags1 = wrapper_flags | OPEN_READONLY;
	log_dbg(cd, "Old cipher storage wrapper type: %d.", crypt_storage_wrapper_get_type(rh->cw1));

	/* new (target) segment wrapper, writable */
	vk = crypt_volume_key_by_id(vks, rh->digest_new);
	r = crypt_storage_wrapper_init(cd, &rh->cw2, crypt_data_device(cd),
			reencrypt_get_data_offset_new(hdr),
			crypt_get_iv_offset(cd),
			reencrypt_get_sector_size_new(hdr),
			reencrypt_segment_cipher_new(hdr),
			vk, wrapper_flags);
	if (r) {
		log_err(cd, _("Failed to initialize new segment storage wrapper."));
		return r;
	}
	rh->wflags2 = wrapper_flags;
	log_dbg(cd, "New cipher storage wrapper type: %d", crypt_storage_wrapper_get_type(rh->cw2));

	return 0;
}
1432
1433
static int reencrypt_context_set_names(struct luks2_reencrypt *rh, const char *name)
1434
0
{
1435
0
  if (!rh || !name)
1436
0
    return -EINVAL;
1437
1438
0
  if (*name == '/') {
1439
0
    if (!(rh->device_name = dm_device_name(name)))
1440
0
      return -EINVAL;
1441
0
  } else if (!(rh->device_name = strdup(name)))
1442
0
    return -ENOMEM;
1443
1444
0
  if (asprintf(&rh->hotzone_name, "%s-hotzone-%s", rh->device_name,
1445
0
         rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward") < 0) {
1446
0
    rh->hotzone_name = NULL;
1447
0
    return -ENOMEM;
1448
0
  }
1449
0
  if (asprintf(&rh->overlay_name, "%s-overlay", rh->device_name) < 0) {
1450
0
    rh->overlay_name = NULL;
1451
0
    return -ENOMEM;
1452
0
  }
1453
1454
0
  rh->online = true;
1455
0
  return 0;
1456
0
}
1457
1458
/*
 * Shift *offset by data_shift in the given direction: subtract for
 * forward (refusing to underflow), add for backward.
 * Returns 0 on success, -EINVAL on NULL offset, underflow or unknown
 * direction.
 */
static int modify_offset(uint64_t *offset, uint64_t data_shift, crypt_reencrypt_direction_info di)
{
	if (!offset)
		return -EINVAL;

	switch (di) {
	case CRYPT_REENCRYPT_FORWARD:
		if (*offset < data_shift)
			return -EINVAL;
		*offset -= data_shift;
		return 0;
	case CRYPT_REENCRYPT_BACKWARD:
		*offset += data_shift;
		return 0;
	default:
		return -EINVAL;
	}
}
1477
1478
static int reencrypt_update_flag(struct crypt_device *cd, uint8_t version,
1479
  bool enable, bool commit)
1480
0
{
1481
0
  uint32_t reqs;
1482
0
  struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
1483
1484
0
  if (enable) {
1485
0
    log_dbg(cd, "Going to store reencryption requirement flag (version: %u).", version);
1486
0
    return LUKS2_config_set_requirement_version(cd, hdr, CRYPT_REQUIREMENT_ONLINE_REENCRYPT, version, commit);
1487
0
  }
1488
1489
0
  LUKS2_config_get_requirements(cd, hdr, &reqs);
1490
1491
0
  reqs &= ~CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
1492
1493
0
  log_dbg(cd, "Going to wipe reencryption requirement flag.");
1494
1495
0
  return LUKS2_config_set_requirements(cd, hdr, reqs, commit);
1496
0
}
1497
1498
static int reencrypt_hotzone_protect_ready(struct crypt_device *cd,
1499
  struct reenc_protection *rp)
1500
0
{
1501
0
  assert(rp);
1502
1503
0
  if (rp->type == REENC_PROTECTION_NOT_SET)
1504
0
    return -EINVAL;
1505
1506
0
  if (rp->type != REENC_PROTECTION_CHECKSUM)
1507
0
    return 0;
1508
1509
0
  if (!rp->p.csum.checksums) {
1510
0
    log_dbg(cd, "Allocating buffer for storing resilience checksums.");
1511
0
    if (posix_memalign(&rp->p.csum.checksums, device_alignment(crypt_metadata_device(cd)),
1512
0
           rp->p.csum.checksums_len))
1513
0
      return -ENOMEM;
1514
0
  }
1515
1516
0
  return 0;
1517
0
}
1518
1519
static int reencrypt_recover_segment(struct crypt_device *cd,
1520
  struct luks2_hdr *hdr,
1521
  struct luks2_reencrypt *rh,
1522
  struct volume_key *vks)
1523
0
{
1524
0
  struct volume_key *vk_old, *vk_new;
1525
0
  size_t count, s;
1526
0
  ssize_t read, w;
1527
0
  struct reenc_protection *rp;
1528
0
  int devfd, r, new_sector_size, old_sector_size, rseg;
1529
0
  uint64_t area_offset, area_length, area_length_read, crash_iv_offset,
1530
0
     data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
1531
0
  char *checksum_tmp = NULL, *data_buffer = NULL;
1532
0
  struct crypt_storage_wrapper *cw1 = NULL, *cw2 = NULL;
1533
1534
0
  assert(hdr);
1535
0
  assert(rh);
1536
0
  assert(vks);
1537
1538
0
  rseg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
1539
0
  if (rh->offset == 0 && rh->rp_moved_segment.type > REENC_PROTECTION_NOT_SET) {
1540
0
    log_dbg(cd, "Recovery using moved segment protection.");
1541
0
    rp = &rh->rp_moved_segment;
1542
0
  } else
1543
0
    rp = &rh->rp;
1544
1545
0
  if (rseg < 0 || rh->length < 512)
1546
0
    return -EINVAL;
1547
1548
0
  r = reencrypt_hotzone_protect_ready(cd, rp);
1549
0
  if (r) {
1550
0
    log_err(cd, _("Failed to initialize hotzone protection."));
1551
0
    return -EINVAL;
1552
0
  }
1553
1554
0
  vk_new = crypt_volume_key_by_id(vks, rh->digest_new);
1555
0
  if (!vk_new && rh->mode != CRYPT_REENCRYPT_DECRYPT)
1556
0
    return -EINVAL;
1557
0
  vk_old = crypt_volume_key_by_id(vks, rh->digest_old);
1558
0
  if (!vk_old && rh->mode != CRYPT_REENCRYPT_ENCRYPT)
1559
0
    return -EINVAL;
1560
0
  old_sector_size = json_segment_get_sector_size(reencrypt_segment_old(hdr));
1561
0
  new_sector_size = json_segment_get_sector_size(reencrypt_segment_new(hdr));
1562
0
  if (rh->mode == CRYPT_REENCRYPT_DECRYPT)
1563
0
    crash_iv_offset = rh->offset >> SECTOR_SHIFT; /* TODO: + old iv_tweak */
1564
0
  else
1565
0
    crash_iv_offset = json_segment_get_iv_offset(json_segments_get_segment(rh->jobj_segs_hot, rseg));
1566
1567
0
  log_dbg(cd, "crash_offset: %" PRIu64 ", crash_length: %" PRIu64 ",  crash_iv_offset: %" PRIu64,
1568
0
    data_offset + rh->offset, rh->length, crash_iv_offset);
1569
1570
0
  r = crypt_storage_wrapper_init(cd, &cw2, crypt_data_device(cd),
1571
0
      data_offset + rh->offset, crash_iv_offset, new_sector_size,
1572
0
      reencrypt_segment_cipher_new(hdr), vk_new, 0);
1573
0
  if (r) {
1574
0
    log_err(cd, _("Failed to initialize new segment storage wrapper."));
1575
0
    return r;
1576
0
  }
1577
1578
0
  if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &area_offset, &area_length)) {
1579
0
    r = -EINVAL;
1580
0
    goto out;
1581
0
  }
1582
1583
0
  if (posix_memalign((void**)&data_buffer, device_alignment(crypt_data_device(cd)), rh->length)) {
1584
0
    r = -ENOMEM;
1585
0
    goto out;
1586
0
  }
1587
1588
0
  switch (rp->type) {
1589
0
  case  REENC_PROTECTION_CHECKSUM:
1590
0
    log_dbg(cd, "Checksums based recovery.");
1591
1592
0
    r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1593
0
        data_offset + rh->offset, crash_iv_offset, old_sector_size,
1594
0
        reencrypt_segment_cipher_old(hdr), vk_old, 0);
1595
0
    if (r) {
1596
0
      log_err(cd, _("Failed to initialize old segment storage wrapper."));
1597
0
      goto out;
1598
0
    }
1599
1600
0
    count = rh->length / rp->p.csum.block_size;
1601
0
    area_length_read = count * rp->p.csum.hash_size;
1602
0
    if (area_length_read > area_length) {
1603
0
      log_dbg(cd, "Internal error in calculated area_length.");
1604
0
      r = -EINVAL;
1605
0
      goto out;
1606
0
    }
1607
1608
0
    checksum_tmp = malloc(rp->p.csum.hash_size);
1609
0
    if (!checksum_tmp) {
1610
0
      r = -ENOMEM;
1611
0
      goto out;
1612
0
    }
1613
1614
    /* TODO: lock for read */
1615
0
    devfd = device_open(cd, crypt_metadata_device(cd), O_RDONLY);
1616
0
    if (devfd < 0)
1617
0
      goto out;
1618
1619
    /* read old data checksums */
1620
0
    read = read_lseek_blockwise(devfd, device_block_size(cd, crypt_metadata_device(cd)),
1621
0
          device_alignment(crypt_metadata_device(cd)), rp->p.csum.checksums, area_length_read, area_offset);
1622
0
    if (read < 0 || (size_t)read != area_length_read) {
1623
0
      log_err(cd, _("Failed to read checksums for current hotzone."));
1624
0
      r = -EINVAL;
1625
0
      goto out;
1626
0
    }
1627
1628
0
    read = crypt_storage_wrapper_read(cw2, 0, data_buffer, rh->length);
1629
0
    if (read < 0 || (size_t)read != rh->length) {
1630
0
      log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset + data_offset);
1631
0
      r = -EINVAL;
1632
0
      goto out;
1633
0
    }
1634
1635
0
    for (s = 0; s < count; s++) {
1636
0
      if (crypt_hash_write(rp->p.csum.ch, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size)) {
1637
0
        log_dbg(cd, "Failed to write hash.");
1638
0
        r = EINVAL;
1639
0
        goto out;
1640
0
      }
1641
0
      if (crypt_hash_final(rp->p.csum.ch, checksum_tmp, rp->p.csum.hash_size)) {
1642
0
        log_dbg(cd, "Failed to finalize hash.");
1643
0
        r = EINVAL;
1644
0
        goto out;
1645
0
      }
1646
0
      if (!memcmp(checksum_tmp, (char *)rp->p.csum.checksums + (s * rp->p.csum.hash_size), rp->p.csum.hash_size)) {
1647
0
        log_dbg(cd, "Sector %zu (size %zu, offset %zu) needs recovery", s, rp->p.csum.block_size, s * rp->p.csum.block_size);
1648
0
        if (crypt_storage_wrapper_decrypt(cw1, s * rp->p.csum.block_size, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size)) {
1649
0
          log_err(cd, _("Failed to decrypt sector %zu."), s);
1650
0
          r = -EINVAL;
1651
0
          goto out;
1652
0
        }
1653
0
        w = crypt_storage_wrapper_encrypt_write(cw2, s * rp->p.csum.block_size, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size);
1654
0
        if (w < 0 || (size_t)w != rp->p.csum.block_size) {
1655
0
          log_err(cd, _("Failed to recover sector %zu."), s);
1656
0
          r = -EINVAL;
1657
0
          goto out;
1658
0
        }
1659
0
      }
1660
0
    }
1661
1662
0
    r = 0;
1663
0
    break;
1664
0
  case  REENC_PROTECTION_JOURNAL:
1665
0
    log_dbg(cd, "Journal based recovery.");
1666
1667
    /* FIXME: validation candidate */
1668
0
    if (rh->length > area_length) {
1669
0
      r = -EINVAL;
1670
0
      log_dbg(cd, "Invalid journal size.");
1671
0
      goto out;
1672
0
    }
1673
1674
    /* TODO locking */
1675
0
    r = crypt_storage_wrapper_init(cd, &cw1, crypt_metadata_device(cd),
1676
0
        area_offset, crash_iv_offset, old_sector_size,
1677
0
        reencrypt_segment_cipher_old(hdr), vk_old, 0);
1678
0
    if (r) {
1679
0
      log_err(cd, _("Failed to initialize old segment storage wrapper."));
1680
0
      goto out;
1681
0
    }
1682
0
    read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
1683
0
    if (read < 0 || (size_t)read != rh->length) {
1684
0
      log_dbg(cd, "Failed to read journaled data.");
1685
0
      r = -EIO;
1686
      /* may content plaintext */
1687
0
      crypt_safe_memzero(data_buffer, rh->length);
1688
0
      goto out;
1689
0
    }
1690
0
    read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
1691
    /* may content plaintext */
1692
0
    crypt_safe_memzero(data_buffer, rh->length);
1693
0
    if (read < 0 || (size_t)read != rh->length) {
1694
0
      log_dbg(cd, "recovery write failed.");
1695
0
      r = -EINVAL;
1696
0
      goto out;
1697
0
    }
1698
1699
0
    r = 0;
1700
0
    break;
1701
0
  case  REENC_PROTECTION_DATASHIFT:
1702
0
    log_dbg(cd, "Data shift based recovery.");
1703
1704
0
    if (rseg == 0) {
1705
0
      r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1706
0
          json_segment_get_offset(rh->jobj_segment_moved, 0), 0,
1707
0
          reencrypt_get_sector_size_old(hdr),
1708
0
          reencrypt_segment_cipher_old(hdr), vk_old, 0);
1709
0
    } else {
1710
0
      if (rh->direction == CRYPT_REENCRYPT_FORWARD)
1711
0
        data_offset = data_offset + rh->offset + data_shift_value(rp);
1712
0
      else
1713
0
        data_offset = data_offset + rh->offset - data_shift_value(rp);
1714
0
      r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1715
0
          data_offset,
1716
0
          crash_iv_offset,
1717
0
          reencrypt_get_sector_size_old(hdr),
1718
0
          reencrypt_segment_cipher_old(hdr), vk_old, 0);
1719
0
    }
1720
0
    if (r) {
1721
0
      log_err(cd, _("Failed to initialize old segment storage wrapper."));
1722
0
      goto out;
1723
0
    }
1724
1725
0
    read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
1726
0
    if (read < 0 || (size_t)read != rh->length) {
1727
0
      log_dbg(cd, "Failed to read data.");
1728
0
      r = -EIO;
1729
      /* may content plaintext */
1730
0
      crypt_safe_memzero(data_buffer, rh->length);
1731
0
      goto out;
1732
0
    }
1733
1734
0
    read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
1735
    /* may content plaintext */
1736
0
    crypt_safe_memzero(data_buffer, rh->length);
1737
0
    if (read < 0 || (size_t)read != rh->length) {
1738
0
      log_dbg(cd, "recovery write failed.");
1739
0
      r = -EINVAL;
1740
0
      goto out;
1741
0
    }
1742
0
    r = 0;
1743
0
    break;
1744
0
  default:
1745
0
    r = -EINVAL;
1746
0
  }
1747
1748
0
  if (!r)
1749
0
    rh->read = rh->length;
1750
0
out:
1751
0
  free(data_buffer);
1752
0
  free(checksum_tmp);
1753
0
  crypt_storage_wrapper_destroy(cw1);
1754
0
  crypt_storage_wrapper_destroy(cw2);
1755
1756
0
  return r;
1757
0
}
1758
1759
static int reencrypt_add_moved_segment(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
1760
0
{
1761
0
  int digest = rh->digest_old, s = LUKS2_segment_first_unused_id(hdr);
1762
1763
0
  if (!rh->jobj_segment_moved)
1764
0
    return 0;
1765
1766
0
  if (s < 0)
1767
0
    return s;
1768
1769
0
  if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(rh->jobj_segment_moved))) {
1770
0
    json_object_put(rh->jobj_segment_moved);
1771
0
    return -EINVAL;
1772
0
  }
1773
1774
0
  if (!strcmp(json_segment_type(rh->jobj_segment_moved), "crypt"))
1775
0
    return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0);
1776
1777
0
  return 0;
1778
0
}
1779
1780
static int reencrypt_add_backup_segment(struct crypt_device *cd,
1781
    struct luks2_hdr *hdr,
1782
    struct luks2_reencrypt *rh,
1783
    unsigned final)
1784
0
{
1785
0
  int digest, s = LUKS2_segment_first_unused_id(hdr);
1786
0
  json_object *jobj;
1787
1788
0
  if (s < 0)
1789
0
    return s;
1790
1791
0
  digest = final ? rh->digest_new : rh->digest_old;
1792
0
  jobj = final ? rh->jobj_segment_new : rh->jobj_segment_old;
1793
1794
0
  if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(jobj))) {
1795
0
    json_object_put(jobj);
1796
0
    return -EINVAL;
1797
0
  }
1798
1799
0
  if (strcmp(json_segment_type(jobj), "crypt"))
1800
0
    return 0;
1801
1802
0
  return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0);
1803
0
}
1804
1805
static int reencrypt_assign_segments_simple(struct crypt_device *cd,
1806
  struct luks2_hdr *hdr,
1807
  struct luks2_reencrypt *rh,
1808
  unsigned hot,
1809
  unsigned commit)
1810
0
{
1811
0
  int r, sg;
1812
1813
0
  if (hot && json_segments_count(rh->jobj_segs_hot) > 0) {
1814
0
    log_dbg(cd, "Setting 'hot' segments.");
1815
1816
0
    r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
1817
0
    if (!r)
1818
0
      rh->jobj_segs_hot = NULL;
1819
0
  } else if (!hot && json_segments_count(rh->jobj_segs_post) > 0) {
1820
0
    log_dbg(cd, "Setting 'post' segments.");
1821
0
    r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
1822
0
    if (!r)
1823
0
      rh->jobj_segs_post = NULL;
1824
0
  } else {
1825
0
    log_dbg(cd, "No segments to set.");
1826
0
    return -EINVAL;
1827
0
  }
1828
1829
0
  if (r) {
1830
0
    log_dbg(cd, "Failed to assign new enc segments.");
1831
0
    return r;
1832
0
  }
1833
1834
0
  r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
1835
0
  if (r) {
1836
0
    log_dbg(cd, "Failed to assign reencryption previous backup segment.");
1837
0
    return r;
1838
0
  }
1839
1840
0
  r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
1841
0
  if (r) {
1842
0
    log_dbg(cd, "Failed to assign reencryption final backup segment.");
1843
0
    return r;
1844
0
  }
1845
1846
0
  r = reencrypt_add_moved_segment(cd, hdr, rh);
1847
0
  if (r) {
1848
0
    log_dbg(cd, "Failed to assign reencryption moved backup segment.");
1849
0
    return r;
1850
0
  }
1851
1852
0
  for (sg = 0; sg < LUKS2_segments_count(hdr); sg++) {
1853
0
    if (LUKS2_segment_is_type(hdr, sg, "crypt") &&
1854
0
        LUKS2_digest_segment_assign(cd, hdr, sg, rh->mode == CRYPT_REENCRYPT_ENCRYPT ? rh->digest_new : rh->digest_old, 1, 0)) {
1855
0
      log_dbg(cd, "Failed to assign digest %u to segment %u.", rh->digest_new, sg);
1856
0
      return -EINVAL;
1857
0
    }
1858
0
  }
1859
1860
0
  return commit ? LUKS2_hdr_write(cd, hdr) : 0;
1861
0
}
1862
1863
/*
 * Assign reencryption segments (and their digest references) into the
 * LUKS2 header: either the 'hot' set (hotzone in flight) or the 'post'
 * set (hotzone written). Encrypt/decrypt modes are delegated to
 * reencrypt_assign_segments_simple().
 *
 * Returns 0 on success (header written when @commit is set), negative
 * errno otherwise.
 */
static int reencrypt_assign_segments(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		unsigned hot,
		unsigned commit)
{
	bool forward;
	int rseg, scount, r = -EINVAL;

	/* FIXME: validate in reencrypt context load */
	if (rh->digest_new < 0 && rh->mode != CRYPT_REENCRYPT_DECRYPT)
		return -EINVAL;

	/* Drop all existing digest <-> segment references first. */
	if (LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0))
		return -EINVAL;

	if (rh->mode == CRYPT_REENCRYPT_ENCRYPT || rh->mode == CRYPT_REENCRYPT_DECRYPT)
		return reencrypt_assign_segments_simple(cd, hdr, rh, hot, commit);

	if (hot && rh->jobj_segs_hot) {
		log_dbg(cd, "Setting 'hot' segments.");

		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
		if (!r)
			rh->jobj_segs_hot = NULL; /* ownership moved into hdr */
	} else if (!hot && rh->jobj_segs_post) {
		log_dbg(cd, "Setting 'post' segments.");
		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
		if (!r)
			rh->jobj_segs_post = NULL; /* ownership moved into hdr */
	}

	if (r)
		return r;

	scount = LUKS2_segments_count(hdr);

	/* segment in reencryption has to hold reference on both digests */
	rseg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
	if (rseg < 0 && hot)
		return -EINVAL;

	if (rseg >= 0) {
		LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_new, 1, 0);
		LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_old, 1, 0);
	}

	forward = (rh->direction == CRYPT_REENCRYPT_FORWARD);
	if (hot) {
		/* forward: segment before hotzone is already new key, segment
		 * after it is still old key; reversed for backward direction */
		if (rseg > 0)
			LUKS2_digest_segment_assign(cd, hdr, 0, forward ? rh->digest_new : rh->digest_old, 1, 0);
		if (scount > rseg + 1)
			LUKS2_digest_segment_assign(cd, hdr, rseg + 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
	} else {
		/* single remaining segment means the whole device is in the new key */
		LUKS2_digest_segment_assign(cd, hdr, 0, forward || scount == 1 ? rh->digest_new : rh->digest_old, 1, 0);
		if (scount > 1)
			LUKS2_digest_segment_assign(cd, hdr, 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
	}

	r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
	if (r) {
		log_dbg(cd, "Failed to assign hot reencryption backup segment.");
		return r;
	}
	r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
	if (r) {
		log_dbg(cd, "Failed to assign post reencryption backup segment.");
		return r;
	}

	return commit ? LUKS2_hdr_write(cd, hdr) : 0;
}
1935
1936
static int reencrypt_set_encrypt_segments(struct crypt_device *cd, struct luks2_hdr *hdr,
1937
            uint64_t dev_size, uint64_t data_size, uint64_t data_shift, bool move_first_segment,
1938
            crypt_reencrypt_direction_info di)
1939
0
{
1940
0
  int r;
1941
0
  uint64_t first_segment_offset, first_segment_length,
1942
0
     second_segment_offset, second_segment_length,
1943
0
     data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
1944
0
  json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments;
1945
1946
0
  if (dev_size < data_shift)
1947
0
    return -EINVAL;
1948
1949
0
  if (data_shift && (di == CRYPT_REENCRYPT_FORWARD))
1950
0
    return -ENOTSUP;
1951
1952
0
  if (move_first_segment) {
1953
    /*
1954
     * future data_device layout:
1955
     * [future LUKS2 header (data shift size)][second data segment][gap (data shift size)][first data segment (data shift size)]
1956
     */
1957
0
    first_segment_offset = dev_size;
1958
0
    if (data_size < data_shift) {
1959
0
      first_segment_length = data_size;
1960
0
      second_segment_length = second_segment_offset = 0;
1961
0
    } else {
1962
0
      first_segment_length = data_shift;
1963
0
      second_segment_offset = data_shift;
1964
0
      second_segment_length = data_size - data_shift;
1965
0
    }
1966
0
  } else if (data_shift) {
1967
0
    first_segment_offset = data_offset;
1968
0
    first_segment_length = dev_size;
1969
0
  } else {
1970
    /* future data_device layout with detached header: [first data segment] */
1971
0
    first_segment_offset = data_offset;
1972
0
    first_segment_length = 0; /* dynamic */
1973
0
  }
1974
1975
0
  jobj_segments = json_object_new_object();
1976
0
  if (!jobj_segments)
1977
0
    return -ENOMEM;
1978
1979
0
  r = -EINVAL;
1980
0
  if (move_first_segment) {
1981
0
    jobj_segment_first =  json_segment_create_linear(first_segment_offset, &first_segment_length, 0);
1982
0
    if (second_segment_length &&
1983
0
        !(jobj_segment_second = json_segment_create_linear(second_segment_offset, &second_segment_length, 0))) {
1984
0
      log_dbg(cd, "Failed generate 2nd segment.");
1985
0
      return r;
1986
0
    }
1987
0
  } else
1988
0
    jobj_segment_first =  json_segment_create_linear(first_segment_offset, first_segment_length ? &first_segment_length : NULL, 0);
1989
1990
0
  if (!jobj_segment_first) {
1991
0
    log_dbg(cd, "Failed generate 1st segment.");
1992
0
    return r;
1993
0
  }
1994
1995
0
  json_object_object_add(jobj_segments, "0", jobj_segment_first);
1996
0
  if (jobj_segment_second)
1997
0
    json_object_object_add(jobj_segments, "1", jobj_segment_second);
1998
1999
0
  r = LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0);
2000
2001
0
  return r ?: LUKS2_segments_set(cd, hdr, jobj_segments, 0);
2002
0
}
2003
2004
/*
 * Build segments describing a device being decrypted with data shift
 * (in-data-device LUKS2 header is going away): a first crypt segment at
 * device offset 0 (the moved data) and, when more data follows, a second
 * crypt segment after the gap. On success all segments are bound to the
 * current default-segment digest.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_set_decrypt_shift_segments(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	uint64_t dev_size,
	uint64_t moved_segment_length,
	crypt_reencrypt_direction_info di)
{
	int digest, r;
	uint64_t data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
	json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments;

	/* data shift decryption is supported in forward direction only */
	if (di == CRYPT_REENCRYPT_BACKWARD)
		return -ENOTSUP;

	digest = LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT);
	if (digest < 0)
		return -EINVAL;

	/*
	 * future data_device layout:
	 * [encrypted first segment (max data shift size)][gap (data shift size)][second encrypted data segment]
	 */
	jobj_segments = json_object_new_object();
	if (!jobj_segments)
		return -ENOMEM;

	r = -EINVAL;
	jobj_segment_first = json_segment_create_crypt(0, crypt_get_iv_offset(cd),
				&moved_segment_length, crypt_get_cipher_spec(cd),
				NULL, 0, crypt_get_sector_size(cd), 0);

	if (!jobj_segment_first) {
		log_dbg(cd, "Failed generate 1st segment.");
		goto err;
	}

	/* NOTE(review): _by_ref helper presumably clears the pointer on
	 * success so the err-path put below stays safe — verify */
	r = json_object_object_add_by_uint_by_ref(jobj_segments, 0, &jobj_segment_first);
	if (r)
		goto err;

	if (dev_size > moved_segment_length) {
		/* 2nd segment: original IV offset shifted by the moved length */
		jobj_segment_second = json_segment_create_crypt(data_offset + moved_segment_length,
					crypt_get_iv_offset(cd) + (moved_segment_length >> SECTOR_SHIFT),
					NULL,
					crypt_get_cipher_spec(cd),
					NULL, 0, /* integrity */
					crypt_get_sector_size(cd), 0);
		if (!jobj_segment_second) {
			r = -EINVAL;
			log_dbg(cd, "Failed generate 2nd segment.");
			goto err;
		}

		r = json_object_object_add_by_uint_by_ref(jobj_segments, 1, &jobj_segment_second);
		if (r)
			goto err;
	}

	/* LUKS2_segments_set takes ownership of jobj_segments on success */
	if (!(r = LUKS2_segments_set(cd, hdr, jobj_segments, 0)))
		return LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, digest, 1, 0);
err:
	json_object_put(jobj_segment_first);
	json_object_put(jobj_segment_second);
	json_object_put(jobj_segments);
	return r;
}
2069
2070
static int reencrypt_make_targets(struct crypt_device *cd,
2071
        struct luks2_hdr *hdr,
2072
        struct device *hz_device,
2073
        struct volume_key *vks,
2074
        struct dm_target *result,
2075
        uint64_t size)
2076
0
{
2077
0
  bool reenc_seg;
2078
0
  struct volume_key *vk;
2079
0
  uint64_t segment_size, segment_offset, segment_start = 0;
2080
0
  int r;
2081
0
  int s = 0;
2082
0
  json_object *jobj, *jobj_segments = LUKS2_get_segments_jobj(hdr);
2083
2084
0
  while (result) {
2085
0
    jobj = json_segments_get_segment(jobj_segments, s);
2086
0
    if (!jobj) {
2087
0
      log_dbg(cd, "Internal error. Segment %u is null.", s);
2088
0
      return -EINVAL;
2089
0
    }
2090
2091
0
    reenc_seg = (s == json_segments_segment_in_reencrypt(jobj_segments));
2092
2093
0
    segment_offset = json_segment_get_offset(jobj, 1);
2094
0
    segment_size = json_segment_get_size(jobj, 1);
2095
    /* 'dynamic' length allowed in last segment only */
2096
0
    if (!segment_size && !result->next)
2097
0
      segment_size = (size >> SECTOR_SHIFT) - segment_start;
2098
0
    if (!segment_size) {
2099
0
      log_dbg(cd, "Internal error. Wrong segment size %u", s);
2100
0
      return -EINVAL;
2101
0
    }
2102
2103
0
    if (reenc_seg)
2104
0
      segment_offset -= crypt_get_data_offset(cd);
2105
2106
0
    if (!strcmp(json_segment_type(jobj), "crypt")) {
2107
0
      vk = crypt_volume_key_by_id(vks, reenc_seg ? LUKS2_reencrypt_digest_new(hdr) : LUKS2_digest_by_segment(hdr, s));
2108
0
      if (!vk) {
2109
0
        log_err(cd, _("Missing key for dm-crypt segment %u"), s);
2110
0
        return -EINVAL;
2111
0
      }
2112
2113
0
      r = dm_crypt_target_set(result, segment_start, segment_size,
2114
0
            reenc_seg ? hz_device : crypt_data_device(cd),
2115
0
            vk,
2116
0
            json_segment_get_cipher(jobj),
2117
0
            json_segment_get_iv_offset(jobj),
2118
0
            segment_offset,
2119
0
            "none", 0, 0,
2120
0
            json_segment_get_sector_size(jobj));
2121
0
      if (r) {
2122
0
        log_err(cd, _("Failed to set dm-crypt segment."));
2123
0
        return r;
2124
0
      }
2125
0
    } else if (!strcmp(json_segment_type(jobj), "linear")) {
2126
0
      r = dm_linear_target_set(result, segment_start, segment_size, reenc_seg ? hz_device : crypt_data_device(cd), segment_offset);
2127
0
      if (r) {
2128
0
        log_err(cd, _("Failed to set dm-linear segment."));
2129
0
        return r;
2130
0
      }
2131
0
    } else
2132
0
      return EINVAL;
2133
2134
0
    segment_start += segment_size;
2135
0
    s++;
2136
0
    result = result->next;
2137
0
  }
2138
2139
0
  return s;
2140
0
}
2141
2142
/* GLOBAL FIXME: audit function names and parameters names */
2143
2144
/* FIXME:
2145
 *  1) audit log routines
2146
 *  2) can't we derive hotzone device name from crypt context? (unlocked name, device uuid, etc?)
2147
 */
2148
static int reencrypt_load_overlay_device(struct crypt_device *cd, struct luks2_hdr *hdr,
2149
  const char *overlay, struct device *hotzone_device, struct volume_key *vks, uint64_t size,
2150
  uint32_t flags)
2151
0
{
2152
0
  int r;
2153
2154
0
  struct crypt_dm_active_device dmd = {
2155
0
    .flags = flags,
2156
0
  };
2157
2158
0
  log_dbg(cd, "Loading new table for overlay device %s.", overlay);
2159
2160
0
  r = dm_targets_allocate(&dmd.segment, LUKS2_segments_count(hdr));
2161
0
  if (r)
2162
0
    goto out;
2163
2164
0
  r = reencrypt_make_targets(cd, hdr, hotzone_device, vks, &dmd.segment, size);
2165
0
  if (r < 0)
2166
0
    goto out;
2167
2168
0
  r = dm_reload_device(cd, overlay, &dmd, 0, 0);
2169
2170
  /* what else on error here ? */
2171
0
out:
2172
0
  dm_targets_free(cd, &dmd);
2173
2174
0
  return r;
2175
0
}
2176
2177
/*
 * Replace the live table of @target with the full table of @source.
 * If @target does not exist yet it is created as a CRYPT_SUBDEV mapping
 * instead. When replacing, sizes of both mappings must match.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_replace_device(struct crypt_device *cd, const char *target, const char *source, uint32_t flags)
{
	int r, exists = 1;
	struct crypt_dm_active_device dmd_source, dmd_target = {};
	/* suspend without lockfs/flush: device is mid-reencryption */
	uint64_t dmflags = DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH;

	log_dbg(cd, "Replacing table in device %s with table from device %s.", target, source);

	/* check only whether target device exists */
	r = dm_status_device(cd, target);
	if (r < 0) {
		if (r == -ENODEV)
			exists = 0;
		else
			return r;
	}

	/* query source including cipher and key so the table can be recreated */
	r = dm_query_device(cd, source, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
			    DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY, &dmd_source);

	if (r < 0)
		return r;

	if (exists && ((r = dm_query_device(cd, target, 0, &dmd_target)) < 0))
		goto out;

	dmd_source.flags |= flags;
	dmd_source.uuid = crypt_get_uuid(cd);

	if (exists) {
		if (dmd_target.size != dmd_source.size) {
			log_err(cd, _("Source and target device sizes don't match. Source %" PRIu64 ", target: %" PRIu64 "."),
				dmd_source.size, dmd_target.size);
			r = -EINVAL;
			goto out;
		}
		r = dm_reload_device(cd, target, &dmd_source, 0, 0);
		if (!r) {
			log_dbg(cd, "Resuming device %s", target);
			r = dm_resume_device(cd, target, dmflags | act2dmflags(dmd_source.flags));
		}
	} else
		r = dm_create_device(cd, target, CRYPT_SUBDEV, &dmd_source);
out:
	dm_targets_free(cd, &dmd_source);
	dm_targets_free(cd, &dmd_target);

	return r;
}
2226
2227
static int reencrypt_swap_backing_device(struct crypt_device *cd, const char *name,
2228
            const char *new_backend_name)
2229
0
{
2230
0
  int r;
2231
0
  struct device *overlay_dev = NULL;
2232
0
  char overlay_path[PATH_MAX] = { 0 };
2233
0
  struct crypt_dm_active_device dmd = {};
2234
2235
0
  log_dbg(cd, "Redirecting %s mapping to new backing device: %s.", name, new_backend_name);
2236
2237
0
  r = snprintf(overlay_path, PATH_MAX, "%s/%s", dm_get_dir(), new_backend_name);
2238
0
  if (r < 0 || r >= PATH_MAX) {
2239
0
    r = -EINVAL;
2240
0
    goto out;
2241
0
  }
2242
2243
0
  r = device_alloc(cd, &overlay_dev, overlay_path);
2244
0
  if (r)
2245
0
    goto out;
2246
2247
0
  r = device_block_adjust(cd, overlay_dev, DEV_OK,
2248
0
        0, &dmd.size, &dmd.flags);
2249
0
  if (r)
2250
0
    goto out;
2251
2252
0
  r = dm_linear_target_set(&dmd.segment, 0, dmd.size, overlay_dev, 0);
2253
0
  if (r)
2254
0
    goto out;
2255
2256
0
  r = dm_reload_device(cd, name, &dmd, 0, 0);
2257
0
  if (!r) {
2258
0
    log_dbg(cd, "Resuming device %s", name);
2259
0
    r = dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2260
0
  }
2261
2262
0
out:
2263
0
  dm_targets_free(cd, &dmd);
2264
0
  device_free(cd, overlay_dev);
2265
2266
0
  return r;
2267
0
}
2268
2269
/*
 * Create the hotzone dm device: a 1:1 linear mapping onto the data device
 * starting at the new data offset, sized @device_size bytes.
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_activate_hotzone_device(struct crypt_device *cd, const char *name, uint64_t device_size, uint32_t flags)
{
	uint64_t new_offset = reencrypt_get_data_offset_new(crypt_get_hdr(cd, CRYPT_LUKS2)) >> SECTOR_SHIFT;
	struct crypt_dm_active_device dmd = {
		.flags = flags,
		.uuid = crypt_get_uuid(cd),
		.size = device_size >> SECTOR_SHIFT
	};
	int r;

	log_dbg(cd, "Activating hotzone device %s.", name);

	r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
				new_offset, &dmd.size, &dmd.flags);
	if (!r) {
		r = dm_linear_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd), new_offset);
		if (!r)
			r = dm_create_device(cd, name, CRYPT_SUBDEV, &dmd);
	}

	dm_targets_free(cd, &dmd);

	return r;
}
2297
2298
/*
 * Build the online reencryption device stack:
 *   1) activate a private hotzone device (1:1 linear over the data device),
 *   2) activate an overlay device with a copy of the origin's live table,
 *   3) swap the origin mapping to a linear mapping onto the overlay.
 * On failure both helper devices are removed again.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_init_device_stack(struct crypt_device *cd,
		                       struct luks2_reencrypt *rh)
{
	int r;
	char hz_path[PATH_MAX];

	assert(rh);
	assert(!rh->hotzone_device);

	/* Activate hotzone device 1:1 linear mapping to data_device */
	r = reencrypt_activate_hotzone_device(cd, rh->hotzone_name, rh->device_size, CRYPT_ACTIVATE_PRIVATE);
	if (r) {
		log_err(cd, _("Failed to activate hotzone device %s."), rh->hotzone_name);
		return r;
	}

	r = snprintf(hz_path, PATH_MAX, "%s/%s", dm_get_dir(), rh->hotzone_name);
	if (r < 0 || r >= PATH_MAX) {
		r = -EINVAL;
		goto err;
	}

	r = device_alloc(cd, &rh->hotzone_device, hz_path);
	if (r) {
		log_err(cd, _("Failed to allocate hotzone device %s."), rh->hotzone_name);
		goto err;
	}

	/*
	 * Activate overlay device with exactly same table as original 'name' mapping.
	 * Note that within this step the 'name' device may already include a table
	 * constructed from more than single dm-crypt segment. Therefore transfer
	 * mapping as is.
	 *
	 * If we're about to resume reencryption orig mapping has to be already validated for
	 * abrupt shutdown and rchunk_offset has to point on next chunk to reencrypt!
	 *
	 * TODO: in crypt_activate_by*
	 */
	r = reencrypt_replace_device(cd, rh->overlay_name, rh->device_name, CRYPT_ACTIVATE_PRIVATE);
	if (r) {
		log_err(cd, _("Failed to activate overlay device %s with actual origin table."), rh->overlay_name);
		goto err;
	}

	/* swap origin mapping to overlay device */
	r = reencrypt_swap_backing_device(cd, rh->device_name, rh->overlay_name);
	if (r) {
		log_err(cd, _("Failed to load new mapping for device %s."), rh->device_name);
		goto err;
	}

	/*
	 * Now the 'name' (unlocked luks) device is mapped via dm-linear to an overlay dev.
	 * The overlay device has a original live table of 'name' device in-before the swap.
	 */

	return 0;
err:
	/* TODO: force error helper devices on error path */
	dm_remove_device(cd, rh->overlay_name, 0);
	dm_remove_device(cd, rh->hotzone_name, 0);

	return r;
}
2363
2364
/* TODO:
2365
 *  1) audit error path. any error in this routine is fatal and should be unlikely.
2366
 *     usually it would hint some collision with another userspace process touching
2367
 *     dm devices directly.
2368
 */
2369
static int reenc_refresh_helper_devices(struct crypt_device *cd, const char *overlay, const char *hotzone)
2370
0
{
2371
0
  int r;
2372
2373
  /*
2374
   * we have to explicitly suspend the overlay device before suspending
2375
   * the hotzone one. Resuming overlay device (aka switching tables) only
2376
   * after suspending the hotzone may lead to deadlock.
2377
   *
2378
   * In other words: always suspend the stack from top to bottom!
2379
   */
2380
0
  r = dm_suspend_device(cd, overlay, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2381
0
  if (r) {
2382
0
    log_err(cd, _("Failed to suspend device %s."), overlay);
2383
0
    return r;
2384
0
  }
2385
2386
  /* suspend HZ device */
2387
0
  r = dm_suspend_device(cd, hotzone, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2388
0
  if (r) {
2389
0
    log_err(cd, _("Failed to suspend device %s."), hotzone);
2390
0
    return r;
2391
0
  }
2392
2393
  /* resume overlay device: inactive table (with hotozne) -> live */
2394
0
  r = dm_resume_device(cd, overlay, DM_RESUME_PRIVATE);
2395
0
  if (r)
2396
0
    log_err(cd, _("Failed to resume device %s."), overlay);
2397
2398
0
  return r;
2399
0
}
2400
2401
static int reencrypt_refresh_overlay_devices(struct crypt_device *cd,
2402
    struct luks2_hdr *hdr,
2403
    const char *overlay,
2404
    const char *hotzone,
2405
    struct device *hotzone_device,
2406
    struct volume_key *vks,
2407
    uint64_t device_size,
2408
    uint32_t flags)
2409
0
{
2410
0
  int r = reencrypt_load_overlay_device(cd, hdr, overlay, hotzone_device, vks, device_size, flags);
2411
0
  if (r) {
2412
0
    log_err(cd, _("Failed to reload device %s."), overlay);
2413
0
    return REENC_ERR;
2414
0
  }
2415
2416
0
  r = reenc_refresh_helper_devices(cd, overlay, hotzone);
2417
0
  if (r) {
2418
0
    log_err(cd, _("Failed to refresh reencryption devices stack."));
2419
0
    return REENC_ROLLBACK;
2420
0
  }
2421
2422
0
  return REENC_OK;
2423
0
}
2424
2425
static int reencrypt_move_data(struct crypt_device *cd,
2426
  int devfd,
2427
  uint64_t data_shift,
2428
  crypt_reencrypt_mode_info mode)
2429
0
{
2430
0
  void *buffer;
2431
0
  int r;
2432
0
  ssize_t ret;
2433
0
  uint64_t buffer_len, offset,
2434
0
     read_offset = (mode == CRYPT_REENCRYPT_ENCRYPT ? 0 : data_shift);
2435
0
  struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
2436
2437
0
  offset = json_segment_get_offset(LUKS2_get_segment_jobj(hdr, 0), 0);
2438
0
  buffer_len = json_segment_get_size(LUKS2_get_segment_jobj(hdr, 0), 0);
2439
0
  if (!buffer_len || buffer_len > data_shift)
2440
0
    return -EINVAL;
2441
2442
0
  if (posix_memalign(&buffer, device_alignment(crypt_data_device(cd)), buffer_len))
2443
0
    return -ENOMEM;
2444
2445
0
  ret = read_lseek_blockwise(devfd,
2446
0
      device_block_size(cd, crypt_data_device(cd)),
2447
0
      device_alignment(crypt_data_device(cd)),
2448
0
      buffer, buffer_len, read_offset);
2449
0
  if (ret < 0 || (uint64_t)ret != buffer_len) {
2450
0
    log_dbg(cd, "Failed to read data at offset %" PRIu64 " (size: %zu)",
2451
0
      read_offset, buffer_len);
2452
0
    r = -EIO;
2453
0
    goto out;
2454
0
  }
2455
2456
0
  log_dbg(cd, "Going to write %" PRIu64 " bytes read at offset %" PRIu64 " to new offset %" PRIu64,
2457
0
    buffer_len, read_offset, offset);
2458
0
  ret = write_lseek_blockwise(devfd,
2459
0
      device_block_size(cd, crypt_data_device(cd)),
2460
0
      device_alignment(crypt_data_device(cd)),
2461
0
      buffer, buffer_len, offset);
2462
0
  if (ret < 0 || (uint64_t)ret != buffer_len) {
2463
0
    log_dbg(cd, "Failed to write data at offset %" PRIu64 " (size: %zu)",
2464
0
      offset, buffer_len);
2465
0
    r = -EIO;
2466
0
    goto out;
2467
0
  }
2468
2469
0
  r = 0;
2470
0
out:
2471
0
  crypt_safe_memzero(buffer, buffer_len);
2472
0
  free(buffer);
2473
0
  return r;
2474
0
}
2475
2476
/*
 * Create the backup segments describing the reencryption endpoints in @hdr:
 *  - optional "backup-moved-segment" (copy of segment 0) when the first
 *    segment is being moved,
 *  - "backup-previous": the pre-reencryption layout (crypt or linear),
 *  - "backup-final": the post-reencryption layout (crypt, or linear for
 *    decryption).
 * Digests are bound to crypt segments as they are added. For forward data
 * shift on a single device the keyslots area is shrunk to the new data
 * offset. All created objects are released on error.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_make_backup_segments(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		int digest_new,
		const char *cipher,
		uint64_t data_offset,
		const struct crypt_params_reencrypt *params)
{
	const char *type;
	int r, segment, moved_segment = -1, digest_old = -1;
	json_object *jobj_tmp, *jobj_segment_new = NULL, *jobj_segment_old = NULL, *jobj_segment_bcp = NULL;
	uint32_t sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
	uint64_t segment_offset, tmp, data_shift = params->data_shift << SECTOR_SHIFT,
		 device_size = params->device_size << SECTOR_SHIFT;

	/* only decryption may run without a new volume key digest */
	if (params->mode != CRYPT_REENCRYPT_DECRYPT && digest_new < 0)
		return -EINVAL;

	/* anything but initial encryption needs the current (old) digest */
	if (params->mode != CRYPT_REENCRYPT_ENCRYPT) {
		digest_old = LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT);
		if (digest_old < 0)
			return -EINVAL;
	}

	segment = LUKS2_segment_first_unused_id(hdr);
	if (segment < 0)
		return -EINVAL;

	if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT) {
		/* keep a flagged copy of segment 0 as the moved-segment backup */
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_segment_bcp)) {
			r = -EINVAL;
			goto err;
		}
		r = LUKS2_segment_set_flag(jobj_segment_bcp, "backup-moved-segment");
		if (r)
			goto err;
		moved_segment = segment++;
		r = json_object_object_add_by_uint_by_ref(LUKS2_get_segments_jobj(hdr), moved_segment, &jobj_segment_bcp);
		if (r)
			goto err;

		if (!(type = json_segment_type(LUKS2_get_segment_jobj(hdr, moved_segment)))) {
			r = -EINVAL;
			goto err;
		}

		if (!strcmp(type, "crypt") && ((r = LUKS2_digest_segment_assign(cd, hdr, moved_segment, digest_old, 1, 0))))
			goto err;
	}

	/* FIXME: Add detection for case (digest old == digest new && old segment == new segment) */
	if (digest_old >= 0) {
		if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT) {
			/* rebuild old segment at data_offset, parameters taken from segment 0 */
			jobj_tmp = LUKS2_get_segment_jobj(hdr, 0);
			if (!jobj_tmp) {
				r = -EINVAL;
				goto err;
			}

			jobj_segment_old = json_segment_create_crypt(data_offset,
						json_segment_get_iv_offset(jobj_tmp),
						device_size ? &device_size : NULL,
						json_segment_get_cipher(jobj_tmp),
						NULL, 0, /* integrity */
						json_segment_get_sector_size(jobj_tmp),
						0);
		} else {
			if (json_object_copy(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT), &jobj_segment_old)) {
				r = -EINVAL;
				goto err;
			}
		}
	} else if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
		/* plaintext device: the previous layout is a linear segment */
		r = LUKS2_get_data_size(hdr, &tmp, NULL);
		if (r)
			goto err;

		if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT)
			jobj_segment_old = json_segment_create_linear(0, tmp ? &tmp : NULL, 0);
		else
			jobj_segment_old = json_segment_create_linear(data_offset, tmp ? &tmp : NULL, 0);
	}

	if (!jobj_segment_old) {
		r = -EINVAL;
		goto err;
	}

	r = LUKS2_segment_set_flag(jobj_segment_old, "backup-previous");
	if (r)
		goto err;

	r = json_object_object_add_by_uint_by_ref(LUKS2_get_segments_jobj(hdr), segment, &jobj_segment_old);
	if (r)
		goto err;

	if (digest_old >= 0 && (r = LUKS2_digest_segment_assign(cd, hdr, segment, digest_old, 1, 0)))
		goto err;

	segment++;

	if (digest_new >= 0) {
		/* final layout: crypt segment with new cipher/key */
		segment_offset = data_offset;
		if (params->mode != CRYPT_REENCRYPT_ENCRYPT &&
		    modify_offset(&segment_offset, data_shift, params->direction)) {
			r = -EINVAL;
			goto err;
		}
		jobj_segment_new = json_segment_create_crypt(segment_offset,
							crypt_get_iv_offset(cd),
							NULL, cipher, NULL, 0, sector_size, 0);
	} else if (params->mode == CRYPT_REENCRYPT_DECRYPT) {
		/* final layout for decryption: plain linear segment */
		segment_offset = data_offset;
		if (modify_offset(&segment_offset, data_shift, params->direction)) {
			r = -EINVAL;
			goto err;
		}
		jobj_segment_new = json_segment_create_linear(segment_offset, NULL, 0);
	}

	if (!jobj_segment_new) {
		r = -EINVAL;
		goto err;
	}

	r = LUKS2_segment_set_flag(jobj_segment_new, "backup-final");
	if (r)
		goto err;

	r = json_object_object_add_by_uint_by_ref(LUKS2_get_segments_jobj(hdr), segment, &jobj_segment_new);
	if (r)
		goto err;

	if (digest_new >= 0 && (r = LUKS2_digest_segment_assign(cd, hdr, segment, digest_new, 1, 0)))
		goto err;

	/* FIXME: also check occupied space by keyslot in shrunk area */
	if (params->direction == CRYPT_REENCRYPT_FORWARD && data_shift &&
	    crypt_metadata_device(cd) == crypt_data_device(cd) &&
	    LUKS2_set_keyslots_size(hdr, json_segment_get_offset(reencrypt_segment_new(hdr), 0))) {
		log_err(cd, _("Failed to set new keyslots area size."));
		r = -EINVAL;
		goto err;
	}

	return 0;
err:
	/* pointers already consumed by _by_ref helpers are NULL here */
	json_object_put(jobj_segment_new);
	json_object_put(jobj_segment_old);
	json_object_put(jobj_segment_bcp);
	return r;
}
2627
2628
/* Look up the key for @digest in the @vks list and verify it against the digest.
 * Returns 0 on success, -ENOENT if no key with that id exists, -EINVAL if the
 * key does not match the stored digest. */
static int reencrypt_verify_single_key(struct crypt_device *cd, int digest, struct volume_key *vks)
{
	struct volume_key *key = crypt_volume_key_by_id(vks, digest);

	if (!key)
		return -ENOENT;

	return LUKS2_digest_verify_by_digest(cd, digest, key) == digest ? 0 : -EINVAL;
}
2641
2642
/* Verify both reencryption keys (new first, then old) against their digests.
 * A negative digest id means the respective key is not required (e.g. pure
 * encryption or decryption) and is skipped. Returns 0 or first error. */
static int reencrypt_verify_keys(struct crypt_device *cd,
	int digest_old,
	int digest_new,
	struct volume_key *vks)
{
	int r = 0;

	if (digest_new >= 0)
		r = reencrypt_verify_single_key(cd, digest_new, vks);

	if (!r && digest_old >= 0)
		r = reencrypt_verify_single_key(cd, digest_old, vks);

	return r;
}
2657
2658
/* Upload the key identified by @digest from @vks into the kernel keyring.
 * Returns -EINVAL when the key is missing from the list, otherwise the
 * result of the keyring upload. */
static int reencrypt_upload_single_key(struct crypt_device *cd,
	int digest,
	struct volume_key *vks)
{
	struct volume_key *key = crypt_volume_key_by_id(vks, digest);

	return key ? LUKS2_volume_key_load_in_keyring_by_digest(cd, key, digest) : -EINVAL;
}
2670
2671
/* Upload old and new reencryption keys into the kernel keyring (when keyring
 * use is enabled). Keys for null ciphers are skipped, as are negative digest
 * ids. On failure uploading the old key, the already-uploaded new key is
 * dropped again so no partial state is left behind. */
static int reencrypt_upload_keys(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	int digest_old,
	int digest_new,
	struct volume_key *vks)
{
	int r;

	if (!crypt_use_keyring_for_vk(cd))
		return 0;

	if (digest_new >= 0 && !crypt_is_cipher_null(reencrypt_segment_cipher_new(hdr))) {
		r = reencrypt_upload_single_key(cd, digest_new, vks);
		if (r)
			return r;
	}

	if (digest_old >= 0 && !crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr))) {
		r = reencrypt_upload_single_key(cd, digest_old, vks);
		if (r) {
			/* roll back the key uploaded above */
			crypt_drop_uploaded_keyring_key(cd, vks);
			return r;
		}
	}

	return 0;
}
2694
2695
/* Convenience wrapper: verify both reencryption keys against their digests,
 * then upload them to the kernel keyring. Returns 0 or the first error. */
static int reencrypt_verify_and_upload_keys(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	int digest_old,
	int digest_new,
	struct volume_key *vks)
{
	int r = reencrypt_verify_keys(cd, digest_old, digest_new, vks);

	if (!r)
		r = reencrypt_upload_keys(cd, hdr, digest_old, digest_new, vks);

	return r;
}
2713
2714
static int reencrypt_verify_checksum_params(struct crypt_device *cd,
2715
    const struct crypt_params_reencrypt *params)
2716
0
{
2717
0
  size_t len;
2718
0
  struct crypt_hash *ch;
2719
2720
0
  assert(params);
2721
2722
0
  if (!params->hash)
2723
0
    return -EINVAL;
2724
2725
0
  len = strlen(params->hash);
2726
0
  if (!len || len > (LUKS2_CHECKSUM_ALG_L - 1))
2727
0
    return -EINVAL;
2728
2729
0
  if (crypt_hash_size(params->hash) <= 0)
2730
0
    return -EINVAL;
2731
2732
0
  if (crypt_hash_init(&ch, params->hash)) {
2733
0
    log_err(cd, _("Hash algorithm %s is not available."), params->hash);
2734
0
    return -EINVAL;
2735
0
  }
2736
  /* We just check for alg availability */
2737
0
  crypt_hash_destroy(ch);
2738
2739
0
  return 0;
2740
0
}
2741
2742
static int reencrypt_verify_datashift_params(struct crypt_device *cd,
2743
    const struct crypt_params_reencrypt *params,
2744
    uint32_t sector_size)
2745
0
{
2746
0
  assert(params);
2747
2748
0
  if (!params->data_shift)
2749
0
    return -EINVAL;
2750
0
  if (MISALIGNED(params->data_shift, sector_size >> SECTOR_SHIFT)) {
2751
0
    log_err(cd, _("Data shift value is not aligned to encryption sector size (%" PRIu32 " bytes)."),
2752
0
      sector_size);
2753
0
    return -EINVAL;
2754
0
  }
2755
2756
0
  return 0;
2757
0
}
2758
2759
static int reencrypt_verify_resilience_params(struct crypt_device *cd,
2760
    const struct crypt_params_reencrypt *params,
2761
    uint32_t sector_size, bool move_first_segment)
2762
0
{
2763
  /* no change requested */
2764
0
  if (!params || !params->resilience)
2765
0
    return 0;
2766
2767
0
  if (!strcmp(params->resilience, "journal"))
2768
0
    return (params->data_shift || move_first_segment) ? -EINVAL : 0;
2769
0
  else if (!strcmp(params->resilience, "none"))
2770
0
    return (params->data_shift || move_first_segment) ? -EINVAL : 0;
2771
0
  else if (!strcmp(params->resilience, "datashift"))
2772
0
    return reencrypt_verify_datashift_params(cd, params, sector_size);
2773
0
  else if (!strcmp(params->resilience, "checksum")) {
2774
0
    if (params->data_shift || move_first_segment)
2775
0
      return -EINVAL;
2776
0
    return reencrypt_verify_checksum_params(cd, params);
2777
0
  } else if (!strcmp(params->resilience, "datashift-checksum")) {
2778
0
    if (!move_first_segment ||
2779
0
         reencrypt_verify_datashift_params(cd, params, sector_size))
2780
0
      return -EINVAL;
2781
0
    return reencrypt_verify_checksum_params(cd, params);
2782
0
  } else if (!strcmp(params->resilience, "datashift-journal")) {
2783
0
    if (!move_first_segment)
2784
0
      return -EINVAL;
2785
0
    return reencrypt_verify_datashift_params(cd, params, sector_size);
2786
0
  }
2787
2788
0
  log_err(cd, _("Unsupported resilience mode %s"), params->resilience);
2789
0
  return -EINVAL;
2790
0
}
2791
2792
/*
 * Initialize LUKS2 decryption with data shift (moved first segment).
 *
 * @name is the active dm device name (online mode) or NULL (offline mode).
 * On success returns the reencryption keyslot id (>= 0); on failure returns
 * a negative errno value and rolls the in-memory LUKS2 metadata back.
 *
 * Ordering is critical here: all metadata changes are staged in memory and
 * the single on-disk write happens only in reencrypt_update_flag() at the
 * end, after the data move already took place.
 */
static int reencrypt_decrypt_with_datashift_init(struct crypt_device *cd,
		const char *name,
		struct luks2_hdr *hdr,
		int reencrypt_keyslot,
		uint32_t sector_size,
		uint64_t data_size,
		uint64_t data_offset,
		struct crypt_keyslot_context *kc_old,
		int keyslot_old,
		const struct crypt_params_reencrypt *params,
		struct volume_key **vks)
{
	bool clear_table = false;
	int r, devfd = -1;
	uint64_t data_shift, max_moved_segment_length, moved_segment_length;
	struct reenc_protection check_rp = {};
	struct crypt_dm_active_device dmd_target, dmd_source = {
		.uuid = crypt_get_uuid(cd),
		.flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
	};
	json_object *jobj_segments_old;

	assert(hdr);
	assert(params);
	assert(params->resilience);
	assert(params->data_shift);
	assert(vks);

	/* decryption with datashift makes no sense without a detached data area */
	if (!data_offset)
		return -EINVAL;

	if (params->max_hotzone_size > params->data_shift) {
		log_err(cd, _("Moved segment size can not be greater than data shift value."));
		return -EINVAL;
	}

	log_dbg(cd, "Initializing decryption with datashift.");

	/* params->data_shift is in 512B sectors; convert to bytes */
	data_shift = params->data_shift << SECTOR_SHIFT;

	/*
	 * In offline mode we must perform data move with exclusively opened data
	 * device in order to exclude LUKS2 decryption process and filesystem mount.
	 */
	if (name)
		devfd = device_open(cd, crypt_data_device(cd), O_RDWR);
	else
		devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
	if (devfd < 0)
		return -EINVAL;

	/* in-memory only */
	moved_segment_length = params->max_hotzone_size << SECTOR_SHIFT;
	if (!moved_segment_length)
		moved_segment_length = data_shift < LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH ?
				       data_shift : LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;

	/* moved segment can never exceed the usable data size */
	if (moved_segment_length > data_size)
		moved_segment_length = data_size;

	r = reencrypt_set_decrypt_shift_segments(cd, hdr, data_size,
						 moved_segment_length,
						 params->direction);
	if (r)
		goto out;

	/* no new digest for decryption: CRYPT_ANY_DIGEST, NULL cipher */
	r = reencrypt_make_backup_segments(cd, hdr, CRYPT_ANY_DIGEST, NULL, data_offset, params);
	if (r) {
		log_dbg(cd, "Failed to create reencryption backup device segments.");
		goto out;
	}

	r = reencrypt_verify_resilience_params(cd, params, sector_size, true);
	if (r < 0) {
		log_err(cd, _("Invalid reencryption resilience parameters."));
		goto out;
	}

	r = LUKS2_keyslot_reencrypt_allocate(cd, hdr, reencrypt_keyslot,
					     params, reencrypt_get_alignment(cd, hdr));
	if (r < 0)
		goto out;

	/* load the just-allocated protection only to learn the real hotzone limit */
	r = LUKS2_keyslot_reencrypt_load(cd, hdr, reencrypt_keyslot, &check_rp, false);
	if (r < 0)
		goto out;

	r = LUKS2_reencrypt_max_hotzone_size(cd, hdr, &check_rp,
					     reencrypt_keyslot,
					     &max_moved_segment_length);
	if (r < 0)
		goto out;

	LUKS2_reencrypt_protection_erase(&check_rp);

	if (moved_segment_length > max_moved_segment_length) {
		log_err(cd, _("Moved segment too large. Requested size %" PRIu64 ", available space for: %" PRIu64 "."),
			moved_segment_length, max_moved_segment_length);
		r = -EINVAL;
		goto out;
	}

	r = LUKS2_keyslot_context_open_all_segments(cd, keyslot_old, CRYPT_ANY_SLOT,
						    kc_old, NULL, vks);
	if (r < 0)
		goto out;

	r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, LUKS2_DECRYPT_DATASHIFT_REQ_VERSION, *vks);
	if (r < 0)
		goto out;

	if (name) {
		/* online mode: verify keys, then check the active dm table matches
		 * the current (old) segments before reloading the new mapping */
		r = reencrypt_verify_and_upload_keys(cd, hdr,
						     LUKS2_reencrypt_digest_old(hdr),
						     LUKS2_reencrypt_digest_new(hdr),
						     *vks);
		if (r)
			goto out;

		r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
				    DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
				    DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
		if (r < 0)
			goto out;

		jobj_segments_old = reencrypt_segments_old(hdr);
		if (!jobj_segments_old) {
			dm_targets_free(cd, &dmd_target);
			free(CONST_CAST(void*)dmd_target.uuid);
			r = -EINVAL;
			goto out;
		}
		r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, jobj_segments_old, &dmd_source);
		if (!r) {
			r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
			if (r)
				log_err(cd, _("Mismatching parameters on device %s."), name);
		}
		json_object_put(jobj_segments_old);

		dm_targets_free(cd, &dmd_source);
		dm_targets_free(cd, &dmd_target);
		free(CONST_CAST(void*)dmd_target.uuid);

		if (r)
			goto out;

		/* reload the dm table with the staged (new) segments */
		dmd_source.size = dmd_target.size;
		r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
		if (!r) {
			r = dm_reload_device(cd, name, &dmd_source, dmd_target.flags, 0);
			if (r)
				log_err(cd, _("Failed to reload device %s."), name);
			else
				clear_table = true;
		}

		dm_targets_free(cd, &dmd_source);

		if (r)
			goto out;
	}

	if (name) {
		/* suspend so the data move below cannot race with in-flight I/O */
		r = dm_suspend_device(cd, name, DM_SUSPEND_SKIP_LOCKFS);
		if (r) {
			log_err(cd, _("Failed to suspend device %s."), name);
			goto out;
		}
	}

	if (reencrypt_move_data(cd, devfd, data_shift, params->mode)) {
		r = -EIO;
		goto out;
	}

	/* This must be first and only write in LUKS2 metadata during _reencrypt_init */
	r = reencrypt_update_flag(cd, LUKS2_DECRYPT_DATASHIFT_REQ_VERSION, true, true);
	if (r) {
		log_dbg(cd, "Failed to set online-reencryption requirement.");
		r = -EINVAL;
	} else
		r = reencrypt_keyslot;
out:
	/* undo the preloaded dm table on failure, resume it on success */
	if (r < 0 && clear_table && dm_clear_device(cd, name))
		log_err(cd, _("Failed to clear table."));
	else if (clear_table && dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS))
		log_err(cd, _("Failed to resume device %s."), name);

	/* NOTE(review): devfd itself is not closed here — presumably owned/cached
	 * by the device layer and released via device_release_excl(); confirm. */
	device_release_excl(cd, crypt_data_device(cd));
	if (r < 0 && LUKS2_hdr_rollback(cd, hdr) < 0)
		log_dbg(cd, "Failed to rollback LUKS2 metadata after failure.");

	return r;
}
2987
2988
/* This function must be called with metadata lock held */
2989
/*
 * Initialize LUKS2 reencryption (encrypt / reencrypt / decrypt) metadata.
 * Must be called with the metadata lock held (see comment above).
 *
 * @name: active dm device name for online mode, NULL for offline.
 * @cipher/@cipher_mode: target cipher spec (required unless decrypting).
 * On success returns the allocated reencryption keyslot id (>= 0); on
 * failure a negative errno value, with in-memory metadata rolled back.
 *
 * All metadata edits are staged in memory; the single on-disk write is
 * reencrypt_update_flag() at the end.
 */
static int reencrypt_init(struct crypt_device *cd,
		const char *name,
		struct luks2_hdr *hdr,
		struct crypt_keyslot_context *kc_old,
		struct crypt_keyslot_context *kc_new,
		int keyslot_old,
		int keyslot_new,
		const char *cipher,
		const char *cipher_mode,
		const struct crypt_params_reencrypt *params,
		struct volume_key **vks)
{
	bool move_first_segment;
	char _cipher[128];
	uint32_t check_sector_size, new_sector_size, old_sector_size;
	int digest_new, r, reencrypt_keyslot, devfd = -1;
	uint64_t data_offset_bytes, data_size_bytes, data_shift_bytes, device_size_bytes;
	struct volume_key *vk;
	struct crypt_dm_active_device dmd_target, dmd_source = {
		.uuid = crypt_get_uuid(cd),
		.flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
	};

	assert(cd);
	assert(hdr);

	if (!params || !params->resilience || params->mode > CRYPT_REENCRYPT_DECRYPT)
		return -EINVAL;

	/* unless decrypting we need luks2 params, a cipher spec, and either a new
	 * keyslot or permission to create a fresh digest */
	if (params->mode != CRYPT_REENCRYPT_DECRYPT &&
	    (!params->luks2 || !(cipher && cipher_mode) ||
	     (keyslot_new < 0 && !(params->flags & CRYPT_REENCRYPT_CREATE_NEW_DIGEST))))
		return -EINVAL;

	log_dbg(cd, "Initializing reencryption (mode: %s) in LUKS2 metadata.",
		    crypt_reencrypt_mode_to_str(params->mode));

	move_first_segment = (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT);

	old_sector_size = LUKS2_get_sector_size(hdr);

	/* implicit sector size 512 for decryption */
	new_sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
	if (new_sector_size < SECTOR_SIZE || new_sector_size > MAX_SECTOR_SIZE ||
	    NOTPOW2(new_sector_size)) {
		log_err(cd, _("Unsupported encryption sector size."));
		return -EINVAL;
	}
	/* check the larger encryption sector size only */
	check_sector_size = new_sector_size > old_sector_size ? new_sector_size : old_sector_size;

	/* build "cipher" or "cipher-mode" string, rejecting truncation */
	if (!cipher_mode || *cipher_mode == '\0')
		r = snprintf(_cipher, sizeof(_cipher), "%s", cipher);
	else
		r = snprintf(_cipher, sizeof(_cipher), "%s-%s", cipher, cipher_mode);
	if (r < 0 || (size_t)r >= sizeof(_cipher))
		return -EINVAL;

	data_offset_bytes = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;

	r = device_check_access(cd, crypt_data_device(cd), DEV_OK);
	if (r)
		return r;

	r = device_check_size(cd, crypt_data_device(cd), data_offset_bytes, 1);
	if (r)
		return r;

	r = device_size(crypt_data_device(cd), &device_size_bytes);
	if (r)
		return r;

	/* for encryption with moved segment the shift must cover the future header */
	if (move_first_segment && params->mode == CRYPT_REENCRYPT_ENCRYPT &&
	    params->data_shift < LUKS2_get_data_offset(hdr)) {
		log_err(cd, _("Data shift (%" PRIu64 " sectors) is less than future data offset (%" PRIu64 " sectors)."),
			params->data_shift, LUKS2_get_data_offset(hdr));
		return -EINVAL;
	}

	/* from here on device_size_bytes is the usable data area size */
	device_size_bytes -= data_offset_bytes;
	data_shift_bytes = params->data_shift << SECTOR_SHIFT;
	data_size_bytes = params->device_size << SECTOR_SHIFT;

	if (device_size_bytes < data_shift_bytes && params->direction == CRYPT_REENCRYPT_BACKWARD) {
		log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
		return -EINVAL;
	}

	if (data_size_bytes > device_size_bytes) {
		log_err(cd, _("Reduced data size is larger than real device size."));
		return -EINVAL;
	}

	/* resolve the effective data size: explicit reduced size must also leave
	 * room for the shift; otherwise default to whole (shifted) data area */
	if (data_size_bytes && params->mode == CRYPT_REENCRYPT_ENCRYPT &&
	    move_first_segment && data_shift_bytes) {
		if (data_size_bytes > device_size_bytes - data_shift_bytes) {
			log_err(cd, _("Reduced data size is larger than real device size."));
			return -EINVAL;
		}
	} else if (!data_size_bytes && params->mode == CRYPT_REENCRYPT_ENCRYPT &&
	    move_first_segment && data_shift_bytes)
		data_size_bytes = device_size_bytes - data_shift_bytes;
	else if (!data_size_bytes)
		data_size_bytes = device_size_bytes;

	if (MISALIGNED(data_size_bytes, check_sector_size)) {
		log_err(cd, _("Data device is not aligned to encryption sector size (%" PRIu32 " bytes)."), check_sector_size);
		return -EINVAL;
	}

	reencrypt_keyslot = LUKS2_keyslot_find_empty(cd, hdr, 0);
	if (reencrypt_keyslot < 0) {
		log_err(cd, _("All key slots full."));
		return -EINVAL;
	}

	/* decryption with datashift has its own dedicated init path */
	if (params->mode == CRYPT_REENCRYPT_DECRYPT && data_shift_bytes && move_first_segment)
		return reencrypt_decrypt_with_datashift_init(cd, name, hdr,
							     reencrypt_keyslot,
							     check_sector_size,
							     data_size_bytes,
							     data_offset_bytes,
							     kc_old,
							     keyslot_old,
							     params,
							     vks);

	/*
	 * We must perform data move with exclusive open data device
	 * to exclude another cryptsetup process to colide with
	 * encryption initialization (or mount)
	 */
	if (move_first_segment) {
		devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
		if (devfd < 0) {
			if (devfd == -EBUSY)
				log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."),
					device_path(crypt_data_device(cd)));
			return -EINVAL;
		}
	}

	if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
		/* in-memory only */
		r = reencrypt_set_encrypt_segments(cd, hdr, device_size_bytes, data_size_bytes,
						   data_shift_bytes,
						   move_first_segment,
						   params->direction);
		if (r)
			goto out;
	}

	if (params->flags & CRYPT_REENCRYPT_CREATE_NEW_DIGEST) {
		assert(kc_new->get_luks2_key);
		r = kc_new->get_luks2_key(cd, kc_new, CRYPT_ANY_SLOT, CRYPT_ANY_SEGMENT, &vk);
		if (r < 0)
			goto out;

		/* do not create new digest in case it matches the current one */
		r = LUKS2_digest_verify_by_segment(cd, hdr, CRYPT_DEFAULT_SEGMENT, vk);
		if (r == -EPERM || r == -ENOENT)
			r = LUKS2_digest_create(cd, "pbkdf2", hdr, vk);

		crypt_free_volume_key(vk);
		if (r < 0)
			goto out;
		digest_new = r;
	} else
		digest_new = LUKS2_digest_by_keyslot(hdr, keyslot_new);

	r = reencrypt_make_backup_segments(cd, hdr, digest_new, _cipher, data_offset_bytes, params);
	if (r) {
		log_dbg(cd, "Failed to create reencryption backup device segments.");
		goto out;
	}

	r = reencrypt_verify_resilience_params(cd, params, check_sector_size, move_first_segment);
	if (r < 0)
		goto out;

	r = LUKS2_keyslot_reencrypt_allocate(cd, hdr, reencrypt_keyslot, params,
			reencrypt_get_alignment(cd, hdr));
	if (r < 0)
		goto out;

	r = LUKS2_keyslot_context_open_all_segments(cd, keyslot_old, keyslot_new, kc_old, kc_new, vks);
	if (r < 0)
		goto out;

	r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, LUKS2_REENCRYPT_REQ_VERSION, *vks);
	if (r < 0)
		goto out;

	if (name && params->mode != CRYPT_REENCRYPT_ENCRYPT) {
		/* online mode: make sure the active dm table matches current metadata */
		r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
		if (r)
			goto out;

		r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
				    DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
				    DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
		if (r < 0)
			goto out;

		r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
		if (!r) {
			r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
			if (r)
				log_err(cd, _("Mismatching parameters on device %s."), name);
		}

		dm_targets_free(cd, &dmd_source);
		dm_targets_free(cd, &dmd_target);
		free(CONST_CAST(void*)dmd_target.uuid);

		if (r)
			goto out;
	}

	if (move_first_segment && reencrypt_move_data(cd, devfd, data_shift_bytes, params->mode)) {
		r = -EIO;
		goto out;
	}

	/* This must be first and only write in LUKS2 metadata during reencrypt_init */
	r = reencrypt_update_flag(cd, LUKS2_REENCRYPT_REQ_VERSION, true, true);
	if (r) {
		log_dbg(cd, "Failed to set online-reencryption requirement.");
		r = -EINVAL;
	} else
		r = reencrypt_keyslot;
out:
	/* NOTE(review): devfd is not closed explicitly — presumably owned by the
	 * device layer and released via device_release_excl(); confirm. */
	device_release_excl(cd, crypt_data_device(cd));
	if (r < 0 && LUKS2_hdr_rollback(cd, hdr) < 0)
		log_dbg(cd, "Failed to rollback LUKS2 metadata after failure.");

	return r;
}
3227
3228
/*
 * Persist resilience (crash-protection) data for the current hotzone before
 * it gets overwritten:
 *  - NONE:      nothing to store.
 *  - CHECKSUM:  per-block digests of the hotzone data, stored in the keyslot.
 *  - JOURNAL:   the whole hotzone plaintext buffer, stored in the keyslot.
 *  - DATASHIFT: protection is implicit in segment offsets, so just write the
 *               LUKS2 header.
 * Returns 0 on success, negative errno on failure.
 */
static int reencrypt_hotzone_protect_final(struct crypt_device *cd,
	struct luks2_hdr *hdr, int reencrypt_keyslot,
	const struct reenc_protection *rp,
	const void *buffer, size_t buffer_len)
{
	const void *pbuffer;
	size_t data_offset, len;
	int r;

	assert(hdr);
	assert(rp);

	if (rp->type == REENC_PROTECTION_NONE)
		return 0;

	if (rp->type == REENC_PROTECTION_CHECKSUM) {
		log_dbg(cd, "Checksums hotzone resilience.");

		/* hash the hotzone block by block; checksums buffer advances by
		 * hash_size per block (assumes buffer_len is a multiple of
		 * block_size and checksums is sized accordingly — enforced at
		 * protection setup, not re-checked here) */
		for (data_offset = 0, len = 0; data_offset < buffer_len; data_offset += rp->p.csum.block_size, len += rp->p.csum.hash_size) {
			if (crypt_hash_write(rp->p.csum.ch, (const char *)buffer + data_offset, rp->p.csum.block_size)) {
				log_dbg(cd, "Failed to hash sector at offset %zu.", data_offset);
				return -EINVAL;
			}
			if (crypt_hash_final(rp->p.csum.ch, (char *)rp->p.csum.checksums + len, rp->p.csum.hash_size)) {
				log_dbg(cd, "Failed to finalize hash.");
				return -EINVAL;
			}
		}
		pbuffer = rp->p.csum.checksums;
	} else if (rp->type == REENC_PROTECTION_JOURNAL) {
		log_dbg(cd, "Journal hotzone resilience.");
		len = buffer_len;
		pbuffer = buffer;
	} else if (rp->type == REENC_PROTECTION_DATASHIFT) {
		log_dbg(cd, "Data shift hotzone resilience.");
		return LUKS2_hdr_write(cd, hdr);
	} else
		return -EINVAL;

	log_dbg(cd, "Going to store %zu bytes in reencrypt keyslot.", len);

	r = LUKS2_keyslot_reencrypt_store(cd, hdr, reencrypt_keyslot, pbuffer, len);

	/* store returns the number of bytes written; normalize success to 0 */
	return r > 0 ? 0 : r;
}
3273
3274
/*
 * Advance the reencryption context window after one hotzone was processed.
 * rh->read is the byte count consumed from the last hotzone (negative on
 * I/O error). Updates rh->offset / rh->length for the next window and
 * accumulates rh->progress. Returns 0 on success, -EINVAL on inconsistent
 * state (bad read, unknown direction, offset beyond device size).
 */
static int reencrypt_context_update(struct crypt_device *cd,
	struct luks2_reencrypt *rh)
{
	if (rh->read < 0)
		return -EINVAL;

	if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
		if (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
			/* datashift encryption: window moves down by the fixed shift */
			if (rh->offset)
				rh->offset -= data_shift_value(&rh->rp);
			/* shorten the next-to-last window so the final window starts
			 * exactly at the data-shift boundary */
			if (rh->offset && (rh->offset < data_shift_value(&rh->rp))) {
				rh->length = rh->offset;
				rh->offset = data_shift_value(&rh->rp);
			}
			/* final window covers exactly the shifted area */
			if (!rh->offset)
				rh->length = data_shift_value(&rh->rp);
		} else {
			/* generic backward: clamp window to remaining space, move down */
			if (rh->offset < rh->length)
				rh->length = rh->offset;
			rh->offset -= rh->length;
		}
	} else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
		rh->offset += (uint64_t)rh->read;
		/* decryption with a moved first segment: after reaching device end,
		 * wrap around to process the moved segment at offset 0 */
		if (rh->device_size == rh->offset &&
		    rh->jobj_segment_moved &&
		    rh->mode == CRYPT_REENCRYPT_DECRYPT &&
		    rh->rp.type == REENC_PROTECTION_DATASHIFT) {
			rh->offset = 0;
			rh->length = json_segment_get_size(rh->jobj_segment_moved, 0);
		}
		/* it fails in-case of device_size < rh->offset later */
		else if (rh->device_size - rh->offset < rh->length)
			rh->length = rh->device_size - rh->offset;
	} else
		return -EINVAL;

	if (rh->device_size < rh->offset) {
		log_dbg(cd, "Calculated reencryption offset %" PRIu64 " is beyond device size %" PRIu64 ".", rh->offset, rh->device_size);
		return -EINVAL;
	}

	rh->progress += (uint64_t)rh->read;

	return 0;
}
3319
3320
static int reencrypt_load(struct crypt_device *cd, struct luks2_hdr *hdr,
3321
    uint64_t device_size,
3322
    uint64_t max_hotzone_size,
3323
    uint64_t required_device_size,
3324
    struct volume_key *vks,
3325
    struct luks2_reencrypt **rh)
3326
0
{
3327
0
  int r;
3328
0
  struct luks2_reencrypt *tmp = NULL;
3329
0
  crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
3330
3331
0
  if (ri == CRYPT_REENCRYPT_NONE) {
3332
0
    log_err(cd, _("Device not marked for LUKS2 reencryption."));
3333
0
    return -EINVAL;
3334
0
  } else if (ri == CRYPT_REENCRYPT_INVALID)
3335
0
    return -EINVAL;
3336
3337
0
  r = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
3338
0
  if (r < 0)
3339
0
    return r;
3340
3341
0
  if (ri == CRYPT_REENCRYPT_CLEAN)
3342
0
    r = reencrypt_load_clean(cd, hdr, device_size, max_hotzone_size, required_device_size, &tmp);
3343
0
  else if (ri == CRYPT_REENCRYPT_CRASH)
3344
0
    r = reencrypt_load_crashed(cd, hdr, device_size, &tmp);
3345
0
  else
3346
0
    r = -EINVAL;
3347
3348
0
  if (r < 0 || !tmp) {
3349
0
    log_err(cd, _("Failed to load LUKS2 reencryption context."));
3350
0
    return r < 0 ? r : -EINVAL;
3351
0
  }
3352
3353
0
  *rh = tmp;
3354
3355
0
  return 0;
3356
0
}
3357
#else
3358
/* Stub used when the library is built without reencryption support
 * (USE_LUKS2_REENCRYPTION disabled): always reports "not supported". */
int LUKS2_reencrypt_max_hotzone_size(struct crypt_device *cd __attribute__((unused)),
	struct luks2_hdr *hdr __attribute__((unused)),
	const struct reenc_protection *rp __attribute__((unused)),
	int reencrypt_keyslot __attribute__((unused)),
	uint64_t *r_length __attribute__((unused)))
{
	return -ENOTSUP;
}
3366
#endif
3367
3368
static int reencrypt_lock_internal(struct crypt_device *cd, const char *uuid, struct crypt_lock_handle **reencrypt_lock)
3369
0
{
3370
0
  int r;
3371
0
  char *lock_resource;
3372
3373
0
  if (!crypt_metadata_locking_enabled()) {
3374
0
    *reencrypt_lock = NULL;
3375
0
    return 0;
3376
0
  }
3377
3378
0
  r = asprintf(&lock_resource, "LUKS2-reencryption-%s", uuid);
3379
0
  if (r < 0)
3380
0
    return -ENOMEM;
3381
0
  if (r < 20) {
3382
0
    free(lock_resource);
3383
0
    return -EINVAL;
3384
0
  }
3385
3386
0
  r = crypt_write_lock(cd, lock_resource, false, reencrypt_lock);
3387
3388
0
  free(lock_resource);
3389
3390
0
  return r;
3391
0
}
3392
3393
/* internal only */
3394
/*
 * Take the reencryption lock using the device-mapper UUID as fallback
 * identity. If the LUKS2 header UUID is not available (header not loaded),
 * reconstruct the 36-character header UUID from the dm UUID layout
 * ("CRYPT-LUKS2-<32 hex chars>-<name>": hex starts at offset 6, dashes
 * reinserted at the standard 8-4-4-4-12 positions). If the header UUID is
 * available, it must match the dm UUID instead.
 *
 * Fix: the reconstructed hdr_uuid was previously never used — when uuid was
 * NULL, reencrypt_lock_internal() was called with a NULL uuid, yielding a
 * bogus shared lock resource name. Use hdr_uuid in that case.
 */
int LUKS2_reencrypt_lock_by_dm_uuid(struct crypt_device *cd, const char *dm_uuid,
	struct crypt_lock_handle **reencrypt_lock)
{
	int r;
	char hdr_uuid[37]; /* 36-char UUID string plus '\0' */
	const char *uuid = crypt_get_uuid(cd);

	if (!dm_uuid)
		return -EINVAL;

	if (!uuid) {
		r = snprintf(hdr_uuid, sizeof(hdr_uuid), "%.8s-%.4s-%.4s-%.4s-%.12s",
		   dm_uuid + 6, dm_uuid + 14, dm_uuid + 18, dm_uuid + 22, dm_uuid + 26);
		if (r < 0 || (size_t)r != (sizeof(hdr_uuid) - 1))
			return -EINVAL;
		uuid = hdr_uuid;
	} else if (dm_uuid_cmp(dm_uuid, uuid))
		return -EINVAL;

	return reencrypt_lock_internal(cd, uuid, reencrypt_lock);
}
3414
3415
/* internal only */
3416
int LUKS2_reencrypt_lock(struct crypt_device *cd, struct crypt_lock_handle **reencrypt_lock)
3417
0
{
3418
0
  if (!cd || !crypt_get_type(cd) || strcmp(crypt_get_type(cd), CRYPT_LUKS2))
3419
0
    return -EINVAL;
3420
3421
0
  return reencrypt_lock_internal(cd, crypt_get_uuid(cd), reencrypt_lock);
3422
0
}
3423
3424
/* internal only */
3425
/* Release a reencryption lock taken by LUKS2_reencrypt_lock*().
 * NOTE(review): presumably safe with a NULL handle (the handle is NULL when
 * metadata locking is disabled) — confirm crypt_unlock_internal() tolerates it. */
void LUKS2_reencrypt_unlock(struct crypt_device *cd, struct crypt_lock_handle *reencrypt_lock)
{
	crypt_unlock_internal(cd, reencrypt_lock);
}
3429
#if USE_LUKS2_REENCRYPTION
3430
/*
 * Acquire the reencryption lock and verify the device is in a clean
 * reencryption state. Because the state may change between the initial check
 * and lock acquisition, metadata is re-read after the lock is held and the
 * status re-checked. On success *reencrypt_lock holds the lock (caller must
 * release it); on any failure the lock is released and a negative errno is
 * returned.
 */
static int reencrypt_lock_and_verify(struct crypt_device *cd, struct luks2_hdr *hdr,
		struct crypt_lock_handle **reencrypt_lock)
{
	int r;
	crypt_reencrypt_info ri;
	struct crypt_lock_handle *h;

	/* cheap pre-check before taking the lock */
	ri = LUKS2_reencrypt_status(hdr);
	if (ri == CRYPT_REENCRYPT_INVALID)
		return -EINVAL;
	if (ri < CRYPT_REENCRYPT_CLEAN) {
		log_err(cd, _("Device is not in reencryption."));
		return -EINVAL;
	}

	r = LUKS2_reencrypt_lock(cd, &h);
	if (r < 0) {
		if (r == -EBUSY)
			log_err(cd, _("Reencryption process is already running."));
		else
			log_err(cd, _("Failed to acquire reencryption lock."));
		return r;
	}

	/* With reencryption lock held, reload device context and verify metadata state */
	r = crypt_load(cd, CRYPT_LUKS2, NULL);
	if (r) {
		LUKS2_reencrypt_unlock(cd, h);
		return r;
	}

	ri = LUKS2_reencrypt_status(hdr);
	if (ri == CRYPT_REENCRYPT_CLEAN) {
		*reencrypt_lock = h;
		return 0;
	}

	/* state changed under us (e.g. crashed reencryption detected) */
	LUKS2_reencrypt_unlock(cd, h);
	log_err(cd, _("Cannot proceed with reencryption. Run reencryption recovery first."));
	return -EINVAL;
}
3471
3472
/*
 * Load the runtime reencryption context for an already initialized (in
 * metadata) LUKS2 reencryption and attach it to the crypt_device handle.
 *
 * @name         active dm device name for online reencryption, NULL for offline
 * @kc_old/@kc_new, @keyslot_old/@keyslot_new  credentials used to unlock
 *               old/new volume keys when *vks does not already hold them
 * @vks          in/out set of volume keys; ownership moves into the context
 *               on success
 * @params       optional user parameters (resilience, device/hotzone sizes)
 *
 * On success the function holds the reencryption lock inside the stored
 * context (released by LUKS2_reencrypt_free()); on failure the lock and any
 * partially built context are released here. Returns 0 or negative errno.
 */
static int reencrypt_load_by_keyslot_context(struct crypt_device *cd,
    const char *name,
    struct crypt_keyslot_context *kc_old,
    struct crypt_keyslot_context *kc_new,
    int keyslot_old,
    int keyslot_new,
    struct volume_key **vks,
    const struct crypt_params_reencrypt *params)
{
  int r, reencrypt_slot;
  struct luks2_hdr *hdr;
  struct crypt_lock_handle *reencrypt_lock;
  struct luks2_reencrypt *rh;
  const struct volume_key *vk;
  size_t alignment;
  uint32_t old_sector_size, new_sector_size, sector_size;
  struct crypt_dm_active_device dmd_target, dmd_source = {
    .uuid = crypt_get_uuid(cd),
    .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
  };
  uint64_t minimal_size, device_size, mapping_size = 0, required_size = 0,
     max_hotzone_size = 0;
  bool dynamic;
  uint32_t flags = 0;

  assert(cd);

  hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
  if (!hdr)
    return -EINVAL;

  log_dbg(cd, "Loading LUKS2 reencryption context.");

  /* Resilience parameters must work with the larger of the two sector sizes. */
  old_sector_size = reencrypt_get_sector_size_old(hdr);
  new_sector_size = reencrypt_get_sector_size_new(hdr);
  sector_size = new_sector_size > old_sector_size ? new_sector_size : old_sector_size;

  r = reencrypt_verify_resilience_params(cd, params, sector_size,
                 LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0);
  if (r < 0)
    return r;

  if (params) {
    required_size = params->device_size;
    max_hotzone_size = params->max_hotzone_size;
  }

  /* Drop any stale context left on the handle from a previous run. */
  rh = crypt_get_luks2_reencrypt(cd);
  if (rh) {
    LUKS2_reencrypt_free(cd, rh);
    crypt_set_luks2_reencrypt(cd, NULL);
    rh = NULL;
  }

  r = reencrypt_lock_and_verify(cd, hdr, &reencrypt_lock);
  if (r)
    return r;

  reencrypt_slot = LUKS2_find_keyslot(hdr, "reencrypt");
  if (reencrypt_slot < 0) {
    r = -EINVAL;
    goto err;
  }

  /* From now on we hold reencryption lock */

  if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic)) {
    r = -EINVAL;
    goto err;
  }

  /* some configurations provides fixed device size */
  r = LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, false, dynamic);
  if (r) {
    r = -EINVAL;
    goto err;
  }

  minimal_size >>= SECTOR_SHIFT;

  /* Make sure both old and new volume keys are available, unlocking if needed. */
  r = reencrypt_verify_keys(cd, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
  if (r == -ENOENT) {
    log_dbg(cd, "Keys are not ready. Unlocking all volume keys.");
    r = LUKS2_keyslot_context_open_all_segments(cd, keyslot_old, keyslot_new,
                  kc_old, kc_new, vks);
  }

  if (r < 0)
    goto err;

  if (name) {
    /* Online path: verify existing dm mapping matches metadata exactly. */
    r = reencrypt_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
    if (r < 0)
      goto err;

    r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
            DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
            DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
    if (r < 0)
      goto err;
    flags = dmd_target.flags;

    /*
     * By default reencryption code aims to retain flags from existing dm device.
     * The keyring activation flag can not be inherited if original cipher is null.
     *
     * In this case override the flag based on decision made in reencrypt_upload_keys
     * above. The code checks if new VK is eligible for keyring.
     */
    vk = crypt_volume_key_by_id(*vks, LUKS2_reencrypt_digest_new(hdr));
    if (vk && crypt_volume_key_description(vk) && crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr))) {
      flags |= CRYPT_ACTIVATE_KEYRING_KEY;
      dmd_source.flags |= CRYPT_ACTIVATE_KEYRING_KEY;
    }

    /* Build expected dm tables from metadata and compare with the live device. */
    r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
    if (!r) {
      r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
      if (r)
        log_err(cd, _("Mismatching parameters on device %s."), name);
    }

    dm_targets_free(cd, &dmd_source);
    dm_targets_free(cd, &dmd_target);
    free(CONST_CAST(void*)dmd_target.uuid);
    if (r)
      goto err;
    mapping_size = dmd_target.size;
  }

  r = -EINVAL;
  if (required_size && mapping_size && (required_size != mapping_size)) {
    log_err(cd, _("Active device size and requested reencryption size don't match."));
    goto err;
  }

  /* The active mapping size wins over a user-requested size. */
  if (mapping_size)
    required_size = mapping_size;

  if (required_size) {
    /* TODO: Add support for changing fixed minimal size in reencryption mda where possible */
    if ((minimal_size && (required_size < minimal_size)) ||
        (required_size > (device_size >> SECTOR_SHIFT)) ||
        (!dynamic && (required_size != minimal_size)) ||
        (old_sector_size > 0 && MISALIGNED(required_size, old_sector_size >> SECTOR_SHIFT)) ||
        (new_sector_size > 0 && MISALIGNED(required_size, new_sector_size >> SECTOR_SHIFT))) {
      log_err(cd, _("Illegal device size requested in reencryption parameters."));
      goto err;
    }
  }

  alignment = reencrypt_get_alignment(cd, hdr);

  /* Persist changed resilience/alignment parameters in the reencrypt keyslot. */
  r = LUKS2_keyslot_reencrypt_update_needed(cd, hdr, reencrypt_slot, params, alignment);
  if (r > 0) /* metadata update needed */
    r = LUKS2_keyslot_reencrypt_update(cd, hdr, reencrypt_slot, params, alignment, *vks);
  if (r < 0)
    goto err;

  r = reencrypt_load(cd, hdr, device_size, max_hotzone_size, required_size, *vks, &rh);
  if (r < 0 || !rh)
    goto err;

  if (name && (r = reencrypt_context_set_names(rh, name)))
    goto err;

  /* Reassure device is not mounted and there's no dm mapping active */
  if (!name && (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0)) {
    log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
    r = -EBUSY;
    goto err;
  }
  device_release_excl(cd, crypt_data_device(cd));

  /* There's a race for dm device activation not managed by cryptsetup.
   *
   * 1) excl close
   * 2) rogue dm device activation
   * 3) one or more dm-crypt based wrapper activation
   * 4) next excl open gets skipped due to 3) device from 2) remains undetected.
   */
  r = reencrypt_init_storage_wrappers(cd, hdr, rh, *vks);
  if (r)
    goto err;

  /* If one of wrappers is based on dmcrypt fallback it already blocked mount */
  if (!name && crypt_storage_wrapper_get_type(rh->cw1) != DMCRYPT &&
      crypt_storage_wrapper_get_type(rh->cw2) != DMCRYPT) {
    if (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0) {
      log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
      r = -EBUSY;
      goto err;
    }
  }

  rh->flags = flags;

  /* Ownership of keys and lock moves into the context. */
  MOVE_REF(rh->vks, *vks);
  MOVE_REF(rh->reenc_lock, reencrypt_lock);

  crypt_set_luks2_reencrypt(cd, rh);

  return 0;
err:
  LUKS2_reencrypt_unlock(cd, reencrypt_lock);
  LUKS2_reencrypt_free(cd, rh);
  return r;
}
3680
3681
static int reencrypt_locked_recovery(struct crypt_device *cd,
3682
  int keyslot_old,
3683
  int keyslot_new,
3684
  struct crypt_keyslot_context *kc_old,
3685
  struct crypt_keyslot_context *kc_new,
3686
  struct volume_key **r_vks)
3687
0
{
3688
0
  int keyslot, r = -EINVAL;
3689
0
  struct volume_key *_vks = NULL;
3690
3691
0
  r = LUKS2_keyslot_context_open_all_segments(cd, keyslot_old, keyslot_new,
3692
0
                kc_old, kc_new, &_vks);
3693
0
  if (r < 0)
3694
0
    return r;
3695
0
  keyslot = r;
3696
3697
0
  r = LUKS2_reencrypt_locked_recovery_by_vks(cd, _vks);
3698
0
  if (!r && r_vks)
3699
0
    MOVE_REF(*r_vks, _vks);
3700
3701
0
  crypt_free_volume_key(_vks);
3702
3703
0
  return r < 0 ? r : keyslot;
3704
0
}
3705
3706
static int reencrypt_recovery_by_keyslot_context(struct crypt_device *cd,
3707
  struct luks2_hdr *hdr,
3708
  int keyslot_old,
3709
  int keyslot_new,
3710
  struct crypt_keyslot_context *kc_old,
3711
  struct crypt_keyslot_context *kc_new)
3712
0
{
3713
0
  int r;
3714
0
  crypt_reencrypt_info ri;
3715
0
  struct crypt_lock_handle *reencrypt_lock;
3716
3717
0
  r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
3718
0
  if (r) {
3719
0
    if (r == -EBUSY)
3720
0
      log_err(cd, _("Reencryption in-progress. Cannot perform recovery."));
3721
0
    else
3722
0
      log_err(cd, _("Failed to get reencryption lock."));
3723
0
    return r;
3724
0
  }
3725
3726
0
  if ((r = crypt_load(cd, CRYPT_LUKS2, NULL))) {
3727
0
    LUKS2_reencrypt_unlock(cd, reencrypt_lock);
3728
0
    return r;
3729
0
  }
3730
3731
0
  ri = LUKS2_reencrypt_status(hdr);
3732
0
  if (ri == CRYPT_REENCRYPT_INVALID) {
3733
0
    LUKS2_reencrypt_unlock(cd, reencrypt_lock);
3734
0
    return -EINVAL;
3735
0
  }
3736
3737
0
  if (ri == CRYPT_REENCRYPT_CRASH) {
3738
0
    r = reencrypt_locked_recovery(cd, keyslot_old, keyslot_new,
3739
0
                kc_old, kc_new, NULL);
3740
0
    if (r < 0)
3741
0
      log_err(cd, _("LUKS2 reencryption recovery failed."));
3742
0
  } else {
3743
0
    log_dbg(cd, "No LUKS2 reencryption recovery needed.");
3744
0
    r = 0;
3745
0
  }
3746
3747
0
  LUKS2_reencrypt_unlock(cd, reencrypt_lock);
3748
0
  return r;
3749
0
}
3750
3751
/*
 * Repair reencryption metadata: recompute the reencrypt keyslot digest with
 * the appropriate requirement version and rewrite the on-disk requirement
 * flag. Used when metadata is structurally valid but the digest/flag needs to
 * be regenerated (CRYPT_REENCRYPT_REPAIR_NEEDED path).
 * Returns 0 or negative errno.
 */
static int reencrypt_repair(
    struct crypt_device *cd,
    struct luks2_hdr *hdr,
    int keyslot_old,
    int keyslot_new,
    struct crypt_keyslot_context *kc_old,
    struct crypt_keyslot_context *kc_new)
{
  int r;
  struct crypt_lock_handle *reencrypt_lock;
  struct luks2_reencrypt *rh;
  crypt_reencrypt_info ri;
  uint8_t requirement_version;
  const char *resilience;
  struct volume_key *vks = NULL;

  log_dbg(cd, "Loading LUKS2 reencryption context for metadata repair.");

  /* Drop any stale runtime context before touching metadata. */
  rh = crypt_get_luks2_reencrypt(cd);
  if (rh) {
    LUKS2_reencrypt_free(cd, rh);
    crypt_set_luks2_reencrypt(cd, NULL);
    rh = NULL;
  }

  /* Pre-lock sanity check. */
  ri = LUKS2_reencrypt_status(hdr);
  if (ri == CRYPT_REENCRYPT_INVALID)
    return -EINVAL;

  if (ri < CRYPT_REENCRYPT_CLEAN) {
    log_err(cd, _("Device is not in reencryption."));
    return -EINVAL;
  }

  r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
  if (r < 0) {
    if (r == -EBUSY)
      log_err(cd, _("Reencryption process is already running."));
    else
      log_err(cd, _("Failed to acquire reencryption lock."));
    return r;
  }

  /* With reencryption lock held, reload device context and verify metadata state */
  r = crypt_load(cd, CRYPT_LUKS2, NULL);
  if (r)
    goto out;

  /* Re-check after reload; state may have changed under us. */
  ri = LUKS2_reencrypt_status(hdr);
  if (ri == CRYPT_REENCRYPT_INVALID) {
    r = -EINVAL;
    goto out;
  }
  if (ri == CRYPT_REENCRYPT_NONE) {
    /* Nothing left to repair. */
    r = 0;
    goto out;
  }

  resilience = reencrypt_resilience_type(hdr);
  if (!resilience) {
    r = -EINVAL;
    goto out;
  }

  /*
   * Datashift decryption with a moved segment uses its own (newer)
   * requirement version; everything else uses the base reencrypt version.
   */
  if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_DECRYPT &&
      !strncmp(resilience, "datashift-", 10) &&
      LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
    requirement_version = LUKS2_DECRYPT_DATASHIFT_REQ_VERSION;
  else
    requirement_version = LUKS2_REENCRYPT_REQ_VERSION;

  r = LUKS2_keyslot_context_open_all_segments(cd, keyslot_old, keyslot_new, kc_old, kc_new, &vks);
  if (r < 0)
    goto out;

  /* Recreate the reencrypt keyslot digest from the unlocked volume keys. */
  r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, requirement_version, vks);
  crypt_free_volume_key(vks);
  vks = NULL;
  if (r < 0)
    goto out;

  /* replaces old online-reencrypt flag with updated version and commits metadata */
  r = reencrypt_update_flag(cd, requirement_version, true, true);
out:
  LUKS2_reencrypt_unlock(cd, reencrypt_lock);
  crypt_free_volume_key(vks);
  return r;

}
3840
3841
/*
 * Common entry point for all public reencryption-init APIs.
 *
 * Dispatches the special flag-driven modes first (metadata repair, crash
 * recovery), validates the cipher/key-size combination, then under the
 * metadata write lock either initializes reencryption metadata or resumes an
 * already-initialized one, and finally loads the runtime context (unless
 * CRYPT_REENCRYPT_INITIALIZE_ONLY was requested).
 *
 * Returns the reencrypt keyslot number on success, negative errno on error.
 */
static int reencrypt_init_by_keyslot_context(struct crypt_device *cd,
  const char *name,
  struct crypt_keyslot_context *kc_old,
  struct crypt_keyslot_context *kc_new,
  int keyslot_old,
  int keyslot_new,
  const char *cipher,
  const char *cipher_mode,
  const struct crypt_params_reencrypt *params)
{
  int r;
  crypt_reencrypt_info ri;
  size_t key_length;
  struct volume_key *vks = NULL;
  uint32_t flags = params ? params->flags : 0;
  struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);

  /* New-digest creation requires a key-providing context and is incompatible with resume-only. */
  if (params && (params->flags & CRYPT_REENCRYPT_CREATE_NEW_DIGEST) &&
      (!kc_new || !kc_new->get_luks2_key || !kc_new->get_key_size ||
       (params->flags & CRYPT_REENCRYPT_RESUME_ONLY)))
    return -EINVAL;

  /* short-circuit in reencryption metadata update and finish immediately. */
  if (flags & CRYPT_REENCRYPT_REPAIR_NEEDED)
    return reencrypt_repair(cd, hdr, keyslot_old, keyslot_new, kc_old, kc_new);

  /* short-circuit in recovery and finish immediately. */
  if (flags & CRYPT_REENCRYPT_RECOVERY)
    return reencrypt_recovery_by_keyslot_context(cd, hdr, keyslot_old, keyslot_new, kc_old, kc_new);

  /* Online reencryption requires direct I/O on the data device. */
  if (name && !device_direct_io(crypt_data_device(cd))) {
    log_dbg(cd, "Device %s does not support direct I/O.", device_path(crypt_data_device(cd)));
    /* FIXME: Add more specific error message for translation later. */
    log_err(cd, _("Failed to initialize reencryption device stack."));
    return -EINVAL;
  }

  /* Validate the requested cipher against the new key size (skip wrapped-key ciphers). */
  if (cipher && !crypt_cipher_wrapped_key(cipher, cipher_mode)) {
    if (keyslot_new == CRYPT_ANY_SLOT && kc_new && kc_new->get_key_size)
      r = kc_new->get_key_size(cd, kc_new, &key_length);
    else {
      r = crypt_keyslot_get_key_size(cd, keyslot_new);
      if (r >= 0)
        key_length = r;
    }
    if (r < 0)
      return r;
    r = LUKS2_check_cipher(cd, key_length, cipher, cipher_mode);
    if (r < 0) {
      log_err(cd, _("Unable to use cipher specification %s-%s for LUKS2."), cipher, cipher_mode);
      return r;
    }
  }

  r = LUKS2_device_write_lock(cd, hdr, crypt_metadata_device(cd));
  if (r)
    return r;

  ri = LUKS2_reencrypt_status(hdr);
  if (ri == CRYPT_REENCRYPT_INVALID) {
    device_write_unlock(cd, crypt_metadata_device(cd));
    return -EINVAL;
  }

  /* Initialize-only mode must not find reencryption already in progress. */
  if ((ri > CRYPT_REENCRYPT_NONE) && (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY)) {
    device_write_unlock(cd, crypt_metadata_device(cd));
    log_err(cd, _("LUKS2 reencryption already initialized in metadata."));
    return -EBUSY;
  }

  if (ri == CRYPT_REENCRYPT_NONE && !(flags & CRYPT_REENCRYPT_RESUME_ONLY)) {
    /* Fresh initialization: write reencryption metadata. */
    r = reencrypt_init(cd, name, hdr, kc_old, kc_new, keyslot_old,
           keyslot_new, cipher, cipher_mode, params, &vks);
    if (r < 0)
      log_err(cd, _("Failed to initialize LUKS2 reencryption in metadata."));
  } else if (ri > CRYPT_REENCRYPT_NONE) {
    log_dbg(cd, "LUKS2 reencryption already initialized.");
    r = 0;
  }

  device_write_unlock(cd, crypt_metadata_device(cd));

  if (r < 0 || (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY))
    goto out;

  /* Load the runtime context so crypt_reencrypt_run() can proceed. */
  r = reencrypt_load_by_keyslot_context(cd, name, kc_old, kc_new, keyslot_old,
                keyslot_new, &vks, params);
out:
  if (r < 0)
    crypt_drop_uploaded_keyring_key(cd, vks);
  crypt_free_volume_key(vks);
  return r < 0 ? r : LUKS2_find_keyslot(hdr, "reencrypt");
}
3934
#else
3935
/* Stub used when the library is built without LUKS2 reencryption support. */
static int reencrypt_init_by_keyslot_context(struct crypt_device *cd,
  const char *name __attribute__((unused)),
  struct crypt_keyslot_context *kc_old __attribute__((unused)),
  struct crypt_keyslot_context *kc_new __attribute__((unused)),
  int keyslot_old __attribute__((unused)),
  int keyslot_new __attribute__((unused)),
  const char *cipher __attribute__((unused)),
  const char *cipher_mode __attribute__((unused)),
  const struct crypt_params_reencrypt *params __attribute__((unused)))
{
  log_err(cd, _("This operation is not supported for this device type."));
  return -ENOTSUP;
}
3948
#endif
3949
3950
int crypt_reencrypt_init_by_keyring(struct crypt_device *cd,
3951
  const char *name,
3952
  const char *passphrase_description,
3953
  int keyslot_old,
3954
  int keyslot_new,
3955
  const char *cipher,
3956
  const char *cipher_mode,
3957
  const struct crypt_params_reencrypt *params)
3958
0
{
3959
0
  int r;
3960
0
  struct crypt_keyslot_context kc = {0};
3961
3962
0
  if (onlyLUKS2reencrypt(cd) || !passphrase_description)
3963
0
    return -EINVAL;
3964
0
  if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
3965
0
    return -EINVAL;
3966
3967
0
  if (device_is_dax(crypt_data_device(cd)) > 0) {
3968
0
    log_err(cd, _("Reencryption is not supported for DAX (persistent memory) devices."));
3969
0
    return -EINVAL;
3970
0
  }
3971
3972
0
  crypt_keyslot_context_init_by_keyring_internal(&kc, passphrase_description);
3973
0
  r = reencrypt_init_by_keyslot_context(cd, name, &kc, &kc, keyslot_old,
3974
0
                keyslot_new, cipher, cipher_mode, params);
3975
3976
0
  crypt_keyslot_context_destroy_internal(&kc);
3977
3978
0
  return r;
3979
0
}
3980
3981
int crypt_reencrypt_init_by_passphrase(struct crypt_device *cd,
3982
  const char *name,
3983
  const char *passphrase,
3984
  size_t passphrase_size,
3985
  int keyslot_old,
3986
  int keyslot_new,
3987
  const char *cipher,
3988
  const char *cipher_mode,
3989
  const struct crypt_params_reencrypt *params)
3990
0
{
3991
0
  int r;
3992
0
  struct crypt_keyslot_context kc = {0};
3993
3994
0
  if (onlyLUKS2reencrypt(cd) || !passphrase)
3995
0
    return -EINVAL;
3996
0
  if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
3997
0
    return -EINVAL;
3998
3999
0
  if (device_is_dax(crypt_data_device(cd)) > 0) {
4000
0
    log_err(cd, _("Reencryption is not supported for DAX (persistent memory) devices."));
4001
0
    return -EINVAL;
4002
0
  }
4003
4004
0
  crypt_keyslot_context_init_by_passphrase_internal(&kc, passphrase, passphrase_size);
4005
4006
0
  r = reencrypt_init_by_keyslot_context(cd, name, &kc, &kc, keyslot_old,
4007
0
                keyslot_new, cipher, cipher_mode, params);
4008
4009
0
  crypt_keyslot_context_destroy_internal(&kc);
4010
4011
0
  return r;
4012
0
}
4013
4014
int crypt_reencrypt_init_by_keyslot_context(struct crypt_device *cd,
4015
  const char *name,
4016
  struct crypt_keyslot_context *kc_old,
4017
  struct crypt_keyslot_context *kc_new,
4018
  int keyslot_old,
4019
  int keyslot_new,
4020
  const char *cipher,
4021
  const char *cipher_mode,
4022
  const struct crypt_params_reencrypt *params)
4023
0
{
4024
0
  if (onlyLUKS2reencrypt(cd) || (!kc_old && !kc_new))
4025
0
    return -EINVAL;
4026
0
  if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
4027
0
    return -EINVAL;
4028
4029
0
  if (device_is_dax(crypt_data_device(cd)) > 0) {
4030
0
    log_err(cd, _("Reencryption is not supported for DAX (persistent memory) devices."));
4031
0
    return -EINVAL;
4032
0
  }
4033
4034
0
  return reencrypt_init_by_keyslot_context(cd, name, kc_old, kc_new, keyslot_old, keyslot_new, cipher, cipher_mode, params);
4035
0
}
4036
4037
#if USE_LUKS2_REENCRYPTION
4038
/*
 * Perform one reencryption hotzone step: prepare segments, protect the
 * hotzone, read data through the old-key wrapper, decrypt, re-encrypt and
 * write through the new-key wrapper, then commit progress in metadata.
 *
 * Return codes drive teardown: REENC_OK continues, REENC_ERR aborts,
 * REENC_ROLLBACK means the step can be safely retried/rolled back,
 * REENC_FATAL means on-disk data may be inconsistent (hotzone partially
 * written) and recovery is required. The ordering of operations around the
 * "metadata commit point" comments is what guarantees crash recoverability.
 */
static reenc_status_t reencrypt_step(struct crypt_device *cd,
    struct luks2_hdr *hdr,
    struct luks2_reencrypt *rh,
    uint64_t device_size,
    bool online)
{
  int r;
  struct reenc_protection *rp;

  assert(hdr);
  assert(rh);

  rp = &rh->rp;

  /* in memory only */
  r = reencrypt_make_segments(cd, hdr, rh, device_size);
  if (r)
    return REENC_ERR;

  r = reencrypt_assign_segments(cd, hdr, rh, 1, 0);
  if (r) {
    log_err(cd, _("Failed to set device segments for next reencryption hotzone."));
    return REENC_ERR;
  }

  log_dbg(cd, "Reencrypting chunk starting at offset: %" PRIu64 ", size :%" PRIu64 ".", rh->offset, rh->length);
  log_dbg(cd, "data_offset: %" PRIu64, crypt_get_data_offset(cd) << SECTOR_SHIFT);

  /*
   * When the datashift step reaches offset 0 and a moved segment exists,
   * the read-side wrapper must be rebuilt to point at the moved segment's
   * original location (and possibly a different resilience mode).
   */
  if (!rh->offset && rp->type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
    crypt_storage_wrapper_destroy(rh->cw1);
    log_dbg(cd, "Reinitializing old segment storage wrapper for moved segment.");
    r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
        LUKS2_reencrypt_get_data_offset_moved(hdr),
        crypt_get_iv_offset(cd),
        reencrypt_get_sector_size_old(hdr),
        reencrypt_segment_cipher_old(hdr),
        crypt_volume_key_by_id(rh->vks, rh->digest_old),
        rh->wflags1);
    if (r) {
      log_err(cd, _("Failed to initialize old segment storage wrapper."));
      return REENC_ROLLBACK;
    }

    if (rh->rp_moved_segment.type != REENC_PROTECTION_NOT_SET) {
      log_dbg(cd, "Switching to moved segment resilience type.");
      rp = &rh->rp_moved_segment;
    }
  }

  r = reencrypt_hotzone_protect_ready(cd, rp);
  if (r) {
    log_err(cd, _("Failed to initialize hotzone protection."));
    return REENC_ROLLBACK;
  }

  if (online) {
    r = reencrypt_refresh_overlay_devices(cd, hdr, rh->overlay_name, rh->hotzone_name,
                  rh->hotzone_device, rh->vks, rh->device_size, rh->flags);
    /* Teardown overlay devices with dm-error. None bio shall pass! */
    if (r != REENC_OK)
      return r;
  }

  /* Read the hotzone with the old key before any on-disk change. */
  rh->read = crypt_storage_wrapper_read(rh->cw1, rh->offset, rh->reenc_buffer, rh->length);
  if (rh->read < 0) {
    /* severity normal */
    log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset);
    return REENC_ROLLBACK;
  }

  /* metadata commit point */
  r = reencrypt_hotzone_protect_final(cd, hdr, rh->reenc_keyslot, rp, rh->reenc_buffer, rh->read);
  if (r < 0) {
    /* severity normal */
    log_err(cd, _("Failed to write reencryption resilience metadata."));
    return REENC_ROLLBACK;
  }

  r = crypt_storage_wrapper_decrypt(rh->cw1, rh->offset, rh->reenc_buffer, rh->read);
  if (r) {
    /* severity normal */
    log_err(cd, _("Decryption failed."));
    return REENC_ROLLBACK;
  }
  /* From here the hotzone is being overwritten — failures are fatal. */
  if (rh->read != crypt_storage_wrapper_encrypt_write(rh->cw2, rh->offset, rh->reenc_buffer, rh->read)) {
    /* severity fatal */
    log_err(cd, _("Failed to write hotzone area starting at %" PRIu64 "."), rh->offset);
    return REENC_FATAL;
  }

  if (rp->type != REENC_PROTECTION_NONE && crypt_storage_wrapper_datasync(rh->cw2)) {
    log_err(cd, _("Failed to sync data."));
    return REENC_FATAL;
  }

  /* metadata commit safe point */
  r = reencrypt_assign_segments(cd, hdr, rh, 0, rp->type != REENC_PROTECTION_NONE);
  if (r) {
    /* severity fatal */
    log_err(cd, _("Failed to update metadata after current reencryption hotzone completed."));
    return REENC_FATAL;
  }

  if (online) {
    /* severity normal */
    log_dbg(cd, "Resuming device %s", rh->hotzone_name);
    r = dm_resume_device(cd, rh->hotzone_name, DM_RESUME_PRIVATE);
    if (r) {
      log_err(cd, _("Failed to resume device %s."), rh->hotzone_name);
      return REENC_ERR;
    }
  }

  return REENC_OK;
}
4153
4154
static int reencrypt_erase_backup_segments(struct crypt_device *cd,
4155
    struct luks2_hdr *hdr)
4156
0
{
4157
0
  int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
4158
0
  if (segment >= 0) {
4159
0
    if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
4160
0
      return -EINVAL;
4161
0
    json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
4162
0
  }
4163
0
  segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
4164
0
  if (segment >= 0) {
4165
0
    if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
4166
0
      return -EINVAL;
4167
0
    json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
4168
0
  }
4169
0
  segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
4170
0
  if (segment >= 0) {
4171
0
    if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
4172
0
      return -EINVAL;
4173
0
    json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
4174
0
  }
4175
4176
0
  return 0;
4177
0
}
4178
4179
/*
 * Wipe data-device areas that are no longer referenced after reencryption
 * finishes: the moved backup segment area (encrypt mode) and, for forward
 * datashift, the trailing data_shift-sized region at the end of the device.
 * Returns 0 or negative errno.
 */
static int reencrypt_wipe_unused_device_area(struct crypt_device *cd, struct luks2_reencrypt *rh)
{
  uint64_t offset, length, dev_size;
  int r = 0;

  assert(cd);
  assert(rh);

  /* Encrypt mode with a moved first segment: its backup copy is now stale. */
  if (rh->jobj_segment_moved && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
    offset = json_segment_get_offset(rh->jobj_segment_moved, 0);
    length = json_segment_get_size(rh->jobj_segment_moved, 0);
    log_dbg(cd, "Wiping %" PRIu64 " bytes of backup segment data at offset %" PRIu64,
      length, offset);
    r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
        offset, length, 1024 * 1024, NULL, NULL);
  }

  if (r < 0)
    return r;

  /* Forward datashift leaves data_shift bytes of stale data at device end. */
  if (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->direction == CRYPT_REENCRYPT_FORWARD) {
    r = device_size(crypt_data_device(cd), &dev_size);
    if (r < 0)
      return r;

    if (dev_size < data_shift_value(&rh->rp))
      return -EINVAL;

    offset = dev_size - data_shift_value(&rh->rp);
    length = data_shift_value(&rh->rp);
    log_dbg(cd, "Wiping %" PRIu64 " bytes of data at offset %" PRIu64,
      length, offset);
    r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
        offset, length, 1024 * 1024, NULL, NULL);
  }

  return r;
}
4217
4218
/*
 * Tear down after a successful (or cleanly interrupted) reencryption run:
 * flush metadata, reload/resume or remove the online dm stack, and — when
 * the whole device has been processed — wipe stale areas, drop old keyslots
 * and backup segments, and remove the reencrypt keyslot (which also clears
 * the on-disk requirement flag). Returns 0 or -EINVAL.
 */
static int reencrypt_teardown_ok(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
{
  int i, r;
  uint64_t dmt_flags;
  /* finished == all of device_size has been reencrypted */
  bool finished = !(rh->device_size > rh->progress);

  /* With no resilience protection, metadata was never flushed per-step. */
  if (rh->rp.type == REENC_PROTECTION_NONE &&
      LUKS2_hdr_write(cd, hdr)) {
    log_err(cd, _("Failed to write LUKS2 metadata."));
    return -EINVAL;
  }

  if (rh->online) {
    /* Replace the reencryption dm stack with a plain mapping and resume. */
    r = LUKS2_reload(cd, rh->device_name, rh->vks, rh->device_size, rh->flags);
    if (r)
      log_err(cd, _("Failed to reload device %s."), rh->device_name);
    if (!r) {
      r = dm_resume_device(cd, rh->device_name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
      if (r)
        log_err(cd, _("Failed to resume device %s."), rh->device_name);
    }
    dm_remove_device(cd, rh->overlay_name, 0);
    dm_remove_device(cd, rh->hotzone_name, 0);

    /* Finished decryption: the plain linear mapping can go away (deferred). */
    if (!r && finished && rh->mode == CRYPT_REENCRYPT_DECRYPT &&
        !dm_flags(cd, DM_LINEAR, &dmt_flags) && (dmt_flags & DM_DEFERRED_SUPPORTED))
        dm_remove_device(cd, rh->device_name, CRYPT_DEACTIVATE_DEFERRED);
  }

  if (finished) {
    /* Cleanup failures below are logged but do not abort the teardown. */
    if (reencrypt_wipe_unused_device_area(cd, rh))
      log_err(cd, _("Failed to wipe unused data device area."));
    if (reencrypt_get_data_offset_new(hdr) && LUKS2_set_keyslots_size(hdr, reencrypt_get_data_offset_new(hdr)))
      log_dbg(cd, "Failed to set new keyslots area size.");
    /* Destroy keyslots still bound to the now-obsolete old digest. */
    if (rh->digest_old >= 0 && rh->digest_new != rh->digest_old)
      for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++)
        if (LUKS2_digest_by_keyslot(hdr, i) == rh->digest_old && crypt_keyslot_destroy(cd, i))
          log_err(cd, _("Failed to remove unused (unbound) keyslot %d."), i);

    if (reencrypt_erase_backup_segments(cd, hdr))
      log_dbg(cd, "Failed to erase backup segments");

    if (reencrypt_update_flag(cd, 0, false, false))
      log_dbg(cd, "Failed to disable reencryption requirement flag.");

    /* metadata commit point also removing reencryption flag on-disk */
    if (crypt_keyslot_destroy(cd, rh->reenc_keyslot)) {
      log_err(cd, _("Failed to remove reencryption keyslot."));
      return -EINVAL;
    }
  }

  return 0;
}
4272
4273
static void reencrypt_teardown_fatal(struct crypt_device *cd, struct luks2_reencrypt *rh)
4274
0
{
4275
0
  log_err(cd, _("Fatal error while reencrypting chunk starting at %" PRIu64 ", %" PRIu64 " sectors long."),
4276
0
    (rh->offset >> SECTOR_SHIFT) + crypt_get_data_offset(cd), rh->length >> SECTOR_SHIFT);
4277
4278
0
  if (rh->online) {
4279
0
    log_err(cd, _("Online reencryption failed."));
4280
0
    if (dm_status_suspended(cd, rh->hotzone_name) > 0) {
4281
0
      log_dbg(cd, "Hotzone device %s suspended, replacing with dm-error.", rh->hotzone_name);
4282
0
      if (dm_error_device(cd, rh->hotzone_name)) {
4283
0
        log_err(cd, _("Failed to replace suspended device %s with dm-error target."), rh->hotzone_name);
4284
0
        log_err(cd, _("Do not resume the device unless replaced with error target manually."));
4285
0
      }
4286
0
    }
4287
0
  }
4288
0
}
4289
4290
static int reencrypt_teardown(struct crypt_device *cd, struct luks2_hdr *hdr,
4291
    struct luks2_reencrypt *rh, reenc_status_t rs, bool interrupted,
4292
    int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
4293
    void *usrptr)
4294
0
{
4295
0
  int r;
4296
4297
0
  switch (rs) {
4298
0
  case REENC_OK:
4299
0
    if (progress && !interrupted)
4300
0
      progress(rh->device_size, rh->progress, usrptr);
4301
0
    r = reencrypt_teardown_ok(cd, hdr, rh);
4302
0
    break;
4303
0
  case REENC_FATAL:
4304
0
    reencrypt_teardown_fatal(cd, rh);
4305
    /* fall-through */
4306
0
  default:
4307
0
    r = -EIO;
4308
0
  }
4309
4310
  /* this frees reencryption lock */
4311
0
  LUKS2_reencrypt_free(cd, rh);
4312
0
  crypt_set_luks2_reencrypt(cd, NULL);
4313
4314
0
  return r;
4315
0
}
4316
4317
int crypt_reencrypt_run(
4318
  struct crypt_device *cd,
4319
  int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
4320
  void *usrptr)
4321
0
{
4322
0
  int r;
4323
0
  crypt_reencrypt_info ri;
4324
0
  struct luks2_hdr *hdr;
4325
0
  struct luks2_reencrypt *rh;
4326
0
  reenc_status_t rs;
4327
0
  bool quit = false;
4328
4329
0
  if (onlyLUKS2reencrypt(cd))
4330
0
    return -EINVAL;
4331
4332
0
  hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
4333
4334
0
  ri = LUKS2_reencrypt_status(hdr);
4335
0
  if (ri > CRYPT_REENCRYPT_CLEAN) {
4336
0
    log_err(cd, _("Cannot proceed with reencryption. Unexpected reencryption status."));
4337
0
    return -EINVAL;
4338
0
  }
4339
4340
0
  rh = crypt_get_luks2_reencrypt(cd);
4341
0
  if (!rh || (!rh->reenc_lock && crypt_metadata_locking_enabled())) {
4342
0
    log_err(cd, _("Missing or invalid reencrypt context."));
4343
0
    return -EINVAL;
4344
0
  }
4345
4346
0
  log_dbg(cd, "Resuming LUKS2 reencryption.");
4347
4348
0
  if (rh->online) {
4349
    /* This is last resort to avoid data corruption. Abort is justified here. */
4350
0
    assert(device_direct_io(crypt_data_device(cd)));
4351
4352
0
    if (reencrypt_init_device_stack(cd, rh)) {
4353
0
      log_err(cd, _("Failed to initialize reencryption device stack."));
4354
0
      return -EINVAL;
4355
0
    }
4356
0
  }
4357
4358
0
  log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
4359
4360
0
  rs = REENC_OK;
4361
4362
0
  if (progress && progress(rh->device_size, rh->progress, usrptr))
4363
0
    quit = true;
4364
4365
0
  while (!quit && (rh->device_size > rh->progress)) {
4366
0
    rs = reencrypt_step(cd, hdr, rh, rh->device_size, rh->online);
4367
0
    if (rs != REENC_OK)
4368
0
      break;
4369
4370
0
    log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
4371
0
    if (progress && progress(rh->device_size, rh->progress, usrptr))
4372
0
      quit = true;
4373
4374
0
    r = reencrypt_context_update(cd, rh);
4375
0
    if (r) {
4376
0
      log_err(cd, _("Failed to update reencryption context."));
4377
0
      rs = REENC_ERR;
4378
0
      break;
4379
0
    }
4380
4381
0
    log_dbg(cd, "Next reencryption offset will be %" PRIu64 " sectors.", rh->offset);
4382
0
    log_dbg(cd, "Next reencryption chunk size will be %" PRIu64 " sectors).", rh->length);
4383
0
  }
4384
4385
0
  r = reencrypt_teardown(cd, hdr, rh, rs, quit, progress, usrptr);
4386
0
  return r;
4387
0
}
4388
4389
4390
static int reencrypt_recovery(struct crypt_device *cd,
4391
    struct luks2_hdr *hdr,
4392
    uint64_t device_size,
4393
    struct volume_key *vks)
4394
0
{
4395
0
  int r;
4396
0
  struct luks2_reencrypt *rh = NULL;
4397
4398
0
  r = reencrypt_load(cd, hdr, device_size, 0, 0, vks, &rh);
4399
0
  if (r < 0) {
4400
0
    log_err(cd, _("Failed to load LUKS2 reencryption context."));
4401
0
    return r;
4402
0
  }
4403
4404
0
  r = reencrypt_recover_segment(cd, hdr, rh, vks);
4405
0
  if (r < 0)
4406
0
    goto out;
4407
4408
0
  if ((r = reencrypt_assign_segments(cd, hdr, rh, 0, 0)))
4409
0
    goto out;
4410
4411
0
  r = reencrypt_context_update(cd, rh);
4412
0
  if (r) {
4413
0
    log_err(cd, _("Failed to update reencryption context."));
4414
0
    goto out;
4415
0
  }
4416
4417
0
  r = reencrypt_teardown_ok(cd, hdr, rh);
4418
0
  if (!r)
4419
0
    r = LUKS2_hdr_write(cd, hdr);
4420
0
out:
4421
0
  LUKS2_reencrypt_free(cd, rh);
4422
4423
0
  return r;
4424
0
}
4425
#else /* USE_LUKS2_REENCRYPTION */
4426
int crypt_reencrypt_run(
4427
  struct crypt_device *cd,
4428
  int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
4429
  void *usrptr)
4430
{
4431
  UNUSED(progress);
4432
  UNUSED(usrptr);
4433
4434
  log_err(cd, _("This operation is not supported for this device type."));
4435
  return -ENOTSUP;
4436
}
4437
#endif
4438
4439
int crypt_reencrypt(
4440
  struct crypt_device *cd,
4441
  int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
4442
0
{
4443
0
  return crypt_reencrypt_run(cd, progress, NULL);
4444
0
}
4445
4446
/*
4447
 * use only for calculation of minimal data device size.
4448
 * The real data offset is taken directly from segments!
4449
 */
4450
/*
 * use only for calculation of minimal data device size.
 * The real data offset is taken directly from segments!
 *
 * Returns the offset in 512-byte sectors when @blockwise is true,
 * otherwise in bytes. During clean forward reencryption the pending
 * data shift is added on top of the header data offset.
 *
 * NOTE(review): the return type is int while the computed offset is
 * uint64_t — large offsets (especially in byte mode) would truncate;
 * confirm against callers before relying on byte mode for big devices.
 */
int LUKS2_reencrypt_data_offset(struct luks2_hdr *hdr, bool blockwise)
{
  uint64_t data_offset = LUKS2_get_data_offset(hdr);
  crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);

  if (ri == CRYPT_REENCRYPT_CLEAN && reencrypt_direction(hdr) == CRYPT_REENCRYPT_FORWARD)
    data_offset += reencrypt_data_shift(hdr) >> SECTOR_SHIFT;

  if (blockwise)
    return data_offset;

  return data_offset << SECTOR_SHIFT;
}
4460
4461
/* internal only */
4462
/* internal only */
/*
 * Validate that the data device is accessible and large enough for
 * reencryption, and report its usable payload size via @dev_size
 * (real device size minus the reencryption-aware data offset, bytes).
 *
 * @check_size              minimal required size in bytes (0 = skip check);
 *                          grown by the data shift for backward reencryption
 *                          with a moved segment or dynamic size
 * @device_exclusive_check  request exclusive device access check
 */
int LUKS2_reencrypt_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr,
  uint64_t check_size, uint64_t *dev_size, bool device_exclusive_check, bool dynamic)
{
  uint64_t data_offset, real_size = 0;
  struct device *data_device = crypt_data_device(cd);
  int r;

  if (reencrypt_direction(hdr) == CRYPT_REENCRYPT_BACKWARD &&
      (LUKS2_get_segment_by_flag(hdr, "backup-moved-segment") || dynamic))
    check_size += reencrypt_data_shift(hdr);

  r = device_check_access(cd, data_device,
        device_exclusive_check ? DEV_EXCL : DEV_OK);
  if (r)
    return r;

  data_offset = LUKS2_reencrypt_data_offset(hdr, false);

  r = device_check_size(cd, data_device, data_offset, 1);
  if (r)
    return r;

  r = device_size(data_device, &real_size);
  if (r)
    return r;

  /* NOTE(review): real_size - data_offset wraps (unsigned) when the device
   * is smaller than the data offset; that case is rejected just below, so
   * only this debug line can show wrapped values. */
  log_dbg(cd, "Required minimal device size: %" PRIu64 " (%" PRIu64 " sectors)"
        ", real device size: %" PRIu64 " (%" PRIu64 " sectors) "
        "calculated device size: %" PRIu64 " (%" PRIu64 " sectors)",
        check_size, check_size >> SECTOR_SHIFT, real_size, real_size >> SECTOR_SHIFT,
        real_size - data_offset, (real_size - data_offset) >> SECTOR_SHIFT);

  if (real_size < data_offset || (check_size && real_size < check_size)) {
    log_err(cd, _("Device %s is too small."), device_path(data_device));
    return -EINVAL;
  }

  *dev_size = real_size - data_offset;

  return 0;
}
4502
#if USE_LUKS2_REENCRYPTION
4503
/* returns keyslot number on success (>= 0) or negative errnor otherwise */
4504
int LUKS2_reencrypt_locked_recovery_by_vks(struct crypt_device *cd,
4505
  struct volume_key *vks)
4506
0
{
4507
0
  uint64_t minimal_size, device_size;
4508
0
  int r = -EINVAL;
4509
0
  struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
4510
4511
0
  log_dbg(cd, "Entering reencryption crash recovery.");
4512
4513
0
  if (LUKS2_get_data_size(hdr, &minimal_size, NULL))
4514
0
    return r;
4515
0
  if (LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, true, false))
4516
0
    goto out;
4517
4518
0
  r = reencrypt_recovery(cd, hdr, device_size, vks);
4519
4520
0
out:
4521
0
  if (r < 0)
4522
0
    crypt_drop_uploaded_keyring_key(cd, vks);
4523
0
  return r;
4524
0
}
4525
#endif
4526
crypt_reencrypt_info LUKS2_reencrypt_get_params(struct luks2_hdr *hdr,
4527
  struct crypt_params_reencrypt *params)
4528
0
{
4529
0
  crypt_reencrypt_info ri;
4530
0
  int digest;
4531
0
  uint8_t version;
4532
4533
0
  if (params)
4534
0
    memset(params, 0, sizeof(*params));
4535
4536
0
  ri = LUKS2_reencrypt_status(hdr);
4537
0
  if (ri == CRYPT_REENCRYPT_NONE || ri == CRYPT_REENCRYPT_INVALID || !params)
4538
0
    return ri;
4539
4540
0
  digest = LUKS2_digest_by_keyslot(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
4541
0
  if (digest < 0 && digest != -ENOENT)
4542
0
    return CRYPT_REENCRYPT_INVALID;
4543
4544
  /*
4545
   * In case there's an old "online-reencrypt" requirement or reencryption
4546
   * keyslot digest is missing inform caller reencryption metadata requires repair.
4547
   */
4548
0
  if (!LUKS2_config_get_reencrypt_version(hdr, &version) &&
4549
0
      (version < 2 || digest == -ENOENT)) {
4550
0
    params->flags |= CRYPT_REENCRYPT_REPAIR_NEEDED;
4551
0
    return ri;
4552
0
  }
4553
4554
0
  params->mode = reencrypt_mode(hdr);
4555
0
  params->direction = reencrypt_direction(hdr);
4556
0
  params->resilience = reencrypt_resilience_type(hdr);
4557
0
  params->hash = reencrypt_resilience_hash(hdr);
4558
0
  params->data_shift = reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
4559
0
  params->max_hotzone_size = 0;
4560
0
  if (LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
4561
0
    params->flags |= CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT;
4562
4563
0
  return ri;
4564
0
}