Coverage Report

Created: 2026-04-28 06:29

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/cryptsetup/lib/luks2/luks2_reencrypt.c
Line
Count
Source
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
 * LUKS - Linux Unified Key Setup v2, reencryption helpers
4
 *
5
 * Copyright (C) 2015-2025 Red Hat, Inc. All rights reserved.
6
 * Copyright (C) 2015-2025 Ondrej Kozina
7
 */
8
9
#include "luks2_internal.h"
10
#include "utils_device_locking.h"
11
#include "keyslot_context.h"
12
13
/*
 * Runtime state for an in-progress LUKS2 reencryption.
 * One instance tracks the current hotzone window, the metadata snapshots
 * needed for crash recovery, and the I/O plumbing used to move data.
 */
struct luks2_reencrypt {
	/* reencryption window attributes */
	uint64_t offset;       /* current hotzone offset (bytes, relative to data area) */
	uint64_t progress;     /* bytes already reencrypted — TODO confirm unit against callers */
	uint64_t length;       /* current hotzone length in bytes */
	uint64_t device_size;  /* usable data device size */
	bool online;           /* device is active (dm stack in use) — presumably; verify against callers */
	bool fixed_length;     /* device_size is explicit, not dynamically detected */
	crypt_reencrypt_direction_info direction;
	crypt_reencrypt_mode_info mode;

	/* names of dm devices used for online reencryption */
	char *device_name;
	char *hotzone_name;
	char *overlay_name;
	uint32_t flags;

	/* reencryption window persistence attributes */
	struct reenc_protection rp;                /* resilience protection for the hotzone */
	struct reenc_protection rp_moved_segment;  /* protection for the moved-segment case */

	int reenc_keyslot;     /* id of the "reencrypt" keyslot */

	/* already running reencryption */
	json_object *jobj_segs_hot;   /* segment layout while hotzone is being processed */
	json_object *jobj_segs_post;  /* segment layout after the hotzone is finished */

	/* backup segments (snapshots of old/new/moved layout for recovery) */
	json_object *jobj_segment_new;
	int digest_new;               /* digest id bound to the new segment */
	json_object *jobj_segment_old;
	int digest_old;               /* digest id bound to the old segment */
	json_object *jobj_segment_moved;

	struct volume_key *vks;       /* volume keys (old and/or new) */

	void *reenc_buffer;           /* data buffer for one hotzone */
	ssize_t read;                 /* bytes currently held in reenc_buffer */

	/* storage wrappers: one for reading (old) and one for writing (new) */
	struct crypt_storage_wrapper *cw1;
	struct crypt_storage_wrapper *cw2;

	uint32_t wflags1;
	uint32_t wflags2;

	struct device *hotzone_device;

	struct crypt_lock_handle *reenc_lock;  /* serializes reencryption across processes */
};
61
#if USE_LUKS2_REENCRYPTION
62
static uint64_t data_shift_value(struct reenc_protection *rp)
63
0
{
64
0
  return rp->type == REENC_PROTECTION_DATASHIFT ? rp->p.ds.data_shift : 0;
65
0
}
66
67
/* Fetch the backup segment describing the new (final) or old (previous) layout. */
static json_object *reencrypt_segment(struct luks2_hdr *hdr, unsigned new)
{
	const char *flag = new ? "backup-final" : "backup-previous";

	return LUKS2_get_segment_by_flag(hdr, flag);
}
71
72
/* Convenience wrapper: backup segment for the new (final) layout. */
static json_object *reencrypt_segment_new(struct luks2_hdr *hdr)
{
	return reencrypt_segment(hdr, 1u);
}
76
77
/* Convenience wrapper: backup segment for the old (previous) layout. */
static json_object *reencrypt_segment_old(struct luks2_hdr *hdr)
{
	return reencrypt_segment(hdr, 0u);
}
81
82
/*
 * Build a standalone segments object containing a single copy of the old
 * (backup-previous) segment, keyed by segment id 0, with the backup flag
 * stripped. Returns a new json reference or NULL on allocation/copy error.
 */
static json_object *reencrypt_segments_old(struct luks2_hdr *hdr)
{
	json_object *jobj_segments, *jobj = NULL;

	/* deep copy so the header's own backup segment stays untouched */
	if (json_object_copy(reencrypt_segment_old(hdr), &jobj))
		return NULL;

	json_segment_remove_flag(jobj, "backup-previous");

	jobj_segments = json_object_new_object();
	if (!jobj_segments) {
		json_object_put(jobj);
		return NULL;
	}

	/* on failure the reference is not consumed, so drop both objects;
	 * on success jobj's reference is owned by jobj_segments (no put here) */
	if (json_object_object_add_by_uint(jobj_segments, 0, jobj)) {
		json_object_put(jobj);
		json_object_put(jobj_segments);
		return NULL;
	}

	return jobj_segments;
}
105
106
static const char *reencrypt_segment_cipher_new(struct luks2_hdr *hdr)
107
0
{
108
0
  return json_segment_get_cipher(reencrypt_segment(hdr, 1));
109
0
}
110
111
static const char *reencrypt_segment_cipher_old(struct luks2_hdr *hdr)
112
0
{
113
0
  return json_segment_get_cipher(reencrypt_segment(hdr, 0));
114
0
}
115
116
static uint32_t reencrypt_get_sector_size_new(struct luks2_hdr *hdr)
117
0
{
118
0
  return json_segment_get_sector_size(reencrypt_segment(hdr, 1));
119
0
}
120
121
static uint32_t reencrypt_get_sector_size_old(struct luks2_hdr *hdr)
122
0
{
123
0
  return json_segment_get_sector_size(reencrypt_segment(hdr, 0));
124
0
}
125
126
static uint64_t reencrypt_data_offset(struct luks2_hdr *hdr, unsigned new)
127
0
{
128
0
  json_object *jobj = reencrypt_segment(hdr, new);
129
0
  if (jobj)
130
0
    return json_segment_get_offset(jobj, 0);
131
132
0
  return LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
133
0
}
134
135
static uint64_t LUKS2_reencrypt_get_data_offset_moved(struct luks2_hdr *hdr)
136
0
{
137
0
  json_object *jobj_segment = LUKS2_get_segment_by_flag(hdr, "backup-moved-segment");
138
139
0
  if (!jobj_segment)
140
0
    return 0;
141
142
0
  return json_segment_get_offset(jobj_segment, 0);
143
0
}
144
145
/* Data offset derived from the new (final) backup segment. */
static uint64_t reencrypt_get_data_offset_new(struct luks2_hdr *hdr)
{
	return reencrypt_data_offset(hdr, 1u);
}
149
150
/* Data offset derived from the old (previous) backup segment. */
static uint64_t reencrypt_get_data_offset_old(struct luks2_hdr *hdr)
{
	return reencrypt_data_offset(hdr, 0u);
}
154
#endif
155
156
/*
 * Digest id bound to the selected backup segment, or a negative error
 * when the backup segment is missing.
 */
static int reencrypt_digest(struct luks2_hdr *hdr, unsigned new)
{
	const char *flag = new ? "backup-final" : "backup-previous";
	int segment = LUKS2_get_segment_id_by_flag(hdr, flag);

	return segment < 0 ? segment : LUKS2_digest_by_segment(hdr, segment);
}
165
166
/* Digest id for the new (final) backup segment. */
int LUKS2_reencrypt_digest_new(struct luks2_hdr *hdr)
{
	return reencrypt_digest(hdr, 1u);
}
170
171
/* Digest id for the old (previous) backup segment. */
int LUKS2_reencrypt_digest_old(struct luks2_hdr *hdr)
{
	return reencrypt_digest(hdr, 0u);
}
175
176
/* Segment id of the new (final) backup segment, negative when absent. */
int LUKS2_reencrypt_segment_new(struct luks2_hdr *hdr)
{
	const char *flag = "backup-final";

	return LUKS2_get_segment_id_by_flag(hdr, flag);
}
180
181
/* Segment id of the old (previous) backup segment, negative when absent. */
int LUKS2_reencrypt_segment_old(struct luks2_hdr *hdr)
{
	const char *flag = "backup-previous";

	return LUKS2_get_segment_id_by_flag(hdr, flag);
}
185
186
/*
 * Count distinct volume keys referenced by the reencryption backup
 * segments: one for the new digest, one more when the old digest exists
 * and differs from the new one. Result is 0, 1 or 2.
 */
unsigned LUKS2_reencrypt_vks_count(struct luks2_hdr *hdr)
{
	unsigned vks_count = 0;
	int digest_new = LUKS2_reencrypt_digest_new(hdr);
	int digest_old = LUKS2_reencrypt_digest_old(hdr);

	if (digest_new >= 0)
		vks_count++;

	if (digest_old >= 0 && digest_old != digest_new)
		vks_count++;

	return vks_count;
}
200
201
/* none, checksums, journal or shift */
202
static const char *reencrypt_resilience_type(struct luks2_hdr *hdr)
203
0
{
204
0
  json_object *jobj_keyslot, *jobj_area, *jobj_type;
205
0
  int ks = LUKS2_find_keyslot(hdr, "reencrypt");
206
207
0
  if (ks < 0)
208
0
    return NULL;
209
210
0
  jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
211
212
0
  json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
213
0
  if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
214
0
    return NULL;
215
216
0
  return json_object_get_string(jobj_type);
217
0
}
218
219
static const char *reencrypt_resilience_hash(struct luks2_hdr *hdr)
220
0
{
221
0
  json_object *jobj_keyslot, *jobj_area, *jobj_type, *jobj_hash;
222
0
  int ks = LUKS2_find_keyslot(hdr, "reencrypt");
223
224
0
  if (ks < 0)
225
0
    return NULL;
226
227
0
  jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
228
229
0
  json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
230
0
  if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
231
0
    return NULL;
232
0
  if (strcmp(json_object_get_string(jobj_type), "checksum"))
233
0
    return NULL;
234
0
  if (!json_object_object_get_ex(jobj_area, "hash", &jobj_hash))
235
0
    return NULL;
236
237
0
  return json_object_get_string(jobj_hash);
238
0
}
239
#if USE_LUKS2_REENCRYPTION
240
/*
 * Build the post-hotzone segment layout for encryption with data shift
 * (data moved towards the device start). Copies all segments preceding
 * the in-reencryption segment, then replaces the in-reencryption segment
 * with a finished "new" segment extended by the just-processed hotzone.
 * Returns a new json reference or NULL on error.
 */
static json_object *_enc_create_segments_shift_after(struct luks2_reencrypt *rh, uint64_t data_offset)
{
	int reenc_seg, i = 0;
	json_object *jobj, *jobj_copy = NULL, *jobj_seg_new = NULL, *jobj_segs_post = json_object_new_object();
	uint64_t tmp;

	if (!rh->jobj_segs_hot || !jobj_segs_post)
		goto err;

	/* nothing in flight: an empty layout is a valid result */
	if (json_segments_count(rh->jobj_segs_hot) == 0)
		return jobj_segs_post;

	reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
	if (reenc_seg < 0)
		goto err;

	/* carry over all segments before the hotzone unchanged (extra ref taken) */
	while (i < reenc_seg) {
		jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, i);
		if (!jobj_copy || json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy)))
			goto err;
	}
	jobj_copy = NULL;

	jobj = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
	if (!jobj) {
		/* no segment follows the hotzone: the hotzone itself becomes final */
		jobj = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg);
		if (!jobj || json_object_copy(jobj, &jobj_seg_new))
			goto err;
		json_segment_remove_flag(jobj_seg_new, "in-reencryption");
		tmp = rh->length;
	} else {
		/* extend the following new segment backwards over the hotzone */
		if (json_object_copy(jobj, &jobj_seg_new))
			goto err;
		json_object_object_add(jobj_seg_new, "offset", crypt_jobj_new_uint64(rh->offset + data_offset));
		json_object_object_add(jobj_seg_new, "iv_tweak", crypt_jobj_new_uint64(rh->offset >> SECTOR_SHIFT));
		tmp = json_segment_get_size(jobj_seg_new, 0) + rh->length;
	}

	/* alter size of new segment, reenc_seg == 0 we're finished */
	json_object_object_add(jobj_seg_new, "size", reenc_seg > 0 ? crypt_jobj_new_uint64(tmp) : json_object_new_string("dynamic"));
	if (!json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_seg_new))
		return jobj_segs_post;

err:
	json_object_put(jobj_seg_new);
	json_object_put(jobj_copy);
	json_object_put(jobj_segs_post);
	return NULL;
}
289
290
/*
 * Build the hot (in-flight) segment layout for encryption with data move
 * (backward direction, datashift protection). The layout is: all linear
 * segments before the last one, an optionally shrunk last linear segment,
 * the hotzone crypt segment marked in-reencryption, and finally any
 * pre-existing crypt segment. Returns a new json reference or NULL.
 */
static json_object *reencrypt_make_hot_segments_encrypt_shift(struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	uint64_t data_offset)
{
	int sg, crypt_seg, i = 0;
	uint64_t segment_size;
	json_object *jobj_seg_shrunk = NULL, *jobj_seg_new = NULL, *jobj_copy = NULL, *jobj_enc_seg = NULL,
		    *jobj_segs_hot = json_object_new_object();

	if (!jobj_segs_hot)
		return NULL;

	crypt_seg = LUKS2_segment_by_type(hdr, "crypt");

	/* FIXME: This is hack. Find proper way to fix it. */
	sg = LUKS2_last_segment_by_type(hdr, "linear");
	if (rh->offset && sg < 0)
		goto err;
	if (sg < 0)
		return jobj_segs_hot;

	/* hotzone segment, flagged in-reencryption (last arg = 1) */
	jobj_enc_seg = json_segment_create_crypt(data_offset + rh->offset,
			rh->offset >> SECTOR_SHIFT,
			&rh->length,
			reencrypt_segment_cipher_new(hdr),
			NULL, 0, /* integrity */
			reencrypt_get_sector_size_new(hdr),
			1);

	/* copy every segment preceding the last linear one unchanged */
	while (i < sg) {
		jobj_copy = LUKS2_get_segment_jobj(hdr, i);
		if (!jobj_copy || json_object_object_add_by_uint(jobj_segs_hot, i++, json_object_get(jobj_copy)))
			goto err;
	}
	jobj_copy = NULL;

	/* shrink the last linear segment by the hotzone length if it is larger */
	segment_size = LUKS2_segment_size(hdr, sg, 0);
	if (segment_size > rh->length) {
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, sg), &jobj_seg_shrunk))
			goto err;
		json_object_object_add(jobj_seg_shrunk, "size", crypt_jobj_new_uint64(segment_size - rh->length));
		if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_seg_shrunk))
			goto err;
	}

	if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_enc_seg))
		goto err;

	/* first crypt segment after encryption ? */
	if (crypt_seg >= 0) {
		jobj_seg_new = LUKS2_get_segment_jobj(hdr, crypt_seg);
		if (!jobj_seg_new || json_object_object_add_by_uint(jobj_segs_hot, sg, json_object_get(jobj_seg_new)))
			goto err;
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_copy);
	json_object_put(jobj_seg_new);
	json_object_put(jobj_seg_shrunk);
	json_object_put(jobj_enc_seg);
	json_object_put(jobj_segs_hot);

	return NULL;
}
355
356
static json_object *reencrypt_make_segment_new(struct crypt_device *cd,
357
    struct luks2_hdr *hdr,
358
    const struct luks2_reencrypt *rh,
359
    uint64_t data_offset,
360
    uint64_t segment_offset,
361
    uint64_t iv_offset,
362
    const uint64_t *segment_length)
363
0
{
364
0
  switch (rh->mode) {
365
0
  case CRYPT_REENCRYPT_REENCRYPT:
366
0
  case CRYPT_REENCRYPT_ENCRYPT:
367
0
    return json_segment_create_crypt(data_offset + segment_offset,
368
0
              crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
369
0
              segment_length,
370
0
              reencrypt_segment_cipher_new(hdr),
371
0
              NULL, 0, /* integrity */
372
0
              reencrypt_get_sector_size_new(hdr), 0);
373
0
  case CRYPT_REENCRYPT_DECRYPT:
374
0
    return json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
375
0
  }
376
377
0
  return NULL;
378
0
}
379
380
/*
 * Build the post-hotzone segment layout for the forward direction:
 * segment 0 covers everything already in the new format (including the
 * just-finished hotzone), segment 1 is the remaining old-format tail,
 * when one exists. Returns a new json reference or NULL on error.
 */
static json_object *reencrypt_make_post_segments_forward(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	uint64_t data_offset)
{
	int reenc_seg;
	json_object *jobj_old_seg, *jobj_new_seg_after = NULL, *jobj_old_seg_copy = NULL,
		    *jobj_segs_post = json_object_new_object();
	uint64_t fixed_length = rh->offset + rh->length;

	if (!rh->jobj_segs_hot || !jobj_segs_post)
		goto err;

	reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
	if (reenc_seg < 0)
		goto err;

	jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);

	/*
	 * if there's no old segment after reencryption, we're done.
	 * Set size to 'dynamic' again.
	 */
	jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, jobj_old_seg ? &fixed_length : NULL);
	if (!jobj_new_seg_after || json_object_object_add_by_uint_by_ref(jobj_segs_post, 0, &jobj_new_seg_after))
		goto err;

	if (jobj_old_seg) {
		if (rh->fixed_length) {
			/* copy and resize the tail so it ends at the fixed device size */
			if (json_object_copy(jobj_old_seg, &jobj_old_seg_copy))
				goto err;
			fixed_length = rh->device_size - fixed_length;
			json_object_object_add(jobj_old_seg_copy, "size", crypt_jobj_new_uint64(fixed_length));
		} else
			jobj_old_seg_copy = json_object_get(jobj_old_seg);

		if (json_object_object_add_by_uint_by_ref(jobj_segs_post, 1, &jobj_old_seg_copy))
			goto err;
	}

	return jobj_segs_post;
err:
	json_object_put(jobj_new_seg_after);
	json_object_put(jobj_old_seg_copy);
	json_object_put(jobj_segs_post);
	return NULL;
}
427
428
/*
 * Build the post-hotzone segment layout for the backward direction:
 * the old-format segment preceding the hotzone is kept, and the hotzone
 * position is replaced by a new-format segment covering the rest of the
 * device. Returns a new json reference or NULL on error.
 */
static json_object *reencrypt_make_post_segments_backward(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	uint64_t data_offset)
{
	int reenc_seg;
	uint64_t fixed_length;

	json_object *jobj_new_seg_after = NULL, *jobj_old_seg = NULL,
		    *jobj_segs_post = json_object_new_object();

	if (!rh->jobj_segs_hot || !jobj_segs_post)
		goto err;

	reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
	if (reenc_seg < 0)
		goto err;

	/* keep the old-format segment in front of the hotzone (extra ref taken) */
	jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg - 1);
	if (jobj_old_seg) {
		json_object_get(jobj_old_seg);
		if (json_object_object_add_by_uint_by_ref(jobj_segs_post, reenc_seg - 1, &jobj_old_seg))
			goto err;
	}

	/* explicit size only when the device size is fixed and a head remains */
	if (rh->fixed_length && rh->offset) {
		fixed_length = rh->device_size - rh->offset;
		jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, &fixed_length);
	} else
		jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, NULL);

	if (jobj_new_seg_after && !json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_new_seg_after))
		return jobj_segs_post;
err:
	json_object_put(jobj_new_seg_after);
	json_object_put(jobj_old_seg);
	json_object_put(jobj_segs_post);
	return NULL;
}
467
468
static json_object *reencrypt_make_segment_reencrypt(struct crypt_device *cd,
469
    struct luks2_hdr *hdr,
470
    const struct luks2_reencrypt *rh,
471
    uint64_t data_offset,
472
    uint64_t segment_offset,
473
    uint64_t iv_offset,
474
    const uint64_t *segment_length)
475
0
{
476
0
  switch (rh->mode) {
477
0
  case CRYPT_REENCRYPT_REENCRYPT:
478
0
  case CRYPT_REENCRYPT_ENCRYPT:
479
0
    return json_segment_create_crypt(data_offset + segment_offset,
480
0
        crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
481
0
        segment_length,
482
0
        reencrypt_segment_cipher_new(hdr),
483
0
              NULL, 0, /* integrity */
484
0
        reencrypt_get_sector_size_new(hdr), 1);
485
0
  case CRYPT_REENCRYPT_DECRYPT:
486
0
    return json_segment_create_linear(data_offset + segment_offset, segment_length, 1);
487
0
  }
488
489
0
  return NULL;
490
0
}
491
492
static json_object *reencrypt_make_segment_old(struct crypt_device *cd,
493
    struct luks2_hdr *hdr,
494
    const struct luks2_reencrypt *rh,
495
    uint64_t data_offset,
496
    uint64_t segment_offset,
497
    const uint64_t *segment_length)
498
0
{
499
0
  json_object *jobj_old_seg = NULL;
500
501
0
  switch (rh->mode) {
502
0
  case CRYPT_REENCRYPT_REENCRYPT:
503
0
  case CRYPT_REENCRYPT_DECRYPT:
504
0
    jobj_old_seg = json_segment_create_crypt(data_offset + segment_offset,
505
0
                crypt_get_iv_offset(cd) + (segment_offset >> SECTOR_SHIFT),
506
0
                segment_length,
507
0
                reencrypt_segment_cipher_old(hdr),
508
0
                NULL, 0, /* integrity */
509
0
                reencrypt_get_sector_size_old(hdr),
510
0
                0);
511
0
    break;
512
0
  case CRYPT_REENCRYPT_ENCRYPT:
513
0
    jobj_old_seg = json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
514
0
  }
515
516
0
  return jobj_old_seg;
517
0
}
518
519
/*
 * Build the hot segment layout for the forward direction: an optional
 * new-format head, the in-reencryption hotzone, and an optional
 * old-format tail. Returns a new json reference or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_forward(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		uint64_t device_size,
		uint64_t data_offset)
{
	uint64_t fixed_length, tmp = rh->offset + rh->length;
	json_object *jobj_segs_hot = json_object_new_object(), *jobj_reenc_seg = NULL,
		    *jobj_old_seg = NULL, *jobj_new_seg = NULL;
	unsigned int sg = 0;

	if (!jobj_segs_hot)
		return NULL;

	/* head already converted to the new format */
	if (rh->offset) {
		jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, &rh->offset);
		if (!jobj_new_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_new_seg))
			goto err;
	}

	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
	if (!jobj_reenc_seg)
		goto err;

	if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_reenc_seg))
		goto err;

	/* tail still in the old format; shifted by data_shift for datashift protection */
	if (tmp < device_size) {
		fixed_length = device_size - tmp;
		jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh, data_offset + data_shift_value(&rh->rp),
				rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_old_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg, &jobj_old_seg))
			goto err;
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_reenc_seg);
	json_object_put(jobj_old_seg);
	json_object_put(jobj_new_seg);
	json_object_put(jobj_segs_hot);
	return NULL;
}
562
563
/*
 * Build the hot segment layout for decryption with data move (forward
 * direction, datashift protection): the moved plaintext head, an optional
 * already-decrypted linear stretch, the in-reencryption hotzone, and
 * either the original second segment (when starting at offset 0) or the
 * remaining old-format tail. Returns a new json reference or NULL.
 */
static json_object *reencrypt_make_hot_segments_decrypt_shift(struct crypt_device *cd,
	struct luks2_hdr *hdr, struct luks2_reencrypt *rh,
	uint64_t device_size, uint64_t data_offset)
{
	uint64_t fixed_length, tmp = rh->offset + rh->length, linear_length = rh->progress;
	json_object *jobj, *jobj_segs_hot = json_object_new_object(), *jobj_reenc_seg = NULL,
		    *jobj_old_seg = NULL, *jobj_new_seg = NULL;
	unsigned int sg = 0;

	if (!jobj_segs_hot)
		return NULL;

	if (rh->offset) {
		/* keep the first (moved) segment as-is */
		jobj = LUKS2_get_segment_jobj(hdr, 0);
		if (!jobj)
			goto err;

		jobj_new_seg = json_object_get(jobj);
		if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_new_seg))
			goto err;

		/* already-decrypted data following the moved segment */
		if (linear_length) {
			jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh,
					data_offset,
					json_segment_get_size(jobj, 0),
					0,
					&linear_length);
			if (!jobj_new_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_new_seg))
				goto err;
		}
	}

	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset,
			rh->offset,
			rh->offset,
			&rh->length);
	if (!jobj_reenc_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_reenc_seg))
		goto err;

	/* at offset 0 reuse the existing non-backup second segment, otherwise
	 * describe the remaining old-format tail (shifted by data_shift) */
	if (!rh->offset && (jobj = LUKS2_get_segment_jobj(hdr, 1)) &&
	    !json_segment_is_backup(jobj)) {
		jobj_new_seg = json_object_get(jobj);
		if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_new_seg))
			goto err;
	} else if (tmp < device_size) {
		fixed_length = device_size - tmp;
		jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh,
				data_offset + data_shift_value(&rh->rp),
				rh->offset + rh->length,
				rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_old_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg, &jobj_old_seg))
			goto err;
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_reenc_seg);
	json_object_put(jobj_old_seg);
	json_object_put(jobj_new_seg);
	json_object_put(jobj_segs_hot);
	return NULL;
}
625
626
/*
 * Build the post-hotzone segment layout for decryption with data shift.
 * When the hotzone was the first segment the whole device becomes one
 * dynamic new-format segment; otherwise the first segment is kept, the
 * decrypted range is extended over the hotzone, and any remaining old
 * segment is carried over. Returns a new json reference or NULL.
 */
static json_object *_dec_create_segments_shift_after(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	uint64_t data_offset)
{
	int reenc_seg, i = 0;
	json_object *jobj_seg_old, *jobj_copy = NULL, *jobj_seg_old_copy = NULL, *jobj_seg_new = NULL,
		    *jobj_segs_post = json_object_new_object();
	unsigned segs;
	uint64_t tmp;

	if (!rh->jobj_segs_hot || !jobj_segs_post)
		goto err;

	segs = json_segments_count(rh->jobj_segs_hot);
	if (segs == 0)
		return jobj_segs_post;

	reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
	if (reenc_seg < 0)
		goto err;

	/* hotzone was the first segment: single dynamic new-format segment remains */
	if (reenc_seg == 0) {
		jobj_seg_new = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, NULL);
		if (!jobj_seg_new || json_object_object_add_by_uint(jobj_segs_post, 0, jobj_seg_new))
			goto err;

		return jobj_segs_post;
	}

	/* keep the leading (moved) segment (extra ref taken) */
	jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, 0);
	if (!jobj_copy)
		goto err;
	json_object_get(jobj_copy);
	if (json_object_object_add_by_uint_by_ref(jobj_segs_post, i++, &jobj_copy))
		goto err;

	if ((jobj_seg_old = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1)))
		jobj_seg_old_copy = json_object_get(jobj_seg_old);

	/* decrypted stretch now covers previous progress plus the finished hotzone */
	tmp = rh->length + rh->progress;
	jobj_seg_new = reencrypt_make_segment_new(cd, hdr, rh, data_offset,
			json_segment_get_size(rh->jobj_segment_moved, 0),
			data_shift_value(&rh->rp),
			jobj_seg_old ? &tmp : NULL);
	if (!jobj_seg_new || json_object_object_add_by_uint_by_ref(jobj_segs_post, i++, &jobj_seg_new))
		goto err;

	if (jobj_seg_old_copy && json_object_object_add_by_uint(jobj_segs_post, i, jobj_seg_old_copy))
		goto err;

	return jobj_segs_post;
err:
	json_object_put(jobj_copy);
	json_object_put(jobj_seg_old_copy);
	json_object_put(jobj_seg_new);
	json_object_put(jobj_segs_post);
	return NULL;
}
685
686
/*
 * Build the hot segment layout for the backward direction: an optional
 * old-format head (first header segment shrunk to the hotzone offset),
 * the in-reencryption hotzone, and an optional new-format tail.
 * Returns a new json reference or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_backward(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		uint64_t device_size,
		uint64_t data_offset)
{
	uint64_t fixed_length, tmp = rh->offset + rh->length;
	json_object *jobj_reenc_seg = NULL, *jobj_new_seg = NULL, *jobj_old_seg = NULL,
		    *jobj_segs_hot = json_object_new_object();
	int sg = 0;

	if (!jobj_segs_hot)
		return NULL;

	if (rh->offset) {
		/* copy the first segment and cap its size at the hotzone offset */
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_old_seg))
			goto err;
		json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(rh->offset));

		if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_old_seg))
			goto err;
	}

	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
	if (!jobj_reenc_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_reenc_seg))
		goto err;

	/* tail already converted to the new format */
	if (tmp < device_size) {
		fixed_length = device_size - tmp;
		jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset + rh->length,
				rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_new_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg, &jobj_new_seg))
			goto err;
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_reenc_seg);
	json_object_put(jobj_new_seg);
	json_object_put(jobj_old_seg);
	json_object_put(jobj_segs_hot);
	return NULL;
}
729
730
static int reencrypt_make_hot_segments(struct crypt_device *cd,
731
    struct luks2_hdr *hdr,
732
    struct luks2_reencrypt *rh,
733
    uint64_t device_size,
734
    uint64_t data_offset)
735
0
{
736
0
  rh->jobj_segs_hot = NULL;
737
738
0
  if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
739
0
      rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
740
0
    log_dbg(cd, "Calculating hot segments for encryption with data move.");
741
0
    rh->jobj_segs_hot = reencrypt_make_hot_segments_encrypt_shift(hdr, rh, data_offset);
742
0
  } else if (rh->mode == CRYPT_REENCRYPT_DECRYPT && rh->direction == CRYPT_REENCRYPT_FORWARD &&
743
0
       rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
744
0
    log_dbg(cd, "Calculating hot segments for decryption with data move.");
745
0
    rh->jobj_segs_hot = reencrypt_make_hot_segments_decrypt_shift(cd, hdr, rh, device_size, data_offset);
746
0
  } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
747
0
    log_dbg(cd, "Calculating hot segments (forward direction).");
748
0
    rh->jobj_segs_hot = reencrypt_make_hot_segments_forward(cd, hdr, rh, device_size, data_offset);
749
0
  } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
750
0
    log_dbg(cd, "Calculating hot segments (backward direction).");
751
0
    rh->jobj_segs_hot = reencrypt_make_hot_segments_backward(cd, hdr, rh, device_size, data_offset);
752
0
  }
753
754
0
  return rh->jobj_segs_hot ? 0 : -EINVAL;
755
0
}
756
757
static int reencrypt_make_post_segments(struct crypt_device *cd,
758
    struct luks2_hdr *hdr,
759
    struct luks2_reencrypt *rh,
760
    uint64_t data_offset)
761
0
{
762
0
  rh->jobj_segs_post = NULL;
763
764
0
  if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
765
0
      rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
766
0
    log_dbg(cd, "Calculating post segments for encryption with data move.");
767
0
    rh->jobj_segs_post = _enc_create_segments_shift_after(rh, data_offset);
768
0
  } else if (rh->mode == CRYPT_REENCRYPT_DECRYPT && rh->direction == CRYPT_REENCRYPT_FORWARD &&
769
0
       rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
770
0
    log_dbg(cd, "Calculating post segments for decryption with data move.");
771
0
    rh->jobj_segs_post = _dec_create_segments_shift_after(cd, hdr, rh, data_offset);
772
0
  } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
773
0
    log_dbg(cd, "Calculating post segments (forward direction).");
774
0
    rh->jobj_segs_post = reencrypt_make_post_segments_forward(cd, hdr, rh, data_offset);
775
0
  } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
776
0
    log_dbg(cd, "Calculating segments (backward direction).");
777
0
    rh->jobj_segs_post = reencrypt_make_post_segments_backward(cd, hdr, rh, data_offset);
778
0
  }
779
780
0
  return rh->jobj_segs_post ? 0 : -EINVAL;
781
0
}
782
#endif
783
784
static uint64_t reencrypt_data_shift(struct luks2_hdr *hdr)
785
0
{
786
0
  json_object *jobj_keyslot, *jobj_area, *jobj_data_shift;
787
0
  int ks = LUKS2_find_keyslot(hdr, "reencrypt");
788
789
0
  if (ks < 0)
790
0
    return 0;
791
792
0
  jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
793
794
0
  json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
795
0
  if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj_data_shift))
796
0
    return 0;
797
798
0
  return crypt_jobj_get_uint64(jobj_data_shift);
799
0
}
800
801
static crypt_reencrypt_mode_info reencrypt_mode(struct luks2_hdr *hdr)
802
0
{
803
0
  const char *mode;
804
0
  crypt_reencrypt_mode_info mi = CRYPT_REENCRYPT_REENCRYPT;
805
0
  json_object *jobj_keyslot, *jobj_mode;
806
807
0
  jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
808
0
  if (!jobj_keyslot)
809
0
    return mi;
810
811
0
  json_object_object_get_ex(jobj_keyslot, "mode", &jobj_mode);
812
0
  mode = json_object_get_string(jobj_mode);
813
814
  /* validation enforces allowed values */
815
0
  if (!strcmp(mode, "encrypt"))
816
0
    mi = CRYPT_REENCRYPT_ENCRYPT;
817
0
  else if (!strcmp(mode, "decrypt"))
818
0
    mi = CRYPT_REENCRYPT_DECRYPT;
819
820
0
  return mi;
821
0
}
822
823
static crypt_reencrypt_direction_info reencrypt_direction(struct luks2_hdr *hdr)
824
0
{
825
0
  const char *value;
826
0
  json_object *jobj_keyslot, *jobj_mode;
827
0
  crypt_reencrypt_direction_info di = CRYPT_REENCRYPT_FORWARD;
828
829
0
  jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
830
0
  if (!jobj_keyslot)
831
0
    return di;
832
833
0
  json_object_object_get_ex(jobj_keyslot, "direction", &jobj_mode);
834
0
  value = json_object_get_string(jobj_mode);
835
836
  /* validation enforces allowed values */
837
0
  if (strcmp(value, "forward"))
838
0
    di = CRYPT_REENCRYPT_BACKWARD;
839
840
0
  return di;
841
0
}
842
843
/* Outcome of a single reencryption step; drives the recovery strategy. */
typedef enum { REENC_OK = 0,
	/*
	 * The state not requiring LUKS2 reencryption recovery. We can rollback
	 * to last known safe state (hold in memory since last metadata write)
	 * and teardown reencryption device stack (if used).
	 * The reencryption fails but does not require recovery.
	 */
	       REENC_ERR_ROLLBACK_MEMORY,
	/*
	 * Error while writing hotzone (short write or sync fail) or failed
	 * metadata update after the hotzone write. On-disk state is unknown,
	 * so full recovery is required.
	 */
	       REENC_ERR_FATAL
} reenc_status_t;
857
858
void LUKS2_reencrypt_protection_erase(struct reenc_protection *rp)
859
0
{
860
0
  if (!rp || rp->type != REENC_PROTECTION_CHECKSUM)
861
0
    return;
862
863
0
  if (rp->p.csum.ch) {
864
0
    crypt_hash_destroy(rp->p.csum.ch);
865
0
    rp->p.csum.ch = NULL;
866
0
  }
867
868
0
  if (rp->p.csum.checksums) {
869
0
    crypt_safe_memzero(rp->p.csum.checksums, rp->p.csum.checksums_len);
870
0
    free(rp->p.csum.checksums);
871
0
    rp->p.csum.checksums = NULL;
872
0
  }
873
0
}
874
875
/*
 * Release all resources held by an in-memory reencryption context.
 * Safe to call with rh == NULL. Order matters: protection buffers are
 * wiped first, the exclusive data-device claim and the reencryption lock
 * are dropped last.
 */
void LUKS2_reencrypt_free(struct crypt_device *cd, struct luks2_reencrypt *rh)
{
  if (!rh)
    return;

  /* wipe checksum-protection buffers (they may hold data checksums) */
  LUKS2_reencrypt_protection_erase(&rh->rp);
  LUKS2_reencrypt_protection_erase(&rh->rp_moved_segment);

  /* drop JSON references; pointers nulled defensively */
  json_object_put(rh->jobj_segs_hot);
  rh->jobj_segs_hot = NULL;
  json_object_put(rh->jobj_segs_post);
  rh->jobj_segs_post = NULL;
  json_object_put(rh->jobj_segment_old);
  rh->jobj_segment_old = NULL;
  json_object_put(rh->jobj_segment_new);
  rh->jobj_segment_new = NULL;
  json_object_put(rh->jobj_segment_moved);
  rh->jobj_segment_moved = NULL;

  free(rh->reenc_buffer);
  rh->reenc_buffer = NULL;
  crypt_storage_wrapper_destroy(rh->cw1);
  rh->cw1 = NULL;
  crypt_storage_wrapper_destroy(rh->cw2);
  rh->cw2 = NULL;
  device_free(cd, rh->hotzone_device);
  rh->hotzone_device = NULL;

  free(rh->device_name);
  free(rh->overlay_name);
  free(rh->hotzone_name);
  /* remove volume keys uploaded to the kernel keyring before freeing them */
  crypt_drop_uploaded_keyring_key(cd, rh->vks);
  crypt_free_volume_key(rh->vks);
  device_release_excl(cd, crypt_data_device(cd));
  crypt_unlock_internal(cd, rh->reenc_lock);
  free(rh);
}
912
913
#if USE_LUKS2_REENCRYPTION
914
int LUKS2_reencrypt_max_hotzone_size(struct crypt_device *cd __attribute__((unused)),
915
  struct luks2_hdr *hdr,
916
  const struct reenc_protection *rp,
917
  int reencrypt_keyslot,
918
  uint64_t *r_length)
919
0
{
920
0
  int r;
921
0
  uint64_t dummy, area_length;
922
923
0
  assert(hdr);
924
0
  assert(rp);
925
0
  assert(r_length);
926
927
0
  if (rp->type <= REENC_PROTECTION_NONE) {
928
0
    *r_length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;
929
0
    return 0;
930
0
  }
931
932
0
  if (rp->type == REENC_PROTECTION_DATASHIFT) {
933
0
    *r_length = rp->p.ds.data_shift;
934
0
    return 0;
935
0
  }
936
937
0
  r = LUKS2_keyslot_area(hdr, reencrypt_keyslot, &dummy, &area_length);
938
0
  if (r < 0)
939
0
    return -EINVAL;
940
941
0
  if (rp->type == REENC_PROTECTION_JOURNAL) {
942
0
    *r_length = area_length;
943
0
    return 0;
944
0
  }
945
946
0
  if (rp->type == REENC_PROTECTION_CHECKSUM) {
947
0
    *r_length = (area_length / rp->p.csum.hash_size) * rp->p.csum.block_size;
948
0
    return 0;
949
0
  }
950
951
0
  return -EINVAL;
952
0
}
953
954
/* Required hotzone alignment: the maximum of the data device block size
 * and both (old and new) encryption sector sizes. */
static size_t reencrypt_get_alignment(struct crypt_device *cd,
    struct luks2_hdr *hdr)
{
  size_t alignment = device_block_size(cd, crypt_data_device(cd));
  size_t sector_size_old = reencrypt_get_sector_size_old(hdr);
  size_t sector_size_new = reencrypt_get_sector_size_new(hdr);

  if (sector_size_old > alignment)
    alignment = sector_size_old;
  if (sector_size_new > alignment)
    alignment = sector_size_new;

  return alignment;
}
968
969
/* returns void because it must not fail on valid LUKS2 header */
970
static void _load_backup_segments(struct luks2_hdr *hdr,
971
    struct luks2_reencrypt *rh)
972
0
{
973
0
  int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
974
975
0
  if (segment >= 0) {
976
0
    rh->jobj_segment_new = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
977
0
    rh->digest_new = LUKS2_digest_by_segment(hdr, segment);
978
0
  } else {
979
0
    rh->jobj_segment_new = NULL;
980
0
    rh->digest_new = -ENOENT;
981
0
  }
982
983
0
  segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
984
0
  if (segment >= 0) {
985
0
    rh->jobj_segment_old = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
986
0
    rh->digest_old = LUKS2_digest_by_segment(hdr, segment);
987
0
  } else {
988
0
    rh->jobj_segment_old = NULL;
989
0
    rh->digest_old = -ENOENT;
990
0
  }
991
992
0
  segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
993
0
  if (segment >= 0)
994
0
    rh->jobj_segment_moved = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
995
0
  else
996
0
    rh->jobj_segment_moved = NULL;
997
0
}
998
999
/*
 * Compute the starting offset (and possibly shrink the hotzone length)
 * for backward reencryption with a data shift and a moved segment.
 * All offsets are relative to the data area start.
 * Returns 0 on success, -EINVAL on inconsistent segment layout.
 */
static int reencrypt_offset_backward_moved(struct luks2_hdr *hdr, json_object *jobj_segments,
             uint64_t *reencrypt_length, uint64_t data_shift, uint64_t *offset)
{
  uint64_t tmp, linear_length = 0;
  int sg, segs = json_segments_count(jobj_segments);

  /* find reencrypt offset with data shift */
  for (sg = 0; sg < segs; sg++)
    if (LUKS2_segment_is_type(hdr, sg, "linear"))
      linear_length += LUKS2_segment_size(hdr, sg, 0);

  /* all active linear segments length */
  if (linear_length && segs > 1) {
    /* remaining plaintext must cover at least one data shift */
    if (linear_length < data_shift)
      return -EINVAL;
    tmp = linear_length - data_shift;
    if (tmp && tmp < data_shift) {
      /* last partial zone: shrink hotzone to the remainder */
      *offset = data_shift;
      *reencrypt_length = tmp;
    } else
      *offset = tmp;
    return 0;
  }

  /* single segment left: reencryption finished up to the start */
  if (segs == 1) {
    *offset = 0;
    return 0;
  }

  /* should be unreachable */

  return -EINVAL;
}
1032
1033
/* Forward decryption offset when a moved segment backup exists:
 * start at the last crypt segment, shifted back by data_shift. */
static int reencrypt_offset_forward_moved(struct luks2_hdr *hdr,
  uint64_t data_shift,
  uint64_t *offset)
{
  int last_crypt_seg = LUKS2_last_segment_by_type(hdr, "crypt");

  /* no crypt segment (or it is the first one): start at the beginning */
  *offset = (last_crypt_seg <= 0) ? 0 :
      LUKS2_segment_offset(hdr, last_crypt_seg, 0) - data_shift;

  return 0;
}
1048
1049
/* Forward reencryption offset from the active segment layout:
 * one segment means nothing is done yet (offset 0); two segments mean
 * the first (already reencrypted) segment's size is the offset. */
static int _offset_forward(json_object *jobj_segments, uint64_t *offset)
{
  switch (json_segments_count(jobj_segments)) {
  case 1:
    *offset = 0;
    return 0;
  case 2:
    *offset = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
    /* a zero-sized leading segment is invalid */
    return *offset ? 0 : -EINVAL;
  default:
    return -EINVAL;
  }
}
1064
1065
/* Backward reencryption offset: the hotzone ends at the boundary of the
 * not-yet-processed area; *length may be shrunk to fit that area. */
static int _offset_backward(json_object *jobj_segments, uint64_t device_size, uint64_t *length, uint64_t *offset)
{
  uint64_t remaining;

  switch (json_segments_count(jobj_segments)) {
  case 1:
    remaining = device_size;
    break;
  case 2:
    remaining = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
    break;
  default:
    return -EINVAL;
  }

  if (remaining < *length)
    *length = remaining;
  *offset = remaining - *length;

  return 0;
}
1084
1085
/* Offsets are always relative to the LUKS2 data offset. */
/* The caller must pass an already validated LUKS2 header. */
1087
/*
 * Determine the next hotzone offset (relative to the data offset) and
 * possibly clamp *reencrypt_length. Dispatches on direction and on
 * whether a moved-segment backup exists (header-moved encrypt/decrypt).
 * Returns 0 on success, -EINVAL on invalid layout or arguments.
 */
static int reencrypt_offset(struct luks2_hdr *hdr,
    crypt_reencrypt_direction_info di,
    uint64_t device_size,
    uint64_t *reencrypt_length,
    uint64_t *offset)
{
  int r, sg;
  json_object *jobj_segments;
  uint64_t data_shift = reencrypt_data_shift(hdr);

  if (!offset)
    return -EINVAL;

  /* if there's segment in reencryption return directly offset of it */
  json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments);
  sg = json_segments_segment_in_reencrypt(jobj_segments);
  if (sg >= 0) {
    *offset = LUKS2_segment_offset(hdr, sg, 0) - (reencrypt_get_data_offset_new(hdr));
    return 0;
  }

  if (di == CRYPT_REENCRYPT_FORWARD) {
    /* decrypt with moved segment: offset derived from last crypt segment */
    if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_DECRYPT &&
        LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0) {
      r = reencrypt_offset_forward_moved(hdr, data_shift, offset);
      /* clamp to device size so the caller sees a finished state */
      if (!r && *offset > device_size)
        *offset = device_size;
      return r;
    }
    return _offset_forward(jobj_segments, offset);
  } else if (di == CRYPT_REENCRYPT_BACKWARD) {
    /* encrypt with moved segment: special backward calculation */
    if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_ENCRYPT &&
        LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
      return reencrypt_offset_backward_moved(hdr, jobj_segments, reencrypt_length, data_shift, offset);
    return _offset_backward(jobj_segments, device_size, reencrypt_length, offset);
  }

  return -EINVAL;
}
1126
1127
/*
 * Compute the hotzone length for one reencryption step.
 * Starting point depends on protection type, then a hard limit, a soft
 * memory limit, the caller's maximum, the required alignment and finally
 * the device's optimal I/O size are applied in that order.
 * Returns 0 when no aligned length fits (caller reports the error).
 */
static uint64_t reencrypt_length(struct crypt_device *cd,
    struct reenc_protection *rp,
    uint64_t keyslot_area_length,
    uint64_t length_max,
    size_t alignment)
{
  unsigned long dummy, optimal_alignment;
  uint64_t length, soft_mem_limit;

  if (rp->type == REENC_PROTECTION_NONE)
    length = length_max ?: LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;
  else if (rp->type == REENC_PROTECTION_CHECKSUM)
    /* limited by how many block checksums fit in the keyslot area */
    length = (keyslot_area_length / rp->p.csum.hash_size) * rp->p.csum.block_size;
  else if (rp->type == REENC_PROTECTION_DATASHIFT)
    /* datashift always moves exactly one shift worth of data */
    return rp->p.ds.data_shift;
  else
    length = keyslot_area_length;

  /* hard limit */
  if (length > LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH)
    length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;

  /* soft limit is 1/4 of system memory */
  soft_mem_limit = crypt_getphysmemory_kb() << 8; /* multiply by (1024/4) */

  if (soft_mem_limit && length > soft_mem_limit)
    length = soft_mem_limit;

  if (length_max && length > length_max)
    length = length_max;

  length -= (length % alignment);

  /* Emits error later */
  if (!length)
    return length;

  device_topology_alignment(cd, crypt_data_device(cd), &optimal_alignment, &dummy, length);

  /* we have to stick with encryption sector size alignment */
  if (optimal_alignment % alignment)
    return length;

  /* align to opt-io size only if remaining size allows it */
  if (length > optimal_alignment)
    length -= (length % optimal_alignment);

  return length;
}
1176
1177
/*
 * Initialize the in-memory reencryption context from LUKS2 metadata:
 * locate the reencrypt keyslot, load both protection descriptors,
 * derive alignment, hotzone length/offset and progress, and cache the
 * backup segments. device_size, max_hotzone_size and fixed_device_size
 * are in bytes/sectors as used below (sizes are shifted by SECTOR_SHIFT).
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_context_init(struct crypt_device *cd,
  struct luks2_hdr *hdr,
  struct luks2_reencrypt *rh,
  uint64_t device_size,
  uint64_t max_hotzone_size,
  uint64_t fixed_device_size)
{
  int r;
  size_t alignment;
  uint64_t dummy, area_length;

  rh->reenc_keyslot = LUKS2_find_keyslot(hdr, "reencrypt");
  if (rh->reenc_keyslot < 0)
    return -EINVAL;
  if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &dummy, &area_length) < 0)
    return -EINVAL;

  rh->mode = reencrypt_mode(hdr);

  rh->direction = reencrypt_direction(hdr);

  /* primary hotzone protection (mandatory) */
  r = LUKS2_keyslot_reencrypt_load(cd, hdr, rh->reenc_keyslot, &rh->rp, true);
  if (r < 0)
    return r;

  /* checksum protection dictates its own block alignment */
  if (rh->rp.type == REENC_PROTECTION_CHECKSUM)
    alignment = rh->rp.p.csum.block_size;
  else
    alignment = reencrypt_get_alignment(cd, hdr);

  if (!alignment)
    return -EINVAL;

  if ((max_hotzone_size << SECTOR_SHIFT) % alignment) {
    log_err(cd, _("Hotzone size must be multiple of calculated zone alignment (%zu bytes)."), alignment);
    return -EINVAL;
  }

  if ((fixed_device_size << SECTOR_SHIFT) % alignment) {
    log_err(cd, _("Device size must be multiple of calculated zone alignment (%zu bytes)."), alignment);
    return -EINVAL;
  }

  if (fixed_device_size) {
    log_dbg(cd, "Switching reencryption to fixed size mode.");
    device_size = fixed_device_size << SECTOR_SHIFT;
    rh->fixed_length = true;
  } else
    rh->fixed_length = false;

  rh->length = reencrypt_length(cd, &rh->rp, area_length, max_hotzone_size << SECTOR_SHIFT, alignment);
  if (!rh->length) {
    log_dbg(cd, "Invalid reencryption length.");
    return -EINVAL;
  }

  if (reencrypt_offset(hdr, rh->direction, device_size, &rh->length, &rh->offset)) {
    log_dbg(cd, "Failed to get reencryption offset.");
    return -EINVAL;
  }

  if (rh->offset > device_size)
    return -EINVAL;
  /* clamp hotzone to the device end */
  if (rh->length > device_size - rh->offset)
    rh->length = device_size - rh->offset;

  _load_backup_segments(hdr, rh);

  /* secondary (moved segment) protection is optional */
  r = LUKS2_keyslot_reencrypt_load(cd, hdr, rh->reenc_keyslot, &rh->rp_moved_segment, false);
  if (r < 0)
    return r;

  if (rh->rp_moved_segment.type == REENC_PROTECTION_NOT_SET)
    log_dbg(cd, "No moved segment resilience configured.");

  /* progress: bytes already processed, depends on direction and layout */
  if (rh->direction == CRYPT_REENCRYPT_BACKWARD)
    rh->progress = device_size - rh->offset - rh->length;
  else if (rh->jobj_segment_moved && rh->direction == CRYPT_REENCRYPT_FORWARD) {
    if (rh->offset == json_segment_get_offset(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"), false))
      rh->progress = device_size - json_segment_get_size(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"), false);
    else
      rh->progress = rh->offset - json_segment_get_size(rh->jobj_segment_moved, 0);
  } else
    rh->progress = rh->offset;

  log_dbg(cd, "reencrypt-direction: %s", rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward");
  log_dbg(cd, "backup-previous digest id: %d", rh->digest_old);
  log_dbg(cd, "backup-final digest id: %d", rh->digest_new);
  log_dbg(cd, "reencrypt length: %" PRIu64, rh->length);
  log_dbg(cd, "reencrypt offset: %" PRIu64, rh->offset);
  log_dbg(cd, "reencrypt shift: %s%" PRIu64,
    (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->direction == CRYPT_REENCRYPT_BACKWARD ? "-" : ""),
    data_shift_value(&rh->rp));
  log_dbg(cd, "reencrypt alignment: %zu", alignment);
  log_dbg(cd, "reencrypt progress: %" PRIu64, rh->progress);

  rh->device_size = device_size;

  /* hotzone shorter than a sector cannot be processed */
  return rh->length < 512 ? -EINVAL : 0;
}
1277
1278
static size_t reencrypt_buffer_length(struct luks2_reencrypt *rh)
1279
0
{
1280
0
  if (rh->rp.type == REENC_PROTECTION_DATASHIFT)
1281
0
    return data_shift_value(&rh->rp);
1282
0
  return rh->length;
1283
0
}
1284
1285
static int reencrypt_load_clean(struct crypt_device *cd,
1286
  struct luks2_hdr *hdr,
1287
  uint64_t device_size,
1288
  uint64_t max_hotzone_size,
1289
  uint64_t fixed_device_size,
1290
  struct luks2_reencrypt **rh)
1291
0
{
1292
0
  int r;
1293
0
  struct luks2_reencrypt *tmp = crypt_zalloc(sizeof (*tmp));
1294
1295
0
  if (!tmp)
1296
0
    return -ENOMEM;
1297
1298
0
  log_dbg(cd, "Loading stored reencryption context.");
1299
1300
0
  r = reencrypt_context_init(cd, hdr, tmp, device_size, max_hotzone_size, fixed_device_size);
1301
0
  if (r)
1302
0
    goto err;
1303
1304
0
  if (posix_memalign(&tmp->reenc_buffer, device_alignment(crypt_data_device(cd)),
1305
0
         reencrypt_buffer_length(tmp))) {
1306
0
    r = -ENOMEM;
1307
0
    goto err;
1308
0
  }
1309
1310
0
  *rh = tmp;
1311
1312
0
  return 0;
1313
0
err:
1314
0
  LUKS2_reencrypt_free(cd, tmp);
1315
1316
0
  return r;
1317
0
}
1318
1319
static int reencrypt_make_segments(struct crypt_device *cd,
1320
  struct luks2_hdr *hdr,
1321
  struct luks2_reencrypt *rh,
1322
  uint64_t device_size)
1323
0
{
1324
0
  int r;
1325
0
  uint64_t data_offset = reencrypt_get_data_offset_new(hdr);
1326
1327
0
  log_dbg(cd, "Calculating segments.");
1328
1329
0
  r = reencrypt_make_hot_segments(cd, hdr, rh, device_size, data_offset);
1330
0
  if (!r) {
1331
0
    r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
1332
0
    if (r)
1333
0
      json_object_put(rh->jobj_segs_hot);
1334
0
  }
1335
1336
0
  if (r)
1337
0
    log_dbg(cd, "Failed to make reencryption segments.");
1338
1339
0
  return r;
1340
0
}
1341
1342
/*
 * Rebuild the hot segment set after a crash: the on-disk (non-backup)
 * segments already describe the interrupted hot state, so copy them and
 * derive only the post segments. Returns 0 or negative errno.
 */
static int reencrypt_make_segments_crashed(struct crypt_device *cd,
        struct luks2_hdr *hdr,
              struct luks2_reencrypt *rh)
{
  int r;
  uint64_t data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;

  if (!rh)
    return -EINVAL;

  rh->jobj_segs_hot = json_object_new_object();
  if (!rh->jobj_segs_hot)
    return -ENOMEM;

  /* copy all active segments; backup segments are not part of hot state */
  json_object_object_foreach(LUKS2_get_segments_jobj(hdr), key, val) {
    if (json_segment_is_backup(val))
      continue;
    json_object_object_add(rh->jobj_segs_hot, key, json_object_get(val));
  }

  r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
  if (r) {
    json_object_put(rh->jobj_segs_hot);
    rh->jobj_segs_hot = NULL;
  }

  return r;
}
1370
1371
static int reencrypt_load_crashed(struct crypt_device *cd,
1372
  struct luks2_hdr *hdr, uint64_t device_size, struct luks2_reencrypt **rh)
1373
0
{
1374
0
  bool dynamic;
1375
0
  uint64_t required_device_size;
1376
0
  int r, reenc_seg;
1377
1378
0
  if (LUKS2_get_data_size(hdr, &required_device_size, &dynamic))
1379
0
    return -EINVAL;
1380
1381
0
  if (dynamic)
1382
0
    required_device_size = 0;
1383
0
  else
1384
0
    required_device_size >>= SECTOR_SHIFT;
1385
1386
0
  r = reencrypt_load_clean(cd, hdr, device_size, 0, required_device_size, rh);
1387
1388
0
  if (!r) {
1389
0
    reenc_seg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
1390
0
    if (reenc_seg < 0)
1391
0
      r = -EINVAL;
1392
0
    else
1393
0
      (*rh)->length = LUKS2_segment_size(hdr, reenc_seg, 0);
1394
0
  }
1395
1396
0
  if (!r)
1397
0
    r = reencrypt_make_segments_crashed(cd, hdr, *rh);
1398
1399
0
  if (r) {
1400
0
    LUKS2_reencrypt_free(cd, *rh);
1401
0
    *rh = NULL;
1402
0
  }
1403
0
  return r;
1404
0
}
1405
1406
/*
 * Initialize the two storage wrappers used during reencryption:
 * cw1 reads the old (source) segment read-only, cw2 writes the new
 * (target) segment. Kernel crypto API backend is disabled for non-root
 * users. Returns 0 or negative errno from wrapper init.
 */
static int reencrypt_init_storage_wrappers(struct crypt_device *cd,
    struct luks2_hdr *hdr,
    struct luks2_reencrypt *rh,
    struct volume_key *vks)
{
  int r;
  struct volume_key *vk;
  /* kcapi backend needs root; disable it otherwise */
  uint32_t wrapper_flags = (getuid() || geteuid()) ? 0 : DISABLE_KCAPI;

  vk = crypt_volume_key_by_id(vks, rh->digest_old);
  r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
      reencrypt_get_data_offset_old(hdr),
      crypt_get_iv_offset(cd),
      reencrypt_get_sector_size_old(hdr),
      reencrypt_segment_cipher_old(hdr),
      vk, wrapper_flags | OPEN_READONLY);
  if (r) {
    log_err(cd, _("Failed to initialize old segment storage wrapper."));
    return r;
  }
  rh->wflags1 = wrapper_flags | OPEN_READONLY;
  log_dbg(cd, "Old cipher storage wrapper type: %d.", crypt_storage_wrapper_get_type(rh->cw1));

  vk = crypt_volume_key_by_id(vks, rh->digest_new);
  r = crypt_storage_wrapper_init(cd, &rh->cw2, crypt_data_device(cd),
      reencrypt_get_data_offset_new(hdr),
      crypt_get_iv_offset(cd),
      reencrypt_get_sector_size_new(hdr),
      reencrypt_segment_cipher_new(hdr),
      vk, wrapper_flags);
  if (r) {
    log_err(cd, _("Failed to initialize new segment storage wrapper."));
    return r;
  }
  rh->wflags2 = wrapper_flags;
  log_dbg(cd, "New cipher storage wrapper type: %d", crypt_storage_wrapper_get_type(rh->cw2));

  return 0;
}
1445
1446
static int reencrypt_context_set_names(struct luks2_reencrypt *rh, const char *name)
1447
0
{
1448
0
  if (!rh || !name)
1449
0
    return -EINVAL;
1450
1451
0
  if (*name == '/') {
1452
0
    if (!(rh->device_name = dm_device_name(name)))
1453
0
      return -EINVAL;
1454
0
  } else if (!(rh->device_name = strdup(name)))
1455
0
    return -ENOMEM;
1456
1457
0
  if (asprintf(&rh->hotzone_name, "%s-hotzone-%s", rh->device_name,
1458
0
         rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward") < 0) {
1459
0
    rh->hotzone_name = NULL;
1460
0
    return -ENOMEM;
1461
0
  }
1462
0
  if (asprintf(&rh->overlay_name, "%s-overlay", rh->device_name) < 0) {
1463
0
    rh->overlay_name = NULL;
1464
0
    return -ENOMEM;
1465
0
  }
1466
1467
0
  rh->online = true;
1468
0
  return 0;
1469
0
}
1470
1471
/* Shift *offset by data_shift in the given direction.
 * Forward shifts decrease the offset and must not underflow. */
static int modify_offset(uint64_t *offset, uint64_t data_shift, crypt_reencrypt_direction_info di)
{
  if (!offset)
    return -EINVAL;

  switch (di) {
  case CRYPT_REENCRYPT_FORWARD:
    if (*offset < data_shift)
      return -EINVAL;
    *offset -= data_shift;
    return 0;
  case CRYPT_REENCRYPT_BACKWARD:
    *offset += data_shift;
    return 0;
  default:
    return -EINVAL;
  }
}
1490
1491
static int reencrypt_update_flag(struct crypt_device *cd, uint8_t version,
1492
  bool enable, bool commit)
1493
0
{
1494
0
  uint32_t reqs;
1495
0
  struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
1496
1497
0
  if (enable) {
1498
0
    log_dbg(cd, "Going to store reencryption requirement flag (version: %u).", version);
1499
0
    return LUKS2_config_set_requirement_version(cd, hdr, CRYPT_REQUIREMENT_ONLINE_REENCRYPT, version, commit);
1500
0
  }
1501
1502
0
  LUKS2_config_get_requirements(cd, hdr, &reqs);
1503
1504
0
  reqs &= ~CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
1505
1506
0
  log_dbg(cd, "Going to wipe reencryption requirement flag.");
1507
1508
0
  return LUKS2_config_set_requirements(cd, hdr, reqs, commit);
1509
0
}
1510
1511
static int reencrypt_hotzone_protect_ready(struct crypt_device *cd,
1512
  struct reenc_protection *rp)
1513
0
{
1514
0
  assert(rp);
1515
1516
0
  if (rp->type == REENC_PROTECTION_NOT_SET)
1517
0
    return -EINVAL;
1518
1519
0
  if (rp->type != REENC_PROTECTION_CHECKSUM)
1520
0
    return 0;
1521
1522
0
  if (!rp->p.csum.checksums) {
1523
0
    log_dbg(cd, "Allocating buffer for storing resilience checksums.");
1524
0
    if (posix_memalign(&rp->p.csum.checksums, device_alignment(crypt_metadata_device(cd)),
1525
0
           rp->p.csum.checksums_len))
1526
0
      return -ENOMEM;
1527
0
  }
1528
1529
0
  return 0;
1530
0
}
1531
1532
static int reencrypt_recover_segment(struct crypt_device *cd,
1533
  struct luks2_hdr *hdr,
1534
  struct luks2_reencrypt *rh,
1535
  struct volume_key *vks)
1536
0
{
1537
0
  struct volume_key *vk_old, *vk_new;
1538
0
  size_t count, s;
1539
0
  ssize_t read, w;
1540
0
  struct reenc_protection *rp;
1541
0
  int devfd, r, new_sector_size, old_sector_size, rseg;
1542
0
  uint64_t area_offset, area_length, area_length_read, crash_iv_offset,
1543
0
     data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
1544
0
  char *checksum_tmp = NULL, *data_buffer = NULL;
1545
0
  struct crypt_storage_wrapper *cw1 = NULL, *cw2 = NULL;
1546
1547
0
  assert(hdr);
1548
0
  assert(rh);
1549
0
  assert(vks);
1550
1551
0
  rseg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
1552
0
  if (rh->offset == 0 && rh->rp_moved_segment.type > REENC_PROTECTION_NOT_SET) {
1553
0
    log_dbg(cd, "Recovery using moved segment protection.");
1554
0
    rp = &rh->rp_moved_segment;
1555
0
  } else
1556
0
    rp = &rh->rp;
1557
1558
0
  if (rseg < 0 || rh->length < 512)
1559
0
    return -EINVAL;
1560
1561
0
  r = reencrypt_hotzone_protect_ready(cd, rp);
1562
0
  if (r) {
1563
0
    log_err(cd, _("Failed to initialize hotzone protection."));
1564
0
    return -EINVAL;
1565
0
  }
1566
1567
0
  vk_new = crypt_volume_key_by_id(vks, rh->digest_new);
1568
0
  if (!vk_new && rh->mode != CRYPT_REENCRYPT_DECRYPT)
1569
0
    return -EINVAL;
1570
0
  vk_old = crypt_volume_key_by_id(vks, rh->digest_old);
1571
0
  if (!vk_old && rh->mode != CRYPT_REENCRYPT_ENCRYPT)
1572
0
    return -EINVAL;
1573
0
  old_sector_size = json_segment_get_sector_size(reencrypt_segment_old(hdr));
1574
0
  new_sector_size = json_segment_get_sector_size(reencrypt_segment_new(hdr));
1575
0
  if (rh->mode == CRYPT_REENCRYPT_DECRYPT)
1576
0
    crash_iv_offset = rh->offset >> SECTOR_SHIFT; /* TODO: + old iv_tweak */
1577
0
  else
1578
0
    crash_iv_offset = json_segment_get_iv_offset(json_segments_get_segment(rh->jobj_segs_hot, rseg));
1579
1580
0
  log_dbg(cd, "crash_offset: %" PRIu64 ", crash_length: %" PRIu64 ",  crash_iv_offset: %" PRIu64,
1581
0
    data_offset + rh->offset, rh->length, crash_iv_offset);
1582
1583
0
  r = crypt_storage_wrapper_init(cd, &cw2, crypt_data_device(cd),
1584
0
      data_offset + rh->offset, crash_iv_offset, new_sector_size,
1585
0
      reencrypt_segment_cipher_new(hdr), vk_new, 0);
1586
0
  if (r) {
1587
0
    log_err(cd, _("Failed to initialize new segment storage wrapper."));
1588
0
    return r;
1589
0
  }
1590
1591
0
  if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &area_offset, &area_length)) {
1592
0
    r = -EINVAL;
1593
0
    goto out;
1594
0
  }
1595
1596
0
  if (posix_memalign((void**)&data_buffer, device_alignment(crypt_data_device(cd)), rh->length)) {
1597
0
    r = -ENOMEM;
1598
0
    goto out;
1599
0
  }
1600
1601
0
  switch (rp->type) {
1602
0
  case  REENC_PROTECTION_CHECKSUM:
1603
0
    log_dbg(cd, "Checksums based recovery.");
1604
1605
0
    r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1606
0
        data_offset + rh->offset, crash_iv_offset, old_sector_size,
1607
0
        reencrypt_segment_cipher_old(hdr), vk_old, 0);
1608
0
    if (r) {
1609
0
      log_err(cd, _("Failed to initialize old segment storage wrapper."));
1610
0
      goto out;
1611
0
    }
1612
1613
0
    count = rh->length / rp->p.csum.block_size;
1614
0
    area_length_read = count * rp->p.csum.hash_size;
1615
0
    if (area_length_read > area_length) {
1616
0
      log_dbg(cd, "Internal error in calculated area_length.");
1617
0
      r = -EINVAL;
1618
0
      goto out;
1619
0
    }
1620
1621
0
    checksum_tmp = malloc(rp->p.csum.hash_size);
1622
0
    if (!checksum_tmp) {
1623
0
      r = -ENOMEM;
1624
0
      goto out;
1625
0
    }
1626
1627
    /* TODO: lock for read */
1628
0
    devfd = device_open(cd, crypt_metadata_device(cd), O_RDONLY);
1629
0
    if (devfd < 0)
1630
0
      goto out;
1631
1632
    /* read old data checksums */
1633
0
    read = read_lseek_blockwise(devfd, device_block_size(cd, crypt_metadata_device(cd)),
1634
0
          device_alignment(crypt_metadata_device(cd)), rp->p.csum.checksums, area_length_read, area_offset);
1635
0
    if (read < 0 || (size_t)read != area_length_read) {
1636
0
      log_err(cd, _("Failed to read checksums for current hotzone."));
1637
0
      r = -EINVAL;
1638
0
      goto out;
1639
0
    }
1640
1641
0
    read = crypt_storage_wrapper_read(cw2, 0, data_buffer, rh->length);
1642
0
    if (read < 0 || (size_t)read != rh->length) {
1643
0
      log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset + data_offset);
1644
0
      r = -EINVAL;
1645
0
      goto out;
1646
0
    }
1647
1648
0
    for (s = 0; s < count; s++) {
1649
0
      if (crypt_hash_write(rp->p.csum.ch, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size)) {
1650
0
        log_dbg(cd, "Failed to write hash.");
1651
0
        r = -EINVAL;
1652
0
        goto out;
1653
0
      }
1654
0
      if (crypt_hash_final(rp->p.csum.ch, checksum_tmp, rp->p.csum.hash_size)) {
1655
0
        log_dbg(cd, "Failed to finalize hash.");
1656
0
        r = -EINVAL;
1657
0
        goto out;
1658
0
      }
1659
0
      if (!memcmp(checksum_tmp, (char *)rp->p.csum.checksums + (s * rp->p.csum.hash_size), rp->p.csum.hash_size)) {
1660
0
        log_dbg(cd, "Sector %zu (size %zu, offset %zu) needs recovery", s, rp->p.csum.block_size, s * rp->p.csum.block_size);
1661
0
        if (crypt_storage_wrapper_decrypt(cw1, s * rp->p.csum.block_size, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size)) {
1662
0
          log_err(cd, _("Failed to decrypt sector %zu."), s);
1663
0
          r = -EINVAL;
1664
0
          goto out;
1665
0
        }
1666
0
        w = crypt_storage_wrapper_encrypt_write(cw2, s * rp->p.csum.block_size, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size);
1667
0
        if (w < 0 || (size_t)w != rp->p.csum.block_size) {
1668
0
          log_err(cd, _("Failed to recover sector %zu."), s);
1669
0
          r = -EINVAL;
1670
0
          goto out;
1671
0
        }
1672
0
      }
1673
0
    }
1674
1675
0
    r = 0;
1676
0
    break;
1677
0
  case  REENC_PROTECTION_JOURNAL:
1678
0
    log_dbg(cd, "Journal based recovery.");
1679
1680
    /* FIXME: validation candidate */
1681
0
    if (rh->length > area_length) {
1682
0
      r = -EINVAL;
1683
0
      log_dbg(cd, "Invalid journal size.");
1684
0
      goto out;
1685
0
    }
1686
1687
    /* TODO locking */
1688
0
    r = crypt_storage_wrapper_init(cd, &cw1, crypt_metadata_device(cd),
1689
0
        area_offset, crash_iv_offset, old_sector_size,
1690
0
        reencrypt_segment_cipher_old(hdr), vk_old, 0);
1691
0
    if (r) {
1692
0
      log_err(cd, _("Failed to initialize old segment storage wrapper."));
1693
0
      goto out;
1694
0
    }
1695
0
    read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
1696
0
    if (read < 0 || (size_t)read != rh->length) {
1697
0
      log_dbg(cd, "Failed to read journaled data.");
1698
0
      r = -EIO;
1699
      /* may content plaintext */
1700
0
      crypt_safe_memzero(data_buffer, rh->length);
1701
0
      goto out;
1702
0
    }
1703
0
    read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
1704
    /* may content plaintext */
1705
0
    crypt_safe_memzero(data_buffer, rh->length);
1706
0
    if (read < 0 || (size_t)read != rh->length) {
1707
0
      log_dbg(cd, "recovery write failed.");
1708
0
      r = -EINVAL;
1709
0
      goto out;
1710
0
    }
1711
1712
0
    r = 0;
1713
0
    break;
1714
0
  case  REENC_PROTECTION_DATASHIFT:
1715
0
    log_dbg(cd, "Data shift based recovery.");
1716
1717
0
    if (rseg == 0) {
1718
0
      r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1719
0
          json_segment_get_offset(rh->jobj_segment_moved, 0), 0,
1720
0
          reencrypt_get_sector_size_old(hdr),
1721
0
          reencrypt_segment_cipher_old(hdr), vk_old, 0);
1722
0
    } else {
1723
0
      if (rh->direction == CRYPT_REENCRYPT_FORWARD)
1724
0
        data_offset = data_offset + rh->offset + data_shift_value(rp);
1725
0
      else
1726
0
        data_offset = data_offset + rh->offset - data_shift_value(rp);
1727
0
      r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1728
0
          data_offset,
1729
0
          crash_iv_offset,
1730
0
          reencrypt_get_sector_size_old(hdr),
1731
0
          reencrypt_segment_cipher_old(hdr), vk_old, 0);
1732
0
    }
1733
0
    if (r) {
1734
0
      log_err(cd, _("Failed to initialize old segment storage wrapper."));
1735
0
      goto out;
1736
0
    }
1737
1738
0
    read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
1739
0
    if (read < 0 || (size_t)read != rh->length) {
1740
0
      log_dbg(cd, "Failed to read data.");
1741
0
      r = -EIO;
1742
      /* may content plaintext */
1743
0
      crypt_safe_memzero(data_buffer, rh->length);
1744
0
      goto out;
1745
0
    }
1746
1747
0
    read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
1748
    /* may content plaintext */
1749
0
    crypt_safe_memzero(data_buffer, rh->length);
1750
0
    if (read < 0 || (size_t)read != rh->length) {
1751
0
      log_dbg(cd, "recovery write failed.");
1752
0
      r = -EINVAL;
1753
0
      goto out;
1754
0
    }
1755
0
    r = 0;
1756
0
    break;
1757
0
  default:
1758
0
    r = -EINVAL;
1759
0
  }
1760
1761
0
  if (!r)
1762
0
    rh->read = rh->length;
1763
0
out:
1764
0
  free(data_buffer);
1765
0
  free(checksum_tmp);
1766
0
  crypt_storage_wrapper_destroy(cw1);
1767
0
  crypt_storage_wrapper_destroy(cw2);
1768
1769
0
  return r;
1770
0
}
1771
1772
static int reencrypt_add_moved_segment(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
1773
0
{
1774
0
  int digest = rh->digest_old, s = LUKS2_segment_first_unused_id(hdr);
1775
1776
0
  if (!rh->jobj_segment_moved)
1777
0
    return 0;
1778
1779
0
  if (s < 0)
1780
0
    return s;
1781
1782
0
  if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(rh->jobj_segment_moved))) {
1783
0
    json_object_put(rh->jobj_segment_moved);
1784
0
    return -EINVAL;
1785
0
  }
1786
1787
0
  if (!strcmp(json_segment_type(rh->jobj_segment_moved), "crypt"))
1788
0
    return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0);
1789
1790
0
  return 0;
1791
0
}
1792
1793
static int reencrypt_add_backup_segment(struct crypt_device *cd,
1794
    struct luks2_hdr *hdr,
1795
    struct luks2_reencrypt *rh,
1796
    unsigned final)
1797
0
{
1798
0
  int digest, s = LUKS2_segment_first_unused_id(hdr);
1799
0
  json_object *jobj;
1800
1801
0
  if (s < 0)
1802
0
    return s;
1803
1804
0
  digest = final ? rh->digest_new : rh->digest_old;
1805
0
  jobj = final ? rh->jobj_segment_new : rh->jobj_segment_old;
1806
1807
0
  if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(jobj))) {
1808
0
    json_object_put(jobj);
1809
0
    return -EINVAL;
1810
0
  }
1811
1812
0
  if (strcmp(json_segment_type(jobj), "crypt"))
1813
0
    return 0;
1814
1815
0
  return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0);
1816
0
}
1817
1818
static int reencrypt_assign_segments_simple(struct crypt_device *cd,
1819
  struct luks2_hdr *hdr,
1820
  struct luks2_reencrypt *rh,
1821
  unsigned hot,
1822
  unsigned commit)
1823
0
{
1824
0
  int r, sg;
1825
1826
0
  if (hot && json_segments_count(rh->jobj_segs_hot) > 0) {
1827
0
    log_dbg(cd, "Setting 'hot' segments.");
1828
1829
0
    r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
1830
0
    if (!r)
1831
0
      rh->jobj_segs_hot = NULL;
1832
0
  } else if (!hot && json_segments_count(rh->jobj_segs_post) > 0) {
1833
0
    log_dbg(cd, "Setting 'post' segments.");
1834
0
    r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
1835
0
    if (!r)
1836
0
      rh->jobj_segs_post = NULL;
1837
0
  } else {
1838
0
    log_dbg(cd, "No segments to set.");
1839
0
    return -EINVAL;
1840
0
  }
1841
1842
0
  if (r) {
1843
0
    log_dbg(cd, "Failed to assign new enc segments.");
1844
0
    return r;
1845
0
  }
1846
1847
0
  r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
1848
0
  if (r) {
1849
0
    log_dbg(cd, "Failed to assign reencryption previous backup segment.");
1850
0
    return r;
1851
0
  }
1852
1853
0
  r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
1854
0
  if (r) {
1855
0
    log_dbg(cd, "Failed to assign reencryption final backup segment.");
1856
0
    return r;
1857
0
  }
1858
1859
0
  r = reencrypt_add_moved_segment(cd, hdr, rh);
1860
0
  if (r) {
1861
0
    log_dbg(cd, "Failed to assign reencryption moved backup segment.");
1862
0
    return r;
1863
0
  }
1864
1865
0
  for (sg = 0; sg < LUKS2_segments_count(hdr); sg++) {
1866
0
    if (LUKS2_segment_is_type(hdr, sg, "crypt") &&
1867
0
        LUKS2_digest_segment_assign(cd, hdr, sg, rh->mode == CRYPT_REENCRYPT_ENCRYPT ? rh->digest_new : rh->digest_old, 1, 0)) {
1868
0
      log_dbg(cd, "Failed to assign digest %u to segment %u.", rh->digest_new, sg);
1869
0
      return -EINVAL;
1870
0
    }
1871
0
  }
1872
1873
0
  return commit ? LUKS2_hdr_write(cd, hdr) : 0;
1874
0
}
1875
1876
static int reencrypt_assign_segments(struct crypt_device *cd,
1877
    struct luks2_hdr *hdr,
1878
    struct luks2_reencrypt *rh,
1879
    unsigned hot,
1880
    unsigned commit)
1881
0
{
1882
0
  bool forward;
1883
0
  int rseg, scount, r = -EINVAL;
1884
1885
  /* FIXME: validate in reencrypt context load */
1886
0
  if (rh->digest_new < 0 && rh->mode != CRYPT_REENCRYPT_DECRYPT)
1887
0
    return -EINVAL;
1888
1889
0
  if (LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0))
1890
0
    return -EINVAL;
1891
1892
0
  if (rh->mode == CRYPT_REENCRYPT_ENCRYPT || rh->mode == CRYPT_REENCRYPT_DECRYPT)
1893
0
    return reencrypt_assign_segments_simple(cd, hdr, rh, hot, commit);
1894
1895
0
  if (hot && rh->jobj_segs_hot) {
1896
0
    log_dbg(cd, "Setting 'hot' segments.");
1897
1898
0
    r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
1899
0
    if (!r)
1900
0
      rh->jobj_segs_hot = NULL;
1901
0
  } else if (!hot && rh->jobj_segs_post) {
1902
0
    log_dbg(cd, "Setting 'post' segments.");
1903
0
    r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
1904
0
    if (!r)
1905
0
      rh->jobj_segs_post = NULL;
1906
0
  }
1907
1908
0
  if (r)
1909
0
    return r;
1910
1911
0
  scount = LUKS2_segments_count(hdr);
1912
1913
  /* segment in reencryption has to hold reference on both digests */
1914
0
  rseg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
1915
0
  if (rseg < 0 && hot)
1916
0
    return -EINVAL;
1917
1918
0
  if (rseg >= 0) {
1919
0
    LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_new, 1, 0);
1920
0
    LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_old, 1, 0);
1921
0
  }
1922
1923
0
  forward = (rh->direction == CRYPT_REENCRYPT_FORWARD);
1924
0
  if (hot) {
1925
0
    if (rseg > 0)
1926
0
      LUKS2_digest_segment_assign(cd, hdr, 0, forward ? rh->digest_new : rh->digest_old, 1, 0);
1927
0
    if (scount > rseg + 1)
1928
0
      LUKS2_digest_segment_assign(cd, hdr, rseg + 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
1929
0
  } else {
1930
0
    LUKS2_digest_segment_assign(cd, hdr, 0, forward || scount == 1 ? rh->digest_new : rh->digest_old, 1, 0);
1931
0
    if (scount > 1)
1932
0
      LUKS2_digest_segment_assign(cd, hdr, 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
1933
0
  }
1934
1935
0
  r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
1936
0
  if (r) {
1937
0
    log_dbg(cd, "Failed to assign hot reencryption backup segment.");
1938
0
    return r;
1939
0
  }
1940
0
  r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
1941
0
  if (r) {
1942
0
    log_dbg(cd, "Failed to assign post reencryption backup segment.");
1943
0
    return r;
1944
0
  }
1945
1946
0
  return commit ? LUKS2_hdr_write(cd, hdr) : 0;
1947
0
}
1948
1949
static int reencrypt_set_encrypt_segments(struct crypt_device *cd, struct luks2_hdr *hdr,
1950
            uint64_t dev_size, uint64_t data_size, uint64_t data_shift, bool move_first_segment,
1951
            crypt_reencrypt_direction_info di)
1952
0
{
1953
0
  int r;
1954
0
  uint64_t first_segment_offset, first_segment_length,
1955
0
     second_segment_offset, second_segment_length,
1956
0
     data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
1957
0
  json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments;
1958
1959
0
  if (dev_size < data_shift)
1960
0
    return -EINVAL;
1961
1962
0
  if (data_shift && (di == CRYPT_REENCRYPT_FORWARD))
1963
0
    return -ENOTSUP;
1964
1965
0
  if (move_first_segment) {
1966
    /*
1967
     * future data_device layout:
1968
     * [future LUKS2 header (data shift size)][second data segment][gap (data shift size)][first data segment (data shift size)]
1969
     */
1970
0
    first_segment_offset = dev_size;
1971
0
    if (data_size < data_shift) {
1972
0
      first_segment_length = data_size;
1973
0
      second_segment_length = second_segment_offset = 0;
1974
0
    } else {
1975
0
      first_segment_length = data_shift;
1976
0
      second_segment_offset = data_shift;
1977
0
      second_segment_length = data_size - data_shift;
1978
0
    }
1979
0
  } else if (data_shift) {
1980
0
    first_segment_offset = data_offset;
1981
0
    first_segment_length = dev_size;
1982
0
  } else {
1983
    /* future data_device layout with detached header: [first data segment] */
1984
0
    first_segment_offset = data_offset;
1985
0
    first_segment_length = 0; /* dynamic */
1986
0
  }
1987
1988
0
  jobj_segments = json_object_new_object();
1989
0
  if (!jobj_segments)
1990
0
    return -ENOMEM;
1991
1992
0
  r = -EINVAL;
1993
0
  if (move_first_segment) {
1994
0
    jobj_segment_first =  json_segment_create_linear(first_segment_offset, &first_segment_length, 0);
1995
0
    if (second_segment_length &&
1996
0
        !(jobj_segment_second = json_segment_create_linear(second_segment_offset, &second_segment_length, 0))) {
1997
0
      log_dbg(cd, "Failed generate 2nd segment.");
1998
0
      return r;
1999
0
    }
2000
0
  } else
2001
0
    jobj_segment_first =  json_segment_create_linear(first_segment_offset, first_segment_length ? &first_segment_length : NULL, 0);
2002
2003
0
  if (!jobj_segment_first) {
2004
0
    log_dbg(cd, "Failed generate 1st segment.");
2005
0
    return r;
2006
0
  }
2007
2008
0
  json_object_object_add(jobj_segments, "0", jobj_segment_first);
2009
0
  if (jobj_segment_second)
2010
0
    json_object_object_add(jobj_segments, "1", jobj_segment_second);
2011
2012
0
  r = LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0);
2013
2014
0
  return r ?: LUKS2_segments_set(cd, hdr, jobj_segments, 0);
2015
0
}
2016
2017
static int reencrypt_set_decrypt_shift_segments(struct crypt_device *cd,
2018
  struct luks2_hdr *hdr,
2019
  uint64_t dev_size,
2020
  uint64_t moved_segment_length,
2021
  crypt_reencrypt_direction_info di)
2022
0
{
2023
0
  int digest, r;
2024
0
  uint64_t data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
2025
0
  json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments;
2026
2027
0
  if (di == CRYPT_REENCRYPT_BACKWARD)
2028
0
    return -ENOTSUP;
2029
2030
0
  digest = LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT);
2031
0
  if (digest < 0)
2032
0
    return -EINVAL;
2033
2034
  /*
2035
   * future data_device layout:
2036
   * [encrypted first segment (max data shift size)][gap (data shift size)][second encrypted data segment]
2037
   */
2038
0
  jobj_segments = json_object_new_object();
2039
0
  if (!jobj_segments)
2040
0
    return -ENOMEM;
2041
2042
0
  r = -EINVAL;
2043
0
  jobj_segment_first = json_segment_create_crypt(0, crypt_get_iv_offset(cd),
2044
0
        &moved_segment_length, crypt_get_cipher_spec(cd),
2045
0
        NULL, 0, crypt_get_sector_size(cd), 0);
2046
2047
0
  if (!jobj_segment_first) {
2048
0
    log_dbg(cd, "Failed generate 1st segment.");
2049
0
    goto err;
2050
0
  }
2051
2052
0
  r = json_object_object_add_by_uint_by_ref(jobj_segments, 0, &jobj_segment_first);
2053
0
  if (r)
2054
0
    goto err;
2055
2056
0
  if (dev_size > moved_segment_length) {
2057
0
    jobj_segment_second = json_segment_create_crypt(data_offset + moved_segment_length,
2058
0
                crypt_get_iv_offset(cd) + (moved_segment_length >> SECTOR_SHIFT),
2059
0
                NULL,
2060
0
                crypt_get_cipher_spec(cd),
2061
0
                NULL, 0, /* integrity */
2062
0
                crypt_get_sector_size(cd), 0);
2063
0
    if (!jobj_segment_second) {
2064
0
      r = -EINVAL;
2065
0
      log_dbg(cd, "Failed generate 2nd segment.");
2066
0
      goto err;
2067
0
    }
2068
2069
0
    r = json_object_object_add_by_uint_by_ref(jobj_segments, 1, &jobj_segment_second);
2070
0
    if (r)
2071
0
      goto err;
2072
0
  }
2073
2074
0
  if (!(r = LUKS2_segments_set(cd, hdr, jobj_segments, 0)))
2075
0
    return LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, digest, 1, 0);
2076
0
err:
2077
0
  json_object_put(jobj_segment_first);
2078
0
  json_object_put(jobj_segment_second);
2079
0
  json_object_put(jobj_segments);
2080
0
  return r;
2081
0
}
2082
2083
static int reencrypt_make_targets(struct crypt_device *cd,
2084
        struct luks2_hdr *hdr,
2085
        struct device *hz_device,
2086
        struct volume_key *vks,
2087
        struct dm_target *result,
2088
        uint64_t size)
2089
0
{
2090
0
  bool reenc_seg;
2091
0
  struct volume_key *vk;
2092
0
  uint64_t segment_size, segment_offset, segment_start = 0;
2093
0
  int r;
2094
0
  int s = 0;
2095
0
  json_object *jobj, *jobj_segments = LUKS2_get_segments_jobj(hdr);
2096
2097
0
  while (result) {
2098
0
    jobj = json_segments_get_segment(jobj_segments, s);
2099
0
    if (!jobj) {
2100
0
      log_dbg(cd, "Internal error. Segment %u is null.", s);
2101
0
      return -EINVAL;
2102
0
    }
2103
2104
0
    reenc_seg = (s == json_segments_segment_in_reencrypt(jobj_segments));
2105
2106
0
    segment_offset = json_segment_get_offset(jobj, 1);
2107
0
    segment_size = json_segment_get_size(jobj, 1);
2108
    /* 'dynamic' length allowed in last segment only */
2109
0
    if (!segment_size && !result->next)
2110
0
      segment_size = (size >> SECTOR_SHIFT) - segment_start;
2111
0
    if (!segment_size) {
2112
0
      log_dbg(cd, "Internal error. Wrong segment size %u", s);
2113
0
      return -EINVAL;
2114
0
    }
2115
2116
0
    if (reenc_seg)
2117
0
      segment_offset -= crypt_get_data_offset(cd);
2118
2119
0
    if (!strcmp(json_segment_type(jobj), "crypt")) {
2120
0
      vk = crypt_volume_key_by_id(vks, reenc_seg ? LUKS2_reencrypt_digest_new(hdr) : LUKS2_digest_by_segment(hdr, s));
2121
0
      if (!vk) {
2122
0
        log_err(cd, _("Missing key for dm-crypt segment %u"), s);
2123
0
        return -EINVAL;
2124
0
      }
2125
2126
0
      r = dm_crypt_target_set(result, segment_start, segment_size,
2127
0
            reenc_seg ? hz_device : crypt_data_device(cd),
2128
0
            vk,
2129
0
            json_segment_get_cipher(jobj),
2130
0
            json_segment_get_iv_offset(jobj),
2131
0
            segment_offset,
2132
0
            "none", 0, 0,
2133
0
            json_segment_get_sector_size(jobj));
2134
0
      if (r) {
2135
0
        log_err(cd, _("Failed to set dm-crypt segment."));
2136
0
        return r;
2137
0
      }
2138
0
    } else if (!strcmp(json_segment_type(jobj), "linear")) {
2139
0
      r = dm_linear_target_set(result, segment_start, segment_size, reenc_seg ? hz_device : crypt_data_device(cd), segment_offset);
2140
0
      if (r) {
2141
0
        log_err(cd, _("Failed to set dm-linear segment."));
2142
0
        return r;
2143
0
      }
2144
0
    } else
2145
0
      return EINVAL;
2146
2147
0
    segment_start += segment_size;
2148
0
    s++;
2149
0
    result = result->next;
2150
0
  }
2151
2152
0
  return s;
2153
0
}
2154
2155
/* GLOBAL FIXME: audit function names and parameters names */
2156
2157
/* FIXME:
2158
 *  1) audit log routines
2159
 *  2) can't we derive hotzone device name from crypt context? (unlocked name, device uuid, etc?)
2160
 */
2161
static int reencrypt_load_overlay_device(struct crypt_device *cd, struct luks2_hdr *hdr,
2162
  const char *overlay, struct device *hotzone_device, struct volume_key *vks, uint64_t size,
2163
  uint32_t flags)
2164
0
{
2165
0
  int r;
2166
2167
0
  struct crypt_dm_active_device dmd = {
2168
0
    .flags = flags,
2169
0
  };
2170
2171
0
  log_dbg(cd, "Loading new table for overlay device %s.", overlay);
2172
2173
0
  r = dm_targets_allocate(&dmd.segment, LUKS2_segments_count(hdr));
2174
0
  if (r)
2175
0
    goto out;
2176
2177
0
  r = reencrypt_make_targets(cd, hdr, hotzone_device, vks, &dmd.segment, size);
2178
0
  if (r < 0)
2179
0
    goto out;
2180
2181
0
  r = dm_reload_device(cd, overlay, &dmd, 0, 0);
2182
2183
  /* what else on error here ? */
2184
0
out:
2185
0
  dm_targets_free(cd, &dmd);
2186
2187
0
  return r;
2188
0
}
2189
2190
/*
 * Mirror the live table of 'source' into 'target': reload and resume the
 * target when it already exists (sizes must match), otherwise create it.
 */
static int reencrypt_replace_device(struct crypt_device *cd, const char *target, const char *source, uint32_t flags)
{
	struct crypt_dm_active_device dmd_source, dmd_target = {};
	uint64_t suspend_flags = DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH;
	bool target_exists = true;
	int r;

	log_dbg(cd, "Replacing table in device %s with table from device %s.", target, source);

	/* check only whether target device exists */
	r = dm_status_device(cd, target);
	if (r == -ENODEV)
		target_exists = false;
	else if (r < 0)
		return r;

	r = dm_query_device(cd, source, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
			    DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY, &dmd_source);

	if (r < 0)
		return r;

	if (target_exists && ((r = dm_query_device(cd, target, 0, &dmd_target)) < 0))
		goto out;

	dmd_source.flags |= flags;
	dmd_source.uuid = crypt_get_uuid(cd);

	if (!target_exists) {
		r = dm_create_device(cd, target, CRYPT_SUBDEV, &dmd_source);
		goto out;
	}

	if (dmd_target.size != dmd_source.size) {
		log_err(cd, _("Source and target device sizes don't match. Source %" PRIu64 ", target: %" PRIu64 "."),
			dmd_source.size, dmd_target.size);
		r = -EINVAL;
		goto out;
	}

	r = dm_reload_device(cd, target, &dmd_source, 0, 0);
	if (!r) {
		log_dbg(cd, "Resuming device %s", target);
		r = dm_resume_device(cd, target, suspend_flags | act2dmflags(dmd_source.flags));
	}
out:
	dm_targets_free(cd, &dmd_source);
	dm_targets_free(cd, &dmd_target);

	return r;
}
2239
2240
static int reencrypt_swap_backing_device(struct crypt_device *cd, const char *name,
2241
            const char *new_backend_name)
2242
0
{
2243
0
  int r;
2244
0
  struct device *overlay_dev = NULL;
2245
0
  char overlay_path[PATH_MAX] = { 0 };
2246
0
  struct crypt_dm_active_device dmd = {};
2247
2248
0
  log_dbg(cd, "Redirecting %s mapping to new backing device: %s.", name, new_backend_name);
2249
2250
0
  r = snprintf(overlay_path, PATH_MAX, "%s/%s", dm_get_dir(), new_backend_name);
2251
0
  if (r < 0 || r >= PATH_MAX) {
2252
0
    r = -EINVAL;
2253
0
    goto out;
2254
0
  }
2255
2256
0
  r = device_alloc(cd, &overlay_dev, overlay_path);
2257
0
  if (r)
2258
0
    goto out;
2259
2260
0
  r = device_block_adjust(cd, overlay_dev, DEV_OK,
2261
0
        0, &dmd.size, &dmd.flags);
2262
0
  if (r)
2263
0
    goto out;
2264
2265
0
  r = dm_linear_target_set(&dmd.segment, 0, dmd.size, overlay_dev, 0);
2266
0
  if (r)
2267
0
    goto out;
2268
2269
0
  r = dm_reload_device(cd, name, &dmd, 0, 0);
2270
0
  if (!r) {
2271
0
    log_dbg(cd, "Resuming device %s", name);
2272
0
    r = dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2273
0
  }
2274
2275
0
out:
2276
0
  dm_targets_free(cd, &dmd);
2277
0
  device_free(cd, overlay_dev);
2278
2279
0
  return r;
2280
0
}
2281
2282
/*
 * Create the private hotzone helper device: a 1:1 dm-linear mapping over
 * the data device starting at the new data offset, 'device_size' bytes.
 */
static int reencrypt_activate_hotzone_device(struct crypt_device *cd, const char *name, uint64_t device_size, uint32_t flags)
{
	uint64_t new_offset = reencrypt_get_data_offset_new(crypt_get_hdr(cd, CRYPT_LUKS2)) >> SECTOR_SHIFT;
	struct crypt_dm_active_device dmd = {
		.flags = flags,
		.uuid = crypt_get_uuid(cd),
		.size = device_size >> SECTOR_SHIFT
	};
	int r;

	log_dbg(cd, "Activating hotzone device %s.", name);

	r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
				new_offset, &dmd.size, &dmd.flags);
	if (r)
		goto out;

	r = dm_linear_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd), new_offset);
	if (r)
		goto out;

	r = dm_create_device(cd, name, CRYPT_SUBDEV, &dmd);
out:
	dm_targets_free(cd, &dmd);

	return r;
}
2310
2311
static int reencrypt_init_device_stack(struct crypt_device *cd,
2312
                         struct luks2_reencrypt *rh)
2313
0
{
2314
0
  int r;
2315
0
  char hz_path[PATH_MAX];
2316
2317
0
  assert(rh);
2318
0
  assert(!rh->hotzone_device);
2319
2320
  /* Activate hotzone device 1:1 linear mapping to data_device */
2321
0
  r = reencrypt_activate_hotzone_device(cd, rh->hotzone_name, rh->device_size, CRYPT_ACTIVATE_PRIVATE);
2322
0
  if (r) {
2323
0
    log_err(cd, _("Failed to activate hotzone device %s."), rh->hotzone_name);
2324
0
    return r;
2325
0
  }
2326
2327
0
  r = snprintf(hz_path, PATH_MAX, "%s/%s", dm_get_dir(), rh->hotzone_name);
2328
0
  if (r < 0 || r >= PATH_MAX) {
2329
0
    r = -EINVAL;
2330
0
    goto err;
2331
0
  }
2332
2333
0
  r = device_alloc(cd, &rh->hotzone_device, hz_path);
2334
0
  if (r) {
2335
0
    log_err(cd, _("Failed to allocate hotzone device %s."), rh->hotzone_name);
2336
0
    goto err;
2337
0
  }
2338
2339
  /*
2340
   * Activate overlay device with exactly same table as original 'name' mapping.
2341
   * Note that within this step the 'name' device may already include a table
2342
   * constructed from more than single dm-crypt segment. Therefore transfer
2343
   * mapping as is.
2344
   *
2345
   * If we're about to resume reencryption orig mapping has to be already validated for
2346
   * abrupt shutdown and rchunk_offset has to point on next chunk to reencrypt!
2347
   *
2348
   * TODO: in crypt_activate_by*
2349
   */
2350
0
  r = reencrypt_replace_device(cd, rh->overlay_name, rh->device_name, CRYPT_ACTIVATE_PRIVATE);
2351
0
  if (r) {
2352
0
    log_err(cd, _("Failed to activate overlay device %s with actual origin table."), rh->overlay_name);
2353
0
    goto err;
2354
0
  }
2355
2356
  /* swap origin mapping to overlay device */
2357
0
  r = reencrypt_swap_backing_device(cd, rh->device_name, rh->overlay_name);
2358
0
  if (r) {
2359
0
    log_err(cd, _("Failed to load new mapping for device %s."), rh->device_name);
2360
0
    goto err;
2361
0
  }
2362
2363
  /*
2364
   * Now the 'name' (unlocked luks) device is mapped via dm-linear to an overlay dev.
2365
   * The overlay device has a original live table of 'name' device in-before the swap.
2366
   */
2367
2368
0
  return 0;
2369
0
err:
2370
  /* TODO: force error helper devices on error path */
2371
0
  dm_remove_device(cd, rh->overlay_name, 0);
2372
0
  dm_remove_device(cd, rh->hotzone_name, 0);
2373
2374
0
  return r;
2375
0
}
2376
2377
static reenc_status_t reenc_refresh_helper_devices(struct crypt_device *cd, const char *overlay,
2378
    const char *hotzone)
2379
0
{
2380
0
  int r;
2381
2382
  /*
2383
   * we have to explicitly suspend the overlay device before suspending
2384
   * the hotzone one. Resuming overlay device (aka switching tables) only
2385
   * after suspending the hotzone may lead to deadlock.
2386
   *
2387
   * In other words: always suspend the stack from top to bottom!
2388
   */
2389
0
  r = dm_suspend_device(cd, overlay, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2390
0
  if (r) {
2391
0
    log_err(cd, _("Failed to suspend device %s."), overlay);
2392
0
    return REENC_ERR_ROLLBACK_MEMORY;
2393
0
  }
2394
2395
  /* suspend HZ device */
2396
0
  r = dm_suspend_device(cd, hotzone, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2397
0
  if (r) {
2398
0
    log_err(cd, _("Failed to suspend device %s."), hotzone);
2399
0
    return REENC_ERR_ROLLBACK_MEMORY;
2400
0
  }
2401
2402
  /* resume overlay device: inactive table (with hotozne) -> live */
2403
0
  r = dm_resume_device(cd, overlay, DM_RESUME_PRIVATE);
2404
0
  if (r) {
2405
0
    log_err(cd, _("Failed to resume device %s."), overlay);
2406
0
    return REENC_ERR_ROLLBACK_MEMORY;
2407
0
  }
2408
2409
0
  return REENC_OK;
2410
0
}
2411
2412
static reenc_status_t reencrypt_refresh_overlay_devices(struct crypt_device *cd,
2413
    struct luks2_hdr *hdr,
2414
    const char *overlay,
2415
    const char *hotzone,
2416
    struct device *hotzone_device,
2417
    struct volume_key *vks,
2418
    uint64_t device_size,
2419
    uint32_t flags)
2420
0
{
2421
0
  int r = reencrypt_load_overlay_device(cd, hdr, overlay, hotzone_device, vks, device_size, flags);
2422
0
  if (r) {
2423
0
    log_err(cd, _("Failed to reload device %s."), overlay);
2424
0
    return REENC_ERR_ROLLBACK_MEMORY;
2425
0
  }
2426
2427
0
  r = reenc_refresh_helper_devices(cd, overlay, hotzone);
2428
0
  if (r != REENC_OK)
2429
0
    log_err(cd, _("Failed to refresh reencryption devices stack."));
2430
2431
0
  return r;
2432
0
}
2433
2434
static int reencrypt_move_data(struct crypt_device *cd,
2435
  int devfd,
2436
  uint64_t data_shift,
2437
  crypt_reencrypt_mode_info mode)
2438
0
{
2439
0
  void *buffer;
2440
0
  int r;
2441
0
  ssize_t ret;
2442
0
  uint64_t buffer_len, offset,
2443
0
     read_offset = (mode == CRYPT_REENCRYPT_ENCRYPT ? 0 : data_shift);
2444
0
  struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
2445
2446
0
  offset = json_segment_get_offset(LUKS2_get_segment_jobj(hdr, 0), 0);
2447
0
  buffer_len = json_segment_get_size(LUKS2_get_segment_jobj(hdr, 0), 0);
2448
0
  if (!buffer_len || buffer_len > data_shift)
2449
0
    return -EINVAL;
2450
2451
0
  if (posix_memalign(&buffer, device_alignment(crypt_data_device(cd)), buffer_len))
2452
0
    return -ENOMEM;
2453
2454
0
  ret = read_lseek_blockwise(devfd,
2455
0
      device_block_size(cd, crypt_data_device(cd)),
2456
0
      device_alignment(crypt_data_device(cd)),
2457
0
      buffer, buffer_len, read_offset);
2458
0
  if (ret < 0 || (uint64_t)ret != buffer_len) {
2459
0
    log_dbg(cd, "Failed to read data at offset %" PRIu64 " (size: %zu)",
2460
0
      read_offset, buffer_len);
2461
0
    r = -EIO;
2462
0
    goto out;
2463
0
  }
2464
2465
0
  log_dbg(cd, "Going to write %" PRIu64 " bytes read at offset %" PRIu64 " to new offset %" PRIu64,
2466
0
    buffer_len, read_offset, offset);
2467
0
  ret = write_lseek_blockwise(devfd,
2468
0
      device_block_size(cd, crypt_data_device(cd)),
2469
0
      device_alignment(crypt_data_device(cd)),
2470
0
      buffer, buffer_len, offset);
2471
0
  if (ret < 0 || (uint64_t)ret != buffer_len) {
2472
0
    log_dbg(cd, "Failed to write data at offset %" PRIu64 " (size: %zu)",
2473
0
      offset, buffer_len);
2474
0
    r = -EIO;
2475
0
    goto out;
2476
0
  }
2477
2478
0
  r = 0;
2479
0
out:
2480
0
  crypt_safe_memzero(buffer, buffer_len);
2481
0
  free(buffer);
2482
0
  return r;
2483
0
}
2484
2485
/*
 * Create the backup segments describing the reencryption transition in
 * metadata: an optional "backup-moved-segment" copy of the current first
 * segment, a "backup-previous" segment describing the old layout and a
 * "backup-final" segment describing the target layout. Digests are
 * assigned to each crypt-type backup segment. All created json objects
 * are released on the error path.
 *
 * digest_new < 0 is only valid for decryption (no new key). Returns 0 on
 * success, negative errno on failure.
 */
static int reencrypt_make_backup_segments(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		int digest_new,
		const char *cipher,
		uint64_t data_offset,
		const struct crypt_params_reencrypt *params)
{
	const char *type;
	int r, segment, moved_segment = -1, digest_old = -1;
	json_object *jobj_tmp, *jobj_segment_new = NULL, *jobj_segment_old = NULL, *jobj_segment_bcp = NULL;
	uint32_t sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
	uint64_t segment_offset, tmp, data_shift = params->data_shift << SECTOR_SHIFT,
		 device_size = params->device_size << SECTOR_SHIFT;

	/* only decryption can run without a new volume key digest */
	if (params->mode != CRYPT_REENCRYPT_DECRYPT && digest_new < 0)
		return -EINVAL;

	/* reencrypt/decrypt need the digest of the current default segment */
	if (params->mode != CRYPT_REENCRYPT_ENCRYPT) {
		digest_old = LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT);
		if (digest_old < 0)
			return -EINVAL;
	}

	segment = LUKS2_segment_first_unused_id(hdr);
	if (segment < 0)
		return -EINVAL;

	/* optional backup copy of the (to be moved) first segment */
	if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT) {
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_segment_bcp)) {
			r = -EINVAL;
			goto err;
		}
		r = LUKS2_segment_set_flag(jobj_segment_bcp, "backup-moved-segment");
		if (r)
			goto err;
		moved_segment = segment++;
		/* by_ref: ownership moves to the header on success */
		r = json_object_object_add_by_uint_by_ref(LUKS2_get_segments_jobj(hdr), moved_segment, &jobj_segment_bcp);
		if (r)
			goto err;

		if (!(type = json_segment_type(LUKS2_get_segment_jobj(hdr, moved_segment)))) {
			r = -EINVAL;
			goto err;
		}

		/* crypt-type moved segment references the old digest */
		if (!strcmp(type, "crypt") && ((r = LUKS2_digest_segment_assign(cd, hdr, moved_segment, digest_old, 1, 0))))
			goto err;
	}

	/* FIXME: Add detection for case (digest old == digest new && old segment == new segment) */
	if (digest_old >= 0) {
		if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT) {
			/* rebuild the old segment description based on segment 0 parameters */
			jobj_tmp = LUKS2_get_segment_jobj(hdr, 0);
			if (!jobj_tmp) {
				r = -EINVAL;
				goto err;
			}

			jobj_segment_old = json_segment_create_crypt(data_offset,
					json_segment_get_iv_offset(jobj_tmp),
					device_size ? &device_size : NULL,
					json_segment_get_cipher(jobj_tmp),
					NULL, 0, /* integrity */
					json_segment_get_sector_size(jobj_tmp),
					0);
		} else {
			/* plain copy of the current default segment */
			if (json_object_copy(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT), &jobj_segment_old)) {
				r = -EINVAL;
				goto err;
			}
		}
	} else if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
		/* device not yet encrypted: old layout is plain linear data */
		r = LUKS2_get_data_size(hdr, &tmp, NULL);
		if (r)
			goto err;

		if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT)
			jobj_segment_old = json_segment_create_linear(0, tmp ? &tmp : NULL, 0);
		else
			jobj_segment_old = json_segment_create_linear(data_offset, tmp ? &tmp : NULL, 0);
	}

	if (!jobj_segment_old) {
		r = -EINVAL;
		goto err;
	}

	r = LUKS2_segment_set_flag(jobj_segment_old, "backup-previous");
	if (r)
		goto err;

	/* by_ref: ownership moves to the header on success */
	r = json_object_object_add_by_uint_by_ref(LUKS2_get_segments_jobj(hdr), segment, &jobj_segment_old);
	if (r)
		goto err;

	if (digest_old >= 0 && (r = LUKS2_digest_segment_assign(cd, hdr, segment, digest_old, 1, 0)))
		goto err;

	segment++;

	if (digest_new >= 0) {
		/* new layout is encrypted with 'cipher', possibly shifted */
		segment_offset = data_offset;
		if (params->mode != CRYPT_REENCRYPT_ENCRYPT &&
		    modify_offset(&segment_offset, data_shift, params->direction)) {
			r = -EINVAL;
			goto err;
		}
		jobj_segment_new = json_segment_create_crypt(segment_offset,
							crypt_get_iv_offset(cd),
							NULL, cipher, NULL, 0, sector_size, 0);
	} else if (params->mode == CRYPT_REENCRYPT_DECRYPT) {
		/* new layout is plain linear data, possibly shifted */
		segment_offset = data_offset;
		if (modify_offset(&segment_offset, data_shift, params->direction)) {
			r = -EINVAL;
			goto err;
		}
		jobj_segment_new = json_segment_create_linear(segment_offset, NULL, 0);
	}

	if (!jobj_segment_new) {
		r = -EINVAL;
		goto err;
	}

	r = LUKS2_segment_set_flag(jobj_segment_new, "backup-final");
	if (r)
		goto err;

	/* by_ref: ownership moves to the header on success */
	r = json_object_object_add_by_uint_by_ref(LUKS2_get_segments_jobj(hdr), segment, &jobj_segment_new);
	if (r)
		goto err;

	if (digest_new >= 0 && (r = LUKS2_digest_segment_assign(cd, hdr, segment, digest_new, 1, 0)))
		goto err;

	/* FIXME: also check occupied space by keyslot in shrunk area */
	if (params->direction == CRYPT_REENCRYPT_FORWARD && data_shift &&
	    crypt_metadata_device(cd) == crypt_data_device(cd) &&
	    LUKS2_set_keyslots_size(hdr, json_segment_get_offset(reencrypt_segment_new(hdr), 0))) {
		log_err(cd, _("Failed to set new keyslots area size."));
		r = -EINVAL;
		goto err;
	}

	return 0;
err:
	/* *_by_ref adds NULL their pointer on success, so these puts are safe */
	json_object_put(jobj_segment_new);
	json_object_put(jobj_segment_old);
	json_object_put(jobj_segment_bcp);
	return r;
}
2636
2637
static int reencrypt_verify_single_key(struct crypt_device *cd, int digest, struct volume_key *vks)
2638
0
{
2639
0
  struct volume_key *vk;
2640
2641
0
  vk = crypt_volume_key_by_id(vks, digest);
2642
0
  if (!vk)
2643
0
    return -ENOENT;
2644
2645
0
  if (LUKS2_digest_verify_by_digest(cd, digest, vk) != digest)
2646
0
    return -EINVAL;
2647
2648
0
  return 0;
2649
0
}
2650
2651
/* Verify both reencryption volume keys (new first, then old).
 * A negative digest means "no key on that side" and is skipped. */
static int reencrypt_verify_keys(struct crypt_device *cd,
  int digest_old,
  int digest_new,
  struct volume_key *vks)
{
  int r = 0;

  if (digest_new >= 0)
    r = reencrypt_verify_single_key(cd, digest_new, vks);

  if (!r && digest_old >= 0)
    r = reencrypt_verify_single_key(cd, digest_old, vks);

  return r;
}
2666
2667
static int reencrypt_upload_single_key(struct crypt_device *cd,
2668
  int digest,
2669
  struct volume_key *vks)
2670
0
{
2671
0
  struct volume_key *vk;
2672
2673
0
  vk = crypt_volume_key_by_id(vks, digest);
2674
0
  if (!vk)
2675
0
    return -EINVAL;
2676
2677
0
  return LUKS2_volume_key_load_in_keyring_by_digest(cd, vk, digest);
2678
0
}
2679
2680
/* Put both reencryption volume keys in the kernel keyring (no-op when the
 * keyring is not used, or for cipher_null segments which need no key).
 * On failure of the second (old) upload, the already-uploaded key is dropped
 * so the keyring is left clean. */
static int reencrypt_upload_keys(struct crypt_device *cd,
  struct luks2_hdr *hdr,
  int digest_old,
  int digest_new,
  struct volume_key *vks)
{
  int r;

  if (!crypt_use_keyring_for_vk(cd))
    return 0;

  /* new key first */
  if (digest_new >= 0 && !crypt_is_cipher_null(reencrypt_segment_cipher_new(hdr))) {
    r = reencrypt_upload_single_key(cd, digest_new, vks);
    if (r)
      return r;
  }

  if (digest_old >= 0 && !crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr))) {
    r = reencrypt_upload_single_key(cd, digest_old, vks);
    if (r) {
      /* roll back the key already placed in the keyring */
      crypt_drop_uploaded_keyring_key(cd, vks);
      return r;
    }
  }

  return 0;
}
2703
2704
/* Convenience helper: verify both reencryption keys against stored digests,
 * then upload them into the kernel keyring. */
static int reencrypt_verify_and_upload_keys(struct crypt_device *cd,
  struct luks2_hdr *hdr,
  int digest_old,
  int digest_new,
  struct volume_key *vks)
{
  int r = reencrypt_verify_keys(cd, digest_old, digest_new, vks);

  if (!r)
    r = reencrypt_upload_keys(cd, hdr, digest_old, digest_new, vks);

  return r;
}
2722
2723
static int reencrypt_verify_checksum_params(struct crypt_device *cd,
2724
    const struct crypt_params_reencrypt *params)
2725
0
{
2726
0
  size_t len;
2727
0
  struct crypt_hash *ch;
2728
2729
0
  assert(params);
2730
2731
0
  if (!params->hash)
2732
0
    return -EINVAL;
2733
2734
0
  len = strlen(params->hash);
2735
0
  if (!len || len > (LUKS2_CHECKSUM_ALG_L - 1))
2736
0
    return -EINVAL;
2737
2738
0
  if (crypt_hash_size(params->hash) <= 0)
2739
0
    return -EINVAL;
2740
2741
0
  if (crypt_hash_init(&ch, params->hash)) {
2742
0
    log_err(cd, _("Hash algorithm %s is not available."), params->hash);
2743
0
    return -EINVAL;
2744
0
  }
2745
  /* We just check for alg availability */
2746
0
  crypt_hash_destroy(ch);
2747
2748
0
  return 0;
2749
0
}
2750
2751
static int reencrypt_verify_datashift_params(struct crypt_device *cd,
2752
    const struct crypt_params_reencrypt *params,
2753
    uint32_t sector_size)
2754
0
{
2755
0
  assert(params);
2756
2757
0
  if (!params->data_shift)
2758
0
    return -EINVAL;
2759
0
  if (MISALIGNED(params->data_shift, sector_size >> SECTOR_SHIFT)) {
2760
0
    log_err(cd, _("Data shift value is not aligned to encryption sector size (%" PRIu32 " bytes)."),
2761
0
      sector_size);
2762
0
    return -EINVAL;
2763
0
  }
2764
2765
0
  return 0;
2766
0
}
2767
2768
static int reencrypt_verify_resilience_params(struct crypt_device *cd,
2769
    const struct crypt_params_reencrypt *params,
2770
    uint32_t sector_size, bool move_first_segment)
2771
0
{
2772
  /* no change requested */
2773
0
  if (!params || !params->resilience)
2774
0
    return 0;
2775
2776
0
  if (!strcmp(params->resilience, "journal"))
2777
0
    return (params->data_shift || move_first_segment) ? -EINVAL : 0;
2778
0
  else if (!strcmp(params->resilience, "none"))
2779
0
    return (params->data_shift || move_first_segment) ? -EINVAL : 0;
2780
0
  else if (!strcmp(params->resilience, "datashift"))
2781
0
    return reencrypt_verify_datashift_params(cd, params, sector_size);
2782
0
  else if (!strcmp(params->resilience, "checksum")) {
2783
0
    if (params->data_shift || move_first_segment)
2784
0
      return -EINVAL;
2785
0
    return reencrypt_verify_checksum_params(cd, params);
2786
0
  } else if (!strcmp(params->resilience, "datashift-checksum")) {
2787
0
    if (!move_first_segment ||
2788
0
         reencrypt_verify_datashift_params(cd, params, sector_size))
2789
0
      return -EINVAL;
2790
0
    return reencrypt_verify_checksum_params(cd, params);
2791
0
  } else if (!strcmp(params->resilience, "datashift-journal")) {
2792
0
    if (!move_first_segment)
2793
0
      return -EINVAL;
2794
0
    return reencrypt_verify_datashift_params(cd, params, sector_size);
2795
0
  }
2796
2797
0
  log_err(cd, _("Unsupported resilience mode %s"), params->resilience);
2798
0
  return -EINVAL;
2799
0
}
2800
2801
/*
 * Initialize LUKS2 decryption using the datashift resilience mode: the first
 * data segment is physically moved by @data_shift bytes so decrypted data can
 * overwrite the LUKS2 header area. Builds in-memory backup segments, allocates
 * the reencrypt keyslot, moves the data, and only then performs the single
 * metadata write that sets the reencryption requirement flag. On any failure
 * the in-memory metadata is rolled back. Returns the reencrypt keyslot number
 * on success, negative errno otherwise. @name is non-NULL for online mode.
 */
static int reencrypt_decrypt_with_datashift_init(struct crypt_device *cd,
    const char *name,
    struct luks2_hdr *hdr,
    int reencrypt_keyslot,
    uint32_t sector_size,
    uint64_t data_size,
    uint64_t data_offset,
    struct crypt_keyslot_context *kc_old,
    int keyslot_old,
    const struct crypt_params_reencrypt *params,
    struct volume_key **vks)
{
  bool clear_table = false;
  int r, devfd = -1;
  uint64_t data_shift, max_moved_segment_length, moved_segment_length;
  struct reenc_protection check_rp = {};
  struct crypt_dm_active_device dmd_target, dmd_source = {
    .uuid = crypt_get_uuid(cd),
    .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
  };
  json_object *jobj_segments_old;

  assert(hdr);
  assert(params);
  assert(params->resilience);
  assert(params->data_shift);
  assert(vks);

  /* datashift decryption makes no sense with data starting at offset 0 */
  if (!data_offset)
    return -EINVAL;

  /* the moved segment must fit inside the shift gap */
  if (params->max_hotzone_size > params->data_shift) {
    log_err(cd, _("Moved segment size can not be greater than data shift value."));
    return -EINVAL;
  }

  log_dbg(cd, "Initializing decryption with datashift.");

  /* convert from 512B sectors to bytes */
  data_shift = params->data_shift << SECTOR_SHIFT;

  /*
   * In offline mode we must perform data move with exclusively opened data
   * device in order to exclude LUKS2 decryption process and filesystem mount.
   */
  if (name)
    devfd = device_open(cd, crypt_data_device(cd), O_RDWR);
  else
    devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
  if (devfd < 0)
    return -EINVAL;

  /* in-memory only */
  moved_segment_length = params->max_hotzone_size << SECTOR_SHIFT;
  if (!moved_segment_length)
    moved_segment_length = data_shift < LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH ?
               data_shift : LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;

  /* never move more than the whole data area */
  if (moved_segment_length > data_size)
    moved_segment_length = data_size;

  r = reencrypt_set_decrypt_shift_segments(cd, hdr, data_size,
             moved_segment_length,
             params->direction);
  if (r)
    goto out;

  /* decryption has no new digest/cipher, hence CRYPT_ANY_DIGEST and NULL */
  r = reencrypt_make_backup_segments(cd, hdr, CRYPT_ANY_DIGEST, NULL, data_offset, params);
  if (r) {
    log_dbg(cd, "Failed to create reencryption backup device segments.");
    goto out;
  }

  r = reencrypt_verify_resilience_params(cd, params, sector_size, true);
  if (r < 0) {
    log_err(cd, _("Invalid reencryption resilience parameters."));
    goto out;
  }

  r = LUKS2_keyslot_reencrypt_allocate(cd, hdr, reencrypt_keyslot,
             params, reencrypt_get_alignment(cd, hdr));
  if (r < 0)
    goto out;

  /* load the just-allocated keyslot protection to learn its real limits */
  r = LUKS2_keyslot_reencrypt_load(cd, hdr, reencrypt_keyslot, &check_rp, false);
  if (r < 0)
    goto out;

  r = LUKS2_reencrypt_max_hotzone_size(cd, hdr, &check_rp,
               reencrypt_keyslot,
               &max_moved_segment_length);
  if (r < 0)
    goto out;

  LUKS2_reencrypt_protection_erase(&check_rp);

  if (moved_segment_length > max_moved_segment_length) {
    log_err(cd, _("Moved segment too large. Requested size %" PRIu64 ", available space for: %" PRIu64 "."),
      moved_segment_length, max_moved_segment_length);
    r = -EINVAL;
    goto out;
  }

  /* decryption needs only the old key; no new keyslot context */
  r = LUKS2_keyslot_context_open_all_segments(cd, keyslot_old, CRYPT_ANY_SLOT,
                kc_old, NULL, vks);
  if (r < 0)
    goto out;

  r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, LUKS2_DECRYPT_DATASHIFT_REQ_VERSION, *vks);
  if (r < 0)
    goto out;

  /* online mode: verify the active dm device matches the metadata, then
   * reload it with the new (reencryption) segment layout */
  if (name) {
    r = reencrypt_verify_and_upload_keys(cd, hdr,
                 LUKS2_reencrypt_digest_old(hdr),
                 LUKS2_reencrypt_digest_new(hdr),
                 *vks);
    if (r)
      goto out;

    r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
            DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
            DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
    if (r < 0)
      goto out;

    jobj_segments_old = reencrypt_segments_old(hdr);
    if (!jobj_segments_old) {
      dm_targets_free(cd, &dmd_target);
      free(CONST_CAST(void*)dmd_target.uuid);
      r = -EINVAL;
      goto out;
    }
    /* compare active table against the pre-reencryption segment layout */
    r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, jobj_segments_old, &dmd_source);
    if (!r) {
      r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
      if (r)
        log_err(cd, _("Mismatching parameters on device %s."), name);
    }
    json_object_put(jobj_segments_old);

    dm_targets_free(cd, &dmd_source);
    dm_targets_free(cd, &dmd_target);
    free(CONST_CAST(void*)dmd_target.uuid);

    if (r)
      goto out;

    /* reload with current (hotzone-aware) segments; becomes live on resume */
    dmd_source.size = dmd_target.size;
    r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
    if (!r) {
      r = dm_reload_device(cd, name, &dmd_source, dmd_target.flags, 0);
      if (r)
        log_err(cd, _("Failed to reload device %s."), name);
      else
        clear_table = true;
    }

    dm_targets_free(cd, &dmd_source);

    if (r)
      goto out;
  }

  /* suspend I/O before physically moving data under the mapping */
  if (name) {
    r = dm_suspend_device(cd, name, DM_SUSPEND_SKIP_LOCKFS);
    if (r) {
      log_err(cd, _("Failed to suspend device %s."), name);
      goto out;
    }
  }

  if (reencrypt_move_data(cd, devfd, data_shift, params->mode)) {
    r = -EIO;
    goto out;
  }

  /* This must be first and only write in LUKS2 metadata during _reencrypt_init */
  r = reencrypt_update_flag(cd, LUKS2_DECRYPT_DATASHIFT_REQ_VERSION, true, true);
  if (r) {
    log_dbg(cd, "Failed to set online-reencryption requirement.");
    r = -EINVAL;
  } else
    r = reencrypt_keyslot;
out:
  /* on failure undo the pending table reload, otherwise resume the device */
  if (r < 0 && clear_table && dm_clear_device(cd, name))
    log_err(cd, _("Failed to clear table."));
  else if (clear_table && dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS))
    log_err(cd, _("Failed to resume device %s."), name);

  device_release_excl(cd, crypt_data_device(cd));
  if (r < 0 && LUKS2_hdr_rollback(cd, hdr) < 0)
    log_dbg(cd, "Failed to rollback LUKS2 metadata after failure.");

  return r;
}
2996
2997
/* This function must be called with metadata lock held */
2998
/*
 * Initialize LUKS2 reencryption (encrypt / reencrypt / decrypt) metadata.
 * Must be called with the metadata lock held (see comment above). Validates
 * parameters and device geometry, builds in-memory backup segments, allocates
 * the reencrypt keyslot, optionally moves the first data segment, and finally
 * performs the single metadata write setting the reencryption requirement.
 * Returns the reencrypt keyslot number on success, negative errno on error;
 * on failure the in-memory metadata is rolled back.
 */
static int reencrypt_init(struct crypt_device *cd,
    const char *name,
    struct luks2_hdr *hdr,
    struct crypt_keyslot_context *kc_old,
    struct crypt_keyslot_context *kc_new,
    int keyslot_old,
    int keyslot_new,
    const char *cipher,
    const char *cipher_mode,
    const struct crypt_params_reencrypt *params,
    struct volume_key **vks)
{
  bool move_first_segment;
  char _cipher[128];
  uint32_t check_sector_size, new_sector_size, old_sector_size;
  int digest_new, r, reencrypt_keyslot, devfd = -1;
  uint64_t data_offset_bytes, data_size_bytes, data_shift_bytes, device_size_bytes;
  struct volume_key *vk;
  struct crypt_dm_active_device dmd_target, dmd_source = {
    .uuid = crypt_get_uuid(cd),
    .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
  };

  assert(cd);
  assert(hdr);

  if (!params || !params->resilience || params->mode > CRYPT_REENCRYPT_DECRYPT)
    return -EINVAL;

  /* non-decrypt modes need target LUKS2 params, cipher spec and a source
   * for the new digest (existing keyslot or explicit new-digest request) */
  if (params->mode != CRYPT_REENCRYPT_DECRYPT &&
      (!params->luks2 || !(cipher && cipher_mode) ||
       (keyslot_new < 0 && !(params->flags & CRYPT_REENCRYPT_CREATE_NEW_DIGEST))))
    return -EINVAL;

  log_dbg(cd, "Initializing reencryption (mode: %s) in LUKS2 metadata.",
        crypt_reencrypt_mode_to_str(params->mode));

  move_first_segment = (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT);

  old_sector_size = LUKS2_get_sector_size(hdr);

  /* implicit sector size 512 for decryption */
  new_sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
  if (new_sector_size < SECTOR_SIZE || new_sector_size > MAX_SECTOR_SIZE ||
      NOTPOW2(new_sector_size)) {
    log_err(cd, _("Unsupported encryption sector size."));
    return -EINVAL;
  }
  /* check the larger encryption sector size only */
  check_sector_size = new_sector_size > old_sector_size ? new_sector_size : old_sector_size;

  /* build "cipher" or "cipher-mode" spec string, reject truncation */
  if (!cipher_mode || *cipher_mode == '\0')
    r = snprintf(_cipher, sizeof(_cipher), "%s", cipher);
  else
    r = snprintf(_cipher, sizeof(_cipher), "%s-%s", cipher, cipher_mode);
  if (r < 0 || (size_t)r >= sizeof(_cipher))
    return -EINVAL;

  data_offset_bytes = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;

  r = device_check_access(cd, crypt_data_device(cd), DEV_OK);
  if (r)
    return r;

  r = device_check_size(cd, crypt_data_device(cd), data_offset_bytes, 1);
  if (r)
    return r;

  r = device_size(crypt_data_device(cd), &device_size_bytes);
  if (r)
    return r;

  /* the moved segment must leave room for the future LUKS2 header */
  if (move_first_segment && params->mode == CRYPT_REENCRYPT_ENCRYPT &&
      params->data_shift < LUKS2_get_data_offset(hdr)) {
    log_err(cd, _("Data shift (%" PRIu64 " sectors) is less than future data offset (%" PRIu64 " sectors)."),
      params->data_shift, LUKS2_get_data_offset(hdr));
    return -EINVAL;
  }

  /* from here on device_size_bytes is the usable data area size */
  device_size_bytes -= data_offset_bytes;
  data_shift_bytes = params->data_shift << SECTOR_SHIFT;
  data_size_bytes = params->device_size << SECTOR_SHIFT;

  if (device_size_bytes < data_shift_bytes && params->direction == CRYPT_REENCRYPT_BACKWARD) {
    log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
    return -EINVAL;
  }

  if (data_size_bytes > device_size_bytes) {
    log_err(cd, _("Reduced data size is larger than real device size."));
    return -EINVAL;
  }

  /* resolve requested/implicit reduced data size against the shift gap */
  if (data_size_bytes && params->mode == CRYPT_REENCRYPT_ENCRYPT &&
      move_first_segment && data_shift_bytes) {
    if (data_size_bytes > device_size_bytes - data_shift_bytes) {
      log_err(cd, _("Reduced data size is larger than real device size."));
      return -EINVAL;
    }
  } else if (!data_size_bytes && params->mode == CRYPT_REENCRYPT_ENCRYPT &&
      move_first_segment && data_shift_bytes)
    data_size_bytes = device_size_bytes - data_shift_bytes;
  else if (!data_size_bytes)
    data_size_bytes = device_size_bytes;

  if (MISALIGNED(data_size_bytes, check_sector_size)) {
    log_err(cd, _("Data device is not aligned to encryption sector size (%" PRIu32 " bytes)."), check_sector_size);
    return -EINVAL;
  }

  reencrypt_keyslot = LUKS2_keyslot_find_empty(cd, hdr, 0);
  if (reencrypt_keyslot < 0) {
    log_err(cd, _("All key slots full."));
    return -EINVAL;
  }

  /* datashift decryption has its own dedicated initialization path */
  if (params->mode == CRYPT_REENCRYPT_DECRYPT && data_shift_bytes && move_first_segment)
    return reencrypt_decrypt_with_datashift_init(cd, name, hdr,
                   reencrypt_keyslot,
                   check_sector_size,
                   data_size_bytes,
                   data_offset_bytes,
                   kc_old,
                   keyslot_old,
                   params,
                   vks);

  /*
   * We must perform data move with exclusive open data device
   * to exclude another cryptsetup process to colide with
   * encryption initialization (or mount)
   */
  if (move_first_segment) {
    devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
    if (devfd < 0) {
      if (devfd == -EBUSY)
        log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."),
          device_path(crypt_data_device(cd)));
      return -EINVAL;
    }
  }

  if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
    /* in-memory only */
    r = reencrypt_set_encrypt_segments(cd, hdr, device_size_bytes, data_size_bytes,
               data_shift_bytes,
               move_first_segment,
               params->direction);
    if (r)
      goto out;
  }

  if (params->flags & CRYPT_REENCRYPT_CREATE_NEW_DIGEST) {
    assert(kc_new->get_luks2_key);
    r = kc_new->get_luks2_key(cd, kc_new, CRYPT_ANY_SLOT, CRYPT_ANY_SEGMENT, &vk);
    if (r < 0)
      goto out;

    /* do not create new digest in case it matches the current one */
    r = LUKS2_digest_verify_by_segment(cd, hdr, CRYPT_DEFAULT_SEGMENT, vk);
    if (r == -EPERM || r == -ENOENT)
      r = LUKS2_digest_create(cd, "pbkdf2", hdr, vk);

    crypt_free_volume_key(vk);
    if (r < 0)
      goto out;
    digest_new = r;
  } else
    digest_new = LUKS2_digest_by_keyslot(hdr, keyslot_new);

  r = reencrypt_make_backup_segments(cd, hdr, digest_new, _cipher, data_offset_bytes, params);
  if (r) {
    log_dbg(cd, "Failed to create reencryption backup device segments.");
    goto out;
  }

  r = reencrypt_verify_resilience_params(cd, params, check_sector_size, move_first_segment);
  if (r < 0)
    goto out;

  r = LUKS2_keyslot_reencrypt_allocate(cd, hdr, reencrypt_keyslot, params,
      reencrypt_get_alignment(cd, hdr));
  if (r < 0)
    goto out;

  r = LUKS2_keyslot_context_open_all_segments(cd, keyslot_old, keyslot_new, kc_old, kc_new, vks);
  if (r < 0)
    goto out;

  r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, LUKS2_REENCRYPT_REQ_VERSION, *vks);
  if (r < 0)
    goto out;

  /* online (non-encrypt) mode: verify the active dm table matches metadata */
  if (name && params->mode != CRYPT_REENCRYPT_ENCRYPT) {
    r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
    if (r)
      goto out;

    r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
            DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
            DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
    if (r < 0)
      goto out;

    r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
    if (!r) {
      r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
      if (r)
        log_err(cd, _("Mismatching parameters on device %s."), name);
    }

    dm_targets_free(cd, &dmd_source);
    dm_targets_free(cd, &dmd_target);
    free(CONST_CAST(void*)dmd_target.uuid);

    if (r)
      goto out;
  }

  if (move_first_segment && reencrypt_move_data(cd, devfd, data_shift_bytes, params->mode)) {
    r = -EIO;
    goto out;
  }

  /* This must be first and only write in LUKS2 metadata during reencrypt_init */
  r = reencrypt_update_flag(cd, LUKS2_REENCRYPT_REQ_VERSION, true, true);
  if (r) {
    log_dbg(cd, "Failed to set online-reencryption requirement.");
    r = -EINVAL;
  } else
    r = reencrypt_keyslot;
out:
  device_release_excl(cd, crypt_data_device(cd));
  if (r < 0 && LUKS2_hdr_rollback(cd, hdr) < 0)
    log_dbg(cd, "Failed to rollback LUKS2 metadata after failure.");

  return r;
}
3236
3237
/*
 * Persist resilience data for the current hotzone before it is overwritten:
 * - NONE:      nothing to store.
 * - CHECKSUM:  per-block hashes of @buffer stored in the reencrypt keyslot.
 * - JOURNAL:   the whole hotzone @buffer stored in the reencrypt keyslot.
 * - DATASHIFT: only the LUKS2 header (segment offsets) is written out.
 * Returns 0 on success, negative errno on failure.
 */
static int reencrypt_hotzone_protect_final(struct crypt_device *cd,
  struct luks2_hdr *hdr, int reencrypt_keyslot,
  const struct reenc_protection *rp,
  const void *buffer, size_t buffer_len)
{
  const void *pbuffer;
  size_t data_offset, len;
  int r;

  assert(hdr);
  assert(rp);

  if (rp->type == REENC_PROTECTION_NONE)
    return 0;

  if (rp->type == REENC_PROTECTION_CHECKSUM) {
    log_dbg(cd, "Checksums hotzone resilience.");

    /* hash each protection block of the hotzone; checksums are packed
     * back-to-back (len advances by hash_size per block).
     * NOTE(review): assumes buffer_len is a multiple of csum.block_size —
     * presumably guaranteed by hotzone sizing; verify against callers. */
    for (data_offset = 0, len = 0; data_offset < buffer_len; data_offset += rp->p.csum.block_size, len += rp->p.csum.hash_size) {
      if (crypt_hash_write(rp->p.csum.ch, (const char *)buffer + data_offset, rp->p.csum.block_size)) {
        log_dbg(cd, "Failed to hash sector at offset %zu.", data_offset);
        return -EINVAL;
      }
      if (crypt_hash_final(rp->p.csum.ch, (char *)rp->p.csum.checksums + len, rp->p.csum.hash_size)) {
        log_dbg(cd, "Failed to finalize hash.");
        return -EINVAL;
      }
    }
    pbuffer = rp->p.csum.checksums;
  } else if (rp->type == REENC_PROTECTION_JOURNAL) {
    log_dbg(cd, "Journal hotzone resilience.");
    len = buffer_len;
    pbuffer = buffer;
  } else if (rp->type == REENC_PROTECTION_DATASHIFT) {
    log_dbg(cd, "Data shift hotzone resilience.");
    return LUKS2_hdr_write(cd, hdr);
  } else
    return -EINVAL;

  log_dbg(cd, "Going to store %zu bytes in reencrypt keyslot.", len);

  r = LUKS2_keyslot_reencrypt_store(cd, hdr, reencrypt_keyslot, pbuffer, len);

  /* store returns byte count on success; normalize to 0 */
  return r > 0 ? 0 : r;
}
3282
3283
/*
 * Advance the reencryption context by one processed hotzone: update offset,
 * length and progress according to direction and protection mode.
 * rh->read holds the byte count of the last processed hotzone (negative on
 * a failed read). Returns 0 on success, -EINVAL on inconsistent state.
 */
static int reencrypt_context_update(struct crypt_device *cd,
  struct luks2_reencrypt *rh)
{
  if (rh->read < 0)
    return -EINVAL;

  if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
    if (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
      /* datashift encryption: step back by the fixed shift value */
      if (rh->offset)
        rh->offset -= data_shift_value(&rh->rp);
      /* clamp the tail window so the final zone starts at the shift value */
      if (rh->offset && (rh->offset < data_shift_value(&rh->rp))) {
        rh->length = rh->offset;
        rh->offset = data_shift_value(&rh->rp);
      }
      if (!rh->offset)
        rh->length = data_shift_value(&rh->rp);
    } else {
      /* plain backward: shrink the last window to what remains */
      if (rh->offset < rh->length)
        rh->length = rh->offset;
      rh->offset -= rh->length;
    }
  } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
    rh->offset += (uint64_t)rh->read;
    /* datashift decryption: after the main area is done, wrap around to
     * process the moved first segment at offset 0 */
    if (rh->device_size == rh->offset &&
        rh->jobj_segment_moved &&
        rh->mode == CRYPT_REENCRYPT_DECRYPT &&
        rh->rp.type == REENC_PROTECTION_DATASHIFT) {
      rh->offset = 0;
      rh->length = json_segment_get_size(rh->jobj_segment_moved, 0);
    }
    /* it fails in-case of device_size < rh->offset later */
    else if (rh->device_size - rh->offset < rh->length)
      rh->length = rh->device_size - rh->offset;
  } else
    return -EINVAL;

  if (rh->device_size < rh->offset) {
    log_dbg(cd, "Calculated reencryption offset %" PRIu64 " is beyond device size %" PRIu64 ".", rh->offset, rh->device_size);
    return -EINVAL;
  }

  rh->progress += (uint64_t)rh->read;

  return 0;
}
3328
3329
static int reencrypt_load(struct crypt_device *cd, struct luks2_hdr *hdr,
3330
    uint64_t device_size,
3331
    uint64_t max_hotzone_size,
3332
    uint64_t required_device_size,
3333
    struct volume_key *vks,
3334
    struct luks2_reencrypt **rh)
3335
0
{
3336
0
  int r;
3337
0
  struct luks2_reencrypt *tmp = NULL;
3338
0
  crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
3339
3340
0
  if (ri == CRYPT_REENCRYPT_NONE) {
3341
0
    log_err(cd, _("Device not marked for LUKS2 reencryption."));
3342
0
    return -EINVAL;
3343
0
  } else if (ri == CRYPT_REENCRYPT_INVALID)
3344
0
    return -EINVAL;
3345
3346
0
  r = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
3347
0
  if (r < 0)
3348
0
    return r;
3349
3350
0
  if (ri == CRYPT_REENCRYPT_CLEAN)
3351
0
    r = reencrypt_load_clean(cd, hdr, device_size, max_hotzone_size, required_device_size, &tmp);
3352
0
  else if (ri == CRYPT_REENCRYPT_CRASH)
3353
0
    r = reencrypt_load_crashed(cd, hdr, device_size, &tmp);
3354
0
  else
3355
0
    r = -EINVAL;
3356
3357
0
  if (r < 0 || !tmp) {
3358
0
    log_err(cd, _("Failed to load LUKS2 reencryption context."));
3359
0
    return r < 0 ? r : -EINVAL;
3360
0
  }
3361
3362
0
  *rh = tmp;
3363
3364
0
  return 0;
3365
0
}
3366
#else
3367
/* Stub used when reencryption support is compiled out (!USE_LUKS2_REENCRYPTION):
 * always reports the operation as unsupported. */
int LUKS2_reencrypt_max_hotzone_size(struct crypt_device *cd __attribute__((unused)),
  struct luks2_hdr *hdr __attribute__((unused)),
  const struct reenc_protection *rp __attribute__((unused)),
  int reencrypt_keyslot __attribute__((unused)),
  uint64_t *r_length __attribute__((unused)))
{
  return -ENOTSUP;
}
3375
#endif
3376
3377
static int reencrypt_lock_internal(struct crypt_device *cd, const char *uuid, struct crypt_lock_handle **reencrypt_lock)
3378
0
{
3379
0
  int r;
3380
0
  char *lock_resource;
3381
3382
0
  assert(uuid);
3383
3384
0
  if (!crypt_metadata_locking_enabled()) {
3385
0
    *reencrypt_lock = NULL;
3386
0
    return 0;
3387
0
  }
3388
3389
0
  r = asprintf(&lock_resource, "LUKS2-reencryption-%s", uuid);
3390
0
  if (r < 0)
3391
0
    return -ENOMEM;
3392
0
  if (r < 20) {
3393
0
    free(lock_resource);
3394
0
    return -EINVAL;
3395
0
  }
3396
3397
0
  r = crypt_write_lock(cd, lock_resource, false, reencrypt_lock);
3398
3399
0
  free(lock_resource);
3400
3401
0
  return r;
3402
0
}
3403
3404
/* internal only */
3405
int LUKS2_reencrypt_lock_by_dm_uuid(struct crypt_device *cd, const char *dm_uuid,
3406
  struct crypt_lock_handle **reencrypt_lock)
3407
0
{
3408
0
  int r;
3409
0
  char hdr_uuid[37];
3410
0
  const char *uuid = crypt_get_uuid(cd);
3411
3412
0
  if (!dm_uuid)
3413
0
    return -EINVAL;
3414
3415
0
  if (!uuid) {
3416
0
    r = snprintf(hdr_uuid, sizeof(hdr_uuid), "%.8s-%.4s-%.4s-%.4s-%.12s",
3417
0
       dm_uuid + 6, dm_uuid + 14, dm_uuid + 18, dm_uuid + 22, dm_uuid + 26);
3418
0
    if (r < 0 || (size_t)r != (sizeof(hdr_uuid) - 1))
3419
0
      return -EINVAL;
3420
0
    uuid = hdr_uuid;
3421
0
  } else if (dm_uuid_cmp(dm_uuid, uuid))
3422
0
    return -EINVAL;
3423
3424
0
  return reencrypt_lock_internal(cd, uuid, reencrypt_lock);
3425
0
}
3426
3427
/* internal only */
3428
int LUKS2_reencrypt_lock(struct crypt_device *cd, struct crypt_lock_handle **reencrypt_lock)
3429
0
{
3430
0
  if (!cd || !crypt_get_type(cd) || strcmp(crypt_get_type(cd), CRYPT_LUKS2))
3431
0
    return -EINVAL;
3432
3433
0
  return reencrypt_lock_internal(cd, crypt_get_uuid(cd), reencrypt_lock);
3434
0
}
3435
3436
/* internal only */
3437
/* Release a lock taken by LUKS2_reencrypt_lock*(); NULL handle is tolerated
 * by the underlying unlock (the locking-disabled case hands out NULL). */
void LUKS2_reencrypt_unlock(struct crypt_device *cd, struct crypt_lock_handle *reencrypt_lock)
{
  crypt_unlock_internal(cd, reencrypt_lock);
}
3441
#if USE_LUKS2_REENCRYPTION
3442
/*
 * Acquire the reencryption lock and confirm the device is in a clean
 * reencryption state. The metadata is re-read after the lock is taken
 * (another process may have progressed or finished meanwhile) and the
 * status re-checked. On success the held lock is returned via
 * *reencrypt_lock; on any failure the lock is released.
 */
static int reencrypt_lock_and_verify(struct crypt_device *cd, struct luks2_hdr *hdr,
    struct crypt_lock_handle **reencrypt_lock)
{
  int r;
  crypt_reencrypt_info ri;
  struct crypt_lock_handle *h;

  /* pre-check without the lock to fail fast */
  ri = LUKS2_reencrypt_status(hdr);
  if (ri == CRYPT_REENCRYPT_INVALID)
    return -EINVAL;
  if (ri < CRYPT_REENCRYPT_CLEAN) {
    log_err(cd, _("Device is not in reencryption."));
    return -EINVAL;
  }

  r = LUKS2_reencrypt_lock(cd, &h);
  if (r < 0) {
    if (r == -EBUSY)
      log_err(cd, _("Reencryption process is already running."));
    else
      log_err(cd, _("Failed to acquire reencryption lock."));
    return r;
  }

  /* With reencryption lock held, reload device context and verify metadata state */
  r = crypt_load(cd, CRYPT_LUKS2, NULL);
  if (r) {
    LUKS2_reencrypt_unlock(cd, h);
    return r;
  }

  ri = LUKS2_reencrypt_status(hdr);
  if (ri == CRYPT_REENCRYPT_CLEAN) {
    *reencrypt_lock = h;
    return 0;
  }

  /* crashed (or otherwise non-clean) state requires recovery first */
  LUKS2_reencrypt_unlock(cd, h);
  log_err(cd, _("Cannot proceed with reencryption. Run reencryption recovery first."));
  return -EINVAL;
}
3483
3484
/*
 * Load the in-memory LUKS2 reencryption context (struct luks2_reencrypt).
 *
 * Takes the reencryption lock, validates device/mapping sizes and resilience
 * parameters, unlocks volume keys if needed, and initializes the storage
 * wrappers.  On success the context is attached to cd and ownership of both
 * *vks and the reencryption lock moves into it; on error the lock is released
 * and the partially built context is freed.
 *
 * @name    active dm device name for online reencryption, NULL for offline
 * @vks     in/out: unlocked volume keys; consumed (MOVE_REF) on success
 * @params  optional caller-supplied reencryption parameters
 */
static int reencrypt_load_by_keyslot_context(struct crypt_device *cd,
    const char *name,
    struct crypt_keyslot_context *kc_old,
    struct crypt_keyslot_context *kc_new,
    int keyslot_old,
    int keyslot_new,
    struct volume_key **vks,
    const struct crypt_params_reencrypt *params)
{
  int r, reencrypt_slot;
  struct luks2_hdr *hdr;
  struct crypt_lock_handle *reencrypt_lock;
  struct luks2_reencrypt *rh;
  const struct volume_key *vk;
  size_t alignment;
  uint32_t old_sector_size, new_sector_size, sector_size;
  struct crypt_dm_active_device dmd_target, dmd_source = {
    .uuid = crypt_get_uuid(cd),
    .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
  };
  uint64_t minimal_size, device_size, mapping_size = 0, required_size = 0,
     max_hotzone_size = 0;
  bool dynamic;
  uint32_t flags = 0;

  assert(cd);

  hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
  if (!hdr)
    return -EINVAL;

  log_dbg(cd, "Loading LUKS2 reencryption context.");

  /* Resilience checks use the larger of old/new segment sector sizes. */
  old_sector_size = reencrypt_get_sector_size_old(hdr);
  new_sector_size = reencrypt_get_sector_size_new(hdr);
  sector_size = new_sector_size > old_sector_size ? new_sector_size : old_sector_size;

  r = reencrypt_verify_resilience_params(cd, params, sector_size,
                 LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0);
  if (r < 0)
    return r;

  if (params) {
    required_size = params->device_size;
    max_hotzone_size = params->max_hotzone_size;
  }

  /* Drop any stale context attached to cd before building a new one. */
  rh = crypt_get_luks2_reencrypt(cd);
  if (rh) {
    LUKS2_reencrypt_free(cd, rh);
    crypt_set_luks2_reencrypt(cd, NULL);
    rh = NULL;
  }

  r = reencrypt_lock_and_verify(cd, hdr, &reencrypt_lock);
  if (r)
    return r;

  reencrypt_slot = LUKS2_find_keyslot(hdr, "reencrypt");
  if (reencrypt_slot < 0) {
    r = -EINVAL;
    goto err;
  }

  /* From now on we hold reencryption lock */

  if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic)) {
    r = -EINVAL;
    goto err;
  }

  /* some configurations provides fixed device size */
  r = LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, false, dynamic);
  if (r) {
    r = -EINVAL;
    goto err;
  }

  /* From here on, minimal_size and required_size are in 512B sectors. */
  minimal_size >>= SECTOR_SHIFT;

  r = reencrypt_verify_keys(cd, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
  if (r == -ENOENT) {
    log_dbg(cd, "Keys are not ready. Unlocking all volume keys.");
    r = LUKS2_keyslot_context_open_all_segments(cd, keyslot_old, keyslot_new,
                  kc_old, kc_new, vks);
  }

  if (r < 0)
    goto err;

  if (name) {
    /* Online path: verify the active dm device matches the metadata. */
    r = reencrypt_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
    if (r < 0)
      goto err;

    r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
            DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
            DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
    if (r < 0)
      goto err;
    flags = dmd_target.flags;

    /*
     * By default reencryption code aims to retain flags from existing dm device.
     * The keyring activation flag can not be inherited if original cipher is null.
     *
     * In this case override the flag based on decision made in reencrypt_upload_keys
     * above. The code checks if new VK is eligible for keyring.
     */
    vk = crypt_volume_key_by_id(*vks, LUKS2_reencrypt_digest_new(hdr));
    if (vk && crypt_volume_key_description(vk) && crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr))) {
      flags |= CRYPT_ACTIVATE_KEYRING_KEY;
      dmd_source.flags |= CRYPT_ACTIVATE_KEYRING_KEY;
    }

    r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
    if (!r) {
      r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
      if (r)
        log_err(cd, _("Mismatching parameters on device %s."), name);
    }

    /* NOTE(review): dm_targets_free() appears to release only the target
     * chain; dmd_target.size is still read below — confirm it stays valid. */
    dm_targets_free(cd, &dmd_source);
    dm_targets_free(cd, &dmd_target);
    free(CONST_CAST(void*)dmd_target.uuid);
    if (r)
      goto err;
    mapping_size = dmd_target.size;
  }

  r = -EINVAL;
  if (required_size && mapping_size && (required_size != mapping_size)) {
    log_err(cd, _("Active device size and requested reencryption size don't match."));
    goto err;
  }

  /* Active mapping size (if any) overrides the requested size. */
  if (mapping_size)
    required_size = mapping_size;

  if (required_size) {
    /* TODO: Add support for changing fixed minimal size in reencryption mda where possible */
    if ((minimal_size && (required_size < minimal_size)) ||
        (required_size > (device_size >> SECTOR_SHIFT)) ||
        (!dynamic && (required_size != minimal_size)) ||
        (old_sector_size > 0 && MISALIGNED(required_size, old_sector_size >> SECTOR_SHIFT)) ||
        (new_sector_size > 0 && MISALIGNED(required_size, new_sector_size >> SECTOR_SHIFT))) {
      log_err(cd, _("Illegal device size requested in reencryption parameters."));
      goto err;
    }
  }

  alignment = reencrypt_get_alignment(cd, hdr);

  r = LUKS2_keyslot_reencrypt_update_needed(cd, hdr, reencrypt_slot, params, alignment);
  if (r > 0) /* metadata update needed */
    r = LUKS2_keyslot_reencrypt_update(cd, hdr, reencrypt_slot, params, alignment, *vks);
  if (r < 0)
    goto err;

  r = reencrypt_load(cd, hdr, device_size, max_hotzone_size, required_size, *vks, &rh);
  if (r < 0 || !rh)
    goto err;

  if (name && (r = reencrypt_context_set_names(rh, name)))
    goto err;

  /* Reassure device is not mounted and there's no dm mapping active */
  if (!name && (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0)) {
    log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
    r = -EBUSY;
    goto err;
  }
  device_release_excl(cd, crypt_data_device(cd));

  /* There's a race for dm device activation not managed by cryptsetup.
   *
   * 1) excl close
   * 2) rogue dm device activation
   * 3) one or more dm-crypt based wrapper activation
   * 4) next excl open gets skipped due to 3) device from 2) remains undetected.
   */
  r = reencrypt_init_storage_wrappers(cd, hdr, rh, *vks);
  if (r)
    goto err;

  /* If one of wrappers is based on dmcrypt fallback it already blocked mount */
  if (!name && crypt_storage_wrapper_get_type(rh->cw1) != DMCRYPT &&
      crypt_storage_wrapper_get_type(rh->cw2) != DMCRYPT) {
    if (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0) {
      log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
      r = -EBUSY;
      goto err;
    }
  }

  rh->flags = flags;

  /* Ownership of the volume keys and the lock moves into the context. */
  MOVE_REF(rh->vks, *vks);
  MOVE_REF(rh->reenc_lock, reencrypt_lock);

  crypt_set_luks2_reencrypt(cd, rh);

  return 0;
err:
  LUKS2_reencrypt_unlock(cd, reencrypt_lock);
  LUKS2_reencrypt_free(cd, rh);
  return r;
}
3692
3693
static int reencrypt_locked_recovery(struct crypt_device *cd,
3694
  int keyslot_old,
3695
  int keyslot_new,
3696
  struct crypt_keyslot_context *kc_old,
3697
  struct crypt_keyslot_context *kc_new,
3698
  struct volume_key **r_vks)
3699
0
{
3700
0
  int keyslot, r = -EINVAL;
3701
0
  struct volume_key *_vks = NULL;
3702
3703
0
  r = LUKS2_keyslot_context_open_all_segments(cd, keyslot_old, keyslot_new,
3704
0
                kc_old, kc_new, &_vks);
3705
0
  if (r < 0)
3706
0
    return r;
3707
0
  keyslot = r;
3708
3709
0
  r = LUKS2_reencrypt_locked_recovery_by_vks(cd, _vks);
3710
0
  if (!r && r_vks)
3711
0
    MOVE_REF(*r_vks, _vks);
3712
3713
0
  crypt_free_volume_key(_vks);
3714
3715
0
  return r < 0 ? r : keyslot;
3716
0
}
3717
3718
/*
 * Perform LUKS2 reencryption crash recovery if metadata indicates it is
 * needed.  Takes the reencryption lock, reloads metadata, and runs recovery
 * only when the device status is CRYPT_REENCRYPT_CRASH; a clean or absent
 * reencryption state is a successful no-op.
 */
static int reencrypt_recovery_by_keyslot_context(struct crypt_device *cd,
  struct luks2_hdr *hdr,
  int keyslot_old,
  int keyslot_new,
  struct crypt_keyslot_context *kc_old,
  struct crypt_keyslot_context *kc_new)
{
  int r;
  crypt_reencrypt_info ri;
  struct crypt_lock_handle *reencrypt_lock;

  r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
  if (r) {
    if (r == -EBUSY)
      log_err(cd, _("Reencryption in-progress. Cannot perform recovery."));
    else
      log_err(cd, _("Failed to get reencryption lock."));
    return r;
  }

  /* Reload metadata under the lock so the status check below is current. */
  if ((r = crypt_load(cd, CRYPT_LUKS2, NULL))) {
    LUKS2_reencrypt_unlock(cd, reencrypt_lock);
    return r;
  }

  ri = LUKS2_reencrypt_status(hdr);
  if (ri == CRYPT_REENCRYPT_INVALID) {
    LUKS2_reencrypt_unlock(cd, reencrypt_lock);
    return -EINVAL;
  }

  if (ri == CRYPT_REENCRYPT_CRASH) {
    /* Keys unlocked internally; we do not keep them (last arg NULL). */
    r = reencrypt_locked_recovery(cd, keyslot_old, keyslot_new,
                kc_old, kc_new, NULL);
    if (r < 0)
      log_err(cd, _("LUKS2 reencryption recovery failed."));
  } else {
    log_dbg(cd, "No LUKS2 reencryption recovery needed.");
    r = 0;
  }

  LUKS2_reencrypt_unlock(cd, reencrypt_lock);
  return r;
}
3762
3763
/*
 * Repair LUKS2 reencryption metadata (recreate the reencryption keyslot
 * digest and rewrite the requirement flag) without performing data recovery.
 * Requires the device to be in a clean reencryption state; runs under the
 * reencryption lock with metadata reloaded after the lock is taken.
 */
static int reencrypt_repair(
    struct crypt_device *cd,
    struct luks2_hdr *hdr,
    int keyslot_old,
    int keyslot_new,
    struct crypt_keyslot_context *kc_old,
    struct crypt_keyslot_context *kc_new)
{
  int r;
  struct crypt_lock_handle *reencrypt_lock;
  struct luks2_reencrypt *rh;
  crypt_reencrypt_info ri;
  uint8_t requirement_version;
  const char *resilience;
  struct volume_key *vks = NULL;

  log_dbg(cd, "Loading LUKS2 reencryption context for metadata repair.");

  /* Drop any stale in-memory reencryption context first. */
  rh = crypt_get_luks2_reencrypt(cd);
  if (rh) {
    LUKS2_reencrypt_free(cd, rh);
    crypt_set_luks2_reencrypt(cd, NULL);
    rh = NULL;
  }

  ri = LUKS2_reencrypt_status(hdr);
  if (ri == CRYPT_REENCRYPT_INVALID)
    return -EINVAL;

  if (ri < CRYPT_REENCRYPT_CLEAN) {
    log_err(cd, _("Device is not in reencryption."));
    return -EINVAL;
  }

  r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
  if (r < 0) {
    if (r == -EBUSY)
      log_err(cd, _("Reencryption process is already running."));
    else
      log_err(cd, _("Failed to acquire reencryption lock."));
    return r;
  }

  /* With reencryption lock held, reload device context and verify metadata state */
  r = crypt_load(cd, CRYPT_LUKS2, NULL);
  if (r)
    goto out;

  ri = LUKS2_reencrypt_status(hdr);
  if (ri == CRYPT_REENCRYPT_INVALID) {
    r = -EINVAL;
    goto out;
  }
  /* Nothing to repair once reencryption is finished. */
  if (ri == CRYPT_REENCRYPT_NONE) {
    r = 0;
    goto out;
  }

  resilience = reencrypt_resilience_type(hdr);
  if (!resilience) {
    r = -EINVAL;
    goto out;
  }

  /* Datashift decryption with a moved segment uses its own requirement version. */
  if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_DECRYPT &&
      !strncmp(resilience, "datashift-", 10) &&
      LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
    requirement_version = LUKS2_DECRYPT_DATASHIFT_REQ_VERSION;
  else
    requirement_version = LUKS2_REENCRYPT_REQ_VERSION;

  r = LUKS2_keyslot_context_open_all_segments(cd, keyslot_old, keyslot_new, kc_old, kc_new, &vks);
  if (r < 0)
    goto out;

  /* Recreate the keyslot digest; keys are no longer needed afterwards. */
  r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, requirement_version, vks);
  crypt_free_volume_key(vks);
  vks = NULL;
  if (r < 0)
    goto out;

  /* replaces old online-reencrypt flag with updated version and commits metadata */
  r = reencrypt_update_flag(cd, requirement_version, true, true);
out:
  LUKS2_reencrypt_unlock(cd, reencrypt_lock);
  /* Safe even when vks was already freed and reset to NULL above. */
  crypt_free_volume_key(vks);
  return r;

}
3852
3853
/*
 * Common entry point for all reencryption-init wrappers.
 *
 * Dispatches to repair or recovery short-circuits when requested via flags;
 * otherwise validates the cipher, initializes reencryption metadata if not
 * yet present (unless RESUME_ONLY), and loads the runtime reencryption
 * context (unless INITIALIZE_ONLY).  Returns the "reencrypt" keyslot number
 * on success, negative errno on failure.
 */
static int reencrypt_init_by_keyslot_context(struct crypt_device *cd,
  const char *name,
  struct crypt_keyslot_context *kc_old,
  struct crypt_keyslot_context *kc_new,
  int keyslot_old,
  int keyslot_new,
  const char *cipher,
  const char *cipher_mode,
  const struct crypt_params_reencrypt *params)
{
  int r;
  crypt_reencrypt_info ri;
  size_t key_length;
  struct volume_key *vks = NULL;
  uint32_t flags = params ? params->flags : 0;
  struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);

  /* Creating a new digest requires a usable new-key context and is
   * incompatible with resume-only mode. */
  if (params && (params->flags & CRYPT_REENCRYPT_CREATE_NEW_DIGEST) &&
      (!kc_new || !kc_new->get_luks2_key || !kc_new->get_key_size ||
       (params->flags & CRYPT_REENCRYPT_RESUME_ONLY)))
    return -EINVAL;

  /* short-circuit in reencryption metadata update and finish immediately. */
  if (flags & CRYPT_REENCRYPT_REPAIR_NEEDED)
    return reencrypt_repair(cd, hdr, keyslot_old, keyslot_new, kc_old, kc_new);

  /* short-circuit in recovery and finish immediately. */
  if (flags & CRYPT_REENCRYPT_RECOVERY)
    return reencrypt_recovery_by_keyslot_context(cd, hdr, keyslot_old, keyslot_new, kc_old, kc_new);

  /* Online reencryption requires direct I/O on the data device. */
  if (name && !device_direct_io(crypt_data_device(cd))) {
    log_dbg(cd, "Device %s does not support direct I/O.", device_path(crypt_data_device(cd)));
    /* FIXME: Add more specific error message for translation later. */
    log_err(cd, _("Failed to initialize reencryption device stack."));
    return -EINVAL;
  }

  /* Validate the requested cipher against the new key size (skipped for
   * wrapped-key ciphers, whose key size has different semantics). */
  if (cipher && !crypt_cipher_wrapped_key(cipher, cipher_mode)) {
    if (keyslot_new == CRYPT_ANY_SLOT && kc_new && kc_new->get_key_size)
      r = kc_new->get_key_size(cd, kc_new, &key_length);
    else {
      r = crypt_keyslot_get_key_size(cd, keyslot_new);
      if (r >= 0)
        key_length = r;
    }
    if (r < 0)
      return r;
    r = LUKS2_check_cipher(cd, key_length, cipher, cipher_mode);
    if (r < 0) {
      log_err(cd, _("Unable to use cipher specification %s-%s for LUKS2."), cipher, cipher_mode);
      return r;
    }
  }

  r = LUKS2_device_write_lock(cd, hdr, crypt_metadata_device(cd));
  if (r)
    return r;

  ri = LUKS2_reencrypt_status(hdr);
  if (ri == CRYPT_REENCRYPT_INVALID) {
    device_write_unlock(cd, crypt_metadata_device(cd));
    return -EINVAL;
  }

  if ((ri > CRYPT_REENCRYPT_NONE) && (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY)) {
    device_write_unlock(cd, crypt_metadata_device(cd));
    log_err(cd, _("LUKS2 reencryption already initialized in metadata."));
    return -EBUSY;
  }

  if (ri == CRYPT_REENCRYPT_NONE && !(flags & CRYPT_REENCRYPT_RESUME_ONLY)) {
    r = reencrypt_init(cd, name, hdr, kc_old, kc_new, keyslot_old,
           keyslot_new, cipher, cipher_mode, params, &vks);
    if (r < 0)
      log_err(cd, _("Failed to initialize LUKS2 reencryption in metadata."));
  } else if (ri > CRYPT_REENCRYPT_NONE) {
    log_dbg(cd, "LUKS2 reencryption already initialized.");
    r = 0;
  }

  device_write_unlock(cd, crypt_metadata_device(cd));

  if (r < 0 || (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY))
    goto out;

  r = reencrypt_load_by_keyslot_context(cd, name, kc_old, kc_new, keyslot_old,
                keyslot_new, &vks, params);
out:
  /* On error drop keys possibly uploaded to the kernel keyring. */
  if (r < 0)
    crypt_drop_uploaded_keyring_key(cd, vks);
  crypt_free_volume_key(vks);
  return r < 0 ? r : LUKS2_find_keyslot(hdr, "reencrypt");
}
3946
#else
3947
/*
 * Stub used when the library is built without LUKS2 reencryption support
 * (USE_LUKS2_REENCRYPTION disabled): always reports ENOTSUP.
 */
static int reencrypt_init_by_keyslot_context(struct crypt_device *cd,
  const char *name __attribute__((unused)),
  struct crypt_keyslot_context *kc_old __attribute__((unused)),
  struct crypt_keyslot_context *kc_new __attribute__((unused)),
  int keyslot_old __attribute__((unused)),
  int keyslot_new __attribute__((unused)),
  const char *cipher __attribute__((unused)),
  const char *cipher_mode __attribute__((unused)),
  const struct crypt_params_reencrypt *params __attribute__((unused)))
{
  log_err(cd, _("This operation is not supported for this device type."));
  return -ENOTSUP;
}
3960
#endif
3961
3962
int crypt_reencrypt_init_by_keyring(struct crypt_device *cd,
3963
  const char *name,
3964
  const char *passphrase_description,
3965
  int keyslot_old,
3966
  int keyslot_new,
3967
  const char *cipher,
3968
  const char *cipher_mode,
3969
  const struct crypt_params_reencrypt *params)
3970
0
{
3971
0
  int r;
3972
0
  struct crypt_keyslot_context kc = {0};
3973
3974
0
  if (onlyLUKS2reencrypt(cd) || !passphrase_description)
3975
0
    return -EINVAL;
3976
0
  if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
3977
0
    return -EINVAL;
3978
3979
0
  if (device_is_dax(crypt_data_device(cd)) > 0) {
3980
0
    log_err(cd, _("Reencryption is not supported for DAX (persistent memory) devices."));
3981
0
    return -EINVAL;
3982
0
  }
3983
3984
0
  crypt_keyslot_context_init_by_keyring_internal(&kc, passphrase_description);
3985
0
  r = reencrypt_init_by_keyslot_context(cd, name, &kc, &kc, keyslot_old,
3986
0
                keyslot_new, cipher, cipher_mode, params);
3987
3988
0
  crypt_keyslot_context_destroy_internal(&kc);
3989
3990
0
  return r;
3991
0
}
3992
3993
int crypt_reencrypt_init_by_passphrase(struct crypt_device *cd,
3994
  const char *name,
3995
  const char *passphrase,
3996
  size_t passphrase_size,
3997
  int keyslot_old,
3998
  int keyslot_new,
3999
  const char *cipher,
4000
  const char *cipher_mode,
4001
  const struct crypt_params_reencrypt *params)
4002
0
{
4003
0
  int r;
4004
0
  struct crypt_keyslot_context kc = {0};
4005
4006
0
  if (onlyLUKS2reencrypt(cd) || !passphrase)
4007
0
    return -EINVAL;
4008
0
  if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
4009
0
    return -EINVAL;
4010
4011
0
  if (device_is_dax(crypt_data_device(cd)) > 0) {
4012
0
    log_err(cd, _("Reencryption is not supported for DAX (persistent memory) devices."));
4013
0
    return -EINVAL;
4014
0
  }
4015
4016
0
  crypt_keyslot_context_init_by_passphrase_internal(&kc, passphrase, passphrase_size);
4017
4018
0
  r = reencrypt_init_by_keyslot_context(cd, name, &kc, &kc, keyslot_old,
4019
0
                keyslot_new, cipher, cipher_mode, params);
4020
4021
0
  crypt_keyslot_context_destroy_internal(&kc);
4022
4023
0
  return r;
4024
0
}
4025
4026
int crypt_reencrypt_init_by_keyslot_context(struct crypt_device *cd,
4027
  const char *name,
4028
  struct crypt_keyslot_context *kc_old,
4029
  struct crypt_keyslot_context *kc_new,
4030
  int keyslot_old,
4031
  int keyslot_new,
4032
  const char *cipher,
4033
  const char *cipher_mode,
4034
  const struct crypt_params_reencrypt *params)
4035
0
{
4036
0
  if (onlyLUKS2reencrypt(cd) || (!kc_old && !kc_new))
4037
0
    return -EINVAL;
4038
0
  if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
4039
0
    return -EINVAL;
4040
4041
0
  if (device_is_dax(crypt_data_device(cd)) > 0) {
4042
0
    log_err(cd, _("Reencryption is not supported for DAX (persistent memory) devices."));
4043
0
    return -EINVAL;
4044
0
  }
4045
4046
0
  return reencrypt_init_by_keyslot_context(cd, name, kc_old, kc_new, keyslot_old, keyslot_new, cipher, cipher_mode, params);
4047
0
}
4048
4049
#if USE_LUKS2_REENCRYPTION
4050
/*
 * Perform one reencryption step over the current hotzone:
 * build hotzone segments, persist resilience metadata, read the hotzone via
 * the old-key wrapper, decrypt it, re-encrypt and write it via the new-key
 * wrapper, then commit post-step segments and (online) resume the hotzone.
 *
 * Return values distinguish recoverable failures (REENC_ERR_ROLLBACK_MEMORY:
 * nothing irreversible written yet, or recovery is harmless) from fatal ones
 * (REENC_ERR_FATAL: hotzone data may be partially written).
 */
static reenc_status_t reencrypt_step(struct crypt_device *cd,
    struct luks2_hdr *hdr,
    struct luks2_reencrypt *rh,
    uint64_t device_size,
    bool online)
{
  int r;
  struct reenc_protection *rp;

  assert(hdr);
  assert(rh);

  rp = &rh->rp;

  /* in memory only */
  r = reencrypt_make_segments(cd, hdr, rh, device_size);
  if (r)
    return REENC_ERR_ROLLBACK_MEMORY;

  r = reencrypt_assign_segments(cd, hdr, rh, 1, 0);
  if (r) {
    log_err(cd, _("Failed to set device segments for next reencryption hotzone."));
    return REENC_ERR_ROLLBACK_MEMORY;
  }

  log_dbg(cd, "Reencrypting chunk starting at offset: %" PRIu64 ", size :%" PRIu64 ".", rh->offset, rh->length);
  log_dbg(cd, "data_offset: %" PRIu64, crypt_get_data_offset(cd) << SECTOR_SHIFT);

  /* First datashift chunk may cover the moved segment: rebuild the old-key
   * wrapper against the moved data offset and switch resilience if set. */
  if (!rh->offset && rp->type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
    crypt_storage_wrapper_destroy(rh->cw1);
    log_dbg(cd, "Reinitializing old segment storage wrapper for moved segment.");
    r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
        LUKS2_reencrypt_get_data_offset_moved(hdr),
        crypt_get_iv_offset(cd),
        reencrypt_get_sector_size_old(hdr),
        reencrypt_segment_cipher_old(hdr),
        crypt_volume_key_by_id(rh->vks, rh->digest_old),
        rh->wflags1);
    if (r) {
      log_err(cd, _("Failed to initialize old segment storage wrapper."));
      return REENC_ERR_ROLLBACK_MEMORY;
    }

    if (rh->rp_moved_segment.type != REENC_PROTECTION_NOT_SET) {
      log_dbg(cd, "Switching to moved segment resilience type.");
      rp = &rh->rp_moved_segment;
    }
  }

  r = reencrypt_hotzone_protect_ready(cd, rp);
  if (r) {
    log_err(cd, _("Failed to initialize hotzone protection."));
    return REENC_ERR_ROLLBACK_MEMORY;
  }

  if (online) {
    r = reencrypt_refresh_overlay_devices(cd, hdr, rh->overlay_name, rh->hotzone_name,
                  rh->hotzone_device, rh->vks, rh->device_size, rh->flags);
    /* Teardown overlay devices with dm-error. None bio shall pass! */
    if (r != REENC_OK)
      return r;
  }

  rh->read = crypt_storage_wrapper_read(rh->cw1, rh->offset, rh->reenc_buffer, rh->length);
  if (rh->read < 0) {
    /* severity normal */
    log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset);
    return REENC_ERR_ROLLBACK_MEMORY;
  }

  /* metadata commit point */
  r = reencrypt_hotzone_protect_final(cd, hdr, rh->reenc_keyslot, rp, rh->reenc_buffer, rh->read);
  if (r < 0) {
    /*
     * Nothing was written in hotzone area yet. Even if metadata write failed the previous
     * state is still valid. If the metadata write passed and there was another
     * error it's harmless to do recovery. Recovery may be run several times with no
     * negative side effect.
     */
    log_err(cd, _("Failed to write reencryption resilience metadata."));
    return REENC_ERR_ROLLBACK_MEMORY;
  }

  r = crypt_storage_wrapper_decrypt(rh->cw1, rh->offset, rh->reenc_buffer, rh->read);
  if (r) {
    /*
     * Ideally, this would be specific error (REENC_ERR_ROLLBACK_METADATA) case where
     * it would rollback on-disk metadata to the last valid state (still no write in
     * hotzone area). But it's not worth the effort. This will trigger full LUKS2
     * reencryption recovery despite not being necessary.
     */
    log_err(cd, _("Decryption failed."));
    return REENC_ERR_ROLLBACK_MEMORY;
  }
  if (rh->read != crypt_storage_wrapper_encrypt_write(rh->cw2, rh->offset, rh->reenc_buffer, rh->read)) {
    /* severity fatal */
    log_err(cd, _("Failed to write hotzone area starting at %" PRIu64 "."), rh->offset);
    return REENC_ERR_FATAL;
  }

  /* Data must hit stable storage before segments can be committed. */
  if (rp->type != REENC_PROTECTION_NONE && crypt_storage_wrapper_datasync(rh->cw2)) {
    log_err(cd, _("Failed to sync data."));
    return REENC_ERR_FATAL;
  }

  /* metadata commit safe point */
  r = reencrypt_assign_segments(cd, hdr, rh, 0, rp->type != REENC_PROTECTION_NONE);
  if (r) {
    /* severity fatal */
    log_err(cd, _("Failed to update metadata after current reencryption hotzone completed."));
    return REENC_ERR_FATAL;
  }

  if (online) {
    log_dbg(cd, "Resuming device %s", rh->hotzone_name);
    r = dm_resume_device(cd, rh->hotzone_name, DM_RESUME_PRIVATE);
    if (r) {
      log_err(cd, _("Failed to resume device %s."), rh->hotzone_name);
      return REENC_ERR_ROLLBACK_MEMORY;
    }
  }

  return REENC_OK;
}
4174
4175
static int reencrypt_erase_backup_segments(struct crypt_device *cd,
4176
    struct luks2_hdr *hdr)
4177
0
{
4178
0
  int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
4179
0
  if (segment >= 0) {
4180
0
    if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
4181
0
      return -EINVAL;
4182
0
    json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
4183
0
  }
4184
0
  segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
4185
0
  if (segment >= 0) {
4186
0
    if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
4187
0
      return -EINVAL;
4188
0
    json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
4189
0
  }
4190
0
  segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
4191
0
  if (segment >= 0) {
4192
0
    if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
4193
0
      return -EINVAL;
4194
0
    json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
4195
0
  }
4196
4197
0
  return 0;
4198
0
}
4199
4200
/*
 * Wipe data-device areas that are no longer referenced after reencryption
 * finishes: the moved-segment backup area (encryption with a moved segment)
 * and the trailing data-shift area (forward datashift).  Wipes with random
 * data in 1 MiB chunks.
 */
static int reencrypt_wipe_unused_device_area(struct crypt_device *cd, struct luks2_reencrypt *rh)
{
  uint64_t offset, length, dev_size;
  int r = 0;

  assert(cd);
  assert(rh);

  /* Encryption with a moved segment: wipe the original plaintext copy. */
  if (rh->jobj_segment_moved && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
    offset = json_segment_get_offset(rh->jobj_segment_moved, 0);
    length = json_segment_get_size(rh->jobj_segment_moved, 0);
    log_dbg(cd, "Wiping %" PRIu64 " bytes of backup segment data at offset %" PRIu64,
      length, offset);
    r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
        offset, length, 1024 * 1024, NULL, NULL);
  }

  if (r < 0)
    return r;

  /* Forward datashift leaves stale data at the device tail: wipe it. */
  if (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->direction == CRYPT_REENCRYPT_FORWARD) {
    r = device_size(crypt_data_device(cd), &dev_size);
    if (r < 0)
      return r;

    /* Device must be at least data_shift bytes large. */
    if (dev_size < data_shift_value(&rh->rp))
      return -EINVAL;

    offset = dev_size - data_shift_value(&rh->rp);
    length = data_shift_value(&rh->rp);
    log_dbg(cd, "Wiping %" PRIu64 " bytes of data at offset %" PRIu64,
      length, offset);
    r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
        offset, length, 1024 * 1024, NULL, NULL);
  }

  return r;
}
4238
4239
static int replace_hotzone_device_with_error(struct crypt_device *cd, struct luks2_reencrypt *rh)
4240
0
{
4241
0
  log_dbg(cd, "Replacing device %s with dm-error.", rh->hotzone_name);
4242
0
  if (dm_error_device(cd, rh->hotzone_name)) {
4243
0
    log_err(cd, _("Failed to replace suspended device %s with dm-error target."), rh->hotzone_name);
4244
0
    log_err(cd, _("Do not resume the device unless replaced with error target manually."));
4245
0
    return -EIO;
4246
0
  }
4247
4248
0
  return 0;
4249
0
}
4250
4251
/*
 * Tear down the online-reencryption device stack (top-level device, overlay
 * and hotzone) after a reencryption run.  The ordering is critical: the
 * top-level device is reloaded with the final segments, suspended helper
 * devices are neutralized (hotzone -> dm-error) before any resume, and only
 * then is the top-level device switched away from the overlay.
 */
static int teardown_overlay_devices(struct crypt_device *cd, struct luks2_reencrypt *rh)
{
  bool overlay_suspended, hotzone_suspended;
  int r;

  /* Reload device with current LUKS2 segments */
  r = LUKS2_reload(cd, rh->device_name, rh->vks, rh->device_size, rh->flags);
  if (r) {
    log_err(cd, _("Failed to reload device %s."), rh->device_name);
    return r;
  }

  overlay_suspended = dm_status_suspended(cd, rh->overlay_name) > 0;
  hotzone_suspended = dm_status_suspended(cd, rh->hotzone_name) > 0;

  /*
   * The overlay (if suspended) may hold already queued I/Os.
   * Reload the overlay device with the table identical to the one
   * loaded to the top level device. The overlay device will dropped
   * shortly after successful top level device resume.
   */
  if (overlay_suspended) {
    log_dbg(cd, "Reverting suspended device %s to previous metadata segments", rh->overlay_name);
    r = LUKS2_reload(cd, rh->overlay_name, rh->vks, rh->device_size, rh->flags);
    if (r) {
      log_err(cd, _("Failed to reload device %s."), rh->overlay_name);
      return r;
    }
  }

  /*
   * if the hotzone is suspended we must error all pending I/O waiting in the device. The
   * reencryption step was not completed and the pending I/O would corrupt the data on data
   * device.
   *
   * If the hotzone table replacement fails we must abort!
   */
  if (hotzone_suspended && (r = replace_hotzone_device_with_error(cd, rh)))
    return r;

  if (overlay_suspended) {
    /* Resume will pass since the hotzone (if previously suspended) is now
     * replaced with live dm-error table */
    r = dm_resume_device(cd, rh->overlay_name, DM_RESUME_PRIVATE);
    if (r) {
      log_err(cd, _("Failed to resume device %s."), rh->overlay_name);
      return r;
    }
  }

  /* Now we can switch original top level device away from overlay device */
  r = dm_resume_device(cd, rh->device_name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
  if (r) {
    log_err(cd, _("Failed to resume device %s."), rh->device_name);
    return r;
  }

  /*
   * This should not affect teardown return value. There may be other processes
   * touching those devices despite being private.
   */
  if (dm_remove_device(cd, rh->overlay_name, 0))
    log_dbg(cd, "Failed to remove unused device %s", rh->overlay_name);
  if (dm_remove_device(cd, rh->hotzone_name, 0))
    log_dbg(cd, "Failed to remove unused device %s", rh->hotzone_name);

  return 0;
}
4319
4320
/*
 * Finalize a successful (or cleanly interrupted) reencryption run.
 *
 * Persists metadata where needed, tears down the online device stack and,
 * when the whole device has been processed, performs final cleanup:
 * wiping unused areas, dropping obsolete keyslots and removing the
 * reencryption keyslot (the on-disk commit point).
 *
 * Returns 0 on success, -EINVAL on a fatal metadata/keyslot failure,
 * or an error from tearing down the overlay devices.
 */
static int reencrypt_teardown_ok(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
{
  int i, r;
  uint64_t dmt_flags;
  /* finished == true when progress has reached the full device size */
  bool finished = !(rh->device_size > rh->progress);

  /*
   * With no resilience protection the metadata was not committed as the
   * reencryption progressed, so it must be written out now.
   */
  if (rh->rp.type == REENC_PROTECTION_NONE &&
      LUKS2_hdr_write(cd, hdr)) {
    log_err(cd, _("Failed to write LUKS2 metadata."));
    return -EINVAL;
  }

  if (rh->online) {
    r = teardown_overlay_devices(cd, rh);
    if (r)
      return r;

    /*
     * After a completed decryption the top-level device is plain linear;
     * remove it lazily if the kernel supports deferred removal.
     */
    if (finished && rh->mode == CRYPT_REENCRYPT_DECRYPT &&
        !dm_flags(cd, DM_LINEAR, &dmt_flags) && (dmt_flags & DM_DEFERRED_SUPPORTED))
        dm_remove_device(cd, rh->device_name, CRYPT_DEACTIVATE_DEFERRED);
  }

  if (finished) {
    /* Best-effort cleanup steps below only log on failure. */
    if (reencrypt_wipe_unused_device_area(cd, rh))
      log_err(cd, _("Failed to wipe unused data device area."));
    if (reencrypt_get_data_offset_new(hdr) && LUKS2_set_keyslots_size(hdr, reencrypt_get_data_offset_new(hdr)))
      log_dbg(cd, "Failed to set new keyslots area size.");
    /* Drop all keyslots still bound to the old (now unused) digest. */
    if (rh->digest_old >= 0 && rh->digest_new != rh->digest_old)
      for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++)
        if (LUKS2_digest_by_keyslot(hdr, i) == rh->digest_old && crypt_keyslot_destroy(cd, i))
          log_err(cd, _("Failed to remove unused (unbound) keyslot %d."), i);

    if (reencrypt_erase_backup_segments(cd, hdr))
      log_dbg(cd, "Failed to erase backup segments");

    if (reencrypt_update_flag(cd, 0, false, false))
      log_dbg(cd, "Failed to disable reencryption requirement flag.");

    /* metadata commit point also removing reencryption flag on-disk */
    if (crypt_keyslot_destroy(cd, rh->reenc_keyslot)) {
      log_err(cd, _("Failed to remove reencryption keyslot."));
      return -EINVAL;
    }
  }

  return 0;
}
4367
4368
/*
 * Roll back after a recoverable (memory-only) failure: restore the on-disk
 * metadata snapshot and, for online reencryption, revert the device stack.
 *
 * Best-effort; there is no return value. Each early return below is a point
 * past which a safe rollback is impossible and the state is left as-is.
 */
static void reencrypt_teardown_rollback(struct crypt_device *cd, struct luks2_hdr *hdr,
    struct luks2_reencrypt *rh)
{
  /*
   * We cannot rollback for REENC_PROTECTION_NONE. It does not commit metadata as
   * it progresses. In this case, the device stack is intentionally left as-is.
   */
  if (rh->rp.type <= REENC_PROTECTION_NONE)
    return;

  /*
   * If metadata rollback fails, we cannot proceed with device teardown
   * as we do not have proper metadata snapshot for LUKS2_reload().
   */
  if (LUKS2_hdr_rollback(cd, hdr))
    return;

  /* Offline reencryption has no device stack to tear down. */
  if (!rh->online)
    return;

  teardown_overlay_devices(cd, rh);
}
4390
4391
static void reencrypt_teardown_fatal(struct crypt_device *cd, struct luks2_reencrypt *rh)
4392
0
{
4393
0
  log_err(cd, _("Fatal error while reencrypting chunk starting at %" PRIu64 ", %" PRIu64 " sectors long."),
4394
0
    (rh->offset >> SECTOR_SHIFT) + crypt_get_data_offset(cd), rh->length >> SECTOR_SHIFT);
4395
4396
0
  if (rh->online) {
4397
0
    log_err(cd, _("Online reencryption failed."));
4398
0
    if (dm_status_suspended(cd, rh->hotzone_name) > 0)
4399
0
      replace_hotzone_device_with_error(cd, rh);
4400
0
  }
4401
0
}
4402
4403
static int reencrypt_teardown(struct crypt_device *cd, struct luks2_hdr *hdr,
4404
    struct luks2_reencrypt *rh, reenc_status_t rs, bool interrupted,
4405
    int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
4406
    void *usrptr)
4407
0
{
4408
0
  int r;
4409
4410
0
  switch (rs) {
4411
0
  case REENC_OK:
4412
0
    if (progress && !interrupted)
4413
0
      progress(rh->device_size, rh->progress, usrptr);
4414
0
    r = reencrypt_teardown_ok(cd, hdr, rh);
4415
0
    break;
4416
0
  case REENC_ERR_ROLLBACK_MEMORY:
4417
0
    reencrypt_teardown_rollback(cd, hdr, rh);
4418
0
    r = -EINVAL;
4419
0
    break;
4420
0
  case REENC_ERR_FATAL:
4421
0
    reencrypt_teardown_fatal(cd, rh);
4422
    /* fall-through */
4423
0
  default:
4424
0
    r = -EIO;
4425
0
  }
4426
4427
  /* this frees reencryption lock */
4428
0
  LUKS2_reencrypt_free(cd, rh);
4429
0
  crypt_set_luks2_reencrypt(cd, NULL);
4430
4431
0
  return r;
4432
0
}
4433
4434
int crypt_reencrypt_run(
4435
  struct crypt_device *cd,
4436
  int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
4437
  void *usrptr)
4438
0
{
4439
0
  int r;
4440
0
  crypt_reencrypt_info ri;
4441
0
  struct luks2_hdr *hdr;
4442
0
  struct luks2_reencrypt *rh;
4443
0
  reenc_status_t rs;
4444
0
  bool quit = false;
4445
4446
0
  if (onlyLUKS2reencrypt(cd))
4447
0
    return -EINVAL;
4448
4449
0
  hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
4450
4451
0
  ri = LUKS2_reencrypt_status(hdr);
4452
0
  if (ri > CRYPT_REENCRYPT_CLEAN) {
4453
0
    log_err(cd, _("Cannot proceed with reencryption. Unexpected reencryption status."));
4454
0
    return -EINVAL;
4455
0
  }
4456
4457
0
  rh = crypt_get_luks2_reencrypt(cd);
4458
0
  if (!rh || (!rh->reenc_lock && crypt_metadata_locking_enabled())) {
4459
0
    log_err(cd, _("Missing or invalid reencrypt context."));
4460
0
    return -EINVAL;
4461
0
  }
4462
4463
0
  log_dbg(cd, "Resuming LUKS2 reencryption.");
4464
4465
0
  if (rh->online) {
4466
    /* This is last resort to avoid data corruption. Abort is justified here. */
4467
0
    assert(device_direct_io(crypt_data_device(cd)));
4468
4469
0
    if (reencrypt_init_device_stack(cd, rh)) {
4470
0
      log_err(cd, _("Failed to initialize reencryption device stack."));
4471
0
      return -EINVAL;
4472
0
    }
4473
0
  }
4474
4475
0
  log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
4476
4477
0
  rs = REENC_OK;
4478
4479
0
  if (progress && progress(rh->device_size, rh->progress, usrptr))
4480
0
    quit = true;
4481
4482
0
  while (!quit && (rh->device_size > rh->progress)) {
4483
0
    rs = reencrypt_step(cd, hdr, rh, rh->device_size, rh->online);
4484
0
    if (rs != REENC_OK)
4485
0
      break;
4486
4487
0
    log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
4488
0
    if (progress && progress(rh->device_size, rh->progress, usrptr))
4489
0
      quit = true;
4490
4491
0
    r = reencrypt_context_update(cd, rh);
4492
0
    if (r) {
4493
0
      log_err(cd, _("Failed to update reencryption context."));
4494
0
      rs = REENC_ERR_ROLLBACK_MEMORY;
4495
0
      break;
4496
0
    }
4497
4498
0
    log_dbg(cd, "Next reencryption offset will be %" PRIu64 " sectors.", rh->offset);
4499
0
    log_dbg(cd, "Next reencryption chunk size will be %" PRIu64 " sectors).", rh->length);
4500
0
  }
4501
4502
0
  r = reencrypt_teardown(cd, hdr, rh, rs, quit, progress, usrptr);
4503
0
  return r;
4504
0
}
4505
4506
4507
/*
 * Crash recovery: re-run the interrupted reencryption step for the current
 * hotzone using the resilience data, then finalize the reencryption state.
 *
 * @cd          crypt device handle
 * @hdr         LUKS2 header with in-progress reencryption metadata
 * @device_size usable data device size in bytes
 * @vks         volume keys required to recover the segment
 *
 * Returns 0 on success, negative errno otherwise. The locally loaded
 * reencryption context is always freed before returning.
 */
static int reencrypt_recovery(struct crypt_device *cd,
    struct luks2_hdr *hdr,
    uint64_t device_size,
    struct volume_key *vks)
{
  int r;
  struct luks2_reencrypt *rh = NULL;

  /* Rebuild the reencryption context from on-disk metadata. */
  r = reencrypt_load(cd, hdr, device_size, 0, 0, vks, &rh);
  if (r < 0) {
    log_err(cd, _("Failed to load LUKS2 reencryption context."));
    return r;
  }

  /* Replay the interrupted hotzone using the resilience protection data. */
  r = reencrypt_recover_segment(cd, hdr, rh, vks);
  if (r < 0)
    goto out;

  if ((r = reencrypt_assign_segments(cd, hdr, rh, 0, 0)))
    goto out;

  r = reencrypt_context_update(cd, rh);
  if (r) {
    log_err(cd, _("Failed to update reencryption context."));
    goto out;
  }

  /* Finalize and persist the recovered metadata. */
  r = reencrypt_teardown_ok(cd, hdr, rh);
  if (!r)
    r = LUKS2_hdr_write(cd, hdr);
out:
  LUKS2_reencrypt_free(cd, rh);

  return r;
}
4542
#else /* USE_LUKS2_REENCRYPTION */
4543
/*
 * Stub used when the library is built without LUKS2 reencryption support
 * (!USE_LUKS2_REENCRYPTION). Always fails with -ENOTSUP.
 */
int crypt_reencrypt_run(
  struct crypt_device *cd,
  int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
  void *usrptr)
{
  UNUSED(progress);
  UNUSED(usrptr);

  log_err(cd, _("This operation is not supported for this device type."));
  return -ENOTSUP;
}
4554
#endif
4555
4556
/*
 * Legacy public entry point: same as crypt_reencrypt_run() with no user
 * context pointer for the progress callback.
 */
int crypt_reencrypt(
  struct crypt_device *cd,
  int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
{
  void *no_usrptr = NULL;

  return crypt_reencrypt_run(cd, progress, no_usrptr);
}
4562
4563
/*
4564
 * use only for calculation of minimal data device size.
4565
 * The real data offset is taken directly from segments!
4566
 */
4567
uint64_t LUKS2_reencrypt_data_offset(struct luks2_hdr *hdr, bool blockwise)
4568
0
{
4569
0
  crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
4570
0
  uint64_t data_offset = LUKS2_get_data_offset(hdr);
4571
4572
0
  if (ri == CRYPT_REENCRYPT_CLEAN && reencrypt_direction(hdr) == CRYPT_REENCRYPT_FORWARD)
4573
0
    data_offset += reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
4574
4575
0
  return blockwise ? data_offset : data_offset << SECTOR_SHIFT;
4576
0
}
4577
4578
/* internal only */
4579
int LUKS2_reencrypt_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr,
4580
  uint64_t check_size, uint64_t *dev_size, bool device_exclusive_check, bool dynamic)
4581
0
{
4582
0
  int r;
4583
0
  uint64_t data_offset, real_size = 0;
4584
4585
0
  if (reencrypt_direction(hdr) == CRYPT_REENCRYPT_BACKWARD &&
4586
0
      (LUKS2_get_segment_by_flag(hdr, "backup-moved-segment") || dynamic))
4587
0
    check_size += reencrypt_data_shift(hdr);
4588
4589
0
  r = device_check_access(cd, crypt_data_device(cd),
4590
0
        device_exclusive_check ? DEV_EXCL : DEV_OK);
4591
0
  if (r)
4592
0
    return r;
4593
4594
0
  data_offset = LUKS2_reencrypt_data_offset(hdr, false);
4595
4596
0
  r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
4597
0
  if (r)
4598
0
    return r;
4599
4600
0
  r = device_size(crypt_data_device(cd), &real_size);
4601
0
  if (r)
4602
0
    return r;
4603
4604
0
  log_dbg(cd, "Required minimal device size: %" PRIu64 " (%" PRIu64 " sectors)"
4605
0
        ", real device size: %" PRIu64 " (%" PRIu64 " sectors) "
4606
0
        "calculated device size: %" PRIu64 " (%" PRIu64 " sectors)",
4607
0
        check_size, check_size >> SECTOR_SHIFT, real_size, real_size >> SECTOR_SHIFT,
4608
0
        real_size - data_offset, (real_size - data_offset) >> SECTOR_SHIFT);
4609
4610
0
  if (real_size < data_offset || (check_size && real_size < check_size)) {
4611
0
    log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
4612
0
    return -EINVAL;
4613
0
  }
4614
4615
0
  *dev_size = real_size - data_offset;
4616
4617
0
  return 0;
4618
0
}
4619
#if USE_LUKS2_REENCRYPTION
4620
/* returns keyslot number on success (>= 0) or negative errnor otherwise */
4621
/* returns keyslot number on success (>= 0) or negative errnor otherwise */
/*
 * Entry point for reencryption crash recovery with already-unlocked volume
 * keys. Caller must hold the reencryption/metadata lock.
 */
int LUKS2_reencrypt_locked_recovery_by_vks(struct crypt_device *cd,
  struct volume_key *vks)
{
  uint64_t minimal_size, device_size;
  int r = -EINVAL;
  struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);

  log_dbg(cd, "Entering reencryption crash recovery.");

  /* NOTE(review): this early return skips the keyring-key drop below,
   * unlike the device-size check right after — confirm that is intended. */
  if (LUKS2_get_data_size(hdr, &minimal_size, NULL))
    return r;
  /* Recovery requires exclusive access to the data device. */
  if (LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, true, false))
    goto out;

  r = reencrypt_recovery(cd, hdr, device_size, vks);

out:
  /* On failure, remove any volume keys this context uploaded to the keyring. */
  if (r < 0)
    crypt_drop_uploaded_keyring_key(cd, vks);
  return r;
}
4642
#endif
4643
/*
 * Query current reencryption status and, optionally, its parameters.
 *
 * @hdr    LUKS2 header
 * @params optional out-structure; zeroed first, filled only for a valid
 *         in-progress reencryption. CRYPT_REENCRYPT_REPAIR_NEEDED is set in
 *         params->flags when the metadata requires repair.
 *
 * Returns the reencryption status (CRYPT_REENCRYPT_INVALID on broken metadata).
 */
crypt_reencrypt_info LUKS2_reencrypt_get_params(struct luks2_hdr *hdr,
  struct crypt_params_reencrypt *params)
{
  crypt_reencrypt_info ri;
  int digest;
  uint8_t version;

  if (params)
    memset(params, 0, sizeof(*params));

  ri = LUKS2_reencrypt_status(hdr);
  if (ri == CRYPT_REENCRYPT_NONE || ri == CRYPT_REENCRYPT_INVALID || !params)
    return ri;

  /* -ENOENT (no reencrypt keyslot) is tolerated here; handled below. */
  digest = LUKS2_digest_by_keyslot(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
  if (digest < 0 && digest != -ENOENT)
    return CRYPT_REENCRYPT_INVALID;

  /*
   * In case there's an old "online-reencrypt" requirement or reencryption
   * keyslot digest is missing inform caller reencryption metadata requires repair.
   */
  if (!LUKS2_config_get_reencrypt_version(hdr, &version) &&
      (version < 2 || digest == -ENOENT)) {
    params->flags |= CRYPT_REENCRYPT_REPAIR_NEEDED;
    return ri;
  }

  /* Fill caller-visible parameters from on-disk reencryption metadata. */
  params->mode = reencrypt_mode(hdr);
  params->direction = reencrypt_direction(hdr);
  params->resilience = reencrypt_resilience_type(hdr);
  params->hash = reencrypt_resilience_hash(hdr);
  params->data_shift = reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
  params->max_hotzone_size = 0;
  if (LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
    params->flags |= CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT;

  return ri;
}