Coverage Report

Created: 2025-10-12 06:56

/src/cryptsetup/lib/integrity/integrity.c
Every instrumented line below has an execution count of 0; the file is entirely uncovered. Source listing:
// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Integrity volume handling
 *
 * Copyright (C) 2016-2025 Milan Broz
 */

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <uuid/uuid.h>

#include "integrity.h"
#include "internal.h"

/* For LUKS2, integrity metadata are on DATA device even for detached header! */
static struct device *INTEGRITY_metadata_device(struct crypt_device *cd)
{
  const char *type = crypt_get_type(cd);

  if (type && !strcmp(type, CRYPT_LUKS2))
    return crypt_data_device(cd);

  return crypt_metadata_device(cd);
}

static int INTEGRITY_read_superblock(struct crypt_device *cd,
             struct device *device,
             uint64_t offset, struct superblock *sb)
{
  int devfd, r;

  log_dbg(cd, "Reading kernel dm-integrity metadata on %s.", device_path(device));

  devfd = device_open(cd, device, O_RDONLY);
  if (devfd < 0)
    return -EINVAL;

  if (read_lseek_blockwise(devfd, device_block_size(cd, device),
      device_alignment(device), sb, sizeof(*sb), offset) != sizeof(*sb)) {
    log_dbg(cd, "Cannot read kernel dm-integrity metadata on %s.", device_path(device));
    return -EINVAL;
  }

  if (memcmp(sb->magic, SB_MAGIC, sizeof(sb->magic))) {
    log_dbg(cd, "No kernel dm-integrity metadata detected on %s.", device_path(device));
    r = -EINVAL;
  } else if (sb->version < SB_VERSION_1 || sb->version > SB_VERSION_6) {
    log_err(cd, _("Incompatible kernel dm-integrity metadata (version %u) detected on %s."),
      sb->version, device_path(device));
    r = -EINVAL;
  } else {
    sb->integrity_tag_size = le16toh(sb->integrity_tag_size);
    sb->journal_sections = le32toh(sb->journal_sections);
    sb->provided_data_sectors = le64toh(sb->provided_data_sectors);
    sb->recalc_sector = le64toh(sb->recalc_sector);
    sb->flags = le32toh(sb->flags);
    r = 0;
  }

  return r;
}
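The on-disk superblock is little-endian and is converted to host byte order in place once the magic and version checks pass. Below is a minimal standalone sketch of the same conversion idiom; the struct here is a simplified stand-in, not the real superblock layout from integrity.h.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the on-disk superblock; field names mirror the
 * ones used above, but the packing/offsets are illustrative only. */
struct demo_sb {
  uint16_t integrity_tag_size;
  uint32_t journal_sections;
  uint64_t provided_data_sectors;
  uint64_t recalc_sector;
  uint32_t flags;
};

/* Same in-place little-endian -> host conversion as INTEGRITY_read_superblock(). */
static void demo_sb_to_host(struct demo_sb *sb)
{
  sb->integrity_tag_size    = le16toh(sb->integrity_tag_size);
  sb->journal_sections      = le32toh(sb->journal_sections);
  sb->provided_data_sectors = le64toh(sb->provided_data_sectors);
  sb->recalc_sector         = le64toh(sb->recalc_sector);
  sb->flags                 = le32toh(sb->flags);
}

int main(void)
{
  struct demo_sb sb = { .provided_data_sectors = htole64(2048) };

  demo_sb_to_host(&sb);
  printf("provided_data_sectors: %llu\n",
         (unsigned long long)sb.provided_data_sectors);
  return 0;
}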

int INTEGRITY_read_sb(struct crypt_device *cd,
          struct crypt_params_integrity *params,
          uint32_t *flags)
{
  struct superblock sb;
  int r;

  r = INTEGRITY_read_superblock(cd, INTEGRITY_metadata_device(cd), 0, &sb);
  if (r)
    return r;

  if (params) {
    params->sector_size = SECTOR_SIZE << sb.log2_sectors_per_block;
    params->tag_size = sb.integrity_tag_size;
  }

  if (flags)
    *flags = sb.flags;

  return 0;
}

int INTEGRITY_dump(struct crypt_device *cd, struct device *device, uint64_t offset)
{
  struct superblock sb;
  uint64_t sector_size;
  int r;

  r = INTEGRITY_read_superblock(cd, device, offset, &sb);
  if (r)
    return r;

  sector_size = (uint64_t)SECTOR_SIZE << sb.log2_sectors_per_block;
  log_std(cd, "INTEGRITY header information for %s.\n", device_path(device));
  log_std(cd, "version: %d\n", (unsigned)sb.version);
  log_std(cd, "tag size: %u [bytes]\n", sb.integrity_tag_size);
  log_std(cd, "sector size: %" PRIu64 " [bytes]\n", sector_size);
  log_std(cd, "data size: %" PRIu64 " [512-byte units] (%" PRIu64 " [bytes])\n",
    sb.provided_data_sectors, sb.provided_data_sectors * SECTOR_SIZE);
  if (sb.version >= SB_VERSION_2 && (sb.flags & SB_FLAG_RECALCULATING))
    log_std(cd, "recalculate sector: %" PRIu64 "\n", sb.recalc_sector);
  log_std(cd, "journal sections: %u\n", sb.journal_sections);
  log_std(cd, "log2 interleave sectors: %d\n", sb.log2_interleave_sectors);
  log_std(cd, "log2 blocks per bitmap: %u\n", sb.log2_blocks_per_bitmap_bit);
  log_std(cd, "flags: %s%s%s%s%s%s\n",
    sb.flags & SB_FLAG_HAVE_JOURNAL_MAC ? "have_journal_mac " : "",
    sb.flags & SB_FLAG_RECALCULATING ? "recalculating " : "",
    sb.flags & SB_FLAG_DIRTY_BITMAP ? "dirty_bitmap " : "",
    sb.flags & SB_FLAG_FIXED_PADDING ? "fix_padding " : "",
    sb.flags & SB_FLAG_FIXED_HMAC ? "fix_hmac " : "",
    sb.flags & SB_FLAG_INLINE ? "inline " : "");

  return 0;
}
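INTEGRITY_dump() backs the header listing printed by `integritysetup dump`. A hedged sketch of reaching the same output through the public libcryptsetup API; the device path argument and the minimal error handling are illustrative only.

#include <libcryptsetup.h>

int dump_integrity_header(const char *device_path)
{
  struct crypt_device *cd = NULL;
  int r;

  r = crypt_init(&cd, device_path);
  if (r < 0)
    return r;

  /* Load the dm-integrity superblock, then print the same fields
   * INTEGRITY_dump() logs above. */
  r = crypt_load(cd, CRYPT_INTEGRITY, NULL);
  if (!r)
    r = crypt_dump(cd);

  crypt_free(cd);
  return r;
}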

int INTEGRITY_data_sectors(struct crypt_device *cd,
         struct device *device, uint64_t offset,
         uint64_t *data_sectors)
{
  struct superblock sb;
  int r;

  r = INTEGRITY_read_superblock(cd, device, offset, &sb);
  if (r)
    return r;

  *data_sectors = sb.provided_data_sectors;
  return 0;
}

int INTEGRITY_key_size(const char *integrity, int required_key_size)
{
  int ks = 0;

  if (!integrity && required_key_size)
    return -EINVAL;

  if (!integrity)
    return 0;

  //FIXME: use crypto backend hash size
  if (!strcmp(integrity, "aead"))
    ks = 0;
  else if (!strcmp(integrity, "hmac(sha1)"))
    ks = required_key_size ?: 20;
  else if (!strcmp(integrity, "hmac(sha256)"))
    ks = required_key_size ?: 32;
  else if (!strcmp(integrity, "hmac(sha512)"))
    ks = required_key_size ?: 64;
  else if (!strcmp(integrity, "poly1305"))
    ks = 0;
  else if (!strcmp(integrity, "none"))
    ks = 0;
  else
    return -EINVAL;

  if (required_key_size && ks != required_key_size)
    return -EINVAL;

  return ks;
}

/* Return hash or hmac(hash) size, if known */
int INTEGRITY_hash_tag_size(const char *integrity)
{
  char hash[MAX_CIPHER_LEN];
  int r;

  if (!integrity)
    return 0;

  if (!strcmp(integrity, "crc32") || !strcmp(integrity, "crc32c"))
    return 4;

  if (!strcmp(integrity, "xxhash64"))
    return 8;

  r = sscanf(integrity, "hmac(%" MAX_CIPHER_LEN_STR "[^)]s", hash);
  if (r == 1)
    r = crypt_hash_size(hash);
  else
    r = crypt_hash_size(integrity);

  return r < 0 ? 0 : r;
}
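The sscanf() call above uses the %[^)] scanset to pull the hash name out of an "hmac(<hash>)" specification, with MAX_CIPHER_LEN_STR supplying the field width. A standalone sketch of that parsing idiom, with a stand-in width of 63:

#include <stdio.h>

int main(void)
{
  const char *integrity = "hmac(sha256)";
  char hash[64];

  /* %63[^)] copies everything up to the closing parenthesis. */
  if (sscanf(integrity, "hmac(%63[^)]", hash) == 1)
    printf("hash inside hmac(): %s\n", hash);   /* -> sha256 */
  else
    printf("not an hmac() spec: %s\n", integrity);
  return 0;
}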

int INTEGRITY_tag_size(const char *integrity,
           const char *cipher,
           const char *cipher_mode)
{
  int iv_tag_size = 0, auth_tag_size = 0;

  if (!cipher_mode)
    iv_tag_size = 0;
  else if (!strcmp(cipher_mode, "xts-random"))
    iv_tag_size = 16;
  else if (!strcmp(cipher_mode, "gcm-random"))
    iv_tag_size = 12;
  else if (!strcmp(cipher_mode, "ccm-random"))
    iv_tag_size = 8;
  else if (!strcmp(cipher_mode, "ctr-random"))
    iv_tag_size = 16;
  else if (!strcmp(cipher, "aegis256") && !strcmp(cipher_mode, "random"))
    iv_tag_size = 32;
  else if (!strcmp(cipher_mode, "random"))
    iv_tag_size = 16;

  //FIXME: use crypto backend hash size
  if (!integrity || !strcmp(integrity, "none"))
    auth_tag_size = 0;
  else if (!strcmp(integrity, "aead"))
    auth_tag_size = 16; /* gcm- mode only */
  else if (!strcmp(integrity, "cmac(aes)"))
    auth_tag_size = 16;
  else if (!strcmp(integrity, "hmac(sha1)"))
    auth_tag_size = 20;
  else if (!strcmp(integrity, "hmac(sha256)"))
    auth_tag_size = 32;
  else if (!strcmp(integrity, "hmac(sha512)"))
    auth_tag_size = 64;
  else if (!strcmp(integrity, "poly1305")) {
    if (iv_tag_size)
      iv_tag_size = 12;
    auth_tag_size = 16;
  }

  return iv_tag_size + auth_tag_size;
}
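The per-sector tag is the sum of the stored IV size (chosen by the cipher mode) and the authentication tag size (chosen by the integrity algorithm); for example, gcm-random combined with "aead" gives 12 + 16 = 28 bytes. A standalone worked example mirroring two of the branches above (an illustration, not the library function):

#include <stdio.h>
#include <string.h>

/* Simplified mirror of the composition above, covering only two branches. */
static int demo_tag_size(const char *cipher_mode, const char *integrity)
{
  int iv_tag_size = 0, auth_tag_size = 0;

  if (!strcmp(cipher_mode, "gcm-random"))
    iv_tag_size = 12;          /* stored per-sector random IV */
  else if (!strcmp(cipher_mode, "xts-random"))
    iv_tag_size = 16;

  if (!strcmp(integrity, "aead"))
    auth_tag_size = 16;        /* GCM authentication tag */
  else if (!strcmp(integrity, "hmac(sha256)"))
    auth_tag_size = 32;

  return iv_tag_size + auth_tag_size;
}

int main(void)
{
  printf("gcm-random + aead:         %d bytes/sector\n",
         demo_tag_size("gcm-random", "aead"));           /* 28 */
  printf("xts-random + hmac(sha256): %d bytes/sector\n",
         demo_tag_size("xts-random", "hmac(sha256)"));   /* 48 */
  return 0;
}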

int INTEGRITY_create_dmd_device(struct crypt_device *cd,
           const struct crypt_params_integrity *params,
           struct volume_key *vk,
           struct volume_key *journal_crypt_key,
           struct volume_key *journal_mac_key,
           struct crypt_dm_active_device *dmd,
           uint32_t flags, uint32_t sb_flags)
{
  int r;

  if (!dmd)
    return -EINVAL;

  *dmd = (struct crypt_dm_active_device) {
    .flags = flags,
  };

  /* Workaround for kernel dm-integrity table bug */
  if (sb_flags & SB_FLAG_RECALCULATING)
    dmd->flags |= CRYPT_ACTIVATE_RECALCULATE;

  if (sb_flags & SB_FLAG_INLINE)
    dmd->flags |= (CRYPT_ACTIVATE_NO_JOURNAL | CRYPT_ACTIVATE_INLINE_MODE);

  r = INTEGRITY_data_sectors(cd, INTEGRITY_metadata_device(cd),
           crypt_get_data_offset(cd) * SECTOR_SIZE, &dmd->size);
  if (r < 0)
    return r;

  return dm_integrity_target_set(cd, &dmd->segment, 0, dmd->size,
      INTEGRITY_metadata_device(cd), crypt_data_device(cd),
      crypt_get_integrity_tag_size(cd), crypt_get_data_offset(cd),
      crypt_get_sector_size(cd), vk, journal_crypt_key,
      journal_mac_key, params);
}

int INTEGRITY_activate_dmd_device(struct crypt_device *cd,
           const char *name,
           const char *type,
           struct crypt_dm_active_device *dmd,
           uint32_t sb_flags)
{
  int r;
  uint64_t dmi_flags;
  struct dm_target *tgt = &dmd->segment;

  if (!single_segment(dmd) || tgt->type != DM_INTEGRITY)
    return -EINVAL;

  log_dbg(cd, "Trying to activate INTEGRITY device on top of %s, using name %s, tag size %d%s, provided sectors %" PRIu64".",
    device_path(tgt->data_device), name, tgt->u.integrity.tag_size,
    (sb_flags & SB_FLAG_INLINE) ? " (inline)" :"", dmd->size);

  r = create_or_reload_device(cd, name, type, dmd);

  if (r < 0 && (dm_flags(cd, DM_INTEGRITY, &dmi_flags) || !(dmi_flags & DM_INTEGRITY_SUPPORTED))) {
    log_err(cd, _("Kernel does not support dm-integrity mapping."));
    return -ENOTSUP;
  }

  if (r < 0 && (sb_flags & SB_FLAG_FIXED_PADDING) && !dm_flags(cd, DM_INTEGRITY, &dmi_flags) &&
      !(dmi_flags & DM_INTEGRITY_FIX_PADDING_SUPPORTED)) {
    log_err(cd, _("Kernel does not support dm-integrity fixed metadata alignment."));
    return -ENOTSUP;
  }

  if (r < 0 && (dmd->flags & CRYPT_ACTIVATE_RECALCULATE) &&
      !(crypt_get_compatibility(cd) & CRYPT_COMPAT_LEGACY_INTEGRITY_RECALC) &&
      ((sb_flags & SB_FLAG_FIXED_HMAC) ?
      (tgt->u.integrity.vk && !tgt->u.integrity.journal_integrity_key) :
      (tgt->u.integrity.vk || tgt->u.integrity.journal_integrity_key))) {
    log_err(cd, _("Kernel refuses to activate insecure recalculate option (see legacy activation options to override)."));
    return -ENOTSUP;
  }

  if (r < 0 && (sb_flags & SB_FLAG_INLINE) && !dm_flags(cd, DM_INTEGRITY, &dmi_flags) &&
      !(dmi_flags & DM_INTEGRITY_INLINE_MODE_SUPPORTED)) {
    log_err(cd, _("Kernel does not support dm-integrity inline mode."));
    return -ENOTSUP;
  }

  return r;
}

int INTEGRITY_activate(struct crypt_device *cd,
           const char *name,
           const struct crypt_params_integrity *params,
           struct volume_key *vk,
           struct volume_key *journal_crypt_key,
           struct volume_key *journal_mac_key,
           uint32_t flags, uint32_t sb_flags)
{
  struct crypt_dm_active_device dmdq = {}, dmd = {};
  int r;

  if (flags & CRYPT_ACTIVATE_REFRESH) {
    r = dm_query_device(cd, name, DM_ACTIVE_CRYPT_KEYSIZE |
                DM_ACTIVE_CRYPT_KEY |
                DM_ACTIVE_INTEGRITY_PARAMS |
                DM_ACTIVE_JOURNAL_CRYPT_KEY |
                DM_ACTIVE_JOURNAL_MAC_KEY, &dmdq);
    if (r < 0)
      return r;

    r = INTEGRITY_create_dmd_device(cd, params, vk ?: dmdq.segment.u.integrity.vk,
            journal_crypt_key ?: dmdq.segment.u.integrity.journal_crypt_key,
            journal_mac_key ?: dmdq.segment.u.integrity.journal_integrity_key,
            &dmd, flags, sb_flags);

    if (!r)
      dmd.size = dmdq.size;
  } else
    r = INTEGRITY_create_dmd_device(cd, params, vk, journal_crypt_key,
            journal_mac_key, &dmd, flags, sb_flags);

  if (!r)
    r = INTEGRITY_activate_dmd_device(cd, name, CRYPT_INTEGRITY, &dmd, sb_flags);

  dm_targets_free(cd, &dmdq);
  dm_targets_free(cd, &dmd);
  return r;
}
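For context, a hedged sketch of how this activation path is reached from the public libcryptsetup API for a standalone integrity device; the device path and mapping name are placeholders and error handling is minimal.

#include <libcryptsetup.h>

int activate_integrity(const char *device_path, const char *name)
{
  struct crypt_device *cd = NULL;
  int r;

  r = crypt_init(&cd, device_path);
  if (r < 0)
    return r;

  r = crypt_load(cd, CRYPT_INTEGRITY, NULL);
  if (!r)
    /* No volume key for plain checksum modes such as crc32c; keyed
     * modes (e.g. hmac) would pass the key and its size here. */
    r = crypt_activate_by_volume_key(cd, name, NULL, 0, 0);

  crypt_free(cd);
  return r;
}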

static int _create_reduced_device(struct crypt_device *cd,
          const char *name,
          uint64_t device_size_sectors,
          struct device **ret_device)
{
  int r;
  char path[PATH_MAX];
  struct device *dev;

  struct crypt_dm_active_device dmd = {
    .size = device_size_sectors,
    .flags = CRYPT_ACTIVATE_PRIVATE,
  };

  assert(cd);
  assert(name);
  assert(device_size_sectors);
  assert(ret_device);

  r = snprintf(path, sizeof(path), "%s/%s", dm_get_dir(), name);
  if (r < 0 || (size_t)r >= sizeof(path))
    return -EINVAL;

  r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
        crypt_get_data_offset(cd), &device_size_sectors, &dmd.flags);
  if (r)
    return r;

  log_dbg(cd, "Activating reduced helper device %s.", name);

  r = dm_linear_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd), crypt_get_data_offset(cd));
  if (!r)
    r = dm_create_device(cd, name, CRYPT_SUBDEV, &dmd);
  dm_targets_free(cd, &dmd);
  if (r < 0)
    return r;

  r = device_alloc(cd, &dev, path);
  if (!r) {
    *ret_device = dev;
    return 0;
  }

  dm_remove_device(cd, name, CRYPT_DEACTIVATE_FORCE);

  return r;
}
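Both this helper and INTEGRITY_format() below rely on snprintf's return value to reject truncated names: a negative result signals an encoding error, and a result greater than or equal to the buffer size signals truncation. A minimal standalone sketch of the idiom, with "/dev/mapper" standing in for dm_get_dir():

#include <limits.h>
#include <stdio.h>

int build_dm_path(char *buf, size_t buf_size, const char *dm_dir, const char *name)
{
  int r = snprintf(buf, buf_size, "%s/%s", dm_dir, name);

  /* Mirrors the -EINVAL returns above: error or output did not fit. */
  if (r < 0 || (size_t)r >= buf_size)
    return -1;
  return 0;
}

int main(void)
{
  char path[PATH_MAX];

  if (build_dm_path(path, sizeof(path), "/dev/mapper", "temporary-cryptsetup-reduced-x") == 0)
    printf("%s\n", path);
  return 0;
}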

int INTEGRITY_format(struct crypt_device *cd,
         const struct crypt_params_integrity *params,
         struct volume_key *integrity_key,
         struct volume_key *journal_crypt_key,
         struct volume_key *journal_mac_key,
         uint64_t backing_device_sectors,
         uint32_t *sb_flags,
         bool integrity_inline)
{
  uint64_t dmi_flags;
  char reduced_device_name[70], tmp_name[64], tmp_uuid[40];
  struct crypt_dm_active_device dmdi = {
    .size = 8,
    .flags = CRYPT_ACTIVATE_PRIVATE, /* We always create journal but it can be unused later */
  };
  struct dm_target *tgt = &dmdi.segment;
  int r;
  uuid_t tmp_uuid_bin;
  uint64_t data_offset_sectors;
  struct device *p_metadata_device, *p_data_device, *reduced_device = NULL;

  uuid_generate(tmp_uuid_bin);
  uuid_unparse(tmp_uuid_bin, tmp_uuid);

  r = snprintf(tmp_name, sizeof(tmp_name), "temporary-cryptsetup-%s", tmp_uuid);
  if (r < 0 || (size_t)r >= sizeof(tmp_name))
    return -EINVAL;

  p_metadata_device = INTEGRITY_metadata_device(cd);

  if (backing_device_sectors) {
    r = snprintf(reduced_device_name, sizeof(reduced_device_name),
           "temporary-cryptsetup-reduced-%s", tmp_uuid);
    if (r < 0 || (size_t)r >= sizeof(reduced_device_name))
      return -EINVAL;

    /*
     * Creates reduced dm-linear mapping over data device starting at
     * crypt_data_offset(cd) and backing_device_sectors in size.
     */
    r = _create_reduced_device(cd, reduced_device_name,
             backing_device_sectors, &reduced_device);
    if (r < 0)
      return r;

    data_offset_sectors = 0;
    p_data_device = reduced_device;
    if (p_metadata_device == crypt_data_device(cd))
      p_metadata_device = reduced_device;
  } else {
    data_offset_sectors = crypt_get_data_offset(cd);
    p_data_device = crypt_data_device(cd);
  }

  if (integrity_inline)
    dmdi.flags |= (CRYPT_ACTIVATE_NO_JOURNAL | CRYPT_ACTIVATE_INLINE_MODE);

  r = dm_integrity_target_set(cd, tgt, 0, dmdi.size, p_metadata_device,
      p_data_device, crypt_get_integrity_tag_size(cd),
      data_offset_sectors, crypt_get_sector_size(cd), integrity_key,
      journal_crypt_key, journal_mac_key, params);
  if (r < 0)
    goto err;

  log_dbg(cd, "Trying to format INTEGRITY device on top of %s, tmp name %s, tag size %d%s.",
    device_path(tgt->data_device), tmp_name, tgt->u.integrity.tag_size, integrity_inline ? " (inline)" : "");

  r = device_block_adjust(cd, tgt->data_device, DEV_EXCL, tgt->u.integrity.offset, NULL, NULL);
  if (r < 0 && (dm_flags(cd, DM_INTEGRITY, &dmi_flags) || !(dmi_flags & DM_INTEGRITY_SUPPORTED))) {
    log_err(cd, _("Kernel does not support dm-integrity mapping."));
    r = -ENOTSUP;
  }
  if (r)
    goto err;

  if (tgt->u.integrity.meta_device) {
    r = device_block_adjust(cd, tgt->u.integrity.meta_device, DEV_EXCL, 0, NULL, NULL);
    if (r)
      goto err;
  }

  r = dm_create_device(cd, tmp_name, CRYPT_INTEGRITY, &dmdi);
  if (r)
    goto err;

  r = dm_remove_device(cd, tmp_name, CRYPT_DEACTIVATE_FORCE);
  if (r)
    goto err;

  /* reload sb_flags from superblock (important for SB_FLAG_INLINE) */
  if (sb_flags)
    r = INTEGRITY_read_sb(cd, NULL, sb_flags);
err:
  dm_targets_free(cd, &dmdi);
  if (reduced_device) {
    dm_remove_device(cd, reduced_device_name, CRYPT_DEACTIVATE_FORCE);
    device_free(cd, reduced_device);
  }
  return r;
}
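Finally, a hedged sketch of the public-API call that ends up in INTEGRITY_format() when formatting a standalone integrity device; the parameter values (crc32c with a 4-byte tag and 4096-byte sectors) are illustrative assumptions, not recommended defaults.

#include <libcryptsetup.h>

int format_integrity(const char *device_path)
{
  struct crypt_device *cd = NULL;
  struct crypt_params_integrity params = {
    .integrity = "crc32c",   /* tag algorithm */
    .tag_size = 4,           /* bytes per sector, matching crc32c */
    .sector_size = 4096,
  };
  int r;

  r = crypt_init(&cd, device_path);
  if (r < 0)
    return r;

  /* Wipes and writes dm-integrity metadata; this is destructive. */
  r = crypt_format(cd, CRYPT_INTEGRITY, NULL, NULL, NULL, NULL, 0, &params);

  crypt_free(cd);
  return r;
}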