/src/cryptsetup/lib/integrity/integrity.c
Line | Count | Source |
1 | | // SPDX-License-Identifier: LGPL-2.1-or-later |
2 | | /* |
3 | | * Integrity volume handling |
4 | | * |
5 | | * Copyright (C) 2016-2025 Milan Broz |
6 | | */ |
7 | | |
8 | | #include <errno.h> |
9 | | #include <stdio.h> |
10 | | #include <stdlib.h> |
11 | | #include <string.h> |
12 | | #include <uuid/uuid.h> |
13 | | |
14 | | #include "integrity.h" |
15 | | #include "internal.h" |
16 | | |
17 | | /* For LUKS2, integrity metadata are on DATA device even for detached header! */ |
18 | | static struct device *INTEGRITY_metadata_device(struct crypt_device *cd) |
19 | 0 | { |
20 | 0 | const char *type = crypt_get_type(cd); |
21 | |
|
22 | 0 | if (type && !strcmp(type, CRYPT_LUKS2)) |
23 | 0 | return crypt_data_device(cd); |
24 | | |
25 | 0 | return crypt_metadata_device(cd); |
26 | 0 | } |
27 | | |
28 | | static int INTEGRITY_read_superblock(struct crypt_device *cd, |
29 | | struct device *device, |
30 | | uint64_t offset, struct superblock *sb) |
31 | 0 | { |
32 | 0 | int devfd, r; |
33 | |
|
34 | 0 | log_dbg(cd, "Reading kernel dm-integrity metadata on %s.", device_path(device)); |
35 | |
|
36 | 0 | devfd = device_open(cd, device, O_RDONLY); |
37 | 0 | if(devfd < 0) |
38 | 0 | return -EINVAL; |
39 | | |
40 | 0 | if (read_lseek_blockwise(devfd, device_block_size(cd, device), |
41 | 0 | device_alignment(device), sb, sizeof(*sb), offset) != sizeof(*sb)) { |
42 | 0 | log_dbg(cd, "Cannot read kernel dm-integrity metadata on %s.", device_path(device)); |
43 | 0 | return -EINVAL; |
44 | 0 | } |
45 | | |
46 | 0 | if (memcmp(sb->magic, SB_MAGIC, sizeof(sb->magic))) { |
47 | 0 | log_dbg(cd, "No kernel dm-integrity metadata detected on %s.", device_path(device)); |
48 | 0 | r = -EINVAL; |
49 | 0 | } else if (sb->version < SB_VERSION_1 || sb->version > SB_VERSION_6) { |
50 | 0 | log_err(cd, _("Incompatible kernel dm-integrity metadata (version %u) detected on %s."), |
51 | 0 | sb->version, device_path(device)); |
52 | 0 | r = -EINVAL; |
53 | 0 | } else { |
54 | 0 | sb->integrity_tag_size = le16toh(sb->integrity_tag_size); |
55 | 0 | sb->journal_sections = le32toh(sb->journal_sections); |
56 | 0 | sb->provided_data_sectors = le64toh(sb->provided_data_sectors); |
57 | 0 | sb->recalc_sector = le64toh(sb->recalc_sector); |
58 | 0 | sb->flags = le32toh(sb->flags); |
59 | 0 | r = 0; |
60 | 0 | } |
61 | |
|
62 | 0 | return r; |
63 | 0 | } |
64 | | |
65 | | int INTEGRITY_read_sb(struct crypt_device *cd, |
66 | | struct crypt_params_integrity *params, |
67 | | uint32_t *flags) |
68 | 0 | { |
69 | 0 | struct superblock sb; |
70 | 0 | int r; |
71 | |
|
72 | 0 | r = INTEGRITY_read_superblock(cd, INTEGRITY_metadata_device(cd), 0, &sb); |
73 | 0 | if (r) |
74 | 0 | return r; |
75 | | |
76 | 0 | if (params) { |
77 | 0 | params->sector_size = SECTOR_SIZE << sb.log2_sectors_per_block; |
78 | 0 | params->tag_size = sb.integrity_tag_size; |
79 | 0 | } |
80 | |
|
81 | 0 | if (flags) |
82 | 0 | *flags = sb.flags; |
83 | |
|
84 | 0 | return 0; |
85 | 0 | } |
86 | | |
87 | | int INTEGRITY_dump(struct crypt_device *cd, struct device *device, uint64_t offset) |
88 | 0 | { |
89 | 0 | struct superblock sb; |
90 | 0 | uint64_t sector_size; |
91 | 0 | int r; |
92 | |
|
93 | 0 | r = INTEGRITY_read_superblock(cd, device, offset, &sb); |
94 | 0 | if (r) |
95 | 0 | return r; |
96 | | |
97 | 0 | sector_size = (uint64_t)SECTOR_SIZE << sb.log2_sectors_per_block; |
98 | 0 | log_std(cd, "INTEGRITY header information for %s.\n", device_path(device)); |
99 | 0 | log_std(cd, "version: %d\n", (unsigned)sb.version); |
100 | 0 | log_std(cd, "tag size: %u [bytes]\n", sb.integrity_tag_size); |
101 | 0 | log_std(cd, "sector size: %" PRIu64 " [bytes]\n", sector_size); |
102 | 0 | log_std(cd, "data size: %" PRIu64 " [512-byte units] (%" PRIu64 " [bytes])\n", |
103 | 0 | sb.provided_data_sectors, sb.provided_data_sectors * SECTOR_SIZE); |
104 | 0 | if (sb.version >= SB_VERSION_2 && (sb.flags & SB_FLAG_RECALCULATING)) |
105 | 0 | log_std(cd, "recalculate sector: %" PRIu64 "\n", sb.recalc_sector); |
106 | 0 | log_std(cd, "journal sections: %u\n", sb.journal_sections); |
107 | 0 | log_std(cd, "log2 interleave sectors: %d\n", sb.log2_interleave_sectors); |
108 | 0 | log_std(cd, "log2 blocks per bitmap: %u\n", sb.log2_blocks_per_bitmap_bit); |
109 | 0 | log_std(cd, "flags: %s%s%s%s%s%s\n", |
110 | 0 | sb.flags & SB_FLAG_HAVE_JOURNAL_MAC ? "have_journal_mac " : "", |
111 | 0 | sb.flags & SB_FLAG_RECALCULATING ? "recalculating " : "", |
112 | 0 | sb.flags & SB_FLAG_DIRTY_BITMAP ? "dirty_bitmap " : "", |
113 | 0 | sb.flags & SB_FLAG_FIXED_PADDING ? "fix_padding " : "", |
114 | 0 | sb.flags & SB_FLAG_FIXED_HMAC ? "fix_hmac " : "", |
115 | 0 | sb.flags & SB_FLAG_INLINE ? "inline " : ""); |
116 | |
|
117 | 0 | return 0; |
118 | 0 | } |
119 | | |
120 | | int INTEGRITY_data_sectors(struct crypt_device *cd, |
121 | | struct device *device, uint64_t offset, |
122 | | uint64_t *data_sectors) |
123 | 0 | { |
124 | 0 | struct superblock sb; |
125 | 0 | int r; |
126 | |
|
127 | 0 | r = INTEGRITY_read_superblock(cd, device, offset, &sb); |
128 | 0 | if (r) |
129 | 0 | return r; |
130 | | |
131 | 0 | *data_sectors = sb.provided_data_sectors; |
132 | 0 | return 0; |
133 | 0 | } |
134 | | |
/*
 * Return the key size (bytes) for a standalone integrity algorithm.
 * required_key_size (nonzero) overrides the default for keyed (h)mac
 * algorithms; for keyless algorithms any nonzero request is rejected.
 * phmac variants have no default and require an explicit key size.
 * Returns the key size, or -EINVAL for unknown/invalid combinations.
 */
int INTEGRITY_key_size(const char *integrity, int required_key_size)
{
	//FIXME: use crypto backend hash size
	static const struct {
		const char *name;
		int default_ks;	/* default key size, -EINVAL if caller must supply one */
		int keyless;	/* nonzero: algorithm takes no key, size is fixed 0 */
	} ks_table[] = {
		{ "aead",          0,       1 },
		{ "hmac(sha1)",   20,       0 },
		{ "hmac(sha256)", 32,       0 },
		{ "hmac(sha512)", 64,       0 },
		{ "phmac(sha1)",   -EINVAL, 0 },
		{ "phmac(sha256)", -EINVAL, 0 },
		{ "phmac(sha512)", -EINVAL, 0 },
		{ "poly1305",      0,       1 },
		{ "none",          0,       1 },
	};
	size_t i, n = sizeof(ks_table) / sizeof(ks_table[0]);
	int ks = 0;

	if (!integrity)
		return required_key_size ? -EINVAL : 0;

	for (i = 0; i < n; i++)
		if (!strcmp(integrity, ks_table[i].name))
			break;

	if (i == n)
		return -EINVAL;

	if (ks_table[i].keyless)
		ks = 0;
	else
		ks = required_key_size ? required_key_size : ks_table[i].default_ks;

	/* Keyless algorithms cannot satisfy a nonzero key size request. */
	if (required_key_size && ks != required_key_size)
		return -EINVAL;

	return ks;
}
172 | | |
173 | | /* Return hash or hmac(hash) size, if known */ |
174 | | int INTEGRITY_hash_tag_size(const char *integrity) |
175 | 0 | { |
176 | 0 | char hash[MAX_CIPHER_LEN]; |
177 | 0 | int r; |
178 | |
|
179 | 0 | if (!integrity) |
180 | 0 | return 0; |
181 | | |
182 | 0 | if (!strcmp(integrity, "crc32") || !strcmp(integrity, "crc32c")) |
183 | 0 | return 4; |
184 | | |
185 | 0 | if (!strcmp(integrity, "xxhash64")) |
186 | 0 | return 8; |
187 | | |
188 | 0 | r = sscanf(integrity, "hmac(%" MAX_CIPHER_LEN_STR "[^)]s", hash); |
189 | 0 | if (r != 1) |
190 | 0 | r = sscanf(integrity, "phmac(%" MAX_CIPHER_LEN_STR "[^)]s", hash); |
191 | 0 | if (r == 1) |
192 | 0 | r = crypt_hash_size(hash); |
193 | 0 | else |
194 | 0 | r = crypt_hash_size(integrity); |
195 | |
|
196 | 0 | return r < 0 ? 0 : r; |
197 | 0 | } |
198 | | |
/*
 * Compute the per-sector tag size: space for the stored IV (depends on the
 * cipher mode) plus space for the authentication tag (depends on the
 * integrity algorithm). Any of the string arguments may be NULL.
 *
 * Fix: cipher was dereferenced by strcmp() without a NULL check (cipher_mode
 * was checked, cipher was not) — NULL cipher with mode "random" was
 * undefined behavior. A guard now treats NULL cipher as "not aegis256".
 */
int INTEGRITY_tag_size(const char *integrity,
		       const char *cipher,
		       const char *cipher_mode)
{
	int iv_tag_size = 0, auth_tag_size = 0;

	if (!cipher_mode)
		iv_tag_size = 0;
	else if (!strcmp(cipher_mode, "xts-random"))
		iv_tag_size = 16;
	else if (!strcmp(cipher_mode, "gcm-random"))
		iv_tag_size = 12;
	else if (!strcmp(cipher_mode, "ccm-random"))
		iv_tag_size = 8;
	else if (!strcmp(cipher_mode, "ctr-random"))
		iv_tag_size = 16;
	else if (cipher && !strcmp(cipher, "aegis256") && !strcmp(cipher_mode, "random"))
		iv_tag_size = 32;
	else if (!strcmp(cipher_mode, "random"))
		iv_tag_size = 16;

	//FIXME: use crypto backend hash size
	if (!integrity || !strcmp(integrity, "none"))
		auth_tag_size = 0;
	else if (!strcmp(integrity, "aead"))
		auth_tag_size = 16; /* gcm- mode only */
	else if (!strcmp(integrity, "cmac(aes)"))
		auth_tag_size = 16;
	else if (!strcmp(integrity, "hmac(sha1)"))
		auth_tag_size = 20;
	else if (!strcmp(integrity, "hmac(sha256)"))
		auth_tag_size = 32;
	else if (!strcmp(integrity, "hmac(sha512)"))
		auth_tag_size = 64;
	else if (!strcmp(integrity, "phmac(sha1)"))
		auth_tag_size = 20;
	else if (!strcmp(integrity, "phmac(sha256)"))
		auth_tag_size = 32;
	else if (!strcmp(integrity, "phmac(sha512)"))
		auth_tag_size = 64;
	else if (!strcmp(integrity, "poly1305")) {
		/* poly1305 stores a shorter (12-byte) IV when an IV is present. */
		if (iv_tag_size)
			iv_tag_size = 12;
		auth_tag_size = 16;
	}

	return iv_tag_size + auth_tag_size;
}
247 | | |
248 | | int INTEGRITY_create_dmd_device(struct crypt_device *cd, |
249 | | const struct crypt_params_integrity *params, |
250 | | struct volume_key *vk, |
251 | | struct volume_key *journal_crypt_key, |
252 | | struct volume_key *journal_mac_key, |
253 | | struct crypt_dm_active_device *dmd, |
254 | | uint32_t flags, uint32_t sb_flags) |
255 | 0 | { |
256 | 0 | int r; |
257 | |
|
258 | 0 | if (!dmd) |
259 | 0 | return -EINVAL; |
260 | | |
261 | 0 | *dmd = (struct crypt_dm_active_device) { |
262 | 0 | .flags = flags, |
263 | 0 | }; |
264 | | |
265 | | /* Workaround for kernel dm-integrity table bug */ |
266 | 0 | if (sb_flags & SB_FLAG_RECALCULATING) |
267 | 0 | dmd->flags |= CRYPT_ACTIVATE_RECALCULATE; |
268 | |
|
269 | 0 | if (sb_flags & SB_FLAG_INLINE) |
270 | 0 | dmd->flags |= (CRYPT_ACTIVATE_NO_JOURNAL | CRYPT_ACTIVATE_INLINE_MODE); |
271 | |
|
272 | 0 | r = INTEGRITY_data_sectors(cd, INTEGRITY_metadata_device(cd), |
273 | 0 | crypt_get_data_offset(cd) * SECTOR_SIZE, &dmd->size); |
274 | 0 | if (r < 0) |
275 | 0 | return r; |
276 | | |
277 | 0 | return dm_integrity_target_set(cd, &dmd->segment, 0, dmd->size, |
278 | 0 | INTEGRITY_metadata_device(cd), crypt_data_device(cd), |
279 | 0 | crypt_get_integrity_tag_size(cd), crypt_get_data_offset(cd), |
280 | 0 | crypt_get_sector_size(cd), vk, journal_crypt_key, |
281 | 0 | journal_mac_key, params); |
282 | 0 | } |
283 | | |
/*
 * Activate (or reload) the dm-integrity device described by dmd under the
 * given name. On failure, probe kernel dm-integrity feature flags to replace
 * the generic error with a specific "kernel does not support X" diagnostic;
 * otherwise the original device-mapper error is returned.
 */
int INTEGRITY_activate_dmd_device(struct crypt_device *cd,
	const char *name,
	const char *type,
	struct crypt_dm_active_device *dmd,
	uint32_t sb_flags)
{
	int r;
	uint64_t dmi_flags;
	struct dm_target *tgt = &dmd->segment;

	/* Only a single dm-integrity segment can be activated here. */
	if (!single_segment(dmd) || tgt->type != DM_INTEGRITY)
		return -EINVAL;

	log_dbg(cd, "Trying to activate INTEGRITY device on top of %s, using name %s, tag size %d%s, provided sectors %" PRIu64".",
		device_path(tgt->data_device), name, tgt->u.integrity.tag_size,
		(sb_flags & SB_FLAG_INLINE) ? " (inline)" :"", dmd->size);

	r = create_or_reload_device(cd, name, type, dmd);

	/* Kernel has no dm-integrity target support at all. */
	if (r < 0 && (dm_flags(cd, DM_INTEGRITY, &dmi_flags) || !(dmi_flags & DM_INTEGRITY_SUPPORTED))) {
		log_err(cd, _("Kernel does not support dm-integrity mapping."));
		return -ENOTSUP;
	}

	/* Superblock uses fixed metadata padding the kernel cannot handle. */
	if (r < 0 && (sb_flags & SB_FLAG_FIXED_PADDING) && !dm_flags(cd, DM_INTEGRITY, &dmi_flags) &&
	    !(dmi_flags & DM_INTEGRITY_FIX_PADDING_SUPPORTED)) {
		log_err(cd, _("Kernel does not support dm-integrity fixed metadata alignment."));
		return -ENOTSUP;
	}

	/* Recalculation was requested in a keyed configuration newer kernels
	 * refuse as insecure (with fixed HMAC: volume key present but no
	 * journal integrity key; without it: either key present), unless the
	 * legacy compatibility flag overrides the check. */
	if (r < 0 && (dmd->flags & CRYPT_ACTIVATE_RECALCULATE) &&
	    !(crypt_get_compatibility(cd) & CRYPT_COMPAT_LEGACY_INTEGRITY_RECALC) &&
	    ((sb_flags & SB_FLAG_FIXED_HMAC) ?
	    (tgt->u.integrity.vk && !tgt->u.integrity.journal_integrity_key) :
	    (tgt->u.integrity.vk || tgt->u.integrity.journal_integrity_key))) {
		log_err(cd, _("Kernel refuses to activate insecure recalculate option (see legacy activation options to override)."));
		return -ENOTSUP;
	}

	/* Inline mode requested but this kernel cannot do it. */
	if (r < 0 && (sb_flags & SB_FLAG_INLINE) && !dm_flags(cd, DM_INTEGRITY, &dmi_flags) &&
	    !(dmi_flags & DM_INTEGRITY_INLINE_MODE_SUPPORTED)) {
		log_err(cd, _("Kernel does not support dm-integrity inline mode."));
		return -ENOTSUP;
	}

	return r;
}
331 | | |
/*
 * Activate a standalone dm-integrity device. For CRYPT_ACTIVATE_REFRESH the
 * running table is queried first so that any keys or size the caller did not
 * supply are carried over from the active device.
 */
int INTEGRITY_activate(struct crypt_device *cd,
	const char *name,
	const struct crypt_params_integrity *params,
	struct volume_key *vk,
	struct volume_key *journal_crypt_key,
	struct volume_key *journal_mac_key,
	uint32_t flags, uint32_t sb_flags)
{
	struct crypt_dm_active_device dmdq = {}, dmd = {};
	int r;

	if (flags & CRYPT_ACTIVATE_REFRESH) {
		/* Query keys and integrity parameters of the active device. */
		r = dm_query_device(cd, name, DM_ACTIVE_CRYPT_KEYSIZE |
				    DM_ACTIVE_CRYPT_KEY |
				    DM_ACTIVE_INTEGRITY_PARAMS |
				    DM_ACTIVE_JOURNAL_CRYPT_KEY |
				    DM_ACTIVE_JOURNAL_MAC_KEY, &dmdq);
		if (r < 0)
			return r;

		/* Caller-supplied keys take precedence over the queried ones. */
		r = INTEGRITY_create_dmd_device(cd, params, vk ?: dmdq.segment.u.integrity.vk,
				journal_crypt_key ?: dmdq.segment.u.integrity.journal_crypt_key,
				journal_mac_key ?: dmdq.segment.u.integrity.journal_integrity_key,
				&dmd, flags, sb_flags);

		/* Keep the size of the currently active device on refresh. */
		if (!r)
			dmd.size = dmdq.size;
	} else
		r = INTEGRITY_create_dmd_device(cd, params, vk, journal_crypt_key,
						journal_mac_key, &dmd, flags, sb_flags);

	if (!r)
		r = INTEGRITY_activate_dmd_device(cd, name, CRYPT_INTEGRITY, &dmd, sb_flags);

	/* Release target lists from both the query and the new description. */
	dm_targets_free(cd, &dmdq);
	dm_targets_free(cd, &dmd);
	return r;
}
370 | | |
371 | | static int _create_reduced_device(struct crypt_device *cd, |
372 | | const char *name, |
373 | | uint64_t device_size_sectors, |
374 | | struct device **ret_device) |
375 | 0 | { |
376 | 0 | int r; |
377 | 0 | char path[PATH_MAX]; |
378 | 0 | struct device *dev; |
379 | |
|
380 | 0 | struct crypt_dm_active_device dmd = { |
381 | 0 | .size = device_size_sectors, |
382 | 0 | .flags = CRYPT_ACTIVATE_PRIVATE, |
383 | 0 | }; |
384 | |
|
385 | 0 | assert(cd); |
386 | 0 | assert(name); |
387 | 0 | assert(device_size_sectors); |
388 | 0 | assert(ret_device); |
389 | |
|
390 | 0 | r = snprintf(path, sizeof(path), "%s/%s", dm_get_dir(), name); |
391 | 0 | if (r < 0 || (size_t)r >= sizeof(path)) |
392 | 0 | return -EINVAL; |
393 | | |
394 | 0 | r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK, |
395 | 0 | crypt_get_data_offset(cd), &device_size_sectors, &dmd.flags); |
396 | 0 | if (r) |
397 | 0 | return r; |
398 | | |
399 | 0 | log_dbg(cd, "Activating reduced helper device %s.", name); |
400 | |
|
401 | 0 | r = dm_linear_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd), crypt_get_data_offset(cd)); |
402 | 0 | if (!r) |
403 | 0 | r = dm_create_device(cd, name, CRYPT_SUBDEV, &dmd); |
404 | 0 | dm_targets_free(cd, &dmd); |
405 | 0 | if (r < 0) |
406 | 0 | return r; |
407 | | |
408 | 0 | r = device_alloc(cd, &dev, path); |
409 | 0 | if (!r) { |
410 | 0 | *ret_device = dev; |
411 | 0 | return 0; |
412 | 0 | } |
413 | | |
414 | 0 | dm_remove_device(cd, name, CRYPT_DEACTIVATE_FORCE); |
415 | |
|
416 | 0 | return r; |
417 | 0 | } |
418 | | |
/*
 * Format a dm-integrity device: activate a temporary private mapping so the
 * kernel initializes the on-disk superblock and metadata areas, then remove
 * it again. With backing_device_sectors != 0 a reduced dm-linear helper is
 * mapped over the data device first and used as the formatting target.
 * If sb_flags is non-NULL, the flags are re-read from the freshly written
 * superblock on success.
 */
int INTEGRITY_format(struct crypt_device *cd,
	const struct crypt_params_integrity *params,
	struct volume_key *integrity_key,
	struct volume_key *journal_crypt_key,
	struct volume_key *journal_mac_key,
	uint64_t backing_device_sectors,
	uint32_t *sb_flags,
	bool integrity_inline)
{
	uint64_t dmi_flags;
	char reduced_device_name[70], tmp_name[64], tmp_uuid[40];
	struct crypt_dm_active_device dmdi = {
		.size = 8,
		.flags = CRYPT_ACTIVATE_PRIVATE, /* We always create journal but it can be unused later */
	};
	struct dm_target *tgt = &dmdi.segment;
	int r;
	uuid_t tmp_uuid_bin;
	uint64_t data_offset_sectors;
	struct device *p_metadata_device, *p_data_device, *reduced_device = NULL;

	/* Random UUID keeps the temporary device names unique. */
	uuid_generate(tmp_uuid_bin);
	uuid_unparse(tmp_uuid_bin, tmp_uuid);

	r = snprintf(tmp_name, sizeof(tmp_name), "temporary-cryptsetup-%s", tmp_uuid);
	if (r < 0 || (size_t)r >= sizeof(tmp_name))
		return -EINVAL;

	p_metadata_device = INTEGRITY_metadata_device(cd);

	if (backing_device_sectors) {
		r = snprintf(reduced_device_name, sizeof(reduced_device_name),
			     "temporary-cryptsetup-reduced-%s", tmp_uuid);
		if (r < 0 || (size_t)r >= sizeof(reduced_device_name))
			return -EINVAL;

		/*
		 * Creates reduced dm-linear mapping over data device starting at
		 * crypt_data_offset(cd) and backing_device_sectors in size.
		 */
		r = _create_reduced_device(cd, reduced_device_name,
					   backing_device_sectors, &reduced_device);
		if (r < 0)
			return r;

		/* The helper mapping already starts at the data offset. */
		data_offset_sectors = 0;
		p_data_device = reduced_device;
		if (p_metadata_device == crypt_data_device(cd))
			p_metadata_device = reduced_device;
	} else {
		data_offset_sectors = crypt_get_data_offset(cd);
		p_data_device = crypt_data_device(cd);
	}

	if (integrity_inline)
		dmdi.flags |= (CRYPT_ACTIVATE_NO_JOURNAL | CRYPT_ACTIVATE_INLINE_MODE);

	r = dm_integrity_target_set(cd, tgt, 0, dmdi.size, p_metadata_device,
				    p_data_device, crypt_get_integrity_tag_size(cd),
				    data_offset_sectors, crypt_get_sector_size(cd), integrity_key,
				    journal_crypt_key, journal_mac_key, params);
	if (r < 0)
		goto err;

	log_dbg(cd, "Trying to format INTEGRITY device on top of %s, tmp name %s, tag size %d%s.",
		device_path(tgt->data_device), tmp_name, tgt->u.integrity.tag_size, integrity_inline ? " (inline)" : "");

	/* Devices are opened exclusively (DEV_EXCL) while formatting. If the
	 * adjust fails and dm-integrity is missing, report that instead. */
	r = device_block_adjust(cd, tgt->data_device, DEV_EXCL, tgt->u.integrity.offset, NULL, NULL);
	if (r < 0 && (dm_flags(cd, DM_INTEGRITY, &dmi_flags) || !(dmi_flags & DM_INTEGRITY_SUPPORTED))) {
		log_err(cd, _("Kernel does not support dm-integrity mapping."));
		r = -ENOTSUP;
	}
	if (r)
		goto err;

	if (tgt->u.integrity.meta_device) {
		r = device_block_adjust(cd, tgt->u.integrity.meta_device, DEV_EXCL, 0, NULL, NULL);
		if (r)
			goto err;
	}

	/* Creating (then removing) the temporary mapping lets the kernel
	 * write the fresh integrity metadata. */
	r = dm_create_device(cd, tmp_name, CRYPT_INTEGRITY, &dmdi);
	if (r)
		goto err;

	r = dm_remove_device(cd, tmp_name, CRYPT_DEACTIVATE_FORCE);
	if (r)
		goto err;

	/* reload sb_flags from superblock (important for SB_FLAG_INLINE) */
	if (sb_flags)
		r = INTEGRITY_read_sb(cd, NULL, sb_flags);
err:
	dm_targets_free(cd, &dmdi);
	if (reduced_device) {
		/* Best-effort teardown of the helper mapping; r is preserved. */
		dm_remove_device(cd, reduced_device_name, CRYPT_DEACTIVATE_FORCE);
		device_free(cd, reduced_device);
	}
	return r;
}