/src/cryptsetup/lib/luks1/keymanage.c
Line | Count | Source |
1 | | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | | /* |
3 | | * LUKS - Linux Unified Key Setup |
4 | | * |
5 | | * Copyright (C) 2004-2006 Clemens Fruhwirth <clemens@endorphin.org> |
6 | | * Copyright (C) 2009-2025 Red Hat, Inc. All rights reserved. |
7 | | * Copyright (C) 2013-2025 Milan Broz |
8 | | */ |
9 | | |
10 | | #include <sys/types.h> |
11 | | #include <sys/stat.h> |
12 | | #include <errno.h> |
13 | | #include <unistd.h> |
14 | | #include <stdio.h> |
15 | | #include <stdlib.h> |
16 | | #include <string.h> |
17 | | #include <ctype.h> |
18 | | #include <uuid/uuid.h> |
19 | | #include <limits.h> |
20 | | |
21 | | #include "luks.h" |
22 | | #include "af.h" |
23 | | #include "internal.h" |
24 | | |
25 | | int LUKS_keyslot_area(const struct luks_phdr *hdr, |
26 | | int keyslot, |
27 | | uint64_t *offset, |
28 | | uint64_t *length) |
29 | 0 | { |
30 | 0 | if(keyslot >= LUKS_NUMKEYS || keyslot < 0) |
31 | 0 | return -EINVAL; |
32 | | |
33 | 0 | *offset = (uint64_t)hdr->keyblock[keyslot].keyMaterialOffset * SECTOR_SIZE; |
34 | 0 | *length = AF_split_sectors(hdr->keyBytes, LUKS_STRIPES) * SECTOR_SIZE; |
35 | |
36 | 0 | return 0; |
37 | 0 | } |
38 | | |
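For reference, a minimal standalone sketch of the arithmetic above; af_split_sectors() here is a local stand-in for the real AF_split_sectors() from af.h, and the constants are the standard LUKS1 values:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE  512u
#define LUKS_STRIPES 4000u

/* Stand-in for AF_split_sectors(): sectors needed for keyBytes * stripes of AF data. */
static uint64_t af_split_sectors(uint64_t key_bytes, uint64_t stripes)
{
	return (key_bytes * stripes + SECTOR_SIZE - 1) / SECTOR_SIZE;
}

int main(void)
{
	uint64_t key_material_offset = 8;   /* example: first keyslot, in sectors */
	uint64_t key_bytes = 32;            /* example: 256-bit volume key */

	uint64_t offset = key_material_offset * SECTOR_SIZE;
	uint64_t length = af_split_sectors(key_bytes, LUKS_STRIPES) * SECTOR_SIZE;

	/* 32 * 4000 = 128000 bytes -> 250 sectors -> 128000 bytes of key material */
	printf("keyslot area: offset %llu, length %llu bytes\n",
	       (unsigned long long)offset, (unsigned long long)length);
	return 0;
}
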
39 | | /* Insertion sort: the array has only 8 elements and is usually almost sorted. */ |
40 | | static void LUKS_sort_keyslots(const struct luks_phdr *hdr, int *array) |
41 | 690 | { |
42 | 690 | int i, j, x; |
43 | | |
44 | 5.52k | for (i = 1; i < LUKS_NUMKEYS; i++) { |
45 | 4.83k | j = i; |
46 | 9.66k | while (j > 0 && hdr->keyblock[array[j-1]].keyMaterialOffset > hdr->keyblock[array[j]].keyMaterialOffset) { |
47 | 4.83k | x = array[j]; |
48 | 4.83k | array[j] = array[j-1]; |
49 | 4.83k | array[j-1] = x; |
50 | 4.83k | j--; |
51 | 4.83k | } |
52 | 4.83k | } |
53 | 690 | } |
54 | | |
55 | | static int _is_not_lower(char *str, unsigned max_len) |
56 | 0 | { |
57 | 0 | for(; *str && max_len; str++, max_len--) |
58 | 0 | if (isupper(*str)) |
59 | 0 | return 1; |
60 | 0 | return 0; |
61 | 0 | } |
62 | | |
63 | | static int _to_lower(char *str, unsigned max_len) |
64 | 0 | { |
65 | 0 | int r = 0; |
66 | |
67 | 0 | for(; *str && max_len; str++, max_len--) |
68 | 0 | if (isupper(*str)) { |
69 | 0 | *str = tolower(*str); |
70 | 0 | r = 1; |
71 | 0 | } |
72 | |
73 | 0 | return r; |
74 | 0 | } |
75 | | |
76 | | size_t LUKS_device_sectors(const struct luks_phdr *hdr) |
77 | 111 | { |
78 | 111 | int sorted_areas[LUKS_NUMKEYS] = { 0, 1, 2, 3, 4, 5, 6, 7 }; |
79 | | |
80 | 111 | LUKS_sort_keyslots(hdr, sorted_areas); |
81 | | |
82 | 111 | return hdr->keyblock[sorted_areas[LUKS_NUMKEYS-1]].keyMaterialOffset + AF_split_sectors(hdr->keyBytes, LUKS_STRIPES); |
83 | 111 | } |
84 | | |
85 | | size_t LUKS_keyslots_offset(const struct luks_phdr *hdr) |
86 | 0 | { |
87 | 0 | int sorted_areas[LUKS_NUMKEYS] = { 0, 1, 2, 3, 4, 5, 6, 7 }; |
88 | |
89 | 0 | LUKS_sort_keyslots(hdr, sorted_areas); |
90 | |
91 | 0 | return hdr->keyblock[sorted_areas[0]].keyMaterialOffset; |
92 | 0 | } |
93 | | |
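A worked example of the keyslot layout these two helpers walk, assuming the usual LUKS1 constants (512-byte sectors, 4000 stripes, 4096-byte keyslot alignment) and a 32-byte volume key; the offsets match what LUKS_generate_phdr() later in this file computes:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE         512u
#define LUKS_NUMKEYS        8
#define LUKS_STRIPES        4000u
#define LUKS_ALIGN_KEYSLOTS 4096u

static uint64_t round_up(uint64_t n, uint64_t mult)
{
	return (n + mult - 1) / mult * mult;
}

int main(void)
{
	uint64_t key_bytes = 32;
	uint64_t keyslot_sectors = (key_bytes * LUKS_STRIPES + SECTOR_SIZE - 1) / SECTOR_SIZE; /* 250 */
	uint64_t offset = LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE; /* first keyslot at sector 8 */

	for (int i = 0; i < LUKS_NUMKEYS; i++) {
		printf("keyslot %d: keyMaterialOffset %llu\n", i, (unsigned long long)offset);
		offset = round_up(offset + keyslot_sectors, LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE);
	}
	/* Offsets come out as 8, 264, 520, ..., 1800; LUKS_device_sectors() would report
	   1800 + 250 = 2050 sectors, LUKS_keyslots_offset() the first offset (8). */
	return 0;
}
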
94 | | static int LUKS_check_device_size(struct crypt_device *ctx, const struct luks_phdr *hdr, int falloc) |
95 | 117 | { |
96 | 117 | struct device *device = crypt_metadata_device(ctx); |
97 | 117 | uint64_t dev_sectors, hdr_sectors; |
98 | | |
99 | 117 | if (!hdr->keyBytes) |
100 | 6 | return -EINVAL; |
101 | | |
102 | 111 | if (device_size(device, &dev_sectors)) { |
103 | 0 | log_dbg(ctx, "Cannot get device size for device %s.", device_path(device)); |
104 | 0 | return -EIO; |
105 | 0 | } |
106 | | |
107 | 111 | dev_sectors >>= SECTOR_SHIFT; |
108 | 111 | hdr_sectors = LUKS_device_sectors(hdr); |
109 | 111 | log_dbg(ctx, "Key length %u, device size %" PRIu64 " sectors, header size %" |
110 | 111 | PRIu64 " sectors.", hdr->keyBytes, dev_sectors, hdr_sectors); |
111 | | |
112 | 111 | if (hdr_sectors > dev_sectors) { |
113 | | /* If it is a header file, increase its size */ |
114 | 89 | if (falloc && !device_fallocate(device, hdr_sectors << SECTOR_SHIFT)) |
115 | 0 | return 0; |
116 | | |
117 | 89 | log_err(ctx, _("Device %s is too small. (LUKS1 requires at least %" PRIu64 " bytes.)"), |
118 | 89 | device_path(device), hdr_sectors * SECTOR_SIZE); |
119 | 89 | return -EINVAL; |
120 | 89 | } |
121 | | |
122 | 22 | return 0; |
123 | 111 | } |
124 | | |
125 | | static int LUKS_check_keyslots(struct crypt_device *ctx, const struct luks_phdr *phdr) |
126 | 579 | { |
127 | 579 | int i, prev, next, sorted_areas[LUKS_NUMKEYS] = { 0, 1, 2, 3, 4, 5, 6, 7 }; |
128 | 579 | uint32_t secs_per_stripes = AF_split_sectors(phdr->keyBytes, LUKS_STRIPES); |
129 | | |
130 | 579 | LUKS_sort_keyslots(phdr, sorted_areas); |
131 | | |
132 | | /* Check keyslot to prevent access outside of header and keyslot area */ |
133 | 1.67k | for (i = 0; i < LUKS_NUMKEYS; i++) { |
134 | | /* enforce stripes == 4000 */ |
135 | 1.55k | if (phdr->keyblock[i].stripes != LUKS_STRIPES) { |
136 | 427 | log_dbg(ctx, "Invalid stripes count %u in keyslot %u.", |
137 | 427 | phdr->keyblock[i].stripes, i); |
138 | 427 | log_err(ctx, _("LUKS keyslot %u is invalid."), i); |
139 | 427 | return -1; |
140 | 427 | } |
141 | | |
142 | | /* The first sectors are occupied by the header itself */ |
143 | 1.12k | if (phdr->keyblock[i].keyMaterialOffset * SECTOR_SIZE < sizeof(*phdr)) { |
144 | 1 | log_dbg(ctx, "Invalid offset %u in keyslot %u.", |
145 | 1 | phdr->keyblock[i].keyMaterialOffset, i); |
146 | 1 | log_err(ctx, _("LUKS keyslot %u is invalid."), i); |
147 | 1 | return -1; |
148 | 1 | } |
149 | | |
150 | | /* Skip the following check for a detached header, where the offset can be zero. */ |
151 | 1.12k | if (phdr->payloadOffset == 0) |
152 | 131 | continue; |
153 | | |
154 | 992 | if (phdr->payloadOffset <= phdr->keyblock[i].keyMaterialOffset) { |
155 | 15 | log_dbg(ctx, "Invalid offset %u in keyslot %u (beyond data area offset %u).", |
156 | 15 | phdr->keyblock[i].keyMaterialOffset, i, |
157 | 15 | phdr->payloadOffset); |
158 | 15 | log_err(ctx, _("LUKS keyslot %u is invalid."), i); |
159 | 15 | return -1; |
160 | 15 | } |
161 | | |
162 | 977 | if (phdr->payloadOffset < (phdr->keyblock[i].keyMaterialOffset + secs_per_stripes)) { |
163 | 11 | log_dbg(ctx, "Invalid keyslot size %u (offset %u, stripes %u) in " |
164 | 11 | "keyslot %u (beyond data area offset %u).", |
165 | 11 | secs_per_stripes, |
166 | 11 | phdr->keyblock[i].keyMaterialOffset, |
167 | 11 | phdr->keyblock[i].stripes, |
168 | 11 | i, phdr->payloadOffset); |
169 | 11 | log_err(ctx, _("LUKS keyslot %u is invalid."), i); |
170 | 11 | return -1; |
171 | 11 | } |
172 | 977 | } |
173 | | |
174 | | /* Check that no two keyslots overlap each other */ |
175 | 982 | for (i = 1; i < LUKS_NUMKEYS; i++) { |
176 | 862 | prev = sorted_areas[i-1]; |
177 | 862 | next = sorted_areas[i]; |
178 | 862 | if (phdr->keyblock[next].keyMaterialOffset < |
179 | 862 | (phdr->keyblock[prev].keyMaterialOffset + secs_per_stripes)) { |
180 | 5 | log_dbg(ctx, "Not enough space in LUKS keyslot %d.", prev); |
181 | 5 | log_err(ctx, _("LUKS keyslot %u is invalid."), prev); |
182 | 5 | return -1; |
183 | 5 | } |
184 | 862 | } |
185 | | /* The last keyslot is deliberately not checked here; it is covered by the device size check */ |
186 | | |
187 | 120 | return 0; |
188 | 125 | } |
189 | | |
190 | | static const char *dbg_slot_state(crypt_keyslot_info ki) |
191 | 0 | { |
192 | 0 | switch(ki) { |
193 | 0 | case CRYPT_SLOT_INACTIVE: |
194 | 0 | return "INACTIVE"; |
195 | 0 | case CRYPT_SLOT_ACTIVE: |
196 | 0 | return "ACTIVE"; |
197 | 0 | case CRYPT_SLOT_ACTIVE_LAST: |
198 | 0 | return "ACTIVE_LAST"; |
199 | 0 | case CRYPT_SLOT_INVALID: |
200 | 0 | default: |
201 | 0 | return "INVALID"; |
202 | 0 | } |
203 | 0 | } |
204 | | |
205 | | int LUKS_hdr_backup(const char *backup_file, struct crypt_device *ctx) |
206 | 0 | { |
207 | 0 | struct device *device = crypt_metadata_device(ctx); |
208 | 0 | struct luks_phdr hdr; |
209 | 0 | int fd, devfd, r = 0; |
210 | 0 | size_t hdr_size; |
211 | 0 | size_t buffer_size; |
212 | 0 | ssize_t ret; |
213 | 0 | char *buffer = NULL; |
214 | |
215 | 0 | r = LUKS_read_phdr(&hdr, 1, 0, ctx); |
216 | 0 | if (r) |
217 | 0 | return r; |
218 | | |
219 | 0 | hdr_size = LUKS_device_sectors(&hdr) << SECTOR_SHIFT; |
220 | 0 | buffer_size = size_round_up(hdr_size, crypt_getpagesize()); |
221 | |
222 | 0 | buffer = malloc(buffer_size); |
223 | 0 | if (!buffer || hdr_size < LUKS_ALIGN_KEYSLOTS || hdr_size > buffer_size) { |
224 | 0 | r = -ENOMEM; |
225 | 0 | goto out; |
226 | 0 | } |
227 | 0 | memset(buffer, 0, buffer_size); |
228 | |
229 | 0 | log_dbg(ctx, "Storing backup of header (%zu bytes) and keyslot area (%zu bytes).", |
230 | 0 | sizeof(hdr), hdr_size - LUKS_ALIGN_KEYSLOTS); |
231 | |
232 | 0 | log_dbg(ctx, "Output backup file size: %zu bytes.", buffer_size); |
233 | |
234 | 0 | devfd = device_open(ctx, device, O_RDONLY); |
235 | 0 | if (devfd < 0) { |
236 | 0 | log_err(ctx, _("Device %s is not a valid LUKS device."), device_path(device)); |
237 | 0 | r = -EINVAL; |
238 | 0 | goto out; |
239 | 0 | } |
240 | | |
241 | 0 | if (read_lseek_blockwise(devfd, device_block_size(ctx, device), device_alignment(device), |
242 | 0 | buffer, hdr_size, 0) < (ssize_t)hdr_size) { |
243 | 0 | r = -EIO; |
244 | 0 | goto out; |
245 | 0 | } |
246 | | |
247 | | /* Wipe unused area, so backup cannot contain old signatures */ |
248 | 0 | if (hdr.keyblock[0].keyMaterialOffset * SECTOR_SIZE == LUKS_ALIGN_KEYSLOTS) |
249 | 0 | memset(buffer + sizeof(hdr), 0, LUKS_ALIGN_KEYSLOTS - sizeof(hdr)); |
250 | |
251 | 0 | fd = open(backup_file, O_CREAT|O_EXCL|O_WRONLY, S_IRUSR); |
252 | 0 | if (fd == -1) { |
253 | 0 | if (errno == EEXIST) |
254 | 0 | log_err(ctx, _("Requested header backup file %s already exists."), backup_file); |
255 | 0 | else |
256 | 0 | log_err(ctx, _("Cannot create header backup file %s."), backup_file); |
257 | 0 | r = -EINVAL; |
258 | 0 | goto out; |
259 | 0 | } |
260 | 0 | ret = write_buffer(fd, buffer, buffer_size); |
261 | 0 | close(fd); |
262 | 0 | if (ret < (ssize_t)buffer_size) { |
263 | 0 | log_err(ctx, _("Cannot write header backup file %s."), backup_file); |
264 | 0 | r = -EIO; |
265 | 0 | goto out; |
266 | 0 | } |
267 | | |
268 | 0 | r = 0; |
269 | 0 | out: |
270 | 0 | crypt_safe_memzero(&hdr, sizeof(hdr)); |
271 | 0 | crypt_safe_memzero(buffer, buffer_size); |
272 | 0 | free(buffer); |
273 | 0 | return r; |
274 | 0 | } |
275 | | |
276 | | int LUKS_hdr_restore( |
277 | | const char *backup_file, |
278 | | struct luks_phdr *hdr, |
279 | | struct crypt_device *ctx) |
280 | 0 | { |
281 | 0 | struct device *device = crypt_metadata_device(ctx); |
282 | 0 | int fd, r = 0, devfd = -1, diff_uuid = 0; |
283 | 0 | ssize_t ret, buffer_size = 0; |
284 | 0 | char *buffer = NULL, msg[200]; |
285 | 0 | struct luks_phdr hdr_file; |
286 | |
287 | 0 | r = LUKS_read_phdr_backup(backup_file, &hdr_file, 0, ctx); |
288 | 0 | if (r == -ENOENT) |
289 | 0 | return r; |
290 | | |
291 | 0 | if (!r) |
292 | 0 | buffer_size = LUKS_device_sectors(&hdr_file) << SECTOR_SHIFT; |
293 | |
294 | 0 | if (r || buffer_size < LUKS_ALIGN_KEYSLOTS) { |
295 | 0 | log_err(ctx, _("Backup file does not contain valid LUKS header.")); |
296 | 0 | r = -EINVAL; |
297 | 0 | goto out; |
298 | 0 | } |
299 | | |
300 | 0 | buffer = malloc(buffer_size); |
301 | 0 | if (!buffer) { |
302 | 0 | r = -ENOMEM; |
303 | 0 | goto out; |
304 | 0 | } |
305 | | |
306 | 0 | fd = open(backup_file, O_RDONLY); |
307 | 0 | if (fd == -1) { |
308 | 0 | log_err(ctx, _("Cannot open header backup file %s."), backup_file); |
309 | 0 | r = -EINVAL; |
310 | 0 | goto out; |
311 | 0 | } |
312 | | |
313 | 0 | ret = read_buffer(fd, buffer, buffer_size); |
314 | 0 | close(fd); |
315 | 0 | if (ret < buffer_size) { |
316 | 0 | log_err(ctx, _("Cannot read header backup file %s."), backup_file); |
317 | 0 | r = -EIO; |
318 | 0 | goto out; |
319 | 0 | } |
320 | | |
321 | 0 | r = LUKS_read_phdr(hdr, 0, 0, ctx); |
322 | 0 | if (r == 0) { |
323 | 0 | log_dbg(ctx, "Device %s already contains LUKS header, checking UUID and offset.", device_path(device)); |
324 | 0 | if(hdr->payloadOffset != hdr_file.payloadOffset || |
325 | 0 | hdr->keyBytes != hdr_file.keyBytes) { |
326 | 0 | log_err(ctx, _("Data offset or key size differs on device and backup, restore failed.")); |
327 | 0 | r = -EINVAL; |
328 | 0 | goto out; |
329 | 0 | } |
330 | 0 | if (memcmp(hdr->uuid, hdr_file.uuid, UUID_STRING_L)) |
331 | 0 | diff_uuid = 1; |
332 | 0 | } |
333 | | |
334 | 0 | if (snprintf(msg, sizeof(msg), _("Device %s %s%s"), device_path(device), |
335 | 0 | r ? _("does not contain LUKS header. Replacing header can destroy data on that device.") : |
336 | 0 | _("already contains LUKS header. Replacing header will destroy existing keyslots."), |
337 | 0 | diff_uuid ? _("\nWARNING: real device header has different UUID than backup!") : "") < 0) { |
338 | 0 | r = -ENOMEM; |
339 | 0 | goto out; |
340 | 0 | } |
341 | | |
342 | 0 | if (!crypt_confirm(ctx, msg)) { |
343 | 0 | r = -EINVAL; |
344 | 0 | goto out; |
345 | 0 | } |
346 | | |
347 | 0 | log_dbg(ctx, "Storing backup of header (%zu bytes) and keyslot area (%zu bytes) to device %s.", |
348 | 0 | sizeof(*hdr), buffer_size - LUKS_ALIGN_KEYSLOTS, device_path(device)); |
349 | |
350 | 0 | devfd = device_open(ctx, device, O_RDWR); |
351 | 0 | if (devfd < 0) { |
352 | 0 | if (errno == EACCES) |
353 | 0 | log_err(ctx, _("Cannot write to device %s, permission denied."), |
354 | 0 | device_path(device)); |
355 | 0 | else |
356 | 0 | log_err(ctx, _("Cannot open device %s."), device_path(device)); |
357 | 0 | r = -EINVAL; |
358 | 0 | goto out; |
359 | 0 | } |
360 | | |
361 | 0 | if (write_lseek_blockwise(devfd, device_block_size(ctx, device), device_alignment(device), |
362 | 0 | buffer, buffer_size, 0) < buffer_size) { |
363 | 0 | r = -EIO; |
364 | 0 | goto out; |
365 | 0 | } |
366 | | |
367 | | /* Be sure to reload new data */ |
368 | 0 | r = LUKS_read_phdr(hdr, 1, 0, ctx); |
369 | 0 | out: |
370 | 0 | device_sync(ctx, device); |
371 | 0 | crypt_safe_memzero(buffer, buffer_size); |
372 | 0 | free(buffer); |
373 | 0 | return r; |
374 | 0 | } |
375 | | |
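These backup and restore routines are reached through the public libcryptsetup calls crypt_header_backup() and crypt_header_restore(); a hedged usage sketch (device and file paths are placeholders):

#include <libcryptsetup.h>

/* Back up and later restore a LUKS1 header; /dev/sdb1 and the file path are examples. */
int backup_and_restore_example(void)
{
	struct crypt_device *cd = NULL;
	int r;

	r = crypt_init(&cd, "/dev/sdb1");
	if (r < 0)
		return r;

	r = crypt_load(cd, CRYPT_LUKS1, NULL);          /* parses the on-disk header */
	if (r < 0)
		goto out;

	r = crypt_header_backup(cd, CRYPT_LUKS1, "/root/sdb1-luks1-header.img");
	if (r < 0)
		goto out;

	/* Restoring overwrites keyslots on the device with those from the backup file. */
	r = crypt_header_restore(cd, CRYPT_LUKS1, "/root/sdb1-luks1-header.img");
out:
	crypt_free(cd);
	return r;
}
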
376 | | /* This routine performs only basic recovery of known problems. */ |
377 | | static int _keyslot_repair(struct luks_phdr *phdr, struct crypt_device *ctx) |
378 | 0 | { |
379 | 0 | struct luks_phdr temp_phdr; |
380 | 0 | const unsigned char *sector = (const unsigned char*)phdr; |
381 | 0 | struct volume_key *fake_vk; |
382 | 0 | int i, bad, r, need_write = 0; |
383 | |
384 | 0 | if (phdr->keyBytes != 16 && phdr->keyBytes != 32 && phdr->keyBytes != 64) { |
385 | 0 | log_err(ctx, _("Non standard key size, manual repair required.")); |
386 | 0 | return -EINVAL; |
387 | 0 | } |
388 | | |
389 | | /* |
390 | | * cryptsetup 1.0 did not align keyslots to 4k; that case cannot be repaired here. |
391 | | * Also, possibly broken keyslot metadata cannot be trusted here via LUKS_keyslots_offset(). |
392 | | * We expect the first keyslot to be aligned; if it is not, manual repair is necessary. |
393 | | */ |
394 | 0 | if (phdr->keyblock[0].keyMaterialOffset < (LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE)) { |
395 | 0 | log_err(ctx, _("Non standard keyslots alignment, manual repair required.")); |
396 | 0 | return -EINVAL; |
397 | 0 | } |
398 | | |
399 | | /* |
400 | | * ECB mode does not use an IV, but legacy dm-crypt silently allowed it. |
401 | | * Such a device cannot be activated today anyway, so we fix it here. |
402 | | */ |
403 | 0 | if (!strncmp(phdr->cipherMode, "ecb-", 4)) { |
404 | 0 | log_err(ctx, _("Cipher mode repaired (%s -> %s)."), phdr->cipherMode, "ecb"); |
405 | 0 | memset(phdr->cipherMode, 0, LUKS_CIPHERMODE_L); |
406 | 0 | strcpy(phdr->cipherMode, "ecb"); |
407 | 0 | need_write = 1; |
408 | 0 | } |
409 | | |
410 | | /* |
411 | | * Old cryptsetup expects "sha1"; gcrypt allows case-insensitive names, |
412 | | * so always convert the hash name in the header to lower case. |
413 | | */ |
414 | 0 | if (_to_lower(phdr->hashSpec, LUKS_HASHSPEC_L)) { |
415 | 0 | log_err(ctx, _("Cipher hash repaired to lowercase (%s)."), phdr->hashSpec); |
416 | 0 | if (crypt_hmac_size(phdr->hashSpec) < LUKS_DIGESTSIZE) { |
417 | 0 | log_err(ctx, _("Requested LUKS hash %s is not supported."), phdr->hashSpec); |
418 | 0 | return -EINVAL; |
419 | 0 | } |
420 | 0 | need_write = 1; |
421 | 0 | } |
422 | | |
423 | 0 | r = LUKS_check_cipher(ctx, phdr->keyBytes, phdr->cipherName, phdr->cipherMode); |
424 | 0 | if (r < 0) |
425 | 0 | return -EINVAL; |
426 | | |
427 | 0 | fake_vk = crypt_generate_volume_key(ctx, phdr->keyBytes, KEY_QUALITY_EMPTY); |
428 | 0 | if (!fake_vk) |
429 | 0 | return -ENOMEM; |
430 | | |
431 | 0 | log_verbose(ctx, _("Repairing keyslots.")); |
432 | |
433 | 0 | log_dbg(ctx, "Generating second header with the same parameters for check."); |
434 | | /* cipherName, cipherMode, hashSpec, uuid are already null terminated */ |
435 | | /* payloadOffset - cannot check */ |
436 | 0 | r = LUKS_generate_phdr(&temp_phdr, fake_vk, phdr->cipherName, phdr->cipherMode, |
437 | 0 | phdr->hashSpec, phdr->uuid, |
438 | 0 | phdr->payloadOffset * SECTOR_SIZE, 0, 0, ctx); |
439 | 0 | if (r < 0) |
440 | 0 | goto out; |
441 | | |
442 | 0 | for(i = 0; i < LUKS_NUMKEYS; ++i) { |
443 | 0 | if (phdr->keyblock[i].active == LUKS_KEY_ENABLED) { |
444 | 0 | log_dbg(ctx, "Skipping repair for active keyslot %i.", i); |
445 | 0 | continue; |
446 | 0 | } |
447 | | |
448 | 0 | bad = 0; |
449 | 0 | if (phdr->keyblock[i].keyMaterialOffset != temp_phdr.keyblock[i].keyMaterialOffset) { |
450 | 0 | log_err(ctx, _("Keyslot %i: offset repaired (%u -> %u)."), i, |
451 | 0 | (unsigned)phdr->keyblock[i].keyMaterialOffset, |
452 | 0 | (unsigned)temp_phdr.keyblock[i].keyMaterialOffset); |
453 | 0 | phdr->keyblock[i].keyMaterialOffset = temp_phdr.keyblock[i].keyMaterialOffset; |
454 | 0 | bad = 1; |
455 | 0 | } |
456 | |
457 | 0 | if (phdr->keyblock[i].stripes != temp_phdr.keyblock[i].stripes) { |
458 | 0 | log_err(ctx, _("Keyslot %i: stripes repaired (%u -> %u)."), i, |
459 | 0 | (unsigned)phdr->keyblock[i].stripes, |
460 | 0 | (unsigned)temp_phdr.keyblock[i].stripes); |
461 | 0 | phdr->keyblock[i].stripes = temp_phdr.keyblock[i].stripes; |
462 | 0 | bad = 1; |
463 | 0 | } |
464 | | |
465 | | /* Known case - MSDOS partition table signature */ |
466 | 0 | if (i == 6 && sector[0x1fe] == 0x55 && sector[0x1ff] == 0xaa) { |
467 | 0 | log_err(ctx, _("Keyslot %i: bogus partition signature."), i); |
468 | 0 | bad = 1; |
469 | 0 | } |
470 | |
471 | 0 | if(bad) { |
472 | 0 | log_err(ctx, _("Keyslot %i: salt wiped."), i); |
473 | 0 | phdr->keyblock[i].active = LUKS_KEY_DISABLED; |
474 | 0 | memset(&phdr->keyblock[i].passwordSalt, 0x00, LUKS_SALTSIZE); |
475 | 0 | phdr->keyblock[i].passwordIterations = 0; |
476 | 0 | } |
477 | |
478 | 0 | if (bad) |
479 | 0 | need_write = 1; |
480 | 0 | } |
481 | | |
482 | | /* |
483 | | * Check the repair result before writing, because repair cannot fix out-of-order |
484 | | * keyslot offsets and would corrupt the header again. |
485 | | */ |
486 | 0 | if (LUKS_check_keyslots(ctx, phdr)) |
487 | 0 | r = -EINVAL; |
488 | 0 | else if (need_write) { |
489 | 0 | log_verbose(ctx, _("Writing LUKS header to disk.")); |
490 | 0 | r = LUKS_write_phdr(phdr, ctx); |
491 | 0 | } |
492 | 0 | out: |
493 | 0 | if (r) |
494 | 0 | log_err(ctx, _("Repair failed.")); |
495 | 0 | crypt_free_volume_key(fake_vk); |
496 | 0 | crypt_safe_memzero(&temp_phdr, sizeof(temp_phdr)); |
497 | 0 | return r; |
498 | 0 | } |
499 | | |
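The repair path above is normally triggered through crypt_repair() in the public API (or the `cryptsetup repair` command); a minimal sketch, with the device path as a placeholder:

#include <libcryptsetup.h>

/* Attempt basic LUKS1 metadata repair; /dev/sdb1 is an example path. */
int repair_example(void)
{
	struct crypt_device *cd = NULL;
	int r;

	r = crypt_init(&cd, "/dev/sdb1");
	if (r < 0)
		return r;

	/* crypt_repair() loads the header with repair enabled, which can end up
	   in _keyslot_repair() above for a LUKS1 device. */
	r = crypt_repair(cd, CRYPT_LUKS1, NULL);

	crypt_free(cd);
	return r;
}
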
500 | | static int _check_and_convert_hdr(const char *device, |
501 | | struct luks_phdr *hdr, |
502 | | int require_luks_device, |
503 | | int repair, |
504 | | struct crypt_device *ctx) |
505 | 1.45k | { |
506 | 1.45k | int r = 0; |
507 | 1.45k | unsigned int i; |
508 | 1.45k | char luksMagic[] = LUKS_MAGIC; |
509 | | |
510 | 1.45k | hdr->version = be16_to_cpu(hdr->version); |
511 | 1.45k | if (memcmp(hdr->magic, luksMagic, LUKS_MAGIC_L)) { /* Check magic */ |
512 | 851 | log_dbg(ctx, "LUKS header not detected."); |
513 | 851 | if (require_luks_device) |
514 | 0 | log_err(ctx, _("Device %s is not a valid LUKS device."), device); |
515 | 851 | return -EINVAL; |
516 | 851 | } else if (hdr->version != 1) { |
517 | 24 | log_err(ctx, _("Unsupported LUKS version %d."), hdr->version); |
518 | 24 | return -EINVAL; |
519 | 24 | } |
520 | | |
521 | 579 | hdr->hashSpec[LUKS_HASHSPEC_L - 1] = '\0'; |
522 | 579 | if (crypt_hmac_size(hdr->hashSpec) < LUKS_DIGESTSIZE) { |
523 | 440 | log_err(ctx, _("Requested LUKS hash %s is not supported."), hdr->hashSpec); |
524 | 440 | r = -EINVAL; |
525 | 440 | } |
526 | | |
527 | | /* Header detected */ |
528 | 579 | hdr->payloadOffset = be32_to_cpu(hdr->payloadOffset); |
529 | 579 | hdr->keyBytes = be32_to_cpu(hdr->keyBytes); |
530 | 579 | hdr->mkDigestIterations = be32_to_cpu(hdr->mkDigestIterations); |
531 | | |
532 | 5.21k | for (i = 0; i < LUKS_NUMKEYS; ++i) { |
533 | 4.63k | hdr->keyblock[i].active = be32_to_cpu(hdr->keyblock[i].active); |
534 | 4.63k | hdr->keyblock[i].passwordIterations = be32_to_cpu(hdr->keyblock[i].passwordIterations); |
535 | 4.63k | hdr->keyblock[i].keyMaterialOffset = be32_to_cpu(hdr->keyblock[i].keyMaterialOffset); |
536 | 4.63k | hdr->keyblock[i].stripes = be32_to_cpu(hdr->keyblock[i].stripes); |
537 | 4.63k | } |
538 | | |
539 | 579 | if (LUKS_check_keyslots(ctx, hdr)) |
540 | 459 | r = -EINVAL; |
541 | | |
542 | | /* Avoid unterminated strings */ |
543 | 579 | hdr->cipherName[LUKS_CIPHERNAME_L - 1] = '\0'; |
544 | 579 | hdr->cipherMode[LUKS_CIPHERMODE_L - 1] = '\0'; |
545 | 579 | hdr->uuid[UUID_STRING_L - 1] = '\0'; |
546 | | |
547 | 579 | if (repair) { |
548 | 0 | if (!strncmp(hdr->cipherMode, "ecb-", 4)) { |
549 | 0 | log_err(ctx, _("LUKS cipher mode %s is invalid."), hdr->cipherMode); |
550 | 0 | r = -EINVAL; |
551 | 0 | } |
552 | |
553 | 0 | if (_is_not_lower(hdr->hashSpec, LUKS_HASHSPEC_L)) { |
554 | 0 | log_err(ctx, _("LUKS hash %s is invalid."), hdr->hashSpec); |
555 | 0 | r = -EINVAL; |
556 | 0 | } |
557 | |
558 | 0 | if (r == -EINVAL) |
559 | 0 | r = _keyslot_repair(hdr, ctx); |
560 | 0 | else |
561 | 0 | log_verbose(ctx, _("No known problems detected for LUKS header.")); |
562 | 0 | } |
563 | | |
564 | 579 | return r; |
565 | 1.45k | } |
566 | | |
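All multi-byte header fields are stored big-endian on disk, which is what the be16/be32_to_cpu() conversions above undo; a self-contained sketch of the idea (be32_decode is an illustrative helper, not part of the project):

#include <stdint.h>
#include <stdio.h>

/* Illustrative big-endian decode, equivalent in effect to be32_to_cpu() on the header fields. */
static uint32_t be32_decode(const unsigned char b[4])
{
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

int main(void)
{
	/* payloadOffset of 4096 sectors as stored on disk (big-endian). */
	const unsigned char on_disk[4] = { 0x00, 0x00, 0x10, 0x00 };

	printf("payloadOffset = %u sectors\n", be32_decode(on_disk)); /* prints 4096 */
	return 0;
}
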
567 | | int LUKS_read_phdr_backup(const char *backup_file, |
568 | | struct luks_phdr *hdr, |
569 | | int require_luks_device, |
570 | | struct crypt_device *ctx) |
571 | 0 | { |
572 | 0 | ssize_t hdr_size = sizeof(struct luks_phdr); |
573 | 0 | int devfd = 0, r = 0; |
574 | |
575 | 0 | log_dbg(ctx, "Reading LUKS header of size %d from backup file %s", |
576 | 0 | (int)hdr_size, backup_file); |
577 | |
578 | 0 | devfd = open(backup_file, O_RDONLY); |
579 | 0 | if (devfd == -1) { |
580 | 0 | log_err(ctx, _("Cannot open header backup file %s."), backup_file); |
581 | 0 | return -ENOENT; |
582 | 0 | } |
583 | | |
584 | 0 | if (read_buffer(devfd, hdr, hdr_size) < hdr_size) |
585 | 0 | r = -EIO; |
586 | 0 | else |
587 | 0 | r = _check_and_convert_hdr(backup_file, hdr, |
588 | 0 | require_luks_device, 0, ctx); |
589 | |
590 | 0 | close(devfd); |
591 | 0 | return r; |
592 | 0 | } |
593 | | |
594 | | int LUKS_read_phdr(struct luks_phdr *hdr, |
595 | | int require_luks_device, |
596 | | int repair, |
597 | | struct crypt_device *ctx) |
598 | 1.45k | { |
599 | 1.45k | int devfd, r = 0; |
600 | 1.45k | struct device *device = crypt_metadata_device(ctx); |
601 | 1.45k | ssize_t hdr_size = sizeof(struct luks_phdr); |
602 | | |
603 | | /* LUKS header starts at offset 0, first keyslot at LUKS_ALIGN_KEYSLOTS */ |
604 | 1.45k | assert(sizeof(struct luks_phdr) <= LUKS_ALIGN_KEYSLOTS); |
605 | | |
606 | | /* The stripes count cannot yet be changed without additional code fixes */ |
607 | 1.45k | assert(LUKS_STRIPES == 4000); |
608 | | |
609 | 1.45k | if (repair && !require_luks_device) |
610 | 0 | return -EINVAL; |
611 | | |
612 | 1.45k | log_dbg(ctx, "Reading LUKS header of size %zu from device %s", |
613 | 1.45k | hdr_size, device_path(device)); |
614 | | |
615 | 1.45k | devfd = device_open(ctx, device, O_RDONLY); |
616 | 1.45k | if (devfd < 0) { |
617 | 0 | log_err(ctx, _("Cannot open device %s."), device_path(device)); |
618 | 0 | return -EINVAL; |
619 | 0 | } |
620 | | |
621 | 1.45k | if (read_lseek_blockwise(devfd, device_block_size(ctx, device), device_alignment(device), |
622 | 1.45k | hdr, hdr_size, 0) < hdr_size) |
623 | 0 | r = -EIO; |
624 | 1.45k | else |
625 | 1.45k | r = _check_and_convert_hdr(device_path(device), hdr, require_luks_device, |
626 | 1.45k | repair, ctx); |
627 | | |
628 | 1.45k | if (!r) |
629 | 117 | r = LUKS_check_device_size(ctx, hdr, 0); |
630 | | |
631 | | /* |
632 | | * Cryptsetup 1.0.0 did not align keyslots to 4k (very rare version). |
633 | | * Disable direct-io to avoid possible I/O errors if the underlying device |
634 | | * has a larger sector size. |
635 | | */ |
636 | 1.45k | if (!r && hdr->keyblock[0].keyMaterialOffset * SECTOR_SIZE < LUKS_ALIGN_KEYSLOTS) { |
637 | 4 | log_dbg(ctx, "Old unaligned LUKS keyslot detected, disabling direct-io."); |
638 | 4 | device_disable_direct_io(device); |
639 | 4 | } |
640 | | |
641 | 1.45k | return r; |
642 | 1.45k | } |
643 | | |
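On the public side this read-and-validate path is driven by crypt_load(); a short sketch (the printed fields come from the header parsed above):

#include <stdio.h>
#include <libcryptsetup.h>

/* Load and sanity-check a LUKS1 header; prints cipher and UUID on success. */
int read_header_example(const char *device)
{
	struct crypt_device *cd = NULL;
	int r;

	r = crypt_init(&cd, device);
	if (r < 0)
		return r;

	r = crypt_load(cd, CRYPT_LUKS1, NULL);
	if (r == 0)
		printf("cipher %s-%s, UUID %s\n",
		       crypt_get_cipher(cd), crypt_get_cipher_mode(cd), crypt_get_uuid(cd));

	crypt_free(cd);
	return r;
}
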
644 | | int LUKS_write_phdr(struct luks_phdr *hdr, |
645 | | struct crypt_device *ctx) |
646 | 0 | { |
647 | 0 | struct device *device = crypt_metadata_device(ctx); |
648 | 0 | ssize_t hdr_size = sizeof(struct luks_phdr); |
649 | 0 | int devfd = 0; |
650 | 0 | unsigned int i; |
651 | 0 | struct luks_phdr convHdr; |
652 | 0 | int r; |
653 | |
654 | 0 | log_dbg(ctx, "Updating LUKS header of size %zu on device %s", |
655 | 0 | sizeof(struct luks_phdr), device_path(device)); |
656 | |
657 | 0 | r = LUKS_check_device_size(ctx, hdr, 1); |
658 | 0 | if (r) |
659 | 0 | return r; |
660 | | |
661 | 0 | devfd = device_open(ctx, device, O_RDWR); |
662 | 0 | if (devfd < 0) { |
663 | 0 | if (errno == EACCES) |
664 | 0 | log_err(ctx, _("Cannot write to device %s, permission denied."), |
665 | 0 | device_path(device)); |
666 | 0 | else |
667 | 0 | log_err(ctx, _("Cannot open device %s."), device_path(device)); |
668 | 0 | return -EINVAL; |
669 | 0 | } |
670 | | |
671 | 0 | memcpy(&convHdr, hdr, hdr_size); |
672 | 0 | memset(&convHdr._padding, 0, sizeof(convHdr._padding)); |
673 | | |
674 | | /* Convert every uint16/32_t item to network byte order */ |
675 | 0 | convHdr.version = cpu_to_be16(hdr->version); |
676 | 0 | convHdr.payloadOffset = cpu_to_be32(hdr->payloadOffset); |
677 | 0 | convHdr.keyBytes = cpu_to_be32(hdr->keyBytes); |
678 | 0 | convHdr.mkDigestIterations = cpu_to_be32(hdr->mkDigestIterations); |
679 | 0 | for(i = 0; i < LUKS_NUMKEYS; ++i) { |
680 | 0 | convHdr.keyblock[i].active = cpu_to_be32(hdr->keyblock[i].active); |
681 | 0 | convHdr.keyblock[i].passwordIterations = cpu_to_be32(hdr->keyblock[i].passwordIterations); |
682 | 0 | convHdr.keyblock[i].keyMaterialOffset = cpu_to_be32(hdr->keyblock[i].keyMaterialOffset); |
683 | 0 | convHdr.keyblock[i].stripes = cpu_to_be32(hdr->keyblock[i].stripes); |
684 | 0 | } |
685 | |
686 | 0 | r = write_lseek_blockwise(devfd, device_block_size(ctx, device), device_alignment(device), |
687 | 0 | &convHdr, hdr_size, 0) < hdr_size ? -EIO : 0; |
688 | 0 | if (r) |
689 | 0 | log_err(ctx, _("Error during update of LUKS header on device %s."), device_path(device)); |
690 | |
691 | 0 | device_sync(ctx, device); |
692 | | |
693 | | /* Re-read header from disk to be sure that in-memory and on-disk data are the same. */ |
694 | 0 | if (!r) { |
695 | 0 | r = LUKS_read_phdr(hdr, 1, 0, ctx); |
696 | 0 | if (r) |
697 | 0 | log_err(ctx, _("Error re-reading LUKS header after update on device %s."), |
698 | 0 | device_path(device)); |
699 | 0 | } |
700 | |
701 | 0 | return r; |
702 | 0 | } |
703 | | |
704 | | /* Check that the kernel supports the requested cipher by decrypting one sector */ |
705 | | int LUKS_check_cipher(struct crypt_device *ctx, size_t keylength, const char *cipher, const char *cipher_mode) |
706 | 0 | { |
707 | 0 | int r; |
708 | 0 | struct volume_key *empty_key; |
709 | 0 | char buf[SECTOR_SIZE]; |
710 | |
711 | 0 | log_dbg(ctx, "Checking if cipher %s-%s is usable.", cipher, cipher_mode); |
712 | | |
713 | | /* No need for key-quality randomness here, but it must avoid known weak keys. */ |
714 | 0 | empty_key = crypt_generate_volume_key(ctx, keylength, KEY_QUALITY_NORMAL); |
715 | 0 | if (!empty_key) |
716 | 0 | return -ENOMEM; |
717 | | |
718 | 0 | r = LUKS_decrypt_from_storage(buf, sizeof(buf), cipher, cipher_mode, empty_key, 0, ctx); |
719 | |
720 | 0 | crypt_free_volume_key(empty_key); |
721 | 0 | crypt_safe_memzero(buf, sizeof(buf)); |
722 | 0 | return r; |
723 | 0 | } |
724 | | |
725 | | int LUKS_generate_phdr(struct luks_phdr *header, |
726 | | const struct volume_key *vk, |
727 | | const char *cipherName, |
728 | | const char *cipherMode, |
729 | | const char *hashSpec, |
730 | | const char *uuid, |
731 | | uint64_t data_offset, /* in bytes */ |
732 | | uint64_t align_offset, /* in bytes */ |
733 | | uint64_t required_alignment, /* in bytes */ |
734 | | struct crypt_device *ctx) |
735 | 0 | { |
736 | 0 | int i, r; |
737 | 0 | size_t keyslot_sectors, header_sectors; |
738 | 0 | uuid_t partitionUuid; |
739 | 0 | struct crypt_pbkdf_type *pbkdf; |
740 | 0 | double PBKDF2_temp; |
741 | 0 | char luksMagic[] = LUKS_MAGIC; |
742 | |
743 | 0 | if (data_offset % SECTOR_SIZE || align_offset % SECTOR_SIZE || |
744 | 0 | required_alignment % SECTOR_SIZE) |
745 | 0 | return -EINVAL; |
746 | | |
747 | 0 | memset(header, 0, sizeof(struct luks_phdr)); |
748 | |
749 | 0 | keyslot_sectors = AF_split_sectors(crypt_volume_key_length(vk), LUKS_STRIPES); |
750 | 0 | header_sectors = LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE; |
751 | |
752 | 0 | for (i = 0; i < LUKS_NUMKEYS; i++) { |
753 | 0 | header->keyblock[i].active = LUKS_KEY_DISABLED; |
754 | 0 | header->keyblock[i].keyMaterialOffset = header_sectors; |
755 | 0 | header->keyblock[i].stripes = LUKS_STRIPES; |
756 | 0 | header_sectors = size_round_up(header_sectors + keyslot_sectors, |
757 | 0 | LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE); |
758 | 0 | } |
759 | | /* header_sectors now holds the total keyslot material space, in sectors */ |
760 | | |
761 | | /* Data offset has priority */ |
762 | 0 | if (data_offset) |
763 | 0 | header->payloadOffset = data_offset / SECTOR_SIZE; |
764 | 0 | else if (required_alignment) { |
765 | 0 | header->payloadOffset = size_round_up(header_sectors, (required_alignment / SECTOR_SIZE)); |
766 | 0 | header->payloadOffset += (align_offset / SECTOR_SIZE); |
767 | 0 | } else |
768 | 0 | header->payloadOffset = 0; |
769 | |
770 | 0 | if (header->payloadOffset && header->payloadOffset < header_sectors) { |
771 | 0 | log_err(ctx, _("Data offset for LUKS header must be " |
772 | 0 | "either 0 or higher than header size.")); |
773 | 0 | return -EINVAL; |
774 | 0 | } |
775 | | |
776 | 0 | if (crypt_hmac_size(hashSpec) < LUKS_DIGESTSIZE) { |
777 | 0 | log_err(ctx, _("Requested LUKS hash %s is not supported."), hashSpec); |
778 | 0 | return -EINVAL; |
779 | 0 | } |
780 | | |
781 | 0 | if (uuid && uuid_parse(uuid, partitionUuid) == -1) { |
782 | 0 | log_err(ctx, _("Wrong LUKS UUID format provided.")); |
783 | 0 | return -EINVAL; |
784 | 0 | } |
785 | 0 | if (!uuid) |
786 | 0 | uuid_generate(partitionUuid); |
787 | | |
788 | | /* Set Magic */ |
789 | 0 | memcpy(header->magic,luksMagic,LUKS_MAGIC_L); |
790 | 0 | header->version=1; |
791 | 0 | strncpy(header->cipherName,cipherName,LUKS_CIPHERNAME_L-1); |
792 | 0 | strncpy(header->cipherMode,cipherMode,LUKS_CIPHERMODE_L-1); |
793 | 0 | strncpy(header->hashSpec,hashSpec,LUKS_HASHSPEC_L-1); |
794 | 0 | _to_lower(header->hashSpec, LUKS_HASHSPEC_L); |
795 | |
796 | 0 | header->keyBytes = crypt_volume_key_length(vk); |
797 | |
798 | 0 | log_dbg(ctx, "Generating LUKS header version %d using hash %s, %s, %s, MK %d bytes", |
799 | 0 | header->version, header->hashSpec ,header->cipherName, header->cipherMode, |
800 | 0 | header->keyBytes); |
801 | |
802 | 0 | r = crypt_random_get(ctx, header->mkDigestSalt, LUKS_SALTSIZE, CRYPT_RND_SALT); |
803 | 0 | if(r < 0) { |
804 | 0 | log_err(ctx, _("Cannot create LUKS header: reading random salt failed.")); |
805 | 0 | return r; |
806 | 0 | } |
807 | | |
808 | | /* Compute volume key digest */ |
809 | 0 | pbkdf = crypt_get_pbkdf(ctx); |
810 | 0 | r = crypt_benchmark_pbkdf_internal(ctx, pbkdf, crypt_volume_key_length(vk)); |
811 | 0 | if (r < 0) |
812 | 0 | return r; |
813 | 0 | assert(pbkdf->iterations); |
814 | |
815 | 0 | if (pbkdf->flags & CRYPT_PBKDF_NO_BENCHMARK && pbkdf->time_ms == 0) |
816 | 0 | PBKDF2_temp = LUKS_MKD_ITERATIONS_MIN; |
817 | 0 | else /* iterations per ms * LUKS_MKD_ITERATIONS_MS */ |
818 | 0 | PBKDF2_temp = (double)pbkdf->iterations * LUKS_MKD_ITERATIONS_MS / pbkdf->time_ms; |
819 | |
820 | 0 | if (PBKDF2_temp > (double)UINT32_MAX) |
821 | 0 | return -EINVAL; |
822 | 0 | header->mkDigestIterations = AT_LEAST((uint32_t)PBKDF2_temp, LUKS_MKD_ITERATIONS_MIN); |
823 | 0 | assert(header->mkDigestIterations); |
824 | |
825 | 0 | r = crypt_pbkdf(CRYPT_KDF_PBKDF2, header->hashSpec, |
826 | 0 | crypt_volume_key_get_key(vk), |
827 | 0 | crypt_volume_key_length(vk), |
828 | 0 | header->mkDigestSalt, LUKS_SALTSIZE, |
829 | 0 | header->mkDigest,LUKS_DIGESTSIZE, |
830 | 0 | header->mkDigestIterations, 0, 0); |
831 | 0 | if (r < 0) { |
832 | 0 | log_err(ctx, _("Cannot create LUKS header: header digest failed (using hash %s)."), |
833 | 0 | header->hashSpec); |
834 | 0 | return r; |
835 | 0 | } |
836 | | |
837 | 0 | uuid_unparse(partitionUuid, header->uuid); |
838 | |
839 | 0 | log_dbg(ctx, "Data offset %d, UUID %s, digest iterations %" PRIu32, |
840 | 0 | header->payloadOffset, header->uuid, header->mkDigestIterations); |
841 | |
842 | 0 | return 0; |
843 | 0 | } |
844 | | |
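Headers like the one generated above are normally created through crypt_format(); a hedged sketch with illustrative parameters (passing a NULL volume key asks the library to generate a random one):

#include <libcryptsetup.h>

/* Format a device as LUKS1; cipher, hash and key size here are examples. */
int format_luks1_example(const char *device)
{
	struct crypt_device *cd = NULL;
	struct crypt_params_luks1 params = {
		.hash = "sha256",        /* hashSpec used for AF and the key digest */
		.data_alignment = 0,     /* use the default payload alignment */
		.data_device = NULL,     /* header and data on the same device */
	};
	int r;

	r = crypt_init(&cd, device);
	if (r < 0)
		return r;

	/* NULL volume key: the library generates a random 32-byte (256-bit) key. */
	r = crypt_format(cd, CRYPT_LUKS1, "aes", "xts-plain64", NULL, NULL, 32, &params);

	crypt_free(cd);
	return r;
}
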
845 | | int LUKS_hdr_uuid_set( |
846 | | struct luks_phdr *hdr, |
847 | | const char *uuid, |
848 | | struct crypt_device *ctx) |
849 | 0 | { |
850 | 0 | uuid_t partitionUuid; |
851 | |
852 | 0 | if (uuid && uuid_parse(uuid, partitionUuid) == -1) { |
853 | 0 | log_err(ctx, _("Wrong LUKS UUID format provided.")); |
854 | 0 | return -EINVAL; |
855 | 0 | } |
856 | 0 | if (!uuid) |
857 | 0 | uuid_generate(partitionUuid); |
858 | |
859 | 0 | uuid_unparse(partitionUuid, hdr->uuid); |
860 | |
861 | 0 | return LUKS_write_phdr(hdr, ctx); |
862 | 0 | } |
863 | | |
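The public entry point for this operation is crypt_set_uuid(); a brief sketch (passing NULL generates a fresh UUID, as in the code above):

#include <libcryptsetup.h>

/* Replace the LUKS1 header UUID; a NULL uuid asks the library to generate one. */
int set_uuid_example(const char *device, const char *uuid)
{
	struct crypt_device *cd = NULL;
	int r;

	r = crypt_init(&cd, device);
	if (r < 0)
		return r;

	r = crypt_load(cd, CRYPT_LUKS1, NULL);
	if (r == 0)
		r = crypt_set_uuid(cd, uuid);

	crypt_free(cd);
	return r;
}
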
864 | | int LUKS_set_key(unsigned int keyIndex, |
865 | | const char *password, size_t passwordLen, |
866 | | struct luks_phdr *hdr, struct volume_key *vk, |
867 | | struct crypt_device *ctx) |
868 | 0 | { |
869 | 0 | struct volume_key *derived_vk = NULL; |
870 | 0 | char *AfKey = NULL; |
871 | 0 | void *derived_key = NULL; |
872 | 0 | size_t AFEKSize; |
873 | 0 | struct crypt_pbkdf_type *pbkdf; |
874 | 0 | int r; |
875 | |
876 | 0 | if(hdr->keyblock[keyIndex].active != LUKS_KEY_DISABLED) { |
877 | 0 | log_err(ctx, _("Key slot %d active, purge first."), keyIndex); |
878 | 0 | return -EINVAL; |
879 | 0 | } |
880 | | |
881 | | /* A LUKS keyslot always has at least 4000 stripes according to the specification */ |
882 | 0 | if(hdr->keyblock[keyIndex].stripes < 4000) { |
883 | 0 | log_err(ctx, _("Key slot %d material includes too few stripes. Header manipulation?"), |
884 | 0 | keyIndex); |
885 | 0 | return -EINVAL; |
886 | 0 | } |
887 | | |
888 | 0 | log_dbg(ctx, "Calculating data for key slot %d", keyIndex); |
889 | 0 | pbkdf = crypt_get_pbkdf(ctx); |
890 | 0 | r = crypt_benchmark_pbkdf_internal(ctx, pbkdf, crypt_volume_key_length(vk)); |
891 | 0 | if (r < 0) |
892 | 0 | return r; |
893 | 0 | assert(pbkdf->iterations); |
894 | | |
895 | | /* |
896 | | * Final iteration count is at least LUKS_SLOT_ITERATIONS_MIN |
897 | | */ |
898 | 0 | hdr->keyblock[keyIndex].passwordIterations = |
899 | 0 | AT_LEAST(pbkdf->iterations, LUKS_SLOT_ITERATIONS_MIN); |
900 | 0 | log_dbg(ctx, "Key slot %d use %" PRIu32 " password iterations.", keyIndex, |
901 | 0 | hdr->keyblock[keyIndex].passwordIterations); |
902 | |
903 | 0 | derived_key = crypt_safe_alloc(hdr->keyBytes); |
904 | 0 | if (!derived_key) { |
905 | 0 | r = -ENOMEM; |
906 | 0 | goto out; |
907 | 0 | } |
908 | | |
909 | 0 | r = crypt_random_get(ctx, hdr->keyblock[keyIndex].passwordSalt, |
910 | 0 | LUKS_SALTSIZE, CRYPT_RND_SALT); |
911 | 0 | if (r < 0) |
912 | 0 | goto out; |
913 | | |
914 | 0 | r = crypt_pbkdf(CRYPT_KDF_PBKDF2, hdr->hashSpec, password, passwordLen, |
915 | 0 | hdr->keyblock[keyIndex].passwordSalt, LUKS_SALTSIZE, |
916 | 0 | derived_key, hdr->keyBytes, |
917 | 0 | hdr->keyblock[keyIndex].passwordIterations, 0, 0); |
918 | 0 | if (r < 0) { |
919 | 0 | if ((crypt_backend_flags() & CRYPT_BACKEND_PBKDF2_INT) && |
920 | 0 | hdr->keyblock[keyIndex].passwordIterations > INT_MAX) |
921 | 0 | log_err(ctx, _("PBKDF2 iteration value overflow.")); |
922 | 0 | goto out; |
923 | 0 | } |
924 | | |
925 | 0 | derived_vk = crypt_alloc_volume_key_by_safe_alloc(&derived_key); |
926 | 0 | if (!derived_vk) { |
927 | 0 | r = -ENOMEM; |
928 | 0 | goto out; |
929 | 0 | } |
930 | | |
931 | | /* |
932 | | * AF splitting: the volume key stored in vk is split into AfKey |
933 | | */ |
934 | 0 | assert(crypt_volume_key_length(vk) == hdr->keyBytes); |
935 | 0 | AFEKSize = AF_split_sectors(crypt_volume_key_length(vk), hdr->keyblock[keyIndex].stripes) * SECTOR_SIZE; |
936 | 0 | AfKey = crypt_safe_alloc(AFEKSize); |
937 | 0 | if (!AfKey) { |
938 | 0 | r = -ENOMEM; |
939 | 0 | goto out; |
940 | 0 | } |
941 | | |
942 | 0 | log_dbg(ctx, "Using hash %s for AF in key slot %d, %d stripes", |
943 | 0 | hdr->hashSpec, keyIndex, hdr->keyblock[keyIndex].stripes); |
944 | 0 | r = AF_split(ctx, crypt_volume_key_get_key(vk), AfKey, crypt_volume_key_length(vk), |
945 | 0 | hdr->keyblock[keyIndex].stripes, hdr->hashSpec); |
946 | 0 | if (r < 0) |
947 | 0 | goto out; |
948 | | |
949 | 0 | log_dbg(ctx, "Updating key slot %d [0x%04x] area.", keyIndex, |
950 | 0 | hdr->keyblock[keyIndex].keyMaterialOffset << 9); |
951 | | /* Encryption via dm */ |
952 | 0 | r = LUKS_encrypt_to_storage(AfKey, |
953 | 0 | AFEKSize, |
954 | 0 | hdr->cipherName, hdr->cipherMode, |
955 | 0 | derived_vk, |
956 | 0 | hdr->keyblock[keyIndex].keyMaterialOffset, |
957 | 0 | ctx); |
958 | 0 | if (r < 0) |
959 | 0 | goto out; |
960 | | |
961 | | /* Mark the key as active in phdr */ |
962 | 0 | r = LUKS_keyslot_set(hdr, (int)keyIndex, 1, ctx); |
963 | 0 | if (r < 0) |
964 | 0 | goto out; |
965 | | |
966 | 0 | r = LUKS_write_phdr(hdr, ctx); |
967 | 0 | if (r < 0) |
968 | 0 | goto out; |
969 | | |
970 | 0 | r = 0; |
971 | 0 | out: |
972 | 0 | crypt_safe_free(AfKey); |
973 | 0 | crypt_safe_free(derived_key); |
974 | 0 | crypt_free_volume_key(derived_vk); |
975 | 0 | return r; |
976 | 0 | } |
977 | | |
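New passphrases reach this keyslot-writing path through the public keyslot API; a hedged sketch using crypt_keyslot_add_by_passphrase() (an existing passphrase is needed so the library can recover the volume key first):

#include <string.h>
#include <libcryptsetup.h>

/* Add a new passphrase to the first free keyslot of an existing LUKS1 device. */
int add_keyslot_example(const char *device, const char *old_pass, const char *new_pass)
{
	struct crypt_device *cd = NULL;
	int r;

	r = crypt_init(&cd, device);
	if (r < 0)
		return r;

	r = crypt_load(cd, CRYPT_LUKS1, NULL);
	if (r < 0)
		goto out;

	/* CRYPT_ANY_SLOT lets the library pick a free slot; returns the slot number. */
	r = crypt_keyslot_add_by_passphrase(cd, CRYPT_ANY_SLOT,
					    old_pass, strlen(old_pass),
					    new_pass, strlen(new_pass));
out:
	crypt_free(cd);
	return r;
}
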
978 | | /* Verify the volume key against the header digest (0 = valid, -EPERM = mismatch). */ |
979 | | int LUKS_verify_volume_key(const struct luks_phdr *hdr, |
980 | | const struct volume_key *vk) |
981 | 0 | { |
982 | 0 | char checkHashBuf[LUKS_DIGESTSIZE]; |
983 | |
984 | 0 | if (crypt_pbkdf(CRYPT_KDF_PBKDF2, hdr->hashSpec, crypt_volume_key_get_key(vk), |
985 | 0 | crypt_volume_key_length(vk), |
986 | 0 | hdr->mkDigestSalt, LUKS_SALTSIZE, |
987 | 0 | checkHashBuf, LUKS_DIGESTSIZE, |
988 | 0 | hdr->mkDigestIterations, 0, 0) < 0) |
989 | 0 | return -EINVAL; |
990 | | |
991 | 0 | if (crypt_backend_memeq(checkHashBuf, hdr->mkDigest, LUKS_DIGESTSIZE)) |
992 | 0 | return -EPERM; |
993 | | |
994 | 0 | return 0; |
995 | 0 | } |
996 | | |
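The digest being checked is plain PBKDF2 over the volume key with the header's salt and iteration count; a standalone sketch of the same computation using OpenSSL (the helper name is hypothetical and "sha256" is an assumed hashSpec):

#include <string.h>
#include <openssl/evp.h>

#define LUKS_DIGESTSIZE 20
#define LUKS_SALTSIZE   32

/* Returns 0 if the candidate volume key matches the stored mkDigest, -1 otherwise. */
int verify_mk_digest_sketch(const unsigned char *vk, size_t vk_len,
			    const unsigned char salt[LUKS_SALTSIZE],
			    unsigned int iterations,
			    const unsigned char mk_digest[LUKS_DIGESTSIZE])
{
	unsigned char check[LUKS_DIGESTSIZE];

	/* hashSpec "sha256" assumed here; LUKS1 headers may also use sha1 and others. */
	if (PKCS5_PBKDF2_HMAC((const char *)vk, (int)vk_len,
			      salt, LUKS_SALTSIZE, (int)iterations,
			      EVP_sha256(), LUKS_DIGESTSIZE, check) != 1)
		return -1;

	/* The real code uses a constant-time comparison (crypt_backend_memeq). */
	return memcmp(check, mk_digest, LUKS_DIGESTSIZE) == 0 ? 0 : -1;
}
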
997 | | /* Try to open a particular key slot */ |
998 | | static int LUKS_open_key(unsigned int keyIndex, |
999 | | const char *password, |
1000 | | size_t passwordLen, |
1001 | | struct luks_phdr *hdr, |
1002 | | struct volume_key **r_vk, |
1003 | | struct crypt_device *ctx) |
1004 | 0 | { |
1005 | 0 | crypt_keyslot_info ki = LUKS_keyslot_info(hdr, keyIndex); |
1006 | 0 | struct volume_key *derived_vk = NULL, *vk = NULL; |
1007 | 0 | char *AfKey = NULL; |
1008 | 0 | void *key = NULL, *derived_key = NULL; |
1009 | 0 | size_t AFEKSize; |
1010 | 0 | int r; |
1011 | |
1012 | 0 | log_dbg(ctx, "Trying to open key slot %d [%s].", keyIndex, |
1013 | 0 | dbg_slot_state(ki)); |
1014 | |
1015 | 0 | if (ki < CRYPT_SLOT_ACTIVE) |
1016 | 0 | return -ENOENT; |
1017 | | |
1018 | 0 | derived_key = crypt_safe_alloc(hdr->keyBytes); |
1019 | 0 | if (!derived_key) |
1020 | 0 | return -ENOMEM; |
1021 | | |
1022 | 0 | key = crypt_safe_alloc(hdr->keyBytes); |
1023 | 0 | if (!key) { |
1024 | 0 | r = -ENOMEM; |
1025 | 0 | goto out; |
1026 | 0 | } |
1027 | | |
1028 | 0 | AFEKSize = AF_split_sectors(hdr->keyBytes, hdr->keyblock[keyIndex].stripes) * SECTOR_SIZE; |
1029 | 0 | AfKey = crypt_safe_alloc(AFEKSize); |
1030 | 0 | if (!AfKey) { |
1031 | 0 | r = -ENOMEM; |
1032 | 0 | goto out; |
1033 | 0 | } |
1034 | | |
1035 | 0 | r = crypt_pbkdf(CRYPT_KDF_PBKDF2, hdr->hashSpec, password, passwordLen, |
1036 | 0 | hdr->keyblock[keyIndex].passwordSalt, LUKS_SALTSIZE, |
1037 | 0 | derived_key, hdr->keyBytes, |
1038 | 0 | hdr->keyblock[keyIndex].passwordIterations, 0, 0); |
1039 | 0 | if (r < 0) { |
1040 | 0 | log_err(ctx, _("Cannot open keyslot (using hash %s)."), hdr->hashSpec); |
1041 | 0 | goto out; |
1042 | 0 | } |
1043 | | |
1044 | 0 | derived_vk = crypt_alloc_volume_key_by_safe_alloc(&derived_key); |
1045 | 0 | if (!derived_vk) { |
1046 | 0 | r = -ENOMEM; |
1047 | 0 | goto out; |
1048 | 0 | } |
1049 | | |
1050 | 0 | log_dbg(ctx, "Reading key slot %d area.", keyIndex); |
1051 | 0 | r = LUKS_decrypt_from_storage(AfKey, |
1052 | 0 | AFEKSize, |
1053 | 0 | hdr->cipherName, hdr->cipherMode, |
1054 | 0 | derived_vk, |
1055 | 0 | hdr->keyblock[keyIndex].keyMaterialOffset, |
1056 | 0 | ctx); |
1057 | 0 | if (r < 0) |
1058 | 0 | goto out; |
1059 | | |
1060 | 0 | r = AF_merge(AfKey, key, hdr->keyBytes, hdr->keyblock[keyIndex].stripes, hdr->hashSpec); |
1061 | 0 | if (r < 0) |
1062 | 0 | goto out; |
1063 | | |
1064 | 0 | vk = crypt_alloc_volume_key_by_safe_alloc(&key); |
1065 | 0 | if (!vk) { |
1066 | 0 | r = -ENOMEM; |
1067 | 0 | goto out; |
1068 | 0 | } |
1069 | | |
1070 | 0 | r = LUKS_verify_volume_key(hdr, vk); |
1071 | 0 | if (r < 0) |
1072 | 0 | goto out; |
1073 | | |
1074 | | /* Allow only empty passphrase with null cipher */ |
1075 | 0 | if (crypt_is_cipher_null(hdr->cipherName) && passwordLen) |
1076 | 0 | r = -EPERM; |
1077 | 0 | else |
1078 | 0 | *r_vk = vk; |
1079 | 0 | out: |
1080 | 0 | if (r < 0) { |
1081 | 0 | crypt_free_volume_key(vk); |
1082 | 0 | *r_vk = NULL; |
1083 | 0 | } |
1084 | 0 | crypt_safe_free(AfKey); |
1085 | 0 | crypt_safe_free(key); |
1086 | 0 | crypt_safe_free(derived_key); |
1087 | 0 | crypt_free_volume_key(derived_vk); |
1088 | 0 | return r; |
1089 | 0 | } |
1090 | | |
1091 | | int LUKS_open_key_with_hdr(int keyIndex, |
1092 | | const char *password, |
1093 | | size_t passwordLen, |
1094 | | struct luks_phdr *hdr, |
1095 | | struct volume_key **vk, |
1096 | | struct crypt_device *ctx) |
1097 | 0 | { |
1098 | 0 | unsigned int i, tried = 0; |
1099 | 0 | int r; |
1100 | |
1101 | 0 | if (keyIndex >= 0) { |
1102 | 0 | r = LUKS_open_key(keyIndex, password, passwordLen, hdr, vk, ctx); |
1103 | 0 | return (r < 0) ? r : keyIndex; |
1104 | 0 | } |
1105 | | |
1106 | 0 | for (i = 0; i < LUKS_NUMKEYS; i++) { |
1107 | 0 | r = LUKS_open_key(i, password, passwordLen, hdr, vk, ctx); |
1108 | 0 | if (r == 0) |
1109 | 0 | return i; |
1110 | | |
1111 | | /* Do not retry for errors other than -EPERM or -ENOENT; |
1112 | | the former means a wrong password, the latter an inactive key slot */ |
1113 | 0 | if ((r != -EPERM) && (r != -ENOENT)) |
1114 | 0 | return r; |
1115 | 0 | if (r == -EPERM) |
1116 | 0 | tried++; |
1117 | 0 | } |
1118 | | /* Warning, early returns above */ |
1119 | 0 | return tried ? -EPERM : -ENOENT; |
1120 | 0 | } |
1121 | | |
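Trying all keyslots with one passphrase, as above, is what crypt_activate_by_passphrase() does for a LUKS1 device; a hedged usage sketch:

#include <string.h>
#include <libcryptsetup.h>

/* Try all keyslots with one passphrase and map the device as /dev/mapper/<name>. */
int activate_example(const char *device, const char *name, const char *pass)
{
	struct crypt_device *cd = NULL;
	int r;

	r = crypt_init(&cd, device);
	if (r < 0)
		return r;

	r = crypt_load(cd, CRYPT_LUKS1, NULL);
	if (r < 0)
		goto out;

	/* Returns the keyslot that unlocked, or -EPERM when no passphrase matched. */
	r = crypt_activate_by_passphrase(cd, name, CRYPT_ANY_SLOT,
					 pass, strlen(pass), 0);
out:
	crypt_free(cd);
	return r;
}
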
1122 | | int LUKS_del_key(unsigned int keyIndex, |
1123 | | struct luks_phdr *hdr, |
1124 | | struct crypt_device *ctx) |
1125 | 0 | { |
1126 | 0 | struct device *device = crypt_metadata_device(ctx); |
1127 | 0 | unsigned int startOffset, endOffset; |
1128 | 0 | int r; |
1129 | |
1130 | 0 | r = LUKS_read_phdr(hdr, 1, 0, ctx); |
1131 | 0 | if (r) |
1132 | 0 | return r; |
1133 | | |
1134 | 0 | r = LUKS_keyslot_set(hdr, keyIndex, 0, ctx); |
1135 | 0 | if (r) { |
1136 | 0 | log_err(ctx, _("Key slot %d is invalid, please select keyslot between 0 and %d."), |
1137 | 0 | keyIndex, LUKS_NUMKEYS - 1); |
1138 | 0 | return r; |
1139 | 0 | } |
1140 | | |
1141 | | /* secure deletion of key material */ |
1142 | 0 | startOffset = hdr->keyblock[keyIndex].keyMaterialOffset; |
1143 | 0 | endOffset = startOffset + AF_split_sectors(hdr->keyBytes, hdr->keyblock[keyIndex].stripes); |
1144 | |
1145 | 0 | r = crypt_wipe_device(ctx, device, CRYPT_WIPE_SPECIAL, startOffset * SECTOR_SIZE, |
1146 | 0 | (endOffset - startOffset) * SECTOR_SIZE, |
1147 | 0 | (endOffset - startOffset) * SECTOR_SIZE, NULL, NULL); |
1148 | 0 | if (r) { |
1149 | 0 | if (r == -EACCES) { |
1150 | 0 | log_err(ctx, _("Cannot write to device %s, permission denied."), |
1151 | 0 | device_path(device)); |
1152 | 0 | r = -EINVAL; |
1153 | 0 | } else |
1154 | 0 | log_err(ctx, _("Cannot wipe device %s."), |
1155 | 0 | device_path(device)); |
1156 | 0 | return r; |
1157 | 0 | } |
1158 | | |
1159 | | /* Wipe keyslot info */ |
1160 | 0 | memset(&hdr->keyblock[keyIndex].passwordSalt, 0, LUKS_SALTSIZE); |
1161 | 0 | hdr->keyblock[keyIndex].passwordIterations = 0; |
1162 | |
1163 | 0 | r = LUKS_write_phdr(hdr, ctx); |
1164 | |
1165 | 0 | return r; |
1166 | 0 | } |
1167 | | |
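From the public API, keyslot removal goes through crypt_keyslot_destroy(); a short sketch (the slot number and device path are illustrative):

#include <libcryptsetup.h>

/* Wipe and disable one keyslot; keyslot 1 here is only an example. */
int destroy_keyslot_example(const char *device)
{
	struct crypt_device *cd = NULL;
	int r;

	r = crypt_init(&cd, device);
	if (r < 0)
		return r;

	r = crypt_load(cd, CRYPT_LUKS1, NULL);
	if (r == 0)
		r = crypt_keyslot_destroy(cd, 1);

	crypt_free(cd);
	return r;
}
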
1168 | | crypt_keyslot_info LUKS_keyslot_info(struct luks_phdr *hdr, int keyslot) |
1169 | 0 | { |
1170 | 0 | int i; |
1171 | |
1172 | 0 | if(keyslot >= LUKS_NUMKEYS || keyslot < 0) |
1173 | 0 | return CRYPT_SLOT_INVALID; |
1174 | | |
1175 | 0 | if (hdr->keyblock[keyslot].active == LUKS_KEY_DISABLED) |
1176 | 0 | return CRYPT_SLOT_INACTIVE; |
1177 | | |
1178 | 0 | if (hdr->keyblock[keyslot].active != LUKS_KEY_ENABLED) |
1179 | 0 | return CRYPT_SLOT_INVALID; |
1180 | | |
1181 | 0 | for(i = 0; i < LUKS_NUMKEYS; i++) |
1182 | 0 | if(i != keyslot && hdr->keyblock[i].active == LUKS_KEY_ENABLED) |
1183 | 0 | return CRYPT_SLOT_ACTIVE; |
1184 | | |
1185 | 0 | return CRYPT_SLOT_ACTIVE_LAST; |
1186 | 0 | } |
1187 | | |
1188 | | int LUKS_keyslot_find_empty(struct luks_phdr *hdr) |
1189 | 0 | { |
1190 | 0 | int i; |
1191 | |
1192 | 0 | for (i = 0; i < LUKS_NUMKEYS; i++) |
1193 | 0 | if(hdr->keyblock[i].active == LUKS_KEY_DISABLED) |
1194 | 0 | break; |
1195 | |
1196 | 0 | if (i == LUKS_NUMKEYS) |
1197 | 0 | return -EINVAL; |
1198 | | |
1199 | 0 | return i; |
1200 | 0 | } |
1201 | | |
1202 | | int LUKS_keyslot_active_count(struct luks_phdr *hdr) |
1203 | 0 | { |
1204 | 0 | int i, num = 0; |
1205 | |
1206 | 0 | for (i = 0; i < LUKS_NUMKEYS; i++) |
1207 | 0 | if(hdr->keyblock[i].active == LUKS_KEY_ENABLED) |
1208 | 0 | num++; |
1209 | |
1210 | 0 | return num; |
1211 | 0 | } |
1212 | | |
1213 | | int LUKS_keyslot_set(struct luks_phdr *hdr, int keyslot, int enable, struct crypt_device *ctx) |
1214 | 0 | { |
1215 | 0 | crypt_keyslot_info ki = LUKS_keyslot_info(hdr, keyslot); |
1216 | |
1217 | 0 | if (ki == CRYPT_SLOT_INVALID) |
1218 | 0 | return -EINVAL; |
1219 | | |
1220 | 0 | hdr->keyblock[keyslot].active = enable ? LUKS_KEY_ENABLED : LUKS_KEY_DISABLED; |
1221 | 0 | log_dbg(ctx, "Key slot %d was %s in LUKS header.", keyslot, enable ? "enabled" : "disabled"); |
1222 | 0 | return 0; |
1223 | 0 | } |
1224 | | |
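The public counterpart of these keyslot state helpers is crypt_keyslot_status(); a small sketch (the fixed 8-slot limit is LUKS1-specific):

#include <stdio.h>
#include <libcryptsetup.h>

/* Print the state of all eight LUKS1 keyslots via the public status call. */
void print_keyslots_example(struct crypt_device *cd)
{
	for (int i = 0; i < 8; i++) {
		crypt_keyslot_info ki = crypt_keyslot_status(cd, i);

		printf("keyslot %d: %s\n", i,
		       ki == CRYPT_SLOT_INACTIVE    ? "inactive" :
		       ki == CRYPT_SLOT_ACTIVE      ? "active" :
		       ki == CRYPT_SLOT_ACTIVE_LAST ? "active (last)" : "invalid");
	}
}
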
1225 | | int LUKS1_activate(struct crypt_device *cd, |
1226 | | const char *name, |
1227 | | struct volume_key *vk, |
1228 | | uint32_t flags) |
1229 | 0 | { |
1230 | 0 | int r; |
1231 | 0 | struct crypt_dm_active_device dmd = { |
1232 | 0 | .flags = flags, |
1233 | 0 | .uuid = crypt_get_uuid(cd), |
1234 | 0 | }; |
1235 | |
1236 | 0 | r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd), |
1237 | 0 | vk, crypt_get_cipher_spec(cd), crypt_get_iv_offset(cd), |
1238 | 0 | crypt_get_data_offset(cd), NULL, 0, 0, crypt_get_sector_size(cd)); |
1239 | 0 | if (!r) |
1240 | 0 | r = create_or_reload_device(cd, name, CRYPT_LUKS1, &dmd); |
1241 | |
1242 | 0 | dm_targets_free(cd, &dmd); |
1243 | |
1244 | 0 | return r; |
1245 | 0 | } |
1246 | | |
1247 | | int LUKS_wipe_header_areas(struct luks_phdr *hdr, |
1248 | | struct crypt_device *ctx) |
1249 | 0 | { |
1250 | 0 | int i, r; |
1251 | 0 | uint64_t offset, length; |
1252 | 0 | size_t wipe_block; |
1253 | |
1254 | 0 | r = LUKS_check_device_size(ctx, hdr, 1); |
1255 | 0 | if (r) |
1256 | 0 | return r; |
1257 | | |
1258 | | /* Wipe complete header, keyslots and padding areas with zeroes. */ |
1259 | 0 | offset = 0; |
1260 | 0 | length = (uint64_t)hdr->payloadOffset * SECTOR_SIZE; |
1261 | 0 | wipe_block = 1024 * 1024; |
1262 | | |
1263 | | /* On detached header or bogus header, wipe at least the first 4k */ |
1264 | 0 | if (length == 0 || length > (LUKS_MAX_KEYSLOT_SIZE * LUKS_NUMKEYS)) { |
1265 | 0 | length = 4096; |
1266 | 0 | wipe_block = 4096; |
1267 | 0 | } |
1268 | |
1269 | 0 | log_dbg(ctx, "Wiping LUKS areas (0x%06" PRIx64 " - 0x%06" PRIx64") with zeroes.", |
1270 | 0 | offset, length + offset); |
1271 | |
1272 | 0 | r = crypt_wipe_device(ctx, crypt_metadata_device(ctx), CRYPT_WIPE_ZERO, |
1273 | 0 | offset, length, wipe_block, NULL, NULL); |
1274 | 0 | if (r < 0) |
1275 | 0 | return r; |
1276 | | |
1277 | | /* Wipe keyslots areas */ |
1278 | 0 | wipe_block = 1024 * 1024; |
1279 | 0 | for (i = 0; i < LUKS_NUMKEYS; i++) { |
1280 | 0 | r = LUKS_keyslot_area(hdr, i, &offset, &length); |
1281 | 0 | if (r < 0) |
1282 | 0 | return r; |
1283 | | |
1284 | | /* Ignore too big LUKS1 keyslots here */ |
1285 | 0 | if (length > LUKS_MAX_KEYSLOT_SIZE || |
1286 | 0 | offset > (LUKS_MAX_KEYSLOT_SIZE - length)) |
1287 | 0 | continue; |
1288 | | |
1289 | 0 | if (length == 0 || offset < 4096) |
1290 | 0 | return -EINVAL; |
1291 | | |
1292 | 0 | log_dbg(ctx, "Wiping keyslot %i area (0x%06" PRIx64 " - 0x%06" PRIx64") with random data.", |
1293 | 0 | i, offset, length + offset); |
1294 | |
1295 | 0 | r = crypt_wipe_device(ctx, crypt_metadata_device(ctx), CRYPT_WIPE_RANDOM, |
1296 | 0 | offset, length, wipe_block, NULL, NULL); |
1297 | 0 | if (r < 0) |
1298 | 0 | return r; |
1299 | 0 | } |
1300 | | |
1301 | 0 | return r; |
1302 | 0 | } |
1303 | | |
1304 | | int LUKS_keyslot_pbkdf(struct luks_phdr *hdr, int keyslot, struct crypt_pbkdf_type *pbkdf) |
1305 | 0 | { |
1306 | 0 | if (LUKS_keyslot_info(hdr, keyslot) < CRYPT_SLOT_ACTIVE) |
1307 | 0 | return -EINVAL; |
1308 | | |
1309 | 0 | pbkdf->type = CRYPT_KDF_PBKDF2; |
1310 | 0 | pbkdf->hash = hdr->hashSpec; |
1311 | 0 | pbkdf->iterations = hdr->keyblock[keyslot].passwordIterations; |
1312 | 0 | pbkdf->max_memory_kb = 0; |
1313 | 0 | pbkdf->parallel_threads = 0; |
1314 | 0 | pbkdf->time_ms = 0; |
1315 | 0 | pbkdf->flags = 0; |
1316 | 0 | return 0; |
1317 | 0 | } |