Coverage Report

Created: 2025-06-13 06:36

/src/cryptsetup/lib/utils_device.c
Line
Count
Source (jump to first uncovered line)
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
 * device backend utilities
4
 *
5
 * Copyright (C) 2004 Jana Saout <jana@saout.de>
6
 * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
7
 * Copyright (C) 2009-2025 Red Hat, Inc. All rights reserved.
8
 * Copyright (C) 2009-2025 Milan Broz
9
 */
10
11
#include <string.h>
12
#include <stdlib.h>
13
#include <errno.h>
14
#include <sys/types.h>
15
#include <sys/stat.h>
16
#include <sys/ioctl.h>
17
#include <linux/fs.h>
18
#include <unistd.h>
19
#if HAVE_SYS_SYSMACROS_H
20
# include <sys/sysmacros.h>     /* for major, minor */
21
#endif
22
#if HAVE_SYS_STATVFS_H
23
# include <sys/statvfs.h>
24
#endif
25
#include "internal.h"
26
#include "utils_device_locking.h"
27
28
struct device {
29
  char *path;
30
31
  char *file_path;
32
  int loop_fd;
33
34
  int ro_dev_fd;
35
  int dev_fd;
36
  int dev_fd_excl;
37
38
  struct crypt_lock_handle *lh;
39
40
  unsigned int o_direct:1;
41
  unsigned int init_done:1; /* path is bdev or loop already initialized */
42
43
  /* cached values */
44
  size_t alignment;
45
  size_t block_size;
46
  size_t loop_block_size;
47
};
48
49
static size_t device_fs_block_size_fd(int fd)
50
12.8k
{
51
12.8k
  size_t max_size = MAX_SECTOR_SIZE;
52
53
12.8k
#if HAVE_SYS_STATVFS_H
54
12.8k
  struct statvfs buf;
55
56
  /*
57
   * NOTE: some filesystems (NFS) returns bogus blocksize (1MB).
58
   * Page-size io should always work and avoids increasing IO beyond aligned LUKS header.
59
   */
60
12.8k
  if (!fstatvfs(fd, &buf) && buf.f_bsize && buf.f_bsize <= max_size)
61
12.8k
    return (size_t)buf.f_bsize;
62
0
#endif
63
0
  return max_size;
64
12.8k
}
65
66
static size_t device_block_size_fd(int fd, size_t *min_size)
67
12.8k
{
68
12.8k
  struct stat st;
69
12.8k
  size_t bsize;
70
12.8k
  int arg;
71
72
12.8k
  if (fstat(fd, &st) < 0)
73
0
    return 0;
74
75
12.8k
  if (S_ISREG(st.st_mode))
76
12.8k
    bsize = device_fs_block_size_fd(fd);
77
0
  else {
78
0
    if (ioctl(fd, BLKSSZGET, &arg) < 0)
79
0
      bsize = crypt_getpagesize();
80
0
    else
81
0
      bsize = (size_t)arg;
82
0
  }
83
84
12.8k
  if (!min_size)
85
6.43k
    return bsize;
86
87
6.43k
  if (S_ISREG(st.st_mode)) {
88
    /* file can be empty as well */
89
6.43k
    if (st.st_size > (ssize_t)bsize)
90
6.43k
      *min_size = bsize;
91
0
    else
92
0
      *min_size = st.st_size;
93
6.43k
  } else {
94
    /* block device must have at least one block */
95
0
    *min_size = bsize;
96
0
  }
97
98
6.43k
  return bsize;
99
12.8k
}
100
101
static size_t device_block_phys_size_fd(int fd)
102
0
{
103
0
  struct stat st;
104
0
  int arg;
105
0
  size_t bsize = SECTOR_SIZE;
106
107
0
  if (fstat(fd, &st) < 0)
108
0
    return bsize;
109
110
0
  if (S_ISREG(st.st_mode))
111
0
    bsize = MAX_SECTOR_SIZE;
112
0
  else if (ioctl(fd, BLKPBSZGET, &arg) >= 0)
113
0
    bsize = (size_t)arg;
114
115
0
  return bsize;
116
0
}
117
118
static size_t device_alignment_fd(int devfd)
119
12.8k
{
120
12.8k
  long alignment = DEFAULT_MEM_ALIGNMENT;
121
122
12.8k
#ifdef _PC_REC_XFER_ALIGN
123
12.8k
  alignment = fpathconf(devfd, _PC_REC_XFER_ALIGN);
124
12.8k
  if (alignment < 0)
125
0
    alignment = DEFAULT_MEM_ALIGNMENT;
126
12.8k
#endif
127
12.8k
  return (size_t)alignment;
128
12.8k
}
129
130
static int device_read_test(struct crypt_device *cd, int devfd)
131
6.43k
{
132
6.43k
  char buffer[512];
133
6.43k
  int r;
134
6.43k
  size_t minsize = 0, blocksize, alignment;
135
6.43k
  struct stat st;
136
137
  /* skip check for block devices, direct-io must work there  */
138
6.43k
  if (fstat(devfd, &st) < 0)
139
0
    return -EINVAL;
140
141
6.43k
  if (S_ISBLK(st.st_mode))
142
0
    return 0;
143
144
6.43k
  blocksize = device_block_size_fd(devfd, &minsize);
145
6.43k
  alignment = device_alignment_fd(devfd);
146
147
6.43k
  if (!blocksize || !alignment)
148
0
    return -EINVAL;
149
150
6.43k
  if (minsize == 0)
151
0
    return 0;
152
153
6.43k
  if (minsize > sizeof(buffer))
154
6.43k
    minsize = sizeof(buffer);
155
156
6.43k
  if (read_blockwise(devfd, blocksize, alignment, buffer, minsize) == (ssize_t)minsize) {
157
6.43k
    log_dbg(cd, "Direct-io read works.");
158
6.43k
    r = 0;
159
6.43k
  } else {
160
0
    log_dbg(cd, "Direct-io read failed.");
161
0
    r = -EIO;
162
0
  }
163
164
6.43k
  crypt_safe_memzero(buffer, sizeof(buffer));
165
6.43k
  return r;
166
6.43k
}
167
168
/*
169
 * The direct-io is always preferred. The header is usually mapped to the same
170
 * device and can be accessed when the rest of device is mapped to data device.
171
 * Using direct-io ensures that we do not mess with data in cache.
172
 * (But proper alignment should prevent this in the first place.)
173
 * The read test is needed to detect broken configurations (seen with remote
174
 * block devices) that allow open with direct-io but then fails on read.
175
 */
176
static int device_ready(struct crypt_device *cd, struct device *device)
177
6.43k
{
178
6.43k
  int devfd = -1, r = 0;
179
6.43k
  struct stat st;
180
6.43k
  size_t tmp_size;
181
182
6.43k
  if (!device)
183
0
    return -EINVAL;
184
185
6.43k
  if (device->o_direct) {
186
6.43k
    log_dbg(cd, "Trying to open device %s with direct-io.",
187
6.43k
      device_path(device));
188
6.43k
    device->o_direct = 0;
189
6.43k
    devfd = open(device_path(device), O_RDONLY | O_DIRECT);
190
6.43k
    if (devfd >= 0) {
191
6.43k
      if (device_read_test(cd, devfd) == 0) {
192
6.43k
        device->o_direct = 1;
193
6.43k
      } else {
194
0
        close(devfd);
195
0
        devfd = -1;
196
0
      }
197
6.43k
    }
198
6.43k
  }
199
200
6.43k
  if (devfd < 0) {
201
0
    log_dbg(cd, "Trying to open device %s without direct-io.",
202
0
      device_path(device));
203
0
    devfd = open(device_path(device), O_RDONLY);
204
0
  }
205
206
6.43k
  if (devfd < 0) {
207
0
    log_err(cd, _("Device %s does not exist or access denied."),
208
0
      device_path(device));
209
0
    return -EINVAL;
210
0
  }
211
212
6.43k
  if (fstat(devfd, &st) < 0)
213
0
    r = -EINVAL;
214
6.43k
  else if (!S_ISBLK(st.st_mode))
215
6.43k
    r = S_ISREG(st.st_mode) ? -ENOTBLK : -EINVAL;
216
6.43k
  if (r == -EINVAL) {
217
0
    log_err(cd, _("Device %s is not compatible."),
218
0
      device_path(device));
219
0
    close(devfd);
220
0
    return r;
221
0
  }
222
223
  /* Allow only increase (loop device) */
224
6.43k
  tmp_size = device_alignment_fd(devfd);
225
6.43k
  if (tmp_size > device->alignment)
226
6.43k
    device->alignment = tmp_size;
227
228
6.43k
  tmp_size = device_block_size_fd(devfd, NULL);
229
6.43k
  if (tmp_size > device->block_size)
230
6.43k
    device->block_size = tmp_size;
231
232
6.43k
  close(devfd);
233
6.43k
  return r;
234
6.43k
}
235
236
static int _open_locked(struct crypt_device *cd, struct device *device, int flags)
237
11.0k
{
238
11.0k
  int fd;
239
240
11.0k
  if (!device)
241
0
    return -EINVAL;
242
243
11.0k
  log_dbg(cd, "Opening locked device %s", device_path(device));
244
245
11.0k
  if ((flags & O_ACCMODE) != O_RDONLY && device_locked_readonly(device->lh)) {
246
2.29k
    log_dbg(cd, "Cannot open locked device %s in write mode. Read lock held.", device_path(device));
247
2.29k
    return -EAGAIN;
248
2.29k
  }
249
250
8.73k
  fd = open(device_path(device), flags);
251
8.73k
  if (fd < 0)
252
0
    return -errno;
253
254
8.73k
  if (device_locked_verify(cd, fd, device->lh)) {
255
    /* fd doesn't correspond to a locked resource */
256
0
    close(fd);
257
0
    log_dbg(cd, "Failed to verify lock resource for device %s.", device_path(device));
258
0
    return -EINVAL;
259
0
  }
260
261
8.73k
  return fd;
262
8.73k
}
263
264
/*
265
 * Common wrapper for device sync.
266
 */
267
void device_sync(struct crypt_device *cd, struct device *device)
268
2.29k
{
269
2.29k
  if (!device || device->dev_fd < 0)
270
0
    return;
271
272
2.29k
  if (fsync(device->dev_fd) == -1)
273
0
    log_dbg(cd, "Cannot sync device %s.", device_path(device));
274
2.29k
}
275
276
/*
277
 * in non-locked mode returns always fd or -1
278
 *
279
 * in locked mode:
280
 *  opened fd or one of:
281
 *  -EAGAIN : requested write mode while device being locked in via shared lock
282
 *  -EINVAL : invalid lock fd state
283
 *  -1  : all other errors
284
 */
285
static int device_open_internal(struct crypt_device *cd, struct device *device, int flags)
286
43.5k
{
287
43.5k
  int access, devfd;
288
289
43.5k
  if (device->o_direct)
290
43.5k
    flags |= O_DIRECT;
291
292
43.5k
  access = flags & O_ACCMODE;
293
43.5k
  if (access == O_WRONLY)
294
0
    access = O_RDWR;
295
296
43.5k
  if (access == O_RDONLY && device->ro_dev_fd >= 0) {
297
32.5k
    log_dbg(cd, "Reusing open r%c fd on device %s", 'o', device_path(device));
298
32.5k
    return device->ro_dev_fd;
299
32.5k
  } else if (access == O_RDWR && device->dev_fd >= 0) {
300
0
    log_dbg(cd, "Reusing open r%c fd on device %s", 'w', device_path(device));
301
0
    return device->dev_fd;
302
0
  }
303
304
11.0k
  if (device_locked(device->lh))
305
11.0k
    devfd = _open_locked(cd, device, flags);
306
0
  else
307
0
    devfd = open(device_path(device), flags);
308
309
11.0k
  if (devfd < 0) {
310
2.29k
    log_dbg(cd, "Cannot open device %s%s.",
311
2.29k
      device_path(device),
312
2.29k
      access != O_RDONLY ? " for write" : "");
313
2.29k
    return devfd;
314
2.29k
  }
315
316
8.73k
  if (access == O_RDONLY)
317
6.43k
    device->ro_dev_fd = devfd;
318
2.29k
  else
319
2.29k
    device->dev_fd = devfd;
320
321
8.73k
  return devfd;
322
11.0k
}
323
324
int device_open(struct crypt_device *cd, struct device *device, int flags)
325
0
{
326
0
  if (!device)
327
0
    return -EINVAL;
328
329
0
  assert(!device_locked(device->lh));
330
0
  return device_open_internal(cd, device, flags);
331
0
}
332
333
int device_open_excl(struct crypt_device *cd, struct device *device, int flags)
334
0
{
335
0
  const char *path;
336
0
  struct stat st;
337
338
0
  if (!device)
339
0
    return -EINVAL;
340
341
0
  assert(!device_locked(device->lh));
342
343
0
  if (device->dev_fd_excl < 0) {
344
0
    path = device_path(device);
345
0
    if (stat(path, &st))
346
0
      return -EINVAL;
347
0
    if (!S_ISBLK(st.st_mode))
348
0
      log_dbg(cd, "%s is not a block device. Can't open in exclusive mode.",
349
0
        path);
350
0
    else {
351
      /* open(2) with O_EXCL (w/o O_CREAT) on regular file is undefined behaviour according to man page */
352
      /* coverity[toctou] */
353
0
      device->dev_fd_excl = open(path, O_RDONLY | O_EXCL); /* lgtm[cpp/toctou-race-condition] */
354
0
      if (device->dev_fd_excl < 0)
355
0
        return errno == EBUSY ? -EBUSY : device->dev_fd_excl;
356
0
      if (fstat(device->dev_fd_excl, &st) || !S_ISBLK(st.st_mode)) {
357
0
        log_dbg(cd, "%s is not a block device. Can't open in exclusive mode.",
358
0
          path);
359
0
        close(device->dev_fd_excl);
360
0
        device->dev_fd_excl = -1;
361
0
      } else
362
0
        log_dbg(cd, "Device %s is blocked for exclusive open.", path);
363
0
    }
364
0
  }
365
366
0
  return device_open_internal(cd, device, flags);
367
0
}
368
369
void device_release_excl(struct crypt_device *cd, struct device *device)
370
0
{
371
0
  if (device && device->dev_fd_excl >= 0) {
372
0
    if (close(device->dev_fd_excl))
373
0
      log_dbg(cd, "Failed to release exclusive handle on device %s.",
374
0
        device_path(device));
375
0
    else
376
0
      log_dbg(cd, "Closed exclusive fd for %s.", device_path(device));
377
0
    device->dev_fd_excl = -1;
378
0
  }
379
0
}
380
381
int device_open_locked(struct crypt_device *cd, struct device *device, int flags)
382
43.5k
{
383
43.5k
  if (!device)
384
0
    return -EINVAL;
385
386
43.5k
  assert(!crypt_metadata_locking_enabled() || device_locked(device->lh));
387
43.5k
  return device_open_internal(cd, device, flags);
388
43.5k
}
389
390
/* Avoid any read from device, expects direct-io to work. */
391
int device_alloc_no_check(struct device **device, const char *path)
392
6.43k
{
393
6.43k
  struct device *dev;
394
395
6.43k
  if (!path) {
396
0
    *device = NULL;
397
0
    return 0;
398
0
  }
399
400
6.43k
  dev = malloc(sizeof(struct device));
401
6.43k
  if (!dev)
402
0
    return -ENOMEM;
403
404
6.43k
  memset(dev, 0, sizeof(struct device));
405
6.43k
  dev->path = strdup(path);
406
6.43k
  if (!dev->path) {
407
0
    free(dev);
408
0
    return -ENOMEM;
409
0
  }
410
6.43k
  dev->loop_fd = -1;
411
6.43k
  dev->ro_dev_fd = -1;
412
6.43k
  dev->dev_fd = -1;
413
6.43k
  dev->dev_fd_excl = -1;
414
6.43k
  dev->o_direct = 1;
415
416
6.43k
  *device = dev;
417
6.43k
  return 0;
418
6.43k
}
419
420
int device_alloc(struct crypt_device *cd, struct device **device, const char *path)
421
6.43k
{
422
6.43k
  struct device *dev;
423
6.43k
  int r;
424
425
6.43k
  r = device_alloc_no_check(&dev, path);
426
6.43k
  if (r < 0)
427
0
    return r;
428
429
6.43k
  if (dev) {
430
6.43k
    r = device_ready(cd, dev);
431
6.43k
    if (!r) {
432
0
      dev->init_done = 1;
433
6.43k
    } else if (r == -ENOTBLK) {
434
      /* alloc loop later */
435
6.43k
    } else if (r < 0) {
436
0
      free(dev->path);
437
0
      free(dev);
438
0
      return -ENOTBLK;
439
0
    }
440
6.43k
  }
441
442
6.43k
  *device = dev;
443
6.43k
  return 0;
444
6.43k
}
445
446
void device_free(struct crypt_device *cd, struct device *device)
447
12.8k
{
448
12.8k
  if (!device)
449
6.43k
    return;
450
451
6.43k
  device_close(cd, device);
452
453
6.43k
  if (device->dev_fd_excl != -1) {
454
0
    log_dbg(cd, "Closed exclusive fd for %s.", device_path(device));
455
0
    close(device->dev_fd_excl);
456
0
  }
457
458
6.43k
  if (device->loop_fd != -1) {
459
0
    log_dbg(cd, "Closed loop %s (%s).", device->path, device->file_path);
460
0
    close(device->loop_fd);
461
0
  }
462
463
6.43k
  assert(!device_locked(device->lh));
464
465
6.43k
  free(device->file_path);
466
6.43k
  free(device->path);
467
6.43k
  free(device);
468
6.43k
}
469
470
/* Get block device path */
471
const char *device_block_path(const struct device *device)
472
0
{
473
0
  if (!device)
474
0
    return NULL;
475
476
0
  return device->path;
477
0
}
478
479
/* Get path to device / file */
480
const char *device_path(const struct device *device)
481
156k
{
482
156k
  if (!device)
483
0
    return NULL;
484
485
156k
  if (device->file_path)
486
0
    return device->file_path;
487
488
156k
  return device->path;
489
156k
}
490
491
/* block device topology ioctls, introduced in 2.6.32 */
492
#ifndef BLKIOMIN
493
#define BLKIOMIN    _IO(0x12,120)
494
#define BLKIOOPT    _IO(0x12,121)
495
#define BLKALIGNOFF _IO(0x12,122)
496
#endif
497
498
void device_topology_alignment(struct crypt_device *cd,
499
             struct device *device,
500
             unsigned long *required_alignment, /* bytes */
501
             unsigned long *alignment_offset,   /* bytes */
502
             unsigned long default_alignment)
503
0
{
504
0
  int dev_alignment_offset = 0;
505
0
  unsigned int min_io_size = 0, opt_io_size = 0;
506
0
  unsigned long temp_alignment = 0;
507
0
  int fd;
508
509
0
  *required_alignment = default_alignment;
510
0
  *alignment_offset = 0;
511
512
0
  if (!device || !device->path) //FIXME
513
0
    return;
514
515
0
  fd = open(device->path, O_RDONLY);
516
0
  if (fd == -1)
517
0
    return;
518
519
  /* minimum io size */
520
0
  if (ioctl(fd, BLKIOMIN, &min_io_size) == -1) {
521
0
    log_dbg(cd, "Topology info for %s not supported, using default alignment %lu bytes.",
522
0
      device->path, default_alignment);
523
0
    goto out;
524
0
  }
525
526
  /* optimal io size */
527
0
  if (ioctl(fd, BLKIOOPT, &opt_io_size) == -1)
528
0
    opt_io_size = min_io_size;
529
530
  /* alignment offset, bogus -1 means misaligned/unknown */
531
0
  if (ioctl(fd, BLKALIGNOFF, &dev_alignment_offset) == -1 || dev_alignment_offset < 0)
532
0
    dev_alignment_offset = 0;
533
0
  *alignment_offset = (unsigned long)dev_alignment_offset;
534
535
0
  temp_alignment = (unsigned long)min_io_size;
536
537
  /*
538
   * Ignore bogus opt-io that could break alignment.
539
   * Also real opt_io_size should be aligned to minimal page size (4k).
540
   * Some bogus USB enclosures reports wrong data here.
541
   */
542
0
  if ((temp_alignment < (unsigned long)opt_io_size) &&
543
0
      !((unsigned long)opt_io_size % temp_alignment) && !MISALIGNED_4K(opt_io_size))
544
0
    temp_alignment = (unsigned long)opt_io_size;
545
0
  else if (opt_io_size && (opt_io_size != min_io_size))
546
0
    log_err(cd, _("Ignoring bogus optimal-io size for data device (%u bytes)."), opt_io_size);
547
548
  /* If calculated alignment is multiple of default, keep default */
549
0
  if (temp_alignment && (default_alignment % temp_alignment))
550
0
    *required_alignment = temp_alignment;
551
552
0
  log_dbg(cd, "Topology: IO (%u/%u), offset = %lu; Required alignment is %lu bytes.",
553
0
    min_io_size, opt_io_size, *alignment_offset, *required_alignment);
554
0
out:
555
0
  (void)close(fd);
556
0
}
557
558
size_t device_block_size(struct crypt_device *cd, struct device *device)
559
61.0k
{
560
61.0k
  int fd;
561
562
61.0k
  if (!device)
563
0
    return 0;
564
565
61.0k
  if (device->block_size)
566
61.0k
    return device->block_size;
567
568
0
  fd = open(device->file_path ?: device->path, O_RDONLY);
569
0
  if (fd >= 0) {
570
0
    device->block_size = device_block_size_fd(fd, NULL);
571
0
    close(fd);
572
0
  }
573
574
0
  if (!device->block_size)
575
0
    log_dbg(cd, "Cannot get block size for device %s.", device_path(device));
576
577
0
  return device->block_size;
578
0
}
579
580
size_t device_optimal_encryption_sector_size(struct crypt_device *cd, struct device *device)
581
0
{
582
0
  int fd;
583
0
  size_t phys_block_size;
584
585
0
  if (!device)
586
0
    return SECTOR_SIZE;
587
588
0
  fd = open(device->file_path ?: device->path, O_RDONLY);
589
0
  if (fd < 0) {
590
0
    log_dbg(cd, "Cannot get optimal encryption sector size for device %s.", device_path(device));
591
0
    return SECTOR_SIZE;
592
0
  }
593
594
  /* cache device block size */
595
0
  device->block_size = device_block_size_fd(fd, NULL);
596
0
  if (!device->block_size) {
597
0
    close(fd);
598
0
    log_dbg(cd, "Cannot get block size for device %s.", device_path(device));
599
0
    return SECTOR_SIZE;
600
0
  }
601
602
0
  if (device->block_size >= MAX_SECTOR_SIZE) {
603
0
    close(fd);
604
0
    return MISALIGNED(device->block_size, MAX_SECTOR_SIZE) ? SECTOR_SIZE : MAX_SECTOR_SIZE;
605
0
  }
606
607
0
  phys_block_size = device_block_phys_size_fd(fd);
608
0
  close(fd);
609
610
0
  if (device->block_size >= phys_block_size ||
611
0
      phys_block_size <= SECTOR_SIZE ||
612
0
      phys_block_size > MAX_SECTOR_SIZE ||
613
0
      MISALIGNED(phys_block_size, device->block_size))
614
0
    return device->block_size;
615
616
0
  return phys_block_size;
617
0
}
618
619
int device_read_ahead(struct device *device, uint32_t *read_ahead)
620
0
{
621
0
  int fd, r = 0;
622
0
  long read_ahead_long;
623
624
0
  if (!device)
625
0
    return 0;
626
627
0
  if ((fd = open(device->path, O_RDONLY)) < 0)
628
0
    return 0;
629
630
0
  r = ioctl(fd, BLKRAGET, &read_ahead_long) ? 0 : 1;
631
0
  close(fd);
632
633
0
  if (r)
634
0
    *read_ahead = (uint32_t) read_ahead_long;
635
636
0
  return r;
637
0
}
638
639
/* Get data size in bytes */
640
int device_size(struct device *device, uint64_t *size)
641
6.61k
{
642
6.61k
  struct stat st;
643
6.61k
  int devfd, r = -EINVAL;
644
645
6.61k
  if (!device)
646
0
    return -EINVAL;
647
648
6.61k
  devfd = open(device->path, O_RDONLY);
649
6.61k
  if (devfd == -1)
650
0
    return -EINVAL;
651
652
6.61k
  if (fstat(devfd, &st) < 0)
653
0
    goto out;
654
655
6.61k
  if (S_ISREG(st.st_mode)) {
656
6.61k
    *size = (uint64_t)st.st_size;
657
6.61k
    r = 0;
658
6.61k
  } else if (ioctl(devfd, BLKGETSIZE64, size) >= 0)
659
0
    r = 0;
660
6.61k
out:
661
6.61k
  close(devfd);
662
6.61k
  return r;
663
6.61k
}
664
665
/* For a file, allocate the required space */
666
int device_fallocate(struct device *device, uint64_t size)
667
0
{
668
0
  struct stat st;
669
0
  int devfd, r = -EINVAL;
670
671
0
  if (!device)
672
0
    return -EINVAL;
673
674
0
  devfd = open(device_path(device), O_RDWR);
675
0
  if (devfd == -1)
676
0
    return -EINVAL;
677
678
0
  if (!fstat(devfd, &st) && S_ISREG(st.st_mode) &&
679
0
      ((uint64_t)st.st_size >= size || !posix_fallocate(devfd, 0, size))) {
680
0
    r = 0;
681
0
    if (device->file_path && crypt_loop_resize(device->path))
682
0
      r = -EINVAL;
683
0
  }
684
685
0
  close(devfd);
686
0
  return r;
687
0
}
688
689
int device_check_size(struct crypt_device *cd,
690
          struct device *device,
691
          uint64_t req_offset, int falloc)
692
6.61k
{
693
6.61k
  uint64_t dev_size;
694
695
6.61k
  if (device_size(device, &dev_size)) {
696
0
    log_dbg(cd, "Cannot get device size for device %s.", device_path(device));
697
0
    return -EIO;
698
0
  }
699
700
6.61k
  log_dbg(cd, "Device size %" PRIu64 ", offset %" PRIu64 ".", dev_size, req_offset);
701
702
6.61k
  if (req_offset > dev_size) {
703
    /* If it is header file, increase its size */
704
1
    if (falloc && !device_fallocate(device, req_offset))
705
0
      return 0;
706
707
1
    log_err(cd, _("Device %s is too small. Need at least %" PRIu64 " bytes."),
708
1
      device_path(device), req_offset);
709
1
    return -EINVAL;
710
1
  }
711
712
6.61k
  return 0;
713
6.61k
}
714
715
static int device_info(struct crypt_device *cd,
716
           struct device *device,
717
           enum devcheck device_check,
718
           int *readonly, uint64_t *size)
719
0
{
720
0
  struct stat st;
721
0
  int fd = -1, r, flags = 0, real_readonly;
722
0
  uint64_t real_size;
723
724
0
  if (!device)
725
0
    return -ENOTBLK;
726
727
0
  real_readonly = 0;
728
0
  real_size = 0;
729
730
0
  if (stat(device->path, &st) < 0) {
731
0
    r = -EINVAL;
732
0
    goto out;
733
0
  }
734
735
  /* never wipe header on mounted device */
736
0
  if (device_check == DEV_EXCL && S_ISBLK(st.st_mode))
737
0
    flags |= O_EXCL;
738
739
  /* Try to open read-write to check whether it is a read-only device */
740
  /* coverity[toctou] */
741
0
  fd = open(device->path, O_RDWR | flags);
742
0
  if (fd == -1 && errno == EROFS) {
743
0
    real_readonly = 1;
744
0
    fd = open(device->path, O_RDONLY | flags);
745
0
  }
746
747
0
  if (fd == -1 && device_check == DEV_EXCL && errno == EBUSY) {
748
0
    r = -EBUSY;
749
0
    goto out;
750
0
  }
751
752
0
  if (fd == -1) {
753
0
    r = errno ? -errno : -EINVAL;
754
0
    goto out;
755
0
  }
756
757
0
  r = 0;
758
0
  if (S_ISREG(st.st_mode)) {
759
    //FIXME: add readonly check
760
0
    real_size = (uint64_t)st.st_size;
761
0
    real_size >>= SECTOR_SHIFT;
762
0
  } else {
763
    /* If the device can be opened read-write, i.e. readonly is still 0, then
764
     * check whether BKROGET says that it is read-only. E.g. read-only loop
765
     * devices may be opened read-write but are read-only according to BLKROGET
766
     */
767
0
    if (real_readonly == 0 && (r = ioctl(fd, BLKROGET, &real_readonly)) < 0)
768
0
      goto out;
769
770
0
    r = ioctl(fd, BLKGETSIZE64, &real_size);
771
0
    if (r >= 0) {
772
0
      real_size >>= SECTOR_SHIFT;
773
0
      goto out;
774
0
    }
775
0
  }
776
0
out:
777
0
  if (fd != -1)
778
0
    close(fd);
779
780
0
  switch (r) {
781
0
  case 0:
782
0
    if (readonly)
783
0
      *readonly = real_readonly;
784
0
    if (size)
785
0
      *size = real_size;
786
0
    break;
787
0
  case -EBUSY:
788
0
    log_err(cd, _("Cannot use device %s which is in use "
789
0
            "(already mapped or mounted)."), device_path(device));
790
0
    break;
791
0
  case -EACCES:
792
0
    log_err(cd, _("Cannot use device %s, permission denied."), device_path(device));
793
0
    break;
794
0
  default:
795
0
    log_err(cd, _("Cannot get info about device %s."), device_path(device));
796
0
    r = -EINVAL;
797
0
  }
798
799
0
  return r;
800
0
}
801
802
int device_check_access(struct crypt_device *cd,
803
      struct device *device,
804
      enum devcheck device_check)
805
0
{
806
0
  return device_info(cd, device, device_check, NULL, NULL);
807
0
}
808
809
static int device_internal_prepare(struct crypt_device *cd, struct device *device)
810
0
{
811
0
  char *loop_device = NULL, *file_path = NULL;
812
0
  int r, loop_fd, readonly = 0;
813
814
0
  if (device->init_done)
815
0
    return 0;
816
817
0
  if (getuid() || geteuid()) {
818
0
    log_err(cd, _("Cannot use a loopback device, "
819
0
            "running as non-root user."));
820
0
    return -ENOTSUP;
821
0
  }
822
823
0
  log_dbg(cd, "Allocating a free loop device (block size: %zu).",
824
0
    device->loop_block_size ?: SECTOR_SIZE);
825
826
  /* Keep the loop open, detached on last close. */
827
0
  loop_fd = crypt_loop_attach(&loop_device, device->path, 0, 1, &readonly, device->loop_block_size);
828
0
  if (loop_fd == -1) {
829
0
    log_err(cd, _("Attaching loopback device failed "
830
0
      "(loop device with autoclear flag is required)."));
831
0
    free(loop_device);
832
0
    return -EINVAL;
833
0
  }
834
835
0
  file_path = device->path;
836
0
  device->path = loop_device;
837
838
0
  r = device_ready(cd, device);
839
0
  if (r < 0) {
840
0
    device->path = file_path;
841
0
    crypt_loop_detach(loop_device);
842
0
    free(loop_device);
843
0
    return r;
844
0
  }
845
846
0
  log_dbg(cd, "Attached loop device block size is %zu bytes.", device_block_size_fd(loop_fd, NULL));
847
848
0
  device->loop_fd = loop_fd;
849
0
  device->file_path = file_path;
850
0
  device->init_done = 1;
851
852
0
  return 0;
853
0
}
854
855
int device_block_adjust(struct crypt_device *cd,
856
      struct device *device,
857
      enum devcheck device_check,
858
      uint64_t device_offset,
859
      uint64_t *size,
860
      uint32_t *flags)
861
0
{
862
0
  int r, real_readonly;
863
0
  uint64_t real_size;
864
865
0
  if (!device)
866
0
    return -ENOTBLK;
867
868
0
  r = device_internal_prepare(cd, device);
869
0
  if (r)
870
0
    return r;
871
872
0
  r = device_info(cd, device, device_check, &real_readonly, &real_size);
873
0
  if (r)
874
0
    return r;
875
876
0
  if (device_offset >= real_size) {
877
0
    log_err(cd, _("Requested offset is beyond real size of device %s."),
878
0
      device_path(device));
879
0
    return -EINVAL;
880
0
  }
881
882
0
  if (size && !*size) {
883
0
    *size = real_size;
884
0
    if (!*size) {
885
0
      log_err(cd, _("Device %s has zero size."), device_path(device));
886
0
      return -ENOTBLK;
887
0
    }
888
0
    *size -= device_offset;
889
0
  }
890
891
  /* in case of size is set by parameter */
892
0
  if (size && ((real_size - device_offset) < *size)) {
893
0
    log_dbg(cd, "Device %s: offset = %" PRIu64 " requested size = %" PRIu64
894
0
      ", backing device size = %" PRIu64,
895
0
      device->path, device_offset, *size, real_size);
896
0
    log_err(cd, _("Device %s is too small."), device_path(device));
897
0
    return -EINVAL;
898
0
  }
899
900
0
  if (flags && real_readonly)
901
0
    *flags |= CRYPT_ACTIVATE_READONLY;
902
903
0
  if (size)
904
0
    log_dbg(cd, "Calculated device size is %" PRIu64" sectors (%s), offset %" PRIu64 ".",
905
0
    *size, real_readonly ? "RO" : "RW", device_offset);
906
0
  return 0;
907
0
}
908
909
size_t size_round_up(size_t size, size_t block)
910
0
{
911
0
  size_t s = (size + (block - 1)) / block;
912
0
  return s * block;
913
0
}
914
915
void device_disable_direct_io(struct device *device)
916
0
{
917
0
  if (device)
918
0
    device->o_direct = 0;
919
0
}
920
921
int device_direct_io(const struct device *device)
922
6.43k
{
923
6.43k
  return device ? device->o_direct : 0;
924
6.43k
}
925
926
static int device_compare_path(const char *path1, const char *path2)
927
0
{
928
0
  struct stat st_path1, st_path2;
929
930
0
  if (stat(path1, &st_path1 ) < 0 || stat(path2, &st_path2 ) < 0)
931
0
    return -EINVAL;
932
933
0
  if (S_ISBLK(st_path1.st_mode) && S_ISBLK(st_path2.st_mode))
934
0
    return (st_path1.st_rdev == st_path2.st_rdev) ? 1 : 0;
935
936
0
  if (S_ISREG(st_path1.st_mode) && S_ISREG(st_path2.st_mode))
937
0
    return (st_path1.st_ino == st_path2.st_ino &&
938
0
      st_path1.st_dev == st_path2.st_dev) ? 1 : 0;
939
940
0
  return 0;
941
0
}
942
943
int device_is_identical(struct device *device1, struct device *device2)
944
0
{
945
0
  if (!device1 || !device2)
946
0
    return 0;
947
948
0
  if (device1 == device2)
949
0
    return 1;
950
951
0
  if (!strcmp(device_path(device1), device_path(device2)))
952
0
    return 1;
953
954
0
  return device_compare_path(device_path(device1), device_path(device2));
955
0
}
956
957
int device_is_rotational(struct device *device)
958
0
{
959
0
  struct stat st;
960
961
0
  if (!device)
962
0
    return -EINVAL;
963
964
0
  if (stat(device_path(device), &st) < 0)
965
0
    return -EINVAL;
966
967
0
  if (!S_ISBLK(st.st_mode))
968
0
    return 0;
969
970
0
  return crypt_dev_is_rotational(major(st.st_rdev), minor(st.st_rdev));
971
0
}
972
973
int device_is_dax(struct device *device)
974
0
{
975
0
  struct stat st;
976
977
0
  if (!device)
978
0
    return -EINVAL;
979
980
0
  if (stat(device_path(device), &st) < 0)
981
0
    return -EINVAL;
982
983
0
  if (!S_ISBLK(st.st_mode))
984
0
    return 0;
985
986
0
  return crypt_dev_is_dax(major(st.st_rdev), minor(st.st_rdev));
987
0
}
988
989
int device_is_zoned(struct device *device)
990
0
{
991
0
  struct stat st;
992
993
0
  if (!device)
994
0
    return -EINVAL;
995
996
0
  if (stat(device_path(device), &st) < 0)
997
0
    return -EINVAL;
998
999
0
  if (!S_ISBLK(st.st_mode))
1000
0
    return 0;
1001
1002
0
  return crypt_dev_is_zoned(major(st.st_rdev), minor(st.st_rdev));
1003
0
}
1004
1005
int device_is_nop_dif(struct device *device, uint32_t *tag_size)
1006
0
{
1007
0
  struct stat st;
1008
1009
0
  if (!device)
1010
0
    return -EINVAL;
1011
1012
0
  if (stat(device_path(device), &st) < 0)
1013
0
    return -EINVAL;
1014
1015
0
  if (!S_ISBLK(st.st_mode))
1016
0
    return 0;
1017
1018
0
  return crypt_dev_is_nop_dif(major(st.st_rdev), minor(st.st_rdev), tag_size);
1019
0
}
1020
1021
size_t device_alignment(struct device *device)
1022
61.0k
{
1023
61.0k
  int devfd;
1024
1025
61.0k
  if (!device)
1026
0
    return -EINVAL;
1027
1028
61.0k
  if (!device->alignment) {
1029
0
    devfd = open(device_path(device), O_RDONLY);
1030
0
    if (devfd != -1) {
1031
0
      device->alignment = device_alignment_fd(devfd);
1032
0
      close(devfd);
1033
0
    }
1034
0
  }
1035
1036
61.0k
  return device->alignment;
1037
61.0k
}
1038
1039
void device_set_lock_handle(struct device *device, struct crypt_lock_handle *h)
1040
17.4k
{
1041
17.4k
  if (device)
1042
17.4k
    device->lh = h;
1043
17.4k
}
1044
1045
struct crypt_lock_handle *device_get_lock_handle(struct device *device)
1046
17.4k
{
1047
17.4k
  return device ? device->lh : NULL;
1048
17.4k
}
1049
1050
int device_read_lock(struct crypt_device *cd, struct device *device)
1051
6.43k
{
1052
6.43k
  if (!device || !crypt_metadata_locking_enabled())
1053
0
    return 0;
1054
1055
6.43k
  if (device_read_lock_internal(cd, device))
1056
0
    return -EBUSY;
1057
1058
6.43k
  return 0;
1059
6.43k
}
1060
1061
int device_write_lock(struct crypt_device *cd, struct device *device)
1062
2.29k
{
1063
2.29k
  if (!device || !crypt_metadata_locking_enabled())
1064
0
    return 0;
1065
1066
2.29k
  assert(!device_locked(device->lh) || !device_locked_readonly(device->lh));
1067
1068
2.29k
  return device_write_lock_internal(cd, device);
1069
2.29k
}
1070
1071
void device_read_unlock(struct crypt_device *cd, struct device *device)
1072
6.43k
{
1073
6.43k
  if (!device || !crypt_metadata_locking_enabled())
1074
0
    return;
1075
1076
6.43k
  assert(device_locked(device->lh));
1077
1078
6.43k
  device_unlock_internal(cd, device);
1079
6.43k
}
1080
1081
void device_write_unlock(struct crypt_device *cd, struct device *device)
1082
2.29k
{
1083
2.29k
  if (!device || !crypt_metadata_locking_enabled())
1084
0
    return;
1085
1086
2.29k
  assert(device_locked(device->lh) && !device_locked_readonly(device->lh));
1087
1088
2.29k
  device_unlock_internal(cd, device);
1089
2.29k
}
1090
1091
bool device_is_locked(struct device *device)
1092
0
{
1093
0
  return device ? device_locked(device->lh) : 0;
1094
0
}
1095
1096
void device_close(struct crypt_device *cd, struct device *device)
1097
6.43k
{
1098
6.43k
  if (!device)
1099
0
    return;
1100
1101
6.43k
  if (device->ro_dev_fd != -1) {
1102
6.43k
    log_dbg(cd, "Closing read only fd for %s.", device_path(device));
1103
6.43k
    if (close(device->ro_dev_fd))
1104
0
      log_dbg(cd, "Failed to close read only fd for %s.", device_path(device));
1105
6.43k
    device->ro_dev_fd = -1;
1106
6.43k
  }
1107
1108
6.43k
  if (device->dev_fd != -1) {
1109
2.29k
    log_dbg(cd, "Closing read write fd for %s.", device_path(device));
1110
2.29k
    if (close(device->dev_fd))
1111
0
      log_dbg(cd, "Failed to close read write fd for %s.", device_path(device));
1112
2.29k
    device->dev_fd = -1;
1113
2.29k
  }
1114
6.43k
}
1115
1116
void device_set_block_size(struct device *device, size_t size)
1117
2.29k
{
1118
2.29k
  if (!device)
1119
0
    return;
1120
1121
2.29k
  device->loop_block_size = size;
1122
2.29k
}