Coverage Report

Created: 2025-11-07 06:58

/src/cryptsetup/lib/utils_wipe.c
Every instrumented line in this file reports an execution count of 0; none of the code in utils_wipe.c was exercised by the measured run. The source is reproduced below with the per-line Line/Count columns omitted.

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * utils_wipe - wipe a device
 *
 * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2009-2025 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2009-2025 Milan Broz
 */

#include <stdlib.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <linux/fs.h>
#include "internal.h"
#include "luks2/luks2_internal.h"
#include "luks2/hw_opal/hw_opal.h"

/* block device zeroout ioctls, introduced in Linux kernel 3.7 */
#ifndef BLKZEROOUT
#define BLKZEROOUT _IO(0x12,127)
#endif

static int wipe_zeroout(struct crypt_device *cd, int devfd,
      uint64_t offset, uint64_t length)
{
  static bool zeroout_available = true;
  uint64_t range[2] = { offset, length };
  int r;

  if (!zeroout_available)
    return -ENOTSUP;

  r = ioctl(devfd, BLKZEROOUT, &range);
  if (r < 0) {
    log_dbg(cd, "BLKZEROOUT ioctl not available (error %i), disabling.", r);
    zeroout_available = false;
    return -ENOTSUP;
  }

  return 0;
}

/*
 * Wipe using Peter Gutmann method described in
 * https://www.cs.auckland.ac.nz/~pgut001/pubs/secure_del.html
 * Note: used only for rotational device (and even there it is not needed today...)
 */
static void wipeSpecial(char *buffer, size_t buffer_size, unsigned int turn)
{
  unsigned int i;

  const unsigned char write_modes[27][4] = {
    {"\x55\x55\x55"}, {"\xaa\xaa\xaa"}, {"\x92\x49\x24"},
    {"\x49\x24\x92"}, {"\x24\x92\x49"}, {"\x00\x00\x00"},
    {"\x11\x11\x11"}, {"\x22\x22\x22"}, {"\x33\x33\x33"},
    {"\x44\x44\x44"}, {"\x55\x55\x55"}, {"\x66\x66\x66"},
    {"\x77\x77\x77"}, {"\x88\x88\x88"}, {"\x99\x99\x99"},
    {"\xaa\xaa\xaa"}, {"\xbb\xbb\xbb"}, {"\xcc\xcc\xcc"},
    {"\xdd\xdd\xdd"}, {"\xee\xee\xee"}, {"\xff\xff\xff"},
    {"\x92\x49\x24"}, {"\x49\x24\x92"}, {"\x24\x92\x49"},
    {"\x6d\xb6\xdb"}, {"\xb6\xdb\x6d"}, {"\xdb\x6d\xb6"}
  };

  for (i = 0; i < buffer_size / 3; ++i) {
    memcpy(buffer, write_modes[turn], 3);
    buffer += 3;
  }
}

static int crypt_wipe_special(struct crypt_device *cd, int fd, size_t bsize,
            size_t alignment, char *buffer,
            uint64_t offset, size_t size)
{
  int r = 0;
  unsigned int i;
  ssize_t written;

  for (i = 0; i < 39; ++i) {
    if (i <  5) {
      r = crypt_random_get(cd, buffer, size, CRYPT_RND_NORMAL);
    } else if (i >=  5 && i < 32) {
      wipeSpecial(buffer, size, i - 5);
      r = 0;
    } else if (i >= 32 && i < 38) {
      r = crypt_random_get(cd, buffer, size, CRYPT_RND_NORMAL);
    } else if (i >= 38 && i < 39) {
      memset(buffer, 0xFF, size);
      r = 0;
    }
    if (r < 0)
      return -EIO;

    written = write_lseek_blockwise(fd, bsize, alignment,
            buffer, size, offset);
    if (written < 0 || written != (ssize_t)size)
      return -EIO;
  }

  /* Rewrite it finally with random */
  if (crypt_random_get(cd, buffer, size, CRYPT_RND_NORMAL) < 0)
    return -EIO;

  written = write_lseek_blockwise(fd, bsize, alignment, buffer, size, offset);
  if (written < 0 || written != (ssize_t)size)
    return -EIO;

  return 0;
}

static int wipe_block(struct crypt_device *cd, int devfd, crypt_wipe_pattern pattern,
          char *sf, size_t device_block_size, size_t alignment,
          size_t wipe_block_size, uint64_t offset, bool *need_block_init,
          bool blockdev)
{
  int r;

  if (pattern == CRYPT_WIPE_SPECIAL)
    return crypt_wipe_special(cd, devfd, device_block_size, alignment,
            sf, offset, wipe_block_size);

  if (*need_block_init) {
    if (pattern == CRYPT_WIPE_ZERO) {
      memset(sf, 0, wipe_block_size);
      *need_block_init = false;
      r = 0;
    } else if (pattern == CRYPT_WIPE_RANDOM ||
         pattern == CRYPT_WIPE_ENCRYPTED_ZERO) {
      r = crypt_random_get(cd, sf, wipe_block_size,
               CRYPT_RND_NORMAL) ? -EIO : 0;
      *need_block_init = true;
    } else
      r = -EINVAL;

    if (r)
      return r;
  }

  if (blockdev && pattern == CRYPT_WIPE_ZERO &&
      !wipe_zeroout(cd, devfd, offset, wipe_block_size)) {
    /* zeroout ioctl does not move offset */
    if (lseek(devfd, offset + wipe_block_size, SEEK_SET) < 0) {
      log_err(cd, _("Cannot seek to device offset."));
      return -EINVAL;
    }
    return 0;
  }

  if (write_blockwise(devfd, device_block_size, alignment, sf,
          wipe_block_size) == (ssize_t)wipe_block_size)
    return 0;

  return -EIO;
}

int crypt_wipe_device(struct crypt_device *cd,
  struct device *device,
  crypt_wipe_pattern pattern,
  uint64_t offset,
  uint64_t length,
  size_t wipe_block_size,
  int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
  void *usrptr)
{
  int r, devfd;
  struct stat st;
  size_t bsize, alignment;
  char *sf = NULL;
  uint64_t dev_size;
  bool need_block_init = true;

  /* Note: LUKS1 calls it with wipe_block not aligned to multiple of bsize */
  bsize = device_block_size(cd, device);
  alignment = device_alignment(device);

  log_dbg(cd, "Wipe device %s [%u], offset %" PRIu64 ", length %" PRIu64 ", block %zu, bsize %zu, align %zu.",
    device_path(device), (unsigned)pattern, offset, length, wipe_block_size, bsize, alignment);

  if (!bsize || !alignment || !wipe_block_size)
    return -EINVAL;

  /* if wipe_block_size < bsize, then a wipe is highly ineffective */

  /* Everything must be aligned to SECTOR_SIZE */
  if (MISALIGNED_512(offset) || MISALIGNED_512(length) || MISALIGNED_512(wipe_block_size))
    return -EINVAL;

  if (device_is_locked(device))
    devfd = device_open_locked(cd, device, O_RDWR);
  else
    devfd = device_open(cd, device, O_RDWR);
  if (devfd < 0)
    return errno ? -errno : -EINVAL;

  if (fstat(devfd, &st) < 0) {
    r = -EINVAL;
    goto out;
  }

  if (length)
    dev_size = offset + length;
  else {
    r = device_size(device, &dev_size);
    if (r)
      goto out;

    if (dev_size <= offset) {
      r = -EINVAL;
      goto out;
    }
  }

  r = posix_memalign((void **)&sf, alignment, wipe_block_size);
  if (r)
    goto out;

  if (lseek(devfd, offset, SEEK_SET) < 0) {
    log_err(cd, _("Cannot seek to device offset."));
    r = -EINVAL;
    goto out;
  }

  if (progress && progress(dev_size, offset, usrptr)) {
    r = -EINVAL; /* No change yet, treat this as a parameter error */
    goto out;
  }

  if (pattern == CRYPT_WIPE_SPECIAL && !device_is_rotational(device)) {
    log_dbg(cd, "Non-rotational device, using random data wipe mode.");
    pattern = CRYPT_WIPE_RANDOM;
  }

  while (offset < dev_size) {
    if ((offset + wipe_block_size) > dev_size)
      wipe_block_size = dev_size - offset;

    r = wipe_block(cd, devfd, pattern, sf, bsize, alignment,
             wipe_block_size, offset, &need_block_init, S_ISBLK(st.st_mode));
    if (r) {
      log_err(cd,_("Device wipe error, offset %" PRIu64 "."), offset);
      break;
    }

    offset += wipe_block_size;

    if (progress && progress(dev_size, offset, usrptr)) {
      r = -EINTR;
      break;
    }
  }

  device_sync(cd, device);
out:
  free(sf);
  return r;
}

int crypt_wipe(struct crypt_device *cd,
  const char *dev_path,
  crypt_wipe_pattern pattern,
  uint64_t offset,
  uint64_t length,
  size_t wipe_block_size,
  uint32_t flags,
  int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
  void *usrptr)
{
  struct device *device;
  int r;

  if (!cd)
    return -EINVAL;

  r = init_crypto(cd);
  if (r < 0)
    return r;

  if (!dev_path)
    device = crypt_data_device(cd);
  else {
    r = device_alloc_no_check(&device, dev_path);
    if (r < 0)
      return r;

    if (flags & CRYPT_WIPE_NO_DIRECT_IO)
      device_disable_direct_io(device);
  }
  if (!device)
    return -EINVAL;

  if (!wipe_block_size)
    wipe_block_size = 1024*1024;

  r = crypt_wipe_device(cd, device, pattern, offset, length,
            wipe_block_size, progress, usrptr);

  if (dev_path)
    device_free(cd, device);

  return r;
}

int crypt_wipe_hw_opal(struct crypt_device *cd,
           int segment,
           const char *password,
           size_t password_size,
           uint32_t flags)
{
  int r;
  struct luks2_hdr *hdr;
  uint32_t opal_segment_number;
  struct crypt_lock_handle *opal_lh = NULL;

  UNUSED(flags);

  if (!cd)
    return -EINVAL;

  if (!password)
    return -EINVAL;

  if (segment < CRYPT_LUKS2_SEGMENT || segment > 8)
    return -EINVAL;

  r = crypt_opal_supported(cd, crypt_data_device(cd));
  if (r < 0)
    return r;

  if (segment == CRYPT_NO_SEGMENT) {
    r = opal_factory_reset(cd, crypt_data_device(cd), password, password_size);
    if (r == -EPERM)
      log_err(cd, _("Incorrect OPAL PSID."));
    else if (r < 0)
      log_err(cd, _("Cannot erase OPAL device."));
    return r;
  }

  if (onlyLUKS2(cd) < 0)
    return -EINVAL;

  hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
  if (!hdr)
    return -EINVAL;

  if (segment == CRYPT_LUKS2_SEGMENT) {
    r = LUKS2_get_opal_segment_number(hdr, CRYPT_DEFAULT_SEGMENT, &opal_segment_number);
    if (r < 0) {
      log_dbg(cd, "Can not get OPAL segment number.");
      return r;
    }
  } else
    opal_segment_number = segment;

  r = opal_exclusive_lock(cd, crypt_data_device(cd), &opal_lh);
  if (r < 0) {
    log_err(cd, _("Failed to acquire OPAL lock on device %s."), device_path(crypt_data_device(cd)));
    return -EINVAL;
  }

  r = opal_reset_segment(cd,
             crypt_data_device(cd),
             opal_segment_number,
             password,
             password_size);

  opal_exclusive_unlock(cd, opal_lh);
  if (r < 0)
    return r;

  return LUKS2_wipe_header_areas(cd, hdr);
}
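
For orientation, the public entry point exercised by callers is crypt_wipe() above. The following is a minimal usage sketch, not taken from this report: it assumes /dev/loop0 is a scratch device that may be destroyed, uses the real libcryptsetup calls crypt_init(), crypt_wipe() and crypt_free(), and keeps error handling to a minimum.

/* Hedged usage sketch: zero the first 1 MiB of an illustrative scratch device. */
#include <libcryptsetup.h>
#include <stdio.h>

int main(void)
{
  struct crypt_device *cd = NULL;
  int r;

  /* Bind a context to the device; with dev_path == NULL below,
   * crypt_wipe() operates on this data device. */
  r = crypt_init(&cd, "/dev/loop0");   /* assumed scratch device */
  if (r < 0)
    return 1;

  /* Offset and length must be 512-byte aligned (MISALIGNED_512 check);
   * wipe_block_size 0 falls back to the 1 MiB default in crypt_wipe(). */
  r = crypt_wipe(cd, NULL, CRYPT_WIPE_ZERO, 0, 1024 * 1024, 0, 0, NULL, NULL);
  if (r < 0)
    fprintf(stderr, "wipe failed: %d\n", r);

  crypt_free(cd);
  return r < 0 ? 1 : 0;
}

Note that, as the implementation shows, CRYPT_WIPE_SPECIAL silently falls back to CRYPT_WIPE_RANDOM on non-rotational devices, and CRYPT_WIPE_ZERO on block devices prefers the BLKZEROOUT ioctl before writing zero blocks manually.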