Coverage Report

Created: 2026-04-11 06:29

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/lvm2/libdm/ioctl/libdm-iface.c
Line
Count
Source
1
/*
2
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
3
 * Copyright (C) 2004-2013 Red Hat, Inc. All rights reserved.
4
 *
5
 * This file is part of the device-mapper userspace tools.
6
 *
7
 * This copyrighted material is made available to anyone wishing to use,
8
 * modify, copy, or redistribute it subject to the terms and conditions
9
 * of the GNU Lesser General Public License v.2.1.
10
 *
11
 * You should have received a copy of the GNU Lesser General Public License
12
 * along with this program; if not, write to the Free Software Foundation,
13
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
14
 */
15
16
#include "libdm/misc/dmlib.h"
17
#include "libdm-targets.h"
18
#include "libdm/libdm-common.h"
19
20
#include <stddef.h>
21
#include <fcntl.h>
22
#include <dirent.h>
23
#include <sys/ioctl.h>
24
#include <sys/utsname.h>
25
#include <limits.h>
26
27
#ifdef __linux__
28
#  include "libdm/misc/kdev_t.h"
29
#  include <linux/limits.h>
30
#else
31
#  define MAJOR(x) major((x))
32
#  define MINOR(x) minor((x))
33
#  define MKDEV(x,y) makedev(((dev_t)x),((dev_t)y))
34
#endif
35
36
#include "libdm/misc/dm-ioctl.h"
37
38
/*
39
 * Ensure build compatibility.  
40
 * The hard-coded versions here are the highest present 
41
 * in the _cmd_data arrays.
42
 */
43
44
#if !((DM_VERSION_MAJOR == 4 && DM_VERSION_MINOR >= 6))
45
#error The version of dm-ioctl.h included is incompatible.
46
#endif
47
48
/* FIXME This should be exported in device-mapper.h */
49
0
#define DM_NAME "device-mapper"
50
51
0
#define PROC_MISC "/proc/misc"
52
0
#define PROC_DEVICES "/proc/devices"
53
0
#define MISC_NAME "misc"
54
55
0
#define NUMBER_OF_MAJORS 4096
56
57
/*
58
 * Static minor number assigned since kernel version 2.6.36.
59
 * The original definition is in kernel's include/linux/miscdevice.h.
60
 * This number is also visible in modules.devname exported by depmod
61
 * utility (support included in module-init-tools version >= 3.12).
62
 */
63
0
#define MAPPER_CTRL_MINOR 236
64
0
#define MISC_MAJOR 10
65
66
/* dm major version no for running kernel */
67
static unsigned _dm_version = DM_VERSION_MAJOR;
static unsigned _dm_version_minor = 0;		/* cached from driver version ioctl */
static unsigned _dm_version_patchlevel = 0;	/* cached from driver version ioctl */
static int _log_suppress = 0;			/* mute logging during version probe */
static struct dm_timestamp *_dm_ioctl_timestamp = NULL;	/* shared last-ioctl timestamp */
static int _dm_warn_inactive_suppress = 0;

/*
 * If the kernel dm driver only supports one major number
 * we store it in _dm_device_major.  Otherwise we indicate
 * which major numbers have been claimed by device-mapper
 * in _dm_bitset.
 */
static unsigned _dm_multiple_major_support = 1;
static dm_bitset_t _dm_bitset = NULL;
static uint32_t _dm_device_major = 0;

static int _control_fd = -1;			/* cached fd for the control node */
static int _hold_control_fd_open = 0;
static int _version_checked = 0;		/* dm_check_version() ran once */
static int _version_ok = 1;			/* result of that check */
static unsigned _ioctl_buffer_double_factor = 0;
89
/* Max ioctl buffer: 16KB << 16 = 1GB */
90
0
#define DM_IOCTL_BUFFER_MAX_DOUBLINGS 16
91
92
/* *INDENT-OFF* */
93
/*
 * Command table for ioctl protocol version 4: maps each libdm command
 * name to its kernel ioctl and the minimum driver version
 * {major, minor, patchlevel} introducing it.  Entries under #ifdef are
 * present only when the included dm-ioctl.h declares that ioctl.
 */
static const struct cmd_data _cmd_data_v4[] = {
  {"create",  DM_DEV_CREATE,    {4, 0, 0}},
  {"reload",  DM_TABLE_LOAD,    {4, 0, 0}},
  {"remove",  DM_DEV_REMOVE,    {4, 0, 0}},
  {"remove_all",  DM_REMOVE_ALL,    {4, 0, 0}},
  {"suspend", DM_DEV_SUSPEND,   {4, 0, 0}},
  {"resume",  DM_DEV_SUSPEND,   {4, 0, 0}},
  {"info",  DM_DEV_STATUS,    {4, 0, 0}},
  {"deps",  DM_TABLE_DEPS,    {4, 0, 0}},
  {"rename",  DM_DEV_RENAME,    {4, 0, 0}},
  {"version", DM_VERSION,   {4, 0, 0}},
  {"status",  DM_TABLE_STATUS,  {4, 0, 0}},
  {"table", DM_TABLE_STATUS,  {4, 0, 0}},
  {"waitevent", DM_DEV_WAIT,    {4, 0, 0}},
  {"names", DM_LIST_DEVICES,  {4, 0, 0}},
  {"clear", DM_TABLE_CLEAR,   {4, 0, 0}},
  {"mknodes", DM_DEV_STATUS,    {4, 0, 0}},
#ifdef DM_LIST_VERSIONS
  {"versions",  DM_LIST_VERSIONS, {4, 1, 0}},
#endif
#ifdef DM_TARGET_MSG
  {"message", DM_TARGET_MSG,    {4, 2, 0}},
#endif
#ifdef DM_DEV_SET_GEOMETRY
  {"setgeometry", DM_DEV_SET_GEOMETRY,  {4, 6, 0}},
#endif
#ifdef DM_DEV_ARM_POLL
  {"armpoll", DM_DEV_ARM_POLL,  {4, 36, 0}},
#endif
#ifdef DM_GET_TARGET_VERSION
  {"target-version", DM_GET_TARGET_VERSION, {4, 41, 0}},
#endif
};
126
/* *INDENT-ON* */
127
128
/* Validate task type against the command table. */
129
static int _validate_task_type(struct dm_task *dmt)
130
0
{
131
0
  if ((unsigned) dmt->type >= DM_ARRAY_SIZE(_cmd_data_v4)) {
132
0
    log_error(INTERNAL_ERROR "unknown device-mapper task %d",
133
0
        dmt->type);
134
0
    return 0;
135
0
  }
136
137
0
  return 1;
138
0
}
139
140
0
#define ALIGNMENT 8
141
142
/* FIXME Rejig library to record & use errno instead */
143
#ifndef DM_EXISTS_FLAG
144
0
#  define DM_EXISTS_FLAG 0x00000004
145
#endif
146
147
/*
 * Round ptr up to the next multiple of a; a must be a power of two.
 * Uses uintptr_t for the pointer round-trip (instead of the historic
 * `register unsigned long`) so the arithmetic is well-defined on any
 * platform, matching the _align_val()/_align_ptr() helpers below.
 */
static char *_align(char *ptr, unsigned int a)
{
  uintptr_t mask = a - 1;

  return (char *) (((uintptr_t) ptr + mask) & ~mask);
}
153
154
/* Running kernel version from uname(2), cached by _uname(). */
static unsigned _kernel_major = 0;
static unsigned _kernel_minor = 0;
static unsigned _kernel_release = 0;
157
158
static int _uname(void)
159
0
{
160
0
  static int _uts_set = 0;
161
0
  struct utsname _uts;
162
0
  int parts;
163
164
0
  if (_uts_set)
165
0
    return 1;
166
167
0
  if (uname(&_uts)) {
168
0
    log_error("uname failed: %s", strerror(errno));
169
0
    return 0;
170
0
  }
171
172
0
  parts = sscanf(_uts.release, "%u.%u.%u",
173
0
           &_kernel_major, &_kernel_minor, &_kernel_release);
174
175
  /* Kernels with a major number of 2 always had 3 parts. */
176
0
  if (parts < 1 || (_kernel_major < 3 && parts < 3)) {
177
0
    log_error("Could not determine kernel version used.");
178
0
    return 0;
179
0
  }
180
181
0
  _uts_set = 1;
182
0
  return 1;
183
0
}
184
185
int get_uname_version(unsigned *major, unsigned *minor, unsigned *release)
186
0
{
187
0
  if (!_uname())
188
0
    return_0;
189
190
0
  *major = _kernel_major;
191
0
  *minor = _kernel_minor;
192
0
  *release = _kernel_release;
193
194
0
  return 1;
195
0
}
196
197
#ifdef DM_IOCTLS
198
199
/*
200
 * Set number to NULL to populate _dm_bitset - otherwise first
201
 * match is returned.
202
 * Returns:
203
 *  0 - error
204
 *  1 - success - number found
205
 *  2 - success - number not found (only if require_module_loaded=0)
206
 */
207
static int _get_proc_number(const char *file, const char *name,
208
          uint32_t *number, int require_module_loaded)
209
0
{
210
0
  FILE *fl;
211
0
  char nm[256];
212
0
  char *line = NULL;
213
0
  size_t len;
214
0
  uint32_t num;
215
0
  unsigned blocksection = (strcmp(file, PROC_DEVICES) == 0) ? 0 : 1;
216
217
0
  if (!(fl = fopen(file, "r"))) {
218
0
    log_sys_error("fopen", file);
219
0
    return 0;
220
0
  }
221
222
0
  while (getline(&line, &len, fl) != -1) {
223
0
    if (!blocksection && (line[0] == 'B'))
224
0
      blocksection = 1;
225
0
    else if (sscanf(line, "%u %255s\n", &num, &nm[0]) == 2) {
226
0
      if (!strcmp(name, nm)) {
227
0
        if (number) {
228
0
          *number = num;
229
0
          if (fclose(fl))
230
0
            log_sys_error("fclose", file);
231
0
          free(line);
232
0
          return 1;
233
0
        }
234
0
        dm_bit_set(_dm_bitset, num);
235
0
      }
236
0
    }
237
0
  }
238
0
  if (fclose(fl))
239
0
    log_sys_error("fclose", file);
240
0
  free(line);
241
242
0
  if (number) {
243
0
    if (require_module_loaded) {
244
0
      log_error("%s: No entry for %s found", file, name);
245
0
      return 0;
246
0
    }
247
248
0
    return 2;
249
0
  }
250
251
0
  return 1;
252
0
}
253
254
static int _control_device_number(uint32_t *major, uint32_t *minor)
255
0
{
256
0
  if (!_get_proc_number(PROC_DEVICES, MISC_NAME, major, 1) ||
257
0
      !_get_proc_number(PROC_MISC, DM_NAME, minor, 1)) {
258
0
    *major = 0;
259
0
    return 0;
260
0
  }
261
262
0
  return 1;
263
0
}
264
265
static int _control_unlink(const char *control)
266
0
{
267
0
  if (unlink(control) && (errno != ENOENT)) {
268
0
    log_sys_error("unlink", control);
269
0
    return -1;
270
0
  }
271
272
0
  return 0;
273
0
}
274
275
/*
276
 * Returns 1 if it exists on returning; 0 if it doesn't; -1 if it's wrong.
277
 */
278
/*
 * Inspect the control node path.
 * Returns 1 if it exists on returning; 0 if it doesn't; -1 if it's wrong
 * and could not be removed.
 */
static int _control_exists(const char *control, uint32_t major, uint32_t minor)
{
  struct stat sb;

  if (stat(control, &sb) < 0) {
    if (errno != ENOENT)
      log_sys_error("stat", control);
    return 0;
  }

  /* Anything other than a character device is unusable - remove it. */
  if (!S_ISCHR(sb.st_mode)) {
    log_verbose("%s: Wrong inode type", control);
    return _control_unlink(control);
  }

  /* major == 0 means "don't verify the device number". */
  if (major && sb.st_rdev != MKDEV(major, minor)) {
    log_verbose("%s: Wrong device number: (%u, %u) instead of "
          "(%u, %u).", control,
          MAJOR(sb.st_rdev), MINOR(sb.st_rdev),
          major, minor);
    return _control_unlink(control);
  }

  return 1;
}
303
304
/*
 * Make sure a correct control char device node exists at 'control',
 * creating the device directory and node if necessary.
 * Returns 1 on success (node exists and is correct), 0 on failure.
 */
static int _create_control(const char *control, uint32_t major, uint32_t minor)
{
  int ret;
  mode_t old_umask;

  /*
   * Return if the control already exists with intended major/minor
   * or there's an error unlinking an apparently incorrect one.
   */
  ret = _control_exists(control, major, minor);
  if (ret == -1)
    return_0; /* Failed to unlink existing incorrect node */
  if (ret)
    return 1; /* Already exists and correct */

  /* Create the device directory with selinux context and tight umask. */
  (void) dm_prepare_selinux_context(dm_dir(), S_IFDIR);
  old_umask = umask(DM_DEV_DIR_UMASK);
  ret = dm_create_dir(dm_dir());
  umask(old_umask);
  (void) dm_prepare_selinux_context(NULL, 0);

  if (!ret)
    return_0;

  log_verbose("Creating device %s (%u, %u)", control, major, minor);

  /* Same umask/selinux bracketing around the mknod itself. */
  (void) dm_prepare_selinux_context(control, S_IFCHR);
  old_umask = umask(DM_CONTROL_NODE_UMASK);
  if (mknod(control, S_IFCHR | S_IRUSR | S_IWUSR,
      MKDEV(major, minor)) < 0)  {
    if (errno != EEXIST) {
      log_sys_error("mknod", control);
      ret = 0;
    } else if (_control_exists(control, major, minor) != 1) {
      /* EEXIST: re-check that what appeared is actually usable. */
      stack; /* Invalid control node created by parallel command ? */
      ret = 0;
    }
  }
  umask(old_umask);
  (void) dm_prepare_selinux_context(NULL, 0);

  return ret;
}
347
#endif
348
349
/*
350
 * FIXME Update bitset in long-running process if dm claims new major numbers.
351
 */
352
/*
353
 * If require_module_loaded=0, caller is responsible to check
354
 * whether _dm_device_major or _dm_bitset is really set. If
355
 * it's not, it means the module is not loaded.
356
 */
357
/*
 * Discover which major number(s) the kernel dm driver has claimed:
 * a single cached major (_dm_device_major) on 2.6+ kernels, or a
 * bitset of majors (_dm_bitset) where multiple majors are supported.
 * Returns 1 on success.
 */
static int _create_dm_bitset(int require_module_loaded)
{
  int r;

#ifdef DM_IOCTLS
  /* Already initialised by an earlier call? */
  if (_dm_bitset || _dm_device_major)
    return 1;

  if (!_uname())
    return 0;

  /*
   * 2.6 kernels are limited to one major number.
   * Assume 2.4 kernels are patched not to.
   * FIXME Check _dm_version and _dm_version_minor if 2.6 changes this.
   */
  if (KERNEL_VERSION(_kernel_major, _kernel_minor, _kernel_release) >=
      KERNEL_VERSION(2, 6, 0))
    _dm_multiple_major_support = 0;

  if (!_dm_multiple_major_support) {
    /* Single major: read it straight from /proc/devices. */
    if (!_get_proc_number(PROC_DEVICES, DM_NAME, &_dm_device_major,
              require_module_loaded))
      return 0;
    return 1;
  }

  /* Multiple major numbers supported */
  if (!(_dm_bitset = dm_bitset_create(NULL, NUMBER_OF_MAJORS)))
    return 0;

  /* NULL 'number' makes _get_proc_number fill _dm_bitset instead. */
  r = _get_proc_number(PROC_DEVICES, DM_NAME, NULL, require_module_loaded);
  if (!r || r == 2) {
    dm_bitset_destroy(_dm_bitset);
    _dm_bitset = NULL;
    /*
     * It's not an error if we didn't find anything and we
     * didn't require module to be loaded at the same time.
     */
    return r == 2;
  }

  return 1;
#else
  return 0;
#endif
}
404
405
int dm_is_dm_major(uint32_t major)
406
0
{
407
0
  if (!_create_dm_bitset(0))
408
0
    return 0;
409
410
0
  if (_dm_multiple_major_support) {
411
0
    if (!_dm_bitset)
412
0
      return 0;
413
0
    return dm_bit(_dm_bitset, major) ? 1 : 0;
414
0
  }
415
416
0
  if (!_dm_device_major)
417
0
    return 0;
418
419
0
  return (major == _dm_device_major) ? 1 : 0;
420
0
}
421
422
static void _close_control_fd(void)
423
5.65k
{
424
5.65k
  if (_control_fd != -1) {
425
0
    if (close(_control_fd) < 0)
426
0
      log_sys_debug("close", "_control_fd");
427
0
    _control_fd = -1;
428
0
  }
429
5.65k
}
430
431
#ifdef DM_IOCTLS
432
static int _open_and_assign_control_fd(const char *control)
433
0
{
434
0
  if ((_control_fd = open(control, O_RDWR)) < 0) {
435
0
    log_sys_error("open", control);
436
0
    return 0;
437
0
  }
438
439
0
  return 1;
440
0
}
441
#endif
442
443
/*
 * Open (creating if necessary) the device-mapper control node and
 * cache the fd in _control_fd; also initialise the dm major lookup.
 * Returns 1 on success.
 */
static int _open_control(void)
{
#ifdef DM_IOCTLS
  char control[PATH_MAX];
  uint32_t major = MISC_MAJOR;
  uint32_t minor = MAPPER_CTRL_MINOR;

  if (_control_fd != -1)
    return 1;

  if (!_uname())
    return 0;

  if (dm_snprintf(control, sizeof(control), "%s/%s", dm_dir(), DM_CONTROL_NODE) < 0)
    goto_bad;

  /*
   * Prior to 2.6.36 the minor number should be looked up in /proc.
   */
  if ((KERNEL_VERSION(_kernel_major, _kernel_minor, _kernel_release) <
       KERNEL_VERSION(2, 6, 36)) &&
      !_control_device_number(&major, &minor))
    goto_bad;

  /*
   * Create the node with correct major and minor if not already done.
   * Udev may already have created /dev/mapper/control
   * from the modules.devname file generated by depmod.
   */
  if (!_create_control(control, major, minor))
    goto_bad;

  /*
   * As of 2.6.36 kernels, the open can trigger autoloading dm-mod.
   */
  if (!_open_and_assign_control_fd(control))
    goto_bad;

  if (!_create_dm_bitset(1)) {
    log_error("Failed to set up list of device-mapper major numbers");
    return 0;
  }

  return 1;

bad:
  log_error("Failure to communicate with kernel device-mapper driver.");
  if (!geteuid())
    log_error("Check that device-mapper is available in the kernel.");
  return 0;
#else
  return 1;
#endif
}
497
498
static void _dm_zfree_string(char *string)
499
0
{
500
0
  if (string) {
501
0
    memset(string, 0, strlen(string));
502
0
    __asm__ volatile ("" ::: "memory"); /* Compiler barrier. */
503
0
    dm_free(string);
504
0
  }
505
0
}
506
507
static void _dm_zfree_dmi(struct dm_ioctl *dmi)
508
0
{
509
0
  if (dmi) {
510
0
    memset(dmi, 0, dmi->data_size);
511
0
    __asm__ volatile ("" ::: "memory"); /* Compiler barrier. */
512
0
    dm_free(dmi);
513
0
  }
514
0
}
515
516
static void _dm_task_free_targets(struct dm_task *dmt)
517
0
{
518
0
  struct target *t, *n;
519
520
0
  for (t = dmt->head; t; t = n) {
521
0
    n = t->next;
522
0
    if (dmt->secure_data)
523
0
      _dm_zfree_string(t->params);
524
0
    else
525
0
      dm_free(t->params);
526
0
    dm_free(t->type);
527
0
    dm_free(t);
528
0
  }
529
530
0
  dmt->head = dmt->tail = NULL;
531
0
}
532
533
void dm_task_destroy(struct dm_task *dmt)
534
0
{
535
0
  _dm_task_free_targets(dmt);
536
0
  if (dmt->secure_data)
537
0
    _dm_zfree_dmi(dmt->dmi.v4);
538
0
  else
539
0
    dm_free(dmt->dmi.v4);
540
0
  dm_free(dmt->dev_name);
541
0
  dm_free(dmt->mangled_dev_name);
542
0
  dm_free(dmt->newname);
543
0
  dm_free(dmt->message);
544
0
  dm_free(dmt->geometry);
545
0
  dm_free(dmt->uuid);
546
0
  dm_free(dmt->mangled_uuid);
547
0
  dm_free(dmt);
548
0
}
549
550
/*
551
 * Protocol Version 4 functions.
552
 */
553
554
int dm_task_get_driver_version(struct dm_task *dmt, char *version, size_t size)
555
0
{
556
0
  unsigned *v;
557
558
0
  if (!dmt->dmi.v4) {
559
0
    if (version)
560
0
      version[0] = '\0';
561
0
    return 0;
562
0
  }
563
564
0
  v = dmt->dmi.v4->version;
565
0
  _dm_version_minor = v[1];
566
0
  _dm_version_patchlevel = v[2];
567
0
  if (version &&
568
0
      (snprintf(version, size, "%u.%u.%u", v[0], v[1], v[2]) < 0)) {
569
0
    log_error("Buffer for version is to short.");
570
0
    if (size > 0)
571
0
      version[0] = '\0';
572
0
    return 0;
573
0
  }
574
575
0
  return 1;
576
0
}
577
578
static int _check_version(char *version, size_t size, int log_suppress)
579
0
{
580
0
  struct dm_task *task;
581
0
  int r;
582
583
0
  if (!(task = dm_task_create(DM_DEVICE_VERSION))) {
584
0
    log_error("Failed to get device-mapper version");
585
0
    version[0] = '\0';
586
0
    return 0;
587
0
  }
588
589
0
  if (log_suppress)
590
0
    _log_suppress = 1;
591
592
0
  r = dm_task_run(task);
593
0
  if (!dm_task_get_driver_version(task, version, size))
594
0
    stack;
595
0
  dm_task_destroy(task);
596
0
  _log_suppress = 0;
597
598
0
  return r;
599
0
}
600
601
/*
602
 * Find out device-mapper's major version number the first time 
603
 * this is called and whether or not we support it.
604
 */
605
int dm_check_version(void)
606
0
{
607
0
  char libversion[64] = "", dmversion[64] = "";
608
0
  const char *compat = "";
609
610
0
  if (_version_checked)
611
0
    return _version_ok;
612
613
0
  _version_checked = 1;
614
615
0
  if (_check_version(dmversion, sizeof(dmversion), 0))
616
0
    return 1;
617
618
0
  dm_get_library_version(libversion, sizeof(libversion));
619
620
0
  log_error("Incompatible libdevmapper %s%s and kernel driver %s.",
621
0
      *libversion ? libversion : "(unknown version)", compat,
622
0
      *dmversion ? dmversion : "(unknown version)");
623
624
0
  _version_ok = 0;
625
0
  return 0;
626
0
}
627
628
int dm_cookie_supported(void)
629
0
{
630
0
  return (dm_check_version() &&
631
0
    ((_dm_version == 4) ? _dm_version_minor >= 15 : _dm_version > 4));
632
0
}
633
634
static int _dm_inactive_supported(void)
635
0
{
636
0
  int inactive_supported = 0;
637
638
0
  if (dm_check_version() && _dm_version >= 4) {
639
0
    if (_dm_version_minor >= 16)
640
0
      inactive_supported = 1; /* upstream */
641
0
    else if (_dm_version_minor == 11 &&
642
0
       (_dm_version_patchlevel >= 6 &&
643
0
        _dm_version_patchlevel <= 40)) {
644
0
      inactive_supported = 1; /* RHEL 5.7 */
645
0
    }
646
0
  }
647
648
0
  return inactive_supported;
649
0
}
650
651
int dm_message_supports_precise_timestamps(void)
652
0
{
653
  /*
654
   * 4.32.0 supports "precise_timestamps" and "histogram:" options
655
   * to @stats_create messages but lacks the ability to report
656
   * these properties via a subsequent @stats_list: require at
657
   * least 4.33.0 in order to use these features.
658
   */
659
0
  if (dm_check_version() && _dm_version >= 4)
660
0
    if (_dm_version_minor >= 33)
661
0
      return 1;
662
0
  return 0;
663
0
}
664
665
void *dm_get_next_target(struct dm_task *dmt, void *next,
666
       uint64_t *start, uint64_t *length,
667
       char **target_type, char **params)
668
0
{
669
0
  struct target *t = (struct target *) next;
670
671
0
  if (!t)
672
0
    t = dmt->head;
673
674
0
  if (!t) {
675
0
    *start = 0;
676
0
    *length = 0;
677
0
    *target_type = 0;
678
0
    *params = 0;
679
0
    return NULL;
680
0
  }
681
682
0
  *start = t->start;
683
0
  *length = t->length;
684
0
  *target_type = t->type;
685
0
  *params = t->params;
686
687
0
  return t->next;
688
0
}
689
690
/* Unmarshal the target info returned from a status call */
691
/*
 * Rebuild dmt's target list from the dm_target_spec records packed
 * after the dm_ioctl header in the kernel's status reply.
 * Returns 1 on success.
 */
static int _unmarshal_status(struct dm_task *dmt, struct dm_ioctl *dmi)
{
  char *outbuf = (char *) dmi + dmi->data_start;
  char *outptr = outbuf;
  uint32_t i;
  struct dm_target_spec *spec;

  /* Drop any stale targets before repopulating. */
  _dm_task_free_targets(dmt);

  for (i = 0; i < dmi->target_count; i++) {
    spec = (struct dm_target_spec *) outptr;
    /* The params string directly follows each spec header. */
    if (!dm_task_add_target(dmt, spec->sector_start,
          spec->length,
          spec->target_type,
          outptr + sizeof(*spec))) {
      return 0;
    }

    /* spec->next is an offset from the start of the data area. */
    outptr = outbuf + spec->next;
  }

  return 1;
}
714
715
/*
 * Write "major:minor" into buf.  Returns 1 on success, 0 if bufsize is
 * under 8 bytes or the formatted string does not fit.
 */
int dm_format_dev(char *buf, int bufsize, uint32_t dev_major,
      uint32_t dev_minor)
{
  int n;

  if (bufsize < 8)
    return 0;

  n = snprintf(buf, (size_t) bufsize, "%u:%u", dev_major, dev_minor);

  return (n < 0 || n > bufsize - 1) ? 0 : 1;
}
729
730
/*
 * Fill 'info' from the dm_ioctl header of dmt's last ioctl result.
 * Returns 0 when no result is present; otherwise 1, with info->exists
 * telling whether the device exists (the remaining fields are only
 * populated when it does).
 */
DM_EXPORT_NEW_SYMBOL(int, dm_task_get_info, 1_02_97)
  (struct dm_task *dmt, struct dm_info *info)
{
  if (!dmt->dmi.v4)
    return 0;

  memset(info, 0, sizeof(*info));

  info->exists = dmt->dmi.v4->flags & DM_EXISTS_FLAG ? 1 : 0;
  if (!info->exists)
    return 1;

  /* Decode the driver status flags into individual booleans. */
  info->suspended = dmt->dmi.v4->flags & DM_SUSPEND_FLAG ? 1 : 0;
  info->read_only = dmt->dmi.v4->flags & DM_READONLY_FLAG ? 1 : 0;
  info->live_table = dmt->dmi.v4->flags & DM_ACTIVE_PRESENT_FLAG ? 1 : 0;
  info->inactive_table = dmt->dmi.v4->flags & DM_INACTIVE_PRESENT_FLAG ?
      1 : 0;
  info->deferred_remove = dmt->dmi.v4->flags & DM_DEFERRED_REMOVE;
  info->internal_suspend = (dmt->dmi.v4->flags & DM_INTERNAL_SUSPEND_FLAG) ? 1 : 0;
  info->target_count = dmt->dmi.v4->target_count;
  info->open_count = dmt->dmi.v4->open_count;
  info->event_nr = dmt->dmi.v4->event_nr;
  info->major = MAJOR(dmt->dmi.v4->dev);
  info->minor = MINOR(dmt->dmi.v4->dev);

  return 1;
}
757
758
uint32_t dm_task_get_read_ahead(const struct dm_task *dmt, uint32_t *read_ahead)
759
0
{
760
0
  const char *dev_name;
761
762
0
  *read_ahead = 0;
763
764
0
  if (!dmt->dmi.v4 || !(dmt->dmi.v4->flags & DM_EXISTS_FLAG))
765
0
    return 0;
766
767
0
  if (*dmt->dmi.v4->name)
768
0
    dev_name = dmt->dmi.v4->name;
769
0
  else if (!(dev_name = DEV_NAME(dmt))) {
770
0
    log_error("Get read ahead request failed: device name unrecorded.");
771
0
    return 0;
772
0
  }
773
774
0
  return get_dev_node_read_ahead(dev_name, MAJOR(dmt->dmi.v4->dev),
775
0
               MINOR(dmt->dmi.v4->dev), read_ahead);
776
0
}
777
778
struct dm_deps *dm_task_get_deps(struct dm_task *dmt)
779
0
{
780
0
  if (!dmt) {
781
0
    log_error(INTERNAL_ERROR "Missing dm_task.");
782
0
    return NULL;
783
0
  }
784
785
0
  return (struct dm_deps *) (((char *) dmt->dmi.v4) +
786
0
           dmt->dmi.v4->data_start);
787
0
}
788
789
/*
790
 * Round up the ptr to an 8-byte boundary.
791
 * Follow kernel pattern.
792
 */
793
0
#define ALIGN_MASK 7
/* Round val up to an 8-byte boundary (kernel alignment pattern). */
static size_t _align_val(size_t val)
{
  return (val + ALIGN_MASK) & ~(size_t) ALIGN_MASK;
}
/* Round ptr up to an 8-byte boundary. */
static void *_align_ptr(void *ptr)
{
  return (void *)(uintptr_t)_align_val((size_t)ptr);
}
802
803
0
static int _check_has_event_nr(void) {
804
0
  static int _has_event_nr = -1;
805
806
0
  if (_has_event_nr < 0)
807
0
    _has_event_nr = dm_check_version() &&
808
0
      ((_dm_version == 4) ?  _dm_version_minor >= 38 : _dm_version > 4);
809
810
0
  return _has_event_nr;
811
0
}
812
813
struct dm_names *dm_task_get_names(struct dm_task *dmt)
814
0
{
815
0
  return (struct dm_names *) (((char *) dmt->dmi.v4) +
816
0
            dmt->dmi.v4->data_start);
817
0
}
818
819
struct dm_versions *dm_task_get_versions(struct dm_task *dmt)
820
0
{
821
0
  return (struct dm_versions *) (((char *) dmt->dmi.v4) +
822
0
               dmt->dmi.v4->data_start);
823
0
}
824
825
/*
 * Build a list of active dm devices from the DM_LIST_DEVICES ioctl
 * result held in dmt.  A single allocation carries the list head, the
 * dm_active_device nodes and copies of the name/uuid strings; release
 * it with dm_device_list_destroy().  devs_features reports which
 * optional fields (event_nr, uuid) the running kernel supplied.
 * Returns 1 on success.
 */
int dm_task_get_device_list(struct dm_task *dmt, struct dm_list **devs_list,
          unsigned *devs_features)
{
  struct dm_names *names, *names1;
  struct dm_active_device *dm_dev, *dm_new_dev;
  struct dm_list *devs;
  unsigned next = 0;
  uint32_t *event_nr;
  char *uuid_ptr;
  size_t len;
  int cnt = 0;

  *devs_list = 0;
  *devs_features = 0;

  /* First pass: count the entries in the kernel's packed name list. */
  if ((names = dm_task_get_names(dmt)) && names->dev) {
    names1 = names;
    if (!names->name[0])
      cnt = -1; /* -> cnt == 0 when no device is really present */
    do {
      names1 = (struct dm_names *)((char *) names1 + next);
      next = names1->next;
      ++cnt;
    } while (next);
  }

  /* buffer for devs +  sorted ptrs + dm_devs + aligned strings */
  if (!(devs = malloc(sizeof(*devs) + cnt * (2 * sizeof(void*) + sizeof(*dm_dev)) +
          (cnt ? (char*)names1 - (char*)names + 256 : 0))))
    return_0;

  dm_list_init(devs);

  if (!cnt) {
    /* nothing in the list -> mark all features present */
    *devs_features |= (DM_DEVICE_LIST_HAS_EVENT_NR | DM_DEVICE_LIST_HAS_UUID);
    goto out; /* nothing else to do */
  }

  /* Shift position where to store individual dm_devs */
  dm_dev = (struct dm_active_device *) ((long*) (devs + 1) + cnt);

  /* Second pass: copy each entry into the single allocation. */
  do {
    names = (struct dm_names *)((char *) names + next);

    dm_dev->devno = (dev_t) names->dev;
    dm_dev->name = (const char *)(dm_dev + 1);
    dm_dev->event_nr = 0;
    dm_dev->uuid = "";

    /* Copy the name just past the node it belongs to. */
    len = strlen(names->name) + 1;
    memcpy((char*)dm_dev->name, names->name, len);

    dm_new_dev = _align_ptr((char*)(dm_dev + 1) + len);
    if (_check_has_event_nr()) {
      /* Kernel appends the event number after the name (dm 4.38+). */
      *devs_features |= DM_DEVICE_LIST_HAS_EVENT_NR;
      event_nr = _align_ptr(names->name + len);
      dm_dev->event_nr = event_nr[0];

      if ((event_nr[1] & DM_NAME_LIST_FLAG_HAS_UUID)) {
        /* A UUID string follows the event number block. */
        *devs_features |= DM_DEVICE_LIST_HAS_UUID;
        uuid_ptr = _align_ptr(event_nr + 2);
        len = strlen(uuid_ptr) + 1;
        memcpy(dm_new_dev, uuid_ptr, len);
        dm_dev->uuid = (const char *) dm_new_dev;
        dm_new_dev = _align_ptr((char*)dm_new_dev + len);
      }
    }

    dm_list_add(devs, &dm_dev->list);
    dm_dev = dm_new_dev; /* next node starts after this node's strings */
    next = names->next;
  } while (next);

    out:
  *devs_list = devs;

  return 1;
}
905
906
/* Free a device list built by dm_task_get_device_list() and clear the pointer. */
void dm_device_list_destroy(struct dm_list **devs_list)
{
  struct dm_device_list *devs = (struct dm_device_list *) *devs_list;

  if (!devs)
    return;

  /* The whole list lives in one allocation. */
  free(devs);
  *devs_list = NULL;
}
915
916
/*
 * Compare two device lists entry by entry on (devno, uuid).
 * Returns 1 when they hold identical entries in the same order
 * (or are the same NULL pointer), 0 otherwise.
 */
int dm_device_list_equal(const struct dm_list *list1, const struct dm_list *list2)
{
  const struct dm_active_device *dev1, *dev2;
  const struct dm_list *l2;

  if (!list1 || !list2)
    return (list1 == list2);

  if (!(l2 = dm_list_first(list2)))
    return dm_list_empty(list1);

  /* Walk both lists in lock-step. */
  dm_list_iterate_items(dev1, list1) {
    dev2 = dm_list_item(l2, struct dm_active_device);
    if ((dev1->devno != dev2->devno) || strcmp(dev1->uuid, dev2->uuid))
      return 0;
    /* list2 exhausted: equal only if list1 ends here too. */
    if (!(l2 = dm_list_next(list2, l2)))
      return !dm_list_next(list1, &dev1->list);
  }

  return 1;
}
937
938
const char *dm_task_get_message_response(struct dm_task *dmt)
939
0
{
940
0
  const char *start, *end;
941
942
0
  if (!(dmt->dmi.v4->flags & DM_DATA_OUT_FLAG))
943
0
    return NULL;
944
945
0
  start = (const char *) dmt->dmi.v4 + dmt->dmi.v4->data_start;
946
0
  end = (const char *) dmt->dmi.v4 + dmt->dmi.v4->data_size;
947
948
0
  if (end < start) {
949
0
    log_error(INTERNAL_ERROR "Corrupted message structure returned: start %d > end %d", (int)dmt->dmi.v4->data_start, (int)dmt->dmi.v4->data_size);
950
0
    return NULL;
951
0
  }
952
953
0
  if (!memchr(start, 0, end - start)) {
954
0
    log_error(INTERNAL_ERROR "Message response doesn't contain terminating NUL character");
955
0
    return NULL;
956
0
  }
957
958
0
  return start;
959
0
}
960
961
int dm_task_set_ro(struct dm_task *dmt)
962
0
{
963
0
  dmt->read_only = 1;
964
0
  return 1;
965
0
}
966
967
int dm_task_set_read_ahead(struct dm_task *dmt, uint32_t read_ahead,
968
         uint32_t read_ahead_flags)
969
0
{
970
0
  dmt->read_ahead = read_ahead;
971
0
  dmt->read_ahead_flags = read_ahead_flags;
972
973
0
  return 1;
974
0
}
975
976
int dm_task_suppress_identical_reload(struct dm_task *dmt)
977
0
{
978
0
  dmt->suppress_identical_reload = 1;
979
0
  return 1;
980
0
}
981
982
int dm_task_set_add_node(struct dm_task *dmt, dm_add_node_t add_node)
983
0
{
984
0
  switch (add_node) {
985
0
  case DM_ADD_NODE_ON_RESUME:
986
0
  case DM_ADD_NODE_ON_CREATE:
987
0
    dmt->add_node = add_node;
988
0
    return 1;
989
0
  default:
990
0
    log_error("Unknown add node parameter");
991
0
    return 0;
992
0
  }
993
0
}
994
995
/*
 * Store the new UUID for a rename operation, mangling it first when
 * the current mangling mode requires.  Returns 1 on success.
 */
int dm_task_set_newuuid(struct dm_task *dmt, const char *newuuid)
{
  dm_string_mangling_t mangling_mode = dm_get_name_mangling_mode();
  char mangled_uuid[DM_UUID_LEN];
  int r = 0;

  if (strlen(newuuid) >= DM_UUID_LEN) {
    log_error("Uuid \"%s\" too long", newuuid);
    return 0;
  }

  if (!check_multiple_mangled_string_allowed(newuuid, "new UUID", mangling_mode))
    return_0;

  /* r > 0 afterwards means mangling actually changed the string. */
  if (mangling_mode != DM_STRING_MANGLING_NONE &&
      (r = mangle_string(newuuid, "new UUID", strlen(newuuid), mangled_uuid,
             sizeof(mangled_uuid), mangling_mode)) < 0) {
    log_error("Failed to mangle new device UUID \"%s\"", newuuid);
    return 0;
  }

  if (r) {
    log_debug_activation("New device uuid mangled [%s]: %s --> %s",
             mangling_mode == DM_STRING_MANGLING_AUTO ? "auto" : "hex",
             newuuid, mangled_uuid);
    newuuid = mangled_uuid;
  }

  /* The new UUID shares the newname slot used by rename. */
  dm_free(dmt->newname);
  if (!(dmt->newname = dm_strdup(newuuid))) {
    log_error("dm_task_set_newuuid: strdup(%s) failed", newuuid);
    return 0;
  }
  dmt->new_uuid = 1;

  return 1;
}
1032
1033
int dm_task_set_message(struct dm_task *dmt, const char *message)
1034
0
{
1035
0
  dm_free(dmt->message);
1036
0
  if (!(dmt->message = dm_strdup(message))) {
1037
0
    log_error("dm_task_set_message: strdup failed");
1038
0
    return 0;
1039
0
  }
1040
1041
0
  return 1;
1042
0
}
1043
1044
int dm_task_set_sector(struct dm_task *dmt, uint64_t sector)
1045
0
{
1046
0
  dmt->sector = sector;
1047
1048
0
  return 1;
1049
0
}
1050
1051
int dm_task_set_geometry(struct dm_task *dmt, const char *cylinders, const char *heads,
1052
       const char *sectors, const char *start)
1053
0
{
1054
0
  dm_free(dmt->geometry);
1055
0
  if (dm_asprintf(&(dmt->geometry), "%s %s %s %s",
1056
0
      cylinders, heads, sectors, start) < 0) {
1057
0
    log_error("dm_task_set_geometry: sprintf failed");
1058
0
    return 0;
1059
0
  }
1060
1061
0
  return 1;
1062
0
}
1063
1064
int dm_task_no_flush(struct dm_task *dmt)
1065
0
{
1066
0
  dmt->no_flush = 1;
1067
1068
0
  return 1;
1069
0
}
1070
1071
int dm_task_no_open_count(struct dm_task *dmt)
1072
0
{
1073
0
  dmt->no_open_count = 1;
1074
1075
0
  return 1;
1076
0
}
1077
1078
int dm_task_skip_lockfs(struct dm_task *dmt)
1079
0
{
1080
0
  dmt->skip_lockfs = 1;
1081
1082
0
  return 1;
1083
0
}
1084
1085
int dm_task_secure_data(struct dm_task *dmt)
1086
0
{
1087
0
  dmt->secure_data = 1;
1088
1089
0
  return 1;
1090
0
}
1091
1092
/* Request DM_IMA_MEASUREMENT_FLAG (needs dm driver >= 4.45; see _flatten). */
int dm_task_ima_measurement(struct dm_task *dmt)
{
	dmt->ima_measurement = 1;

	return 1;
}
1098
1099
/* Mark the task so a busy remove is retried — flag is consumed outside this
 * chunk of the file; TODO confirm against dm_task_run's remove handling. */
int dm_task_retry_remove(struct dm_task *dmt)
{
	dmt->retry_remove = 1;

	return 1;
}
1105
1106
/* Request DM_DEFERRED_REMOVE for REMOVE/REMOVE_ALL ioctls (see _flatten). */
int dm_task_deferred_remove(struct dm_task *dmt)
{
	dmt->deferred_remove = 1;

	return 1;
}
1112
1113
/* Request DM_QUERY_INACTIVE_TABLE_FLAG; older kernels fall back to the
 * live table with a warning (see _flatten). */
int dm_task_query_inactive_table(struct dm_task *dmt)
{
	dmt->query_inactive_table = 1;

	return 1;
}
1119
1120
/* Set the event number.  The same field also carries the udev cookie and
 * its flags for some ioctls (see _flatten and _udev_complete). */
int dm_task_set_event_nr(struct dm_task *dmt, uint32_t event_nr)
{
	dmt->event_nr = event_nr;

	return 1;
}
1126
1127
int dm_task_set_record_timestamp(struct dm_task *dmt)
1128
0
{
1129
0
  if (!_dm_ioctl_timestamp)
1130
0
    _dm_ioctl_timestamp = dm_timestamp_alloc();
1131
1132
0
  if (!_dm_ioctl_timestamp)
1133
0
    return_0;
1134
1135
0
  dmt->record_timestamp = 1;
1136
1137
0
  return 1;
1138
0
}
1139
1140
struct dm_timestamp *dm_task_get_ioctl_timestamp(struct dm_task *dmt)
1141
0
{
1142
0
  return dmt->record_timestamp ? _dm_ioctl_timestamp : NULL;
1143
0
}
1144
1145
struct target *create_target(uint64_t start, uint64_t len, const char *type,
1146
           const char *params)
1147
0
{
1148
0
  struct target *t;
1149
1150
0
  if (strlen(type) >= DM_MAX_TYPE_NAME) {
1151
0
    log_error("Target type name %s is too long.", type);
1152
0
    return NULL;
1153
0
  }
1154
1155
0
  if (!(t = dm_zalloc(sizeof(*t)))) {
1156
0
    log_error("create_target: malloc(%" PRIsize_t ") failed",
1157
0
        sizeof(*t));
1158
0
    return NULL;
1159
0
  }
1160
1161
0
  if (!(t->params = dm_strdup(params))) {
1162
0
    log_error("create_target: strdup(params) failed");
1163
0
    goto bad;
1164
0
  }
1165
1166
0
  if (!(t->type = dm_strdup(type))) {
1167
0
    log_error("create_target: strdup(type) failed");
1168
0
    goto bad;
1169
0
  }
1170
1171
0
  t->start = start;
1172
0
  t->length = len;
1173
0
  return t;
1174
1175
0
      bad:
1176
0
  _dm_zfree_string(t->params);
1177
0
  dm_free(t->type);
1178
0
  dm_free(t);
1179
0
  return NULL;
1180
0
}
1181
1182
/*
 * Serialise one target into the ioctl buffer at 'out', bounded by 'end'.
 * Layout: struct dm_target_spec header, then the NUL-terminated params
 * string (with every '\' doubled), then alignment padding.
 * Returns the new write position, or NULL on overflow / bad type name.
 */
static char *_add_target(struct target *t, char *out, char *end)
{
	char *out_sp = out;	/* remember where the spec header goes */
	struct dm_target_spec sp;
	size_t sp_size = sizeof(struct dm_target_spec);
	unsigned int backslash_count = 0;
	int len;
	char *pt;

	if (strlen(t->type) >= sizeof(sp.target_type)) {
		log_error("Target type name %s is too long.", t->type);
		return NULL;
	}

	sp.status = 0;
	sp.sector_start = t->start;
	sp.length = t->length;
	strncpy(sp.target_type, t->type, sizeof(sp.target_type) - 1);
	sp.target_type[sizeof(sp.target_type) - 1] = '\0';

	out += sp_size;
	pt = t->params;

	/* Count backslashes: each one is written escaped as "\\" below. */
	while (*pt)
		if (*pt++ == '\\')
			backslash_count++;

	len = strlen(t->params) + 1;

	/* Check the escaped params (plus NUL) still fit before 'end'. */
	if ((out >= end) || (out + len + backslash_count) >= end) {
		log_error("Ran out of memory building ioctl parameter");
		return NULL;
	}

	if (backslash_count) {
		/* replace "\" with "\\" */
		pt = t->params;
		do {
			if (*pt == '\\')
				*out++ = '\\';
			*out++ = *pt++;
		} while (*pt);
		*out++ = '\0';
	}
	else {
		memcpy(out, t->params, len);
		out += len + backslash_count;	/* backslash_count is 0 here */
	}

	/* align next block */
	out = _align(out, ALIGNMENT);

	/* Header records the byte offset to the next spec, then is written
	 * back at the position saved before the params. */
	sp.next = out - out_sp;
	memcpy(out_sp, &sp, sp_size);

	return out;
}
1239
1240
/*
 * Resolve a kernel dev number to its dm device name by scanning the
 * DM_DEVICE_LIST result.  Copies at most 'len' bytes into 'buf'.
 * Returns 1 if found, 0 otherwise.
 * NOTE(review): if the name is >= len bytes, memccpy leaves buf without a
 * terminating NUL — callers pass sizeof(dmi->name); confirm that is enough.
 */
static int _lookup_dev_name(uint64_t dev, char *buf, size_t len)
{
	struct dm_names *names;
	unsigned next = 0;
	struct dm_task *dmt;
	int r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_LIST)))
		return 0;

	if (!dm_task_run(dmt))
		goto out;

	if (!(names = dm_task_get_names(dmt)))
		goto out;

	/* A zero dev in the first entry means the list is empty. */
	if (!names->dev)
		goto out;

	/* Entries are chained by byte offsets in 'next'; 0 terminates. */
	do {
		names = (struct dm_names *)((char *) names + next);
		if (names->dev == dev) {
			memccpy(buf, names->name, 0, len);
			r = 1;
			break;
		}
		next = names->next;
	} while (next);

      out:
	dm_task_destroy(dmt);
	return r;
}
1273
1274
static int _add_params(int type)
1275
0
{
1276
0
  switch (type) {
1277
0
  case DM_DEVICE_REMOVE_ALL:
1278
0
  case DM_DEVICE_CREATE:
1279
0
  case DM_DEVICE_REMOVE:
1280
0
  case DM_DEVICE_SUSPEND:
1281
0
  case DM_DEVICE_STATUS:
1282
0
  case DM_DEVICE_CLEAR:
1283
0
  case DM_DEVICE_ARM_POLL:
1284
0
    return 0; /* IOCTL_FLAGS_NO_PARAMS in drivers/md/dm-ioctl.c */
1285
0
  default:
1286
0
    return 1;
1287
0
  }
1288
0
}
1289
1290
/*
 * Build the flat struct dm_ioctl buffer for this task: header, flags,
 * device identification, then targets / rename string / target message /
 * geometry as appropriate.  'repeat_count' doubles the buffer size for
 * each earlier attempt that came back truncated.
 * Returns the allocated buffer (caller frees via _dm_zfree_dmi) or NULL.
 */
static struct dm_ioctl *_flatten(struct dm_task *dmt, unsigned repeat_count)
{
	size_t min_size;
	const int (*version)[3];

	struct dm_ioctl *dmi;
	struct target *t;
	struct dm_target_msg *tmsg;
	size_t len = sizeof(struct dm_ioctl);
	size_t message_len = 0, newname_len = 0, geometry_len = 0;
	char *b, *e;
	int count = 0;

	/* Size pass: account for every target spec plus its params. */
	if (_add_params(dmt->type))
		for (t = dmt->head; t; t = t->next) {
			len += sizeof(struct dm_target_spec);
			len += strlen(t->params) + 1 + ALIGNMENT;
			count++;
		}
	else if (dmt->head)
		log_debug_activation(INTERNAL_ERROR "dm '%s' ioctl should not define parameters.",
				     _cmd_data_v4[dmt->type].name);
	/* Result-heavy ioctls get a larger minimum output buffer. */
	switch (dmt->type) {
	case DM_DEVICE_CREATE:
	case DM_DEVICE_DEPS:
	case DM_DEVICE_LIST:
	case DM_DEVICE_STATUS:
	case DM_DEVICE_TABLE:
	case DM_DEVICE_TARGET_MSG:
		min_size = 16 * 1024;
		break;
	default:
		min_size = 2 * 1024;
	}

	/* Reject combinations of payloads the ioctl cannot carry at once. */
	if (count && (dmt->sector || dmt->message)) {
		log_error("targets and message are incompatible");
		return NULL;
	}

	if (count && dmt->newname) {
		log_error("targets and rename are incompatible");
		return NULL;
	}

	if (count && dmt->geometry) {
		log_error("targets and geometry are incompatible");
		return NULL;
	}

	if (dmt->newname && (dmt->sector || dmt->message)) {
		log_error("message and rename are incompatible");
		return NULL;
	}

	if (dmt->newname && dmt->geometry) {
		log_error("geometry and rename are incompatible");
		return NULL;
	}

	if (dmt->geometry && (dmt->sector || dmt->message)) {
		log_error("geometry and message are incompatible");
		return NULL;
	}

	if (dmt->sector && !dmt->message) {
		log_error("message is required with sector");
		return NULL;
	}

	if (dmt->newname) {
		newname_len = strlen(dmt->newname) + 1;
		len += newname_len;
	}

	if (dmt->message) {
		message_len = strlen(dmt->message) + 1;
		len += sizeof(struct dm_target_msg) + message_len;
	}

	if (dmt->geometry) {
		geometry_len = strlen(dmt->geometry) + 1;
		len += geometry_len;
	}

	/*
	 * Give len a minimum size so that we have space to store
	 * dependencies or status information.
	 */
	if (len < min_size)
		len = min_size;

	/* Increase buffer size if repeating because buffer was too small */
	while (repeat_count--)
		len *= 2;

	if (!(dmi = dm_zalloc(len)))
		return NULL;

	version = &_cmd_data_v4[dmt->type].version;

	dmi->version[0] = (*version)[0];
	dmi->version[1] = (*version)[1];
	dmi->version[2] = (*version)[2];

	dmi->data_size = len;
	dmi->data_start = sizeof(struct dm_ioctl);

	if (dmt->minor >= 0) {
		/* Without multi-major support, force the canonical dm major. */
		if (!_dm_multiple_major_support && dmt->allow_default_major_fallback &&
		    dmt->major != (int) _dm_device_major) {
			log_verbose("Overriding major number of %d "
				    "with %u for persistent device.",
				    dmt->major, _dm_device_major);
			dmt->major = _dm_device_major;
		}

		if (dmt->major <= 0) {
			log_error("Missing major number for persistent device.");
			goto bad;
		}

		dmi->flags |= DM_PERSISTENT_DEV_FLAG;
		dmi->dev = MKDEV(dmt->major, dmt->minor);
	}

	/* Does driver support device number referencing? */
	if (_dm_version_minor < 3 && !DEV_NAME(dmt) && !DEV_UUID(dmt) && dmi->dev) {
		if (!_lookup_dev_name(dmi->dev, dmi->name, sizeof(dmi->name))) {
			log_error("Unable to find name for device (%" PRIu32
				  ":%" PRIu32 ")", dmt->major, dmt->minor);
			goto bad;
		}
		log_verbose("device (%" PRIu32 ":%" PRIu32 ") is %s "
			    "for compatibility with old kernel",
			    dmt->major, dmt->minor, dmi->name);
	}

	/* FIXME Until resume ioctl supplies name, use dev_name for readahead */
	if (DEV_NAME(dmt) &&
	    (((dmt->type != DM_DEVICE_RESUME) &&
	      (dmt->type != DM_DEVICE_RELOAD) &&
	      (dmt->type != DM_DEVICE_REMOVE)) ||
	     (dmt->minor < 0) || (dmt->major < 0)))
		/* When RESUME, RELOAD or REMOVE sets maj:min and dev_name,
		 * use just maj:min for the ioctl; dev_name stays on dmt
		 * for error/debug messages and _dm_task_node_ops */
		memccpy(dmi->name, DEV_NAME(dmt), 0, sizeof(dmi->name));

	if (DEV_UUID(dmt))
		memccpy(dmi->uuid, DEV_UUID(dmt), 0, sizeof(dmi->uuid));

	/* Translate task settings into ioctl flags, honouring driver version. */
	if (dmt->type == DM_DEVICE_SUSPEND)
		dmi->flags |= DM_SUSPEND_FLAG;
	if (dmt->no_flush) {
		if (_dm_version_minor < 12)
			log_verbose("No flush flag unsupported by kernel. "
				    "Buffers will be flushed.");
		else
			dmi->flags |= DM_NOFLUSH_FLAG;
	}
	if (dmt->read_only)
		dmi->flags |= DM_READONLY_FLAG;
	if (dmt->skip_lockfs)
		dmi->flags |= DM_SKIP_LOCKFS_FLAG;
	if (dmt->deferred_remove && (dmt->type == DM_DEVICE_REMOVE || dmt->type == DM_DEVICE_REMOVE_ALL))
		dmi->flags |= DM_DEFERRED_REMOVE;

	if (dmt->secure_data) {
		if (_dm_version_minor < 20)
			log_verbose("Secure data flag unsupported by kernel. "
				    "Buffers will not be wiped after use.");
		dmi->flags |= DM_SECURE_DATA_FLAG;
	}
	if (dmt->query_inactive_table) {
		if (!_dm_inactive_supported())
			log_warn_suppress(_dm_warn_inactive_suppress++,
					  "WARNING: Inactive table query unsupported by kernel. "
					  "It will use live table.");
		dmi->flags |= DM_QUERY_INACTIVE_TABLE_FLAG;
	}
	if (dmt->new_uuid) {
		if (_dm_version_minor < 19) {
			log_error("Setting UUID unsupported by kernel. "
				  "Aborting operation.");
			goto bad;
		}
		dmi->flags |= DM_UUID_FLAG;
	}
	if (dmt->ima_measurement) {
		if (_dm_version_minor < 45) {
			log_error("IMA measurement unsupported by kernel. "
				  "Aborting operation.");
			goto bad;
		}
		dmi->flags |= DM_IMA_MEASUREMENT_FLAG;
	}

	dmi->target_count = count;
	dmi->event_nr = dmt->event_nr;

	/* Payload area: b walks forward, e bounds the buffer. */
	b = (char *) (dmi + 1);
	e = (char *) dmi + len;

	if (_add_params(dmt->type))
		for (t = dmt->head; t; t = t->next)
			if (!(b = _add_target(t, b, e)))
				goto_bad;

	if (dmt->newname)
		memcpy(b, dmt->newname, newname_len);

	if (dmt->message) {
		tmsg = (struct dm_target_msg *) b;
		tmsg->sector = dmt->sector;
		memcpy(tmsg->message, dmt->message, message_len);
	}

	if (dmt->geometry)
		memcpy(b, dmt->geometry, geometry_len);

	return dmi;

      bad:
	_dm_zfree_dmi(dmi);
	return NULL;
}
1517
1518
static int _process_mapper_dir(struct dm_task *dmt)
1519
0
{
1520
0
  struct dirent *dirent;
1521
0
  DIR *d;
1522
0
  const char *dir;
1523
0
  int r = 1;
1524
1525
0
  dir = dm_dir();
1526
0
  if (!(d = opendir(dir))) {
1527
0
    log_sys_error("opendir", dir);
1528
0
    return 0;
1529
0
  }
1530
1531
0
  while ((dirent = readdir(d))) {
1532
0
    if (!strcmp(dirent->d_name, ".") ||
1533
0
        !strcmp(dirent->d_name, "..") ||
1534
0
        !strcmp(dirent->d_name, "control"))
1535
0
      continue;
1536
0
    if (!dm_task_set_name(dmt, dirent->d_name)) {
1537
0
      r = 0;
1538
0
      stack;
1539
0
      continue; /* try next name */
1540
0
    }
1541
0
    if (!dm_task_run(dmt)) {
1542
0
      r = 0;
1543
0
      stack;  /* keep going */
1544
0
    }
1545
0
  }
1546
1547
0
  if (closedir(d))
1548
0
    log_sys_debug("closedir", dir);
1549
1550
0
  return r;
1551
0
}
1552
1553
/*
 * Run 'dmt' once for every device the kernel reports via DM_DEVICE_LIST.
 * Returns 1 if every run succeeded (or no devices exist), 0 otherwise.
 */
static int _process_all_v4(struct dm_task *dmt)
{
	struct dm_task *task;
	struct dm_names *names;
	unsigned next = 0;
	int r = 1;

	if (!(task = dm_task_create(DM_DEVICE_LIST)))
		return 0;

	if (!dm_task_run(task)) {
		r = 0;
		goto out;
	}

	if (!(names = dm_task_get_names(task))) {
		r = 0;
		goto out;
	}

	/* Zero dev in the first entry means no devices — still success. */
	if (!names->dev)
		goto out;

	/* Entries are chained by byte offsets in 'next'; 0 terminates. */
	do {
		names = (struct dm_names *)((char *) names + next);
		if (!dm_task_set_name(dmt, names->name)) {
			r = 0;
			goto out;
		}
		if (!dm_task_run(dmt))
			r = 0;	/* remember failure but keep going */
		next = names->next;
	} while (next);

      out:
	dm_task_destroy(task);
	return r;
}
1591
1592
/* Recreate nodes: sweep the mapper directory, then every kernel-listed device. */
static int _mknodes_v4(struct dm_task *dmt)
{
	/* Directory pass result is deliberately ignored (best effort). */
	(void) _process_mapper_dir(dmt);

	return _process_all_v4(dmt);
}
1598
1599
/*
1600
 * If an operation that uses a cookie fails, decrement the
1601
 * semaphore instead of udev.
1602
 */
1603
static int _udev_complete(struct dm_task *dmt)
{
	uint16_t base;

	/* event_nr carries the cookie base in its low bits and udev flags in
	 * the high bits.  NOTE(review): base is truncated to 16 bits here —
	 * assumes the cookie base always fits; confirm. */
	if (dmt->cookie_set &&
	    (base = dmt->event_nr & ~DM_UDEV_FLAGS_MASK))
		/* strip flags from the cookie and use cookie magic instead */
		return dm_udev_complete(base | (DM_COOKIE_MAGIC <<
						DM_UDEV_FLAGS_SHIFT));

	return 1;
}
1615
1616
#ifdef DM_IOCTLS
1617
/*
 * Whether the kernel reported it generated a uevent for this ioctl.
 * Drivers older than 4.17 cannot report this, so assume it did.
 */
static int _check_uevent_generated(struct dm_ioctl *dmi)
{
	if (!dm_check_version() ||
	    ((_dm_version == 4) ? _dm_version_minor < 17 : _dm_version < 4))
		/* can't check, assume uevent is generated */
		return 1;

	return dmi->flags & DM_UEVENT_GENERATED_FLAG;
}
1626
#endif
1627
1628
/*
1629
 * Create a RELOAD task and populate it with table data.
1630
 * Used by both _create_and_load_v4() and the async create chain.
1631
 * The target list (head/tail) is moved -- caller must NULL its own copy.
1632
 * Returns the new task on success, NULL on failure.
1633
 */
1634
static struct dm_task *_new_reload_task(const char *name,
1635
          struct target *head,
1636
          struct target *tail,
1637
          int read_only,
1638
          int secure_data,
1639
          int ima_measurement,
1640
          int major,
1641
          int minor)
1642
0
{
1643
0
  struct dm_task *task;
1644
1645
0
  if (!(task = dm_task_create(DM_DEVICE_RELOAD)))
1646
0
    return_NULL;
1647
1648
0
  if (name && !dm_task_set_name(task, name)) {
1649
0
    dm_task_destroy(task);
1650
0
    return_NULL;
1651
0
  }
1652
1653
0
  task->read_only = read_only;
1654
0
  task->head = head;
1655
0
  task->tail = tail;
1656
0
  task->secure_data = secure_data;
1657
0
  task->ima_measurement = ima_measurement;
1658
0
  task->major = major;
1659
0
  task->minor = minor;
1660
1661
0
  return task;
1662
0
}
1663
1664
/*
1665
 * Revert a failed CREATE-with-table by issuing a synchronous REMOVE.
1666
 * If cookie_set, sets up a udev cookie for the remove.
1667
 * Used by both _create_and_load_v4() and the async create chain.
1668
 */
1669
static void _revert_create(const char *dev_name,
			   int cookie_set, uint32_t event_nr)
{
	struct dm_task *dmt;
	uint32_t cookie;

	/* Nothing to remove without a device name. */
	if (!dev_name || !*dev_name)
		return;

	if (!(dmt = dm_task_create(DM_DEVICE_REMOVE)))
		return;

	if (!dm_task_set_name(dmt, dev_name)) {
		dm_task_destroy(dmt);
		return;
	}

	if (cookie_set) {
		/* Rebuild the cookie for the remove: base bits from event_nr
		 * plus the cookie magic; udev flags passed separately. */
		cookie = (event_nr & ~DM_UDEV_FLAGS_MASK) |
			 (DM_COOKIE_MAGIC << DM_UDEV_FLAGS_SHIFT);
		if (!dm_task_set_cookie(dmt, &cookie,
					(event_nr & DM_UDEV_FLAGS_MASK) >>
					DM_UDEV_FLAGS_SHIFT))
			stack; /* keep going */
	}

	if (!dm_task_run(dmt))
		log_error("Failed to revert device creation.");

	dm_task_destroy(dmt);
}
1700
1701
/*
 * Implement "create with table" as three ioctls: CREATE the empty device,
 * RELOAD the table onto it, then rerun 'dmt' as a RESUME so its result
 * reflects the live device.  Any failure after the create removes the
 * device again via _revert_create(); the first ioctl errno is preserved.
 */
static int _create_and_load_v4(struct dm_task *dmt)
{
	struct dm_info info;
	struct dm_task *task;
	int r, ioctl_errno = 0;

	/* Use new task struct to create the device */
	if (!(task = dm_task_create(DM_DEVICE_CREATE))) {
		_udev_complete(dmt);
		return_0;
	}

	/* Copy across relevant fields */
	if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name))
		goto_bad;

	if (dmt->uuid && !dm_task_set_uuid(task, dmt->uuid))
		goto_bad;

	task->major = dmt->major;
	task->minor = dmt->minor;
	task->uid = dmt->uid;
	task->gid = dmt->gid;
	task->mode = dmt->mode;
	/* FIXME: Just for udev_check in dm_task_run. Can we avoid this? */
	task->event_nr = dmt->event_nr & DM_UDEV_FLAGS_MASK;
	task->cookie_set = dmt->cookie_set;
	task->add_node = dmt->add_node;

	if (!dm_task_run(task)) {
		ioctl_errno = task->ioctl_errno;
		goto_bad;
	}

	if (!dm_task_get_info(task, &info) || !info.exists)
		goto_bad;

	dm_task_destroy(task);

	/* Next load the table: the target list is lent to the reload task
	 * and reclaimed below by NULLing its head/tail. */
	if (!(task = _new_reload_task(dmt->dev_name, dmt->head, dmt->tail,
				      dmt->read_only, dmt->secure_data,
				      dmt->ima_measurement,
				      info.major, info.minor))) {
		stack;
		_udev_complete(dmt);
		goto revert;
	}

	r = dm_task_run(task);
	if (!r)
		ioctl_errno = task->ioctl_errno;

	/* Detach the borrowed target list so destroy does not free it. */
	task->head = NULL;
	task->tail = NULL;
	dm_task_destroy(task);

	if (!r) {
		stack;
		_udev_complete(dmt);
		goto revert;
	}

	/* Use the original structure last so the info will be correct */
	dmt->type = DM_DEVICE_RESUME;
	dm_free(dmt->uuid);
	dmt->uuid = NULL;
	dm_free(dmt->mangled_uuid);
	dmt->mangled_uuid = NULL;
	/* coverity[double_free] recursive function call */
	_dm_task_free_targets(dmt);

	if (dm_task_run(dmt))
		return 1;

      revert:
	_revert_create(dmt->dev_name, dmt->cookie_set, dmt->event_nr);

	if (ioctl_errno != 0)
		dmt->ioctl_errno =  ioctl_errno;

	return 0;

      bad:
	dm_task_destroy(task);
	_udev_complete(dmt);

	if (ioctl_errno != 0)
		dmt->ioctl_errno =  ioctl_errno;

	return 0;
}
1793
1794
/* Return the live-table size recorded by _reload_with_suppression_v4(). */
uint64_t dm_task_get_existing_table_size(struct dm_task *dmt)
{
	return dmt->existing_table_size;
}
1798
1799
/*
 * Avoid a redundant reload: fetch the device's live table and compare it
 * target-by-target with the pending one.  If identical (same read-only
 * flag, start, length, type and params) the TABLE result is adopted as
 * this task's result instead of issuing the reload.  Otherwise the
 * original reload is performed with suppression disabled.
 */
static int _reload_with_suppression_v4(struct dm_task *dmt)
{
	struct dm_task *task;
	struct target *t1, *t2;
	size_t len;
	int r;

	/* New task to get existing table information */
	if (!(task = dm_task_create(DM_DEVICE_TABLE))) {
		log_error("Failed to create device-mapper task struct");
		return 0;
	}

	/* Copy across relevant fields */
	if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) {
		dm_task_destroy(task);
		return 0;
	}

	if (dmt->uuid && !dm_task_set_uuid(task, dmt->uuid)) {
		dm_task_destroy(task);
		return 0;
	}

	task->major = dmt->major;
	task->minor = dmt->minor;

	r = dm_task_run(task);

	if (!r) {
		dm_task_destroy(task);
		return r;
	}

	/* Store existing table size (end sector of the last target). */
	t2 = task->head;
	while (t2 && t2->next)
		t2 = t2->next;
	dmt->existing_table_size = t2 ? t2->start + t2->length : 0;

	if (((task->dmi.v4->flags & DM_READONLY_FLAG) ? 1 : 0) != dmt->read_only)
		goto no_match;

	t1 = dmt->head;
	t2 = task->head;

	while (t1 && t2) {
		/* Strip trailing spaces the kernel may append to params. */
		len = strlen(t2->params);
		while (len-- > 0 && t2->params[len] == ' ')
			t2->params[len] = '\0';

		if (t1->start != t2->start) {
			log_debug("reload %u:%u diff start %llu %llu type %s %s", task->major, task->minor,
				  (unsigned long long)t1->start, (unsigned long long)t2->start, t1->type, t2->type);
			goto no_match;
		}
		if (t1->length != t2->length) {
			log_debug("reload %u:%u diff length %llu %llu type %s %s", task->major, task->minor,
				  (unsigned long long)t1->length, (unsigned long long)t2->length, t1->type, t2->type);
			goto no_match;
		}
		if (strcmp(t1->type, t2->type)) {
			log_debug("reload %u:%u diff type %s %s", task->major, task->minor, t1->type, t2->type);
			goto no_match;
		}
		if (strcmp(t1->params, t2->params)) {
			/* Optionally tolerate differing params (logged only). */
			if (dmt->skip_reload_params_compare) {
				log_debug("reload %u:%u diff params ignore for type %s",
					  task->major, task->minor, t1->type);
				log_debug("reload params1 %s", t1->params);
				log_debug("reload params2 %s", t2->params);
			} else {
				log_debug("reload %u:%u diff params for type %s",
					  task->major, task->minor, t1->type);
				log_debug("reload params1 %s", t1->params);
				log_debug("reload params2 %s", t2->params);
				goto no_match;
			}
		}

		t1 = t1->next;
		t2 = t2->next;
	}

	/* Same number of targets and all matched: adopt the TABLE result. */
	if (!t1 && !t2) {
		dmt->dmi.v4 = task->dmi.v4;
		task->dmi.v4 = NULL;
		dm_task_destroy(task);
		return 1;
	}

no_match:
	dm_task_destroy(task);

	/* Now do the original reload */
	dmt->suppress_identical_reload = 0;
	r = dm_task_run(dmt);

	return r;
}
1899
1900
/*
 * Recursively check that no device this one depends upon is already
 * suspended (which could trap I/O between the two).  'device' selects the
 * dependency to inspect; 0 means inspect dmt's own device.
 * Returns 1 when the tree is safe (or the problem was only logged),
 * 0 on failure to inspect.
 * NOTE(review): after a successful dm_task_run, later failures (get_info,
 * get_deps) leave r at the nonzero run result and return "success" —
 * confirm this fall-through is intended.
 */
static int _check_children_not_suspended_v4(struct dm_task *dmt, uint64_t device)
{
	struct dm_task *task;
	struct dm_info info;
	struct dm_deps *deps;
	int r = 0;
	uint32_t i;

	/* Find dependencies */
	if (!(task = dm_task_create(DM_DEVICE_DEPS)))
		return 0;

	/* Copy across or set relevant fields */
	if (device) {
		task->major = MAJOR(device);
		task->minor = MINOR(device);
	} else {
		if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name))
			goto out;

		if (dmt->uuid && !dm_task_set_uuid(task, dmt->uuid))
			goto out;

		task->major = dmt->major;
		task->minor = dmt->minor;
	}

	task->uid = dmt->uid;
	task->gid = dmt->gid;
	task->mode = dmt->mode;
	/* FIXME: Just for udev_check in dm_task_run. Can we avoid this? */
	task->event_nr = dmt->event_nr & DM_UDEV_FLAGS_MASK;
	task->cookie_set = dmt->cookie_set;
	task->add_node = dmt->add_node;

	if (!(r = dm_task_run(task)))
		goto out;

	if (!dm_task_get_info(task, &info) || !info.exists)
		goto out;

	/*
	 * Warn if any of the devices this device depends upon are already
	 * suspended: I/O could become trapped between the two devices.
	 */
	if (info.suspended) {
		if (!device)
			log_debug_activation("Attempting to suspend a device that is already suspended "
					     "(%u:%u)", info.major, info.minor);
		else
			log_error(INTERNAL_ERROR "Attempt to suspend device %s%s%s%.0d%s%.0d%s%s"
				  "that uses already-suspended device (%u:%u)", 
				  DEV_NAME(dmt) ? : "", DEV_UUID(dmt) ? : "",
				  dmt->major > 0 ? "(" : "",
				  dmt->major > 0 ? dmt->major : 0,
				  dmt->major > 0 ? ":" : "",
				  dmt->minor > 0 ? dmt->minor : 0,
				  dmt->major > 0 && dmt->minor == 0 ? "0" : "",
				  dmt->major > 0 ? ") " : "",
				  info.major, info.minor);

		/* No need for further recursion */
		r = 1;
		goto out;
	}

	if (!(deps = dm_task_get_deps(task)))
		goto out;

	for (i = 0; i < deps->count; i++) {
		/* Only recurse with dm devices */
		if (MAJOR(deps->device[i]) != _dm_device_major)
			continue;

		if (!_check_children_not_suspended_v4(task, deps->device[i]))
			goto out;
	}

	r = 1;

out:
	dm_task_destroy(task);

	return r;
}
1985
1986
static int _suspend_with_validation_v4(struct dm_task *dmt)
1987
0
{
1988
  /* Avoid recursion */
1989
0
  dmt->enable_checks = 0;
1990
1991
  /*
1992
   * Ensure we can't leave any I/O trapped between suspended devices.
1993
   */
1994
0
  if (!_check_children_not_suspended_v4(dmt, 0))
1995
0
    return 0;
1996
1997
  /* Finally, perform the original suspend. */
1998
0
  return dm_task_run(dmt);
1999
0
}
2000
2001
/*
 * Return a log-safe version of a target message: NULL becomes "" and
 * anything beginning with "key set" (case-insensitive, as cryptsetup
 * sends) is reduced to just "key set" so key material is never logged.
 */
static const char *_sanitise_message(char *message)
{
	if (!message)
		return "";

	/* FIXME: Check for whitespace variations. */
	/* This traps what cryptsetup sends us. */
	if (!strncasecmp(message, "key set", 7))
		return "key set";

	return message;
}
2012
2013
#ifdef DM_IOCTLS
2014
/*
 * Unmangle 'str' in place using 'buf' as scratch space.
 * No-op for DM_STRING_MANGLING_NONE.  Returns 1 on success, 0 on failure.
 */
static int _do_dm_ioctl_unmangle_string(char *str, const char *str_name,
					char *buf, size_t buf_size,
					dm_string_mangling_t mode)
{
	int r;

	if (mode == DM_STRING_MANGLING_NONE)
		return 1;

	if (!check_multiple_mangled_string_allowed(str, str_name, mode))
		return_0;

	if ((r = unmangle_string(str, str_name, strlen(str), buf, buf_size, mode)) < 0) {
		log_debug_activation("_do_dm_ioctl_unmangle_string: failed to "
				     "unmangle %s \"%s\"", str_name, str);
		return 0;
	}

	/* r > 0 means the string was actually mangled: copy decoded form back. */
	if (r)
		memcpy(str, buf, strlen(buf) + 1);

	return 1;
}
2037
2038
/*
 * Unmangle the device name in 'dmi' and, for DM_DEVICE_LIST results, every
 * name (and optional UUID) in the returned dm_name_list.
 * Returns 1 if all strings unmangled cleanly, 0 if any failed.
 */
static int _dm_ioctl_unmangle_names(int type, struct dm_ioctl *dmi)
{
	char buf[DM_NAME_LEN];
	char buf_uuid[DM_UUID_LEN];
	struct dm_name_list *names;
	unsigned next = 0;
	char *name;
	int r = 1;
	uint32_t *event_nr;
	char *uuid_ptr;
	dm_string_mangling_t mangling_mode = dm_get_name_mangling_mode();

	if ((name = dmi->name))
		r &= _do_dm_ioctl_unmangle_string(name, "name", buf, sizeof(buf),
						  mangling_mode);

	/* List entries are chained by byte offsets; zero dev = empty list. */
	if (type == DM_DEVICE_LIST &&
	    ((names = ((struct dm_name_list *) ((char *)dmi + dmi->data_start)))) &&
	    names->dev) {
		do {
			names = (struct dm_name_list *)((char *) names + next);
			/* event_nr pair (and possibly a UUID) follow the
			 * NUL-terminated name, pointer-aligned. */
			event_nr = _align_ptr(names->name + strlen(names->name) + 1);
			r &= _do_dm_ioctl_unmangle_string(names->name, "name",
							  buf, sizeof(buf), mangling_mode);
			/* Unmangle also UUID within same loop */
			if (_check_has_event_nr() &&
			    (event_nr[1] & DM_NAME_LIST_FLAG_HAS_UUID)) {
				uuid_ptr = _align_ptr(event_nr + 2);
				r &= _do_dm_ioctl_unmangle_string(uuid_ptr, "UUID", buf_uuid,
								  sizeof(buf_uuid), mangling_mode);
			}
			next = names->next;
		} while (next);
	}

	return r;
}
2075
2076
static int _dm_ioctl_unmangle_uuids(int type, struct dm_ioctl *dmi)
2077
0
{
2078
0
  char buf[DM_UUID_LEN];
2079
0
  char *uuid = dmi->uuid;
2080
2081
0
  if (uuid)
2082
0
    return _do_dm_ioctl_unmangle_string(uuid, "UUID", buf, sizeof(buf),
2083
0
                dm_get_name_mangling_mode());
2084
2085
0
  return 1;
2086
0
}
2087
#endif
2088
2089
/* True for ioctl types that generate a udev event and need cookie handling. */
2090
static inline int _dmt_has_uevent(const struct dm_task *dmt)
2091
0
{
2092
0
  return dmt->type == DM_DEVICE_RESUME ||
2093
0
         dmt->type == DM_DEVICE_REMOVE ||
2094
0
         dmt->type == DM_DEVICE_RENAME;
2095
0
}
2096
2097
/*
 * Flatten the task into a fresh dm_ioctl buffer and set all flags and
 * udev cookie bits.  Returns the allocated buffer (caller owns it) or
 * NULL on failure.
 *
 * buffer_repeat_count is the doubling factor applied to the data buffer
 * by _flatten() when a previous attempt returned DM_BUFFER_FULL_FLAG.
 */
static struct dm_ioctl *_dm_task_build_dmi(struct dm_task *dmt,
					   unsigned buffer_repeat_count)
{
	struct dm_ioctl *dmi;

	dmi = _flatten(dmt, buffer_repeat_count);
	if (!dmi) {
		log_error("Couldn't create ioctl argument.");
		return NULL;
	}

	/* A table query must ask the kernel for the table contents. */
	if (dmt->type == DM_DEVICE_TABLE)
		dmi->flags |= DM_STATUS_TABLE_FLAG;

	dmi->flags |= DM_EXISTS_FLAG;	/* FIXME */

	if (dmt->no_open_count)
		dmi->flags |= DM_SKIP_BDGET_FLAG;

	if (_dmt_has_uevent(dmt) && dm_cookie_supported()) {
		/*
		 * Always mark events coming from libdevmapper as
		 * "primary sourced". This is needed to distinguish
		 * any spurious events so we can act appropriately.
		 * This needs to be applied even when udev_sync is
		 * not used because udev flags could be used alone.
		 */
		dmi->event_nr |= DM_UDEV_PRIMARY_SOURCE_FLAG <<
				 DM_UDEV_FLAGS_SHIFT;

		/*
		 * Prevent udev vs. libdevmapper race when processing nodes
		 * and symlinks. This can happen when the udev rules are
		 * installed and udev synchronization code is enabled in
		 * libdevmapper but the software using libdevmapper does not
		 * make use of it (by not calling dm_task_set_cookie before).
		 * We need to instruct the udev rules not to be applied at
		 * all in this situation so we can gracefully fallback to
		 * libdevmapper's node and symlink creation code.
		 */
		if (!dmt->cookie_set && dm_udev_get_sync_support()) {
			log_debug_activation("Cookie value is not set while trying to call %s "
					     "ioctl. Please, consider using libdevmapper's udev "
					     "synchronization interface or disable it explicitly "
					     "by calling dm_udev_set_sync_support(0).",
					     dmt->type == DM_DEVICE_RESUME ? "DM_DEVICE_RESUME" :
					     dmt->type == DM_DEVICE_REMOVE ? "DM_DEVICE_REMOVE" :
									     "DM_DEVICE_RENAME");
			log_debug_activation("Switching off device-mapper and all subsystem related "
					     "udev rules. Falling back to libdevmapper node creation.");
			/*
			 * Disable general dm and subsystem rules but keep
			 * dm disk rules if not flagged out explicitly before.
			 * We need /dev/disk content for the software that expects it.
			*/
			dmi->event_nr |= (DM_UDEV_DISABLE_DM_RULES_FLAG |
					  DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG) <<
					 DM_UDEV_FLAGS_SHIFT;
		}
	}

	/* Trace the fully-assembled request before it is issued. */
	log_debug_activation("dm %s %s%s %s%s%s %s%.0d%s%.0d%s"
			     "%s[ %s%s%s%s%s%s%s%s%s%s] %.0" PRIu64 " %s [%u]",
			     _cmd_data_v4[dmt->type].name,
			     dmt->new_uuid ? "UUID " : "",
			     dmi->name, dmi->uuid, dmt->newname ? " " : "",
			     dmt->newname ? dmt->newname : "",
			     dmt->major > 0 ? "(" : "",
			     dmt->major > 0 ? dmt->major : 0,
			     dmt->major > 0 ? ":" : "",
			     dmt->minor > 0 ? dmt->minor : 0,
			     dmt->major > 0 && dmt->minor == 0 ? "0" : "",
			     dmt->major > 0 ? ") " : "",
			     dmt->no_open_count ? "noopencount " : "opencount ",
			     dmt->no_flush ? "noflush " : "flush ",
			     dmt->read_only ? "readonly " : "",
			     dmt->skip_lockfs ? "skiplockfs " : "",
			     dmt->retry_remove ? "retryremove " : "",
			     dmt->deferred_remove ? "deferredremove " : "",
			     dmt->secure_data ? "securedata " : "",
			     dmt->ima_measurement ? "ima_measurement " : "",
			     dmt->query_inactive_table ? "inactive " : "",
			     dmt->enable_checks ? "enablechecks " : "",
			     dmt->sector, _sanitise_message(dmt->message),
			     dmi->data_size);

	return dmi;
}
2190
2191
/* Execute a single DM ioctl.  Sets dmt->ioctl_errno on failure. */
2192
int dm_ioctl_exec(int fd, struct dm_task *dmt, struct dm_ioctl *dmi)
2193
0
{
2194
0
  int r;
2195
2196
0
  dmt->ioctl_errno = 0;
2197
2198
0
#ifdef DM_IOCTLS
2199
0
  r = ioctl(fd, _cmd_data_v4[dmt->type].cmd, dmi);
2200
0
  if (r < 0)
2201
0
    dmt->ioctl_errno = errno;
2202
#else /* Userspace alternative for testing */
2203
  r = 0;
2204
#endif
2205
0
  return r;
2206
0
}
2207
2208
/*
2209
 * Execute ioctl with EBUSY retry for remove operations.
2210
 *
2211
 * The dmi buffer is reused across retries without rebuilding.
2212
 * On EBUSY the kernel fails the ioctl early without modifying
2213
 * the dmi structure, so the input fields (name, uuid, dev)
2214
 * remain valid for the next attempt.
2215
 */
2216
static int _dm_ioctl_exec_retry(int fd, struct dm_task *dmt)
2217
0
{
2218
0
  unsigned retries = 0;
2219
0
  int r;
2220
2221
0
  do {
2222
0
    if (retries) {
2223
0
      log_debug_activation("EBUSY retry %u/%u for %s.",
2224
0
               retries, DM_IOCTL_RETRIES,
2225
0
               dmt->dmi.v4->name);
2226
0
      usleep(DM_RETRY_USLEEP_DELAY);
2227
0
    }
2228
2229
0
    r = dm_ioctl_exec(fd, dmt, dmt->dmi.v4);
2230
2231
0
    if (r >= 0)
2232
0
      break;
2233
2234
0
  } while (dmt->retry_remove &&
2235
0
     (dmt->ioctl_errno == EBUSY) &&
2236
0
     (dmt->type == DM_DEVICE_REMOVE) &&
2237
0
     (++retries <= DM_IOCTL_RETRIES));
2238
2239
0
  return r;
2240
0
}
2241
2242
/*
 * Post-ioctl processing: error logging, uevent completion,
 * and name/uuid unmangling.
 * On ioctl error returns 0; caller must call _udev_complete().
 * Calls _udev_complete() only when uevent was not generated by kernel.
 * Returns 1 on success, 0 on failure.
 */
static int _dm_ioctl_post(struct dm_task *dmt, struct dm_ioctl *dmi,
			  int r)
{
#ifdef DM_IOCTLS
	/* An errno the caller declared as expected is not treated as failure. */
	if (r < 0 && dmt->expected_errno != dmt->ioctl_errno) {
		/*
		 * ENXIO on a query just means "device does not exist":
		 * clear DM_EXISTS_FLAG and report success.
		 */
		if (dmt->ioctl_errno == ENXIO && ((dmt->type == DM_DEVICE_INFO) ||
						  (dmt->type == DM_DEVICE_MKNODES) ||
						  (dmt->type == DM_DEVICE_STATUS)))
			dmi->flags &= ~DM_EXISTS_FLAG; /* FIXME */
		else {
			/* Demote to verbose when suppressed or merely interrupted. */
			if (_log_suppress || dmt->ioctl_errno == EINTR)
				log_verbose("device-mapper: %s ioctl on %s %s%s%.0d%s%.0d%s%s "
					    "failed: %s",
					    _cmd_data_v4[dmt->type].name,
					    dmi->name[0] ? dmi->name : DEV_NAME(dmt) ? : "",
					    dmi->uuid[0] ? dmi->uuid : DEV_UUID(dmt) ? : "",
					    dmt->major > 0 ? "(" : "",
					    dmt->major > 0 ? dmt->major : 0,
					    dmt->major > 0 ? ":" : "",
					    dmt->minor > 0 ? dmt->minor : 0,
					    dmt->major > 0 && dmt->minor == 0 ? "0" : "",
					    dmt->major > 0 ? ")" : "",
					    strerror(dmt->ioctl_errno));
			else
				log_error("device-mapper: %s ioctl on %s %s%s%.0d%s%.0d%s%s "
					  "failed: %s",
					  _cmd_data_v4[dmt->type].name,
					  dmi->name[0] ? dmi->name : DEV_NAME(dmt) ? : "",
					  dmi->uuid[0] ? dmi->uuid : DEV_UUID(dmt) ? : "",
					  dmt->major > 0 ? "(" : "",
					  dmt->major > 0 ? dmt->major : 0,
					  dmt->major > 0 ? ":" : "",
					  dmt->minor > 0 ? dmt->minor : 0,
					  dmt->major > 0 && dmt->minor == 0 ? "0" : "",
					  dmt->major > 0 ? ")" : "",
					  strerror(dmt->ioctl_errno));

			return 0;
		}
	}

	/*
	 * If the kernel did not emit the expected uevent, complete the
	 * cookie ourselves so the waiting process does not hang.
	 */
	if (_dmt_has_uevent(dmt) && dm_udev_get_sync_support() &&
	    !_check_uevent_generated(dmi)) {
		if (dmt->deferred_remove)
			log_debug_activation("Deferred remove: device busy, "
					     "no uevent generated.");
		else {
			log_debug_activation("Uevent not generated! Calling "
					     "udev_complete internally to "
					     "avoid process lock-up.");
		}
		_udev_complete(dmt);
	}

	if (!_dm_ioctl_unmangle_names(dmt->type, dmi))
		return 0;

	/* A removed device has no UUID left to unmangle. */
	if (dmt->type != DM_DEVICE_REMOVE &&
	    !_dm_ioctl_unmangle_uuids(dmt->type, dmi))
		return 0;

#else /* Userspace alternative for testing */
	return 0;
#endif
	return 1;
}
2315
2316
/* Build ioctl buffer, execute it, and post-process the result. */
2317
static struct dm_ioctl *_do_dm_ioctl(struct dm_task *dmt,
2318
             unsigned buffer_repeat_count)
2319
0
{
2320
0
  struct dm_ioctl *dmi;
2321
0
  int r;
2322
2323
0
  if (!(dmi = _dm_task_build_dmi(dmt, buffer_repeat_count)))
2324
0
    return_NULL;
2325
2326
0
  _dm_zfree_dmi(dmt->dmi.v4);
2327
0
  dmt->dmi.v4 = dmi;
2328
2329
0
  r = _dm_ioctl_exec_retry(_control_fd, dmt);
2330
2331
0
  if (dmt->record_timestamp)
2332
0
    if (!dm_timestamp_get(_dm_ioctl_timestamp))
2333
0
      stack;
2334
2335
0
  if (!_dm_ioctl_post(dmt, dmi, r)) {
2336
0
    _dm_zfree_dmi(dmi);
2337
0
    dmt->dmi.v4 = NULL;
2338
0
    return_NULL;
2339
0
  }
2340
2341
0
  return dmi;
2342
0
}
2343
2344
/* Apply any outstanding device-node updates queued by earlier task runs. */
void dm_task_update_nodes(void)
{
	update_devs();
}
2348
2349
/*
2350
 * Perform device-node operations after a successful ioctl.
2351
 * Returns 1 on success, 0 on failure.
2352
 */
2353
static int _dm_task_node_ops(struct dm_task *dmt, struct dm_ioctl *dmi)
2354
0
{
2355
0
  const char *dev_name = DEV_NAME(dmt);
2356
0
  int check_udev = dmt->cookie_set &&
2357
0
       !(dmt->event_nr >> DM_UDEV_FLAGS_SHIFT &
2358
0
         DM_UDEV_DISABLE_DM_RULES_FLAG);
2359
0
  int rely_on_udev = dmt->cookie_set ? (dmt->event_nr >> DM_UDEV_FLAGS_SHIFT &
2360
0
                DM_UDEV_DISABLE_LIBRARY_FALLBACK) : 0;
2361
2362
0
  switch (dmt->type) {
2363
0
  case DM_DEVICE_CREATE:
2364
0
    if ((dmt->add_node == DM_ADD_NODE_ON_CREATE) &&
2365
0
        dev_name && *dev_name && !rely_on_udev)
2366
0
      add_dev_node(dev_name, MAJOR(dmi->dev),
2367
0
             MINOR(dmi->dev), dmt->uid, dmt->gid,
2368
0
             dmt->mode, check_udev, rely_on_udev);
2369
0
    break;
2370
0
  case DM_DEVICE_REMOVE:
2371
0
    if (dev_name && !rely_on_udev)
2372
0
      rm_dev_node(dev_name, check_udev, rely_on_udev);
2373
0
    break;
2374
2375
0
  case DM_DEVICE_RENAME:
2376
0
    if (!dmt->new_uuid && dev_name)
2377
0
      rename_dev_node(dev_name, dmt->newname,
2378
0
          check_udev, rely_on_udev);
2379
0
    break;
2380
2381
0
  case DM_DEVICE_RESUME:
2382
0
    if (dev_name && *dev_name) {
2383
0
      if (dmt->add_node == DM_ADD_NODE_ON_RESUME)
2384
0
        add_dev_node(dev_name, MAJOR(dmi->dev),
2385
0
               MINOR(dmi->dev), dmt->uid, dmt->gid,
2386
0
               dmt->mode, check_udev, rely_on_udev);
2387
0
      set_dev_node_read_ahead(dev_name,
2388
0
            MAJOR(dmi->dev), MINOR(dmi->dev),
2389
0
            dmt->read_ahead, dmt->read_ahead_flags);
2390
0
    }
2391
0
    break;
2392
2393
0
  case DM_DEVICE_MKNODES:
2394
0
    if (dmi->flags & DM_EXISTS_FLAG)
2395
0
      add_dev_node(dmi->name, MAJOR(dmi->dev),
2396
0
             MINOR(dmi->dev), dmt->uid,
2397
0
             dmt->gid, dmt->mode, 0, rely_on_udev);
2398
0
    else if (dev_name)
2399
0
      rm_dev_node(dev_name, 0, rely_on_udev);
2400
0
    break;
2401
2402
0
  case DM_DEVICE_STATUS:
2403
0
  case DM_DEVICE_TABLE:
2404
0
  case DM_DEVICE_WAITEVENT:
2405
0
    if (!_unmarshal_status(dmt, dmi))
2406
0
      return 0;
2407
0
    break;
2408
0
  }
2409
2410
0
  return 1;
2411
0
}
2412
2413
/* Return the errno captured from this task's last ioctl (0 if it succeeded). */
int dm_task_get_errno(struct dm_task *dmt)
{
	return dmt->ioctl_errno;
}
2417
2418
/*
2419
 * Check whether a DM_BUFFER_FULL_FLAG retry is allowed for this task type.
2420
 * Increments the global doubling factor on success.
2421
 * Returns 1 if the caller should retry with a larger buffer, 0 otherwise.
2422
 */
2423
static int _can_retry_buffer_full(struct dm_task *dmt)
2424
0
{
2425
0
  switch (dmt->type) {
2426
0
  case DM_DEVICE_LIST_VERSIONS:
2427
0
  case DM_DEVICE_LIST:
2428
0
  case DM_DEVICE_DEPS:
2429
0
  case DM_DEVICE_STATUS:
2430
0
  case DM_DEVICE_TABLE:
2431
0
  case DM_DEVICE_WAITEVENT:
2432
0
  case DM_DEVICE_TARGET_MSG:
2433
0
    break;
2434
0
  default:
2435
0
    log_error("WARNING: libdevmapper buffer too small for data.");
2436
0
    return 0;
2437
0
  }
2438
2439
0
  if (_ioctl_buffer_double_factor >= DM_IOCTL_BUFFER_MAX_DOUBLINGS) {
2440
0
    log_error("Ioctl buffer maximum reached (16KB << %u = %zu bytes), giving up.",
2441
0
        _ioctl_buffer_double_factor,
2442
0
        (size_t)16 * 1024 << _ioctl_buffer_double_factor);
2443
0
    return 0;
2444
0
  }
2445
2446
0
  _ioctl_buffer_double_factor++;
2447
2448
0
  return 1;
2449
0
}
2450
2451
#if defined(GNU_SYMVER)
2452
/*
2453
 * Enforce new version 1_02_197 of dm_task_run() that propagates
2454
 * ioctl() errno is being linked to app.
2455
 */
2456
DM_EXPORT_SYMBOL_BASE(dm_task_run)
2457
int dm_task_run_base(struct dm_task *dmt);
2458
int dm_task_run_base(struct dm_task *dmt)
2459
0
{
2460
0
  return dm_task_run(dmt);
2461
0
}
2462
#endif
2463
2464
/*
 * Run the prepared task: dispatch special-cased types, then issue the
 * ioctl (growing the buffer on DM_BUFFER_FULL_FLAG) and perform node ops.
 * Returns 1 on success, 0 on failure.
 */
DM_EXPORT_NEW_SYMBOL(int, dm_task_run, 1_02_197)
	(struct dm_task *dmt)
{
	struct dm_ioctl *dmi;
	int suspended_counter;
	const char *dev_name = DEV_NAME(dmt);
	const char *dev_uuid = DEV_UUID(dmt);

	if (!_validate_task_type(dmt))
		return_0;

	/* Old-style creation had a table supplied */
	if (dmt->type == DM_DEVICE_CREATE && dmt->head)
		return _create_and_load_v4(dmt);

	/* Global mknodes (no name, uuid or major given) scans all devices. */
	if (dmt->type == DM_DEVICE_MKNODES && !dev_name &&
	    !dev_uuid && dmt->major <= 0)
		return _mknodes_v4(dmt);

	if ((dmt->type == DM_DEVICE_RELOAD) && dmt->suppress_identical_reload)
		return _reload_with_suppression_v4(dmt);

	if ((dmt->type == DM_DEVICE_SUSPEND) && dmt->enable_checks)
		return _suspend_with_validation_v4(dmt);

	if (!_open_control()) {
		_udev_complete(dmt);
		return_0;
	}

	/* Loading a table while devices are suspended risks a deadlock. */
	if ((suspended_counter = dm_get_suspended_counter()) &&
	    dmt->type == DM_DEVICE_RELOAD)
		log_error(INTERNAL_ERROR "Performing unsafe table load while %d device(s) "
			  "are known to be suspended: "
			  "%s%s%s %s%.0d%s%.0d%s%s",
			  suspended_counter,
			  dev_name ? : "",
			  dev_uuid ? " UUID " : "",
			  dev_uuid ? : "",
			  dmt->major > 0 ? "(" : "",
			  dmt->major > 0 ? dmt->major : 0,
			  dmt->major > 0 ? ":" : "",
			  dmt->minor > 0 ? dmt->minor : 0,
			  dmt->major > 0 && dmt->minor == 0 ? "0" : "",
			  dmt->major > 0 ? ") " : "");

	/* FIXME Detect and warn if cookie set but should not be. */
repeat_ioctl:
	if (!(dmi = _do_dm_ioctl(dmt, _ioctl_buffer_double_factor))) {
		_udev_complete(dmt);
		return_0;
	}

	/* Result did not fit: retry with a doubled buffer when permitted. */
	if (dmi->flags & DM_BUFFER_FULL_FLAG) {
		if (_can_retry_buffer_full(dmt))
			goto repeat_ioctl;

		goto_bad;
	}

	if (!_dm_task_node_ops(dmt, dmi))
		goto_bad;

	return 1;

bad:
	_dm_zfree_dmi(dmt->dmi.v4);
	dmt->dmi.v4 = NULL;
	return 0;
}
2534
2535
void dm_hold_control_dev(int hold_open)
2536
0
{
2537
0
  _hold_control_fd_open = hold_open ? 1 : 0;
2538
2539
0
  log_debug("Hold of control device is now %sset.",
2540
0
      _hold_control_fd_open ? "" : "un");
2541
0
}
2542
2543
void dm_lib_release(void)
2544
5.65k
{
2545
5.65k
  if (!_hold_control_fd_open)
2546
5.65k
    _close_control_fd();
2547
5.65k
  dm_timestamp_destroy(_dm_ioctl_timestamp);
2548
5.65k
  _dm_ioctl_timestamp = NULL;
2549
5.65k
  update_devs();
2550
5.65k
}
2551
2552
void dm_pools_check_leaks(void);
2553
2554
void dm_lib_exit(void)
2555
0
{
2556
0
  int suspended_counter;
2557
0
  static unsigned _exited = 0;
2558
2559
0
  if (_exited++)
2560
0
    return;
2561
2562
0
  if ((suspended_counter = dm_get_suspended_counter()))
2563
0
    log_error("libdevmapper exiting with %d device(s) still suspended.", suspended_counter);
2564
2565
0
  dm_lib_release();
2566
0
  selinux_release();
2567
0
  if (_dm_bitset)
2568
0
    dm_bitset_destroy(_dm_bitset);
2569
0
  _dm_bitset = NULL;
2570
0
  dm_pools_check_leaks();
2571
0
  dm_dump_memory();
2572
0
  _version_ok = 1;
2573
0
  _version_checked = 0;
2574
0
}
2575
2576
#if defined(GNU_SYMVER)
2577
/*
2578
 * Maintain binary backward compatibility.
2579
 * Version script mechanism works with 'gcc' compatible compilers only.
2580
 */
2581
2582
/*
2583
 * This following code is here to retain ABI compatibility after adding
2584
 * the field deferred_remove to struct dm_info in version 1.02.89.
2585
 *
2586
 * Binaries linked against version 1.02.88 of libdevmapper or earlier
2587
 * will use this function that returns dm_info without the
2588
 * deferred_remove field.
2589
 *
2590
 * Binaries compiled against version 1.02.89 onwards will use
2591
 * the new function dm_task_get_info_with_deferred_remove due to the
2592
 * #define.
2593
 *
2594
 * N.B. Keep this function at the end of the file to make sure that
2595
 * no code in this file accidentally calls it.
2596
 */
2597
2598
DM_EXPORT_SYMBOL_BASE(dm_task_get_info)
2599
int dm_task_get_info_base(struct dm_task *dmt, struct dm_info *info);
2600
int dm_task_get_info_base(struct dm_task *dmt, struct dm_info *info)
2601
0
{
2602
0
  struct dm_info new_info;
2603
2604
0
  if (!dm_task_get_info(dmt, &new_info))
2605
0
    return 0;
2606
2607
0
  memcpy(info, &new_info, offsetof(struct dm_info, deferred_remove));
2608
2609
0
  return 1;
2610
0
}
2611
2612
#endif
2613
2614
int dm_task_get_info_with_deferred_remove(struct dm_task *dmt, struct dm_info *info);
2615
int dm_task_get_info_with_deferred_remove(struct dm_task *dmt, struct dm_info *info)
2616
0
{
2617
0
  struct dm_info new_info;
2618
2619
0
  if (!dm_task_get_info(dmt, &new_info))
2620
0
    return 0;
2621
2622
0
  memcpy(info, &new_info, offsetof(struct dm_info, internal_suspend));
2623
2624
0
  return 1;
2625
0
}