Coverage Report

Created: 2024-05-21 06:33

/src/lvm2/libdm/ioctl/libdm-iface.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
3
 * Copyright (C) 2004-2013 Red Hat, Inc. All rights reserved.
4
 *
5
 * This file is part of the device-mapper userspace tools.
6
 *
7
 * This copyrighted material is made available to anyone wishing to use,
8
 * modify, copy, or redistribute it subject to the terms and conditions
9
 * of the GNU Lesser General Public License v.2.1.
10
 *
11
 * You should have received a copy of the GNU Lesser General Public License
12
 * along with this program; if not, write to the Free Software Foundation,
13
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
14
 */
15
16
#include "libdm/misc/dmlib.h"
17
#include "libdm-targets.h"
18
#include "libdm-common.h"
19
20
#include <stddef.h>
21
#include <fcntl.h>
22
#include <dirent.h>
23
#include <sys/ioctl.h>
24
#include <sys/utsname.h>
25
#include <limits.h>
26
27
#ifdef __linux__
28
#  include "libdm/misc/kdev_t.h"
29
#  include <linux/limits.h>
30
#else
31
#  define MAJOR(x) major((x))
32
#  define MINOR(x) minor((x))
33
#  define MKDEV(x,y) makedev(((dev_t)x),((dev_t)y))
34
#endif
35
36
#include "libdm/misc/dm-ioctl.h"
37
38
/*
39
 * Ensure build compatibility.  
40
 * The hard-coded versions here are the highest present 
41
 * in the _cmd_data arrays.
42
 */
43
44
#if !((DM_VERSION_MAJOR == 4 && DM_VERSION_MINOR >= 6))
45
#error The version of dm-ioctl.h included is incompatible.
46
#endif
47
48
/* FIXME This should be exported in device-mapper.h */
49
0
#define DM_NAME "device-mapper"
50
51
0
#define PROC_MISC "/proc/misc"
52
0
#define PROC_DEVICES "/proc/devices"
53
0
#define MISC_NAME "misc"
54
55
0
#define NUMBER_OF_MAJORS 4096
56
57
/*
58
 * Static minor number assigned since kernel version 2.6.36.
59
 * The original definition is in kernel's include/linux/miscdevice.h.
60
 * This number is also visible in modules.devname exported by depmod
61
 * utility (support included in module-init-tools version >= 3.12).
62
 */
63
0
#define MAPPER_CTRL_MINOR 236
64
0
#define MISC_MAJOR 10
65
66
/* dm major version no for running kernel */
static unsigned _dm_version = DM_VERSION_MAJOR;
static unsigned _dm_version_minor = 0;
static unsigned _dm_version_patchlevel = 0;
/* When set, mutes error logging during version probing. */
static int _log_suppress = 0;
/* Shared timestamp object used when ioctl timing is recorded. */
static struct dm_timestamp *_dm_ioctl_timestamp = NULL;

/*
 * If the kernel dm driver only supports one major number
 * we store it in _dm_device_major.  Otherwise we indicate
 * which major numbers have been claimed by device-mapper
 * in _dm_bitset.
 */
static unsigned _dm_multiple_major_support = 1;
static dm_bitset_t _dm_bitset = NULL;
static uint32_t _dm_device_major = 0;

/* File descriptor for the opened control node (-1 when closed). */
static int _control_fd = -1;
static int _hold_control_fd_open = 0;
/* Guard so the kernel driver version is only probed once. */
static int _version_checked = 0;
static int _version_ok = 1;
static unsigned _ioctl_buffer_double_factor = 0;
88
89
/* *INDENT-OFF* */
90
/*
 * Table mapping command names to their ioctl numbers and the minimum
 * dm interface version {major, minor, patch} each requires.
 * Indexed by the DM_DEVICE_* task type.
 */
static const struct cmd_data _cmd_data_v4[] = {
  {"create",  DM_DEV_CREATE,    {4, 0, 0}},
  {"reload",  DM_TABLE_LOAD,    {4, 0, 0}},
  {"remove",  DM_DEV_REMOVE,    {4, 0, 0}},
  {"remove_all",  DM_REMOVE_ALL,    {4, 0, 0}},
  {"suspend", DM_DEV_SUSPEND,   {4, 0, 0}},
  {"resume",  DM_DEV_SUSPEND,   {4, 0, 0}},
  {"info",  DM_DEV_STATUS,    {4, 0, 0}},
  {"deps",  DM_TABLE_DEPS,    {4, 0, 0}},
  {"rename",  DM_DEV_RENAME,    {4, 0, 0}},
  {"version", DM_VERSION,   {4, 0, 0}},
  {"status",  DM_TABLE_STATUS,  {4, 0, 0}},
  {"table", DM_TABLE_STATUS,  {4, 0, 0}},
  {"waitevent", DM_DEV_WAIT,    {4, 0, 0}},
  {"names", DM_LIST_DEVICES,  {4, 0, 0}},
  {"clear", DM_TABLE_CLEAR,   {4, 0, 0}},
  {"mknodes", DM_DEV_STATUS,    {4, 0, 0}},
#ifdef DM_LIST_VERSIONS
  {"versions",  DM_LIST_VERSIONS, {4, 1, 0}},
#endif
#ifdef DM_TARGET_MSG
  {"message", DM_TARGET_MSG,    {4, 2, 0}},
#endif
#ifdef DM_DEV_SET_GEOMETRY
  {"setgeometry", DM_DEV_SET_GEOMETRY,  {4, 6, 0}},
#endif
#ifdef DM_DEV_ARM_POLL
  {"armpoll", DM_DEV_ARM_POLL,  {4, 36, 0}},
#endif
#ifdef DM_GET_TARGET_VERSION
  {"target-version", DM_GET_TARGET_VERSION, {4, 41, 0}},
#endif
};
123
/* *INDENT-ON* */
124
125
0
#define ALIGNMENT 8
126
127
/* FIXME Rejig library to record & use errno instead */
128
#ifndef DM_EXISTS_FLAG
129
0
#  define DM_EXISTS_FLAG 0x00000004
130
#endif
131
132
/* Round ptr up to the next multiple of a (a must be a power of two). */
static char *_align(char *ptr, unsigned int a)
{
  unsigned long mask = (unsigned long) a - 1;

  return (char *) (((unsigned long) ptr + mask) & ~mask);
}
138
139
static unsigned _kernel_major = 0;
140
static unsigned _kernel_minor = 0;
141
static unsigned _kernel_release = 0;
142
143
/*
 * Parse the running kernel's version from uname(2) into
 * _kernel_major/_kernel_minor/_kernel_release.  The result is
 * cached after the first successful call.
 * Returns 1 on success, 0 on failure.
 */
static int _uname(void)
{
  static int _uts_set = 0;
  struct utsname _uts;
  int parts;

  /* Already parsed on an earlier call. */
  if (_uts_set)
    return 1;

  if (uname(&_uts)) {
    log_error("uname failed: %s", strerror(errno));
    return 0;
  }

  parts = sscanf(_uts.release, "%u.%u.%u",
           &_kernel_major, &_kernel_minor, &_kernel_release);

  /* Kernels with a major number of 2 always had 3 parts. */
  if (parts < 1 || (_kernel_major < 3 && parts < 3)) {
    log_error("Could not determine kernel version used.");
    return 0;
  }

  _uts_set = 1;
  return 1;
}
169
170
/*
 * Copy the cached kernel version triple to the caller.
 * Returns 0 (with a stack trace) if the version could not be determined.
 */
int get_uname_version(unsigned *major, unsigned *minor, unsigned *release)
{
  if (!_uname())
    return_0;

  *major = _kernel_major;
  *minor = _kernel_minor;
  *release = _kernel_release;

  return 1;
}
181
182
#ifdef DM_IOCTLS
183
184
/*
185
 * Set number to NULL to populate _dm_bitset - otherwise first
186
 * match is returned.
187
 * Returns:
188
 *  0 - error
189
 *  1 - success - number found
190
 *  2 - success - number not found (only if require_module_loaded=0)
191
 */
192
static int _get_proc_number(const char *file, const char *name,
193
          uint32_t *number, int require_module_loaded)
194
0
{
195
0
  FILE *fl;
196
0
  char nm[256];
197
0
  char *line = NULL;
198
0
  size_t len;
199
0
  uint32_t num;
200
201
0
  if (!(fl = fopen(file, "r"))) {
202
0
    log_sys_error("fopen", file);
203
0
    return 0;
204
0
  }
205
206
0
  while (getline(&line, &len, fl) != -1) {
207
0
    if (sscanf(line, "%u %255s\n", &num, &nm[0]) == 2) {
208
0
      if (!strcmp(name, nm)) {
209
0
        if (number) {
210
0
          *number = num;
211
0
          if (fclose(fl))
212
0
            log_sys_error("fclose", file);
213
0
          free(line);
214
0
          return 1;
215
0
        }
216
0
        dm_bit_set(_dm_bitset, num);
217
0
      }
218
0
    }
219
0
  }
220
0
  if (fclose(fl))
221
0
    log_sys_error("fclose", file);
222
0
  free(line);
223
224
0
  if (number) {
225
0
    if (require_module_loaded) {
226
0
      log_error("%s: No entry for %s found", file, name);
227
0
      return 0;
228
0
    }
229
230
0
    return 2;
231
0
  }
232
233
0
  return 1;
234
0
}
235
236
static int _control_device_number(uint32_t *major, uint32_t *minor)
237
0
{
238
0
  if (!_get_proc_number(PROC_DEVICES, MISC_NAME, major, 1) ||
239
0
      !_get_proc_number(PROC_MISC, DM_NAME, minor, 1)) {
240
0
    *major = 0;
241
0
    return 0;
242
0
  }
243
244
0
  return 1;
245
0
}
246
247
/* Remove a stale control node.  A missing node is not an error. */
static int _control_unlink(const char *control)
{
  if (!unlink(control) || errno == ENOENT)
    return 0;

  log_sys_error("unlink", control);
  return -1;
}
256
257
/*
258
 * Returns 1 if it exists on returning; 0 if it doesn't; -1 if it's wrong.
259
 */
260
/*
 * Returns 1 if it exists on returning; 0 if it doesn't; -1 if it's wrong.
 * A node of the wrong type or with the wrong device number is unlinked.
 */
static int _control_exists(const char *control, uint32_t major, uint32_t minor)
{
  struct stat buf;

  if (stat(control, &buf) < 0) {
    if (errno != ENOENT)
      log_sys_error("stat", control);
    return 0;
  }

  if (!S_ISCHR(buf.st_mode)) {
    log_verbose("%s: Wrong inode type", control);
    return _control_unlink(control);
  }

  if (major && buf.st_rdev != MKDEV(major, minor)) {
    /* Fixed: report the node's device number (st_rdev), not st_mode. */
    log_verbose("%s: Wrong device number: (%u, %u) instead of "
          "(%u, %u)", control,
          MAJOR(buf.st_rdev), MINOR(buf.st_rdev),
          major, minor);
    return _control_unlink(control);
  }

  return 1;
}
285
286
/*
 * Ensure the control char device node exists with the given
 * major/minor, creating the dm directory and the node if needed.
 * Returns 1 on success (or if a correct node already exists), 0 on error.
 */
static int _create_control(const char *control, uint32_t major, uint32_t minor)
{
  int ret;
  mode_t old_umask;

  /*
   * Return if the control already exists with intended major/minor
   * or there's an error unlinking an apparently incorrect one.
   */
  ret = _control_exists(control, major, minor);
  if (ret == -1)
    return_0; /* Failed to unlink existing incorrect node */
  if (ret)
    return 1; /* Already exists and correct */

  /* Create the dm directory with its own umask and selinux context. */
  (void) dm_prepare_selinux_context(dm_dir(), S_IFDIR);
  old_umask = umask(DM_DEV_DIR_UMASK);
  ret = dm_create_dir(dm_dir());
  umask(old_umask);
  (void) dm_prepare_selinux_context(NULL, 0);

  if (!ret)
    return_0;

  log_verbose("Creating device %s (%u, %u)", control, major, minor);

  (void) dm_prepare_selinux_context(control, S_IFCHR);
  old_umask = umask(DM_CONTROL_NODE_UMASK);
  if (mknod(control, S_IFCHR | S_IRUSR | S_IWUSR,
      MKDEV(major, minor)) < 0)  {
    if (errno != EEXIST) {
      log_sys_error("mknod", control);
      ret = 0;
    } else if (_control_exists(control, major, minor) != 1) {
      /* EEXIST is fine only if the existing node is correct. */
      stack; /* Invalid control node created by parallel command ? */
      ret = 0;
    }
  }
  umask(old_umask);
  (void) dm_prepare_selinux_context(NULL, 0);

  return ret;
}
329
#endif
330
331
/*
332
 * FIXME Update bitset in long-running process if dm claims new major numbers.
333
 */
334
/*
335
 * If require_module_loaded=0, caller is responsible to check
336
 * whether _dm_device_major or _dm_bitset is really set. If
337
 * it's not, it means the module is not loaded.
338
 */
339
/*
 * Discover which major number(s) device-mapper has claimed.
 * On single-major kernels the result goes to _dm_device_major;
 * otherwise a bitset of majors is built in _dm_bitset.
 * Returns 1 on success (see header comment above for the
 * require_module_loaded=0 caveat), 0 on error.
 */
static int _create_dm_bitset(int require_module_loaded)
{
  int r;

#ifdef DM_IOCTLS
  /* Already initialised by an earlier call. */
  if (_dm_bitset || _dm_device_major)
    return 1;

  if (!_uname())
    return 0;

  /*
   * 2.6 kernels are limited to one major number.
   * Assume 2.4 kernels are patched not to.
   * FIXME Check _dm_version and _dm_version_minor if 2.6 changes this.
   */
  if (KERNEL_VERSION(_kernel_major, _kernel_minor, _kernel_release) >=
      KERNEL_VERSION(2, 6, 0))
    _dm_multiple_major_support = 0;

  if (!_dm_multiple_major_support) {
    if (!_get_proc_number(PROC_DEVICES, DM_NAME, &_dm_device_major,
              require_module_loaded))
      return 0;
    return 1;
  }

  /* Multiple major numbers supported */
  if (!(_dm_bitset = dm_bitset_create(NULL, NUMBER_OF_MAJORS)))
    return 0;

  /* number=NULL asks _get_proc_number to fill _dm_bitset. */
  r = _get_proc_number(PROC_DEVICES, DM_NAME, NULL, require_module_loaded);
  if (!r || r == 2) {
    dm_bitset_destroy(_dm_bitset);
    _dm_bitset = NULL;
    /*
     * It's not an error if we didn't find anything and we
     * didn't require module to be loaded at the same time.
     */
    return r == 2;
  }

  return 1;
#else
  return 0;
#endif
}
386
387
int dm_is_dm_major(uint32_t major)
388
0
{
389
0
  if (!_create_dm_bitset(0))
390
0
    return 0;
391
392
0
  if (_dm_multiple_major_support) {
393
0
    if (!_dm_bitset)
394
0
      return 0;
395
0
    return dm_bit(_dm_bitset, major) ? 1 : 0;
396
0
  }
397
398
0
  if (!_dm_device_major)
399
0
    return 0;
400
401
0
  return (major == _dm_device_major) ? 1 : 0;
402
0
}
403
404
static void _close_control_fd(void)
405
11.0k
{
406
11.0k
  if (_control_fd != -1) {
407
0
    if (close(_control_fd) < 0)
408
0
      log_sys_debug("close", "_control_fd");
409
0
    _control_fd = -1;
410
0
  }
411
11.0k
}
412
413
#ifdef DM_IOCTLS
414
static int _open_and_assign_control_fd(const char *control)
415
0
{
416
0
  if ((_control_fd = open(control, O_RDWR)) < 0) {
417
0
    log_sys_error("open", control);
418
0
    return 0;
419
0
  }
420
421
0
  return 1;
422
0
}
423
#endif
424
425
/*
 * Open (creating if necessary) the device-mapper control node and
 * initialise the list of dm major numbers.  Idempotent once open.
 * Returns 1 on success, 0 on failure.
 */
static int _open_control(void)
{
#ifdef DM_IOCTLS
  char control[PATH_MAX];
  uint32_t major = MISC_MAJOR;
  uint32_t minor = MAPPER_CTRL_MINOR;

  /* Already open. */
  if (_control_fd != -1)
    return 1;

  if (!_uname())
    return 0;

  if (dm_snprintf(control, sizeof(control), "%s/%s", dm_dir(), DM_CONTROL_NODE) < 0)
    goto_bad;

  /*
   * Prior to 2.6.36 the minor number should be looked up in /proc.
   */
  if ((KERNEL_VERSION(_kernel_major, _kernel_minor, _kernel_release) <
       KERNEL_VERSION(2, 6, 36)) &&
      !_control_device_number(&major, &minor))
    goto_bad;

  /*
   * Create the node with correct major and minor if not already done.
   * Udev may already have created /dev/mapper/control
   * from the modules.devname file generated by depmod.
   */
  if (!_create_control(control, major, minor))
    goto_bad;

  /*
   * As of 2.6.36 kernels, the open can trigger autoloading dm-mod.
   */
  if (!_open_and_assign_control_fd(control))
    goto_bad;

  if (!_create_dm_bitset(1)) {
    log_error("Failed to set up list of device-mapper major numbers");
    return 0;
  }

  return 1;

bad:
  log_error("Failure to communicate with kernel device-mapper driver.");
  if (!geteuid())
    log_error("Check that device-mapper is available in the kernel.");
  return 0;
#else
  return 1;
#endif
}
479
480
/*
 * Zero a string before freeing it (it may hold sensitive table
 * parameters).  The asm barrier prevents the compiler from
 * optimizing the memset of soon-to-be-freed memory away.
 */
static void _dm_zfree_string(char *string)
{
  if (string) {
    memset(string, 0, strlen(string));
    asm volatile ("" ::: "memory"); /* Compiler barrier. */
    dm_free(string);
  }
}
488
489
/*
 * Zero a dm_ioctl buffer before freeing it.  The asm barrier prevents
 * the compiler from eliding the memset of freed memory.
 */
static void _dm_zfree_dmi(struct dm_ioctl *dmi)
{
  if (dmi) {
    memset(dmi, 0, dmi->data_size);
    asm volatile ("" ::: "memory"); /* Compiler barrier. */
    dm_free(dmi);
  }
}
497
498
static void _dm_task_free_targets(struct dm_task *dmt)
499
0
{
500
0
  struct target *t, *n;
501
502
0
  for (t = dmt->head; t; t = n) {
503
0
    n = t->next;
504
0
    _dm_zfree_string(t->params);
505
0
    dm_free(t->type);
506
0
    dm_free(t);
507
0
  }
508
509
0
  dmt->head = dmt->tail = NULL;
510
0
}
511
512
/* Free a dm_task and everything it owns (targets, ioctl buffer, names). */
void dm_task_destroy(struct dm_task *dmt)
{
  _dm_task_free_targets(dmt);
  /* The ioctl buffer is zeroed before release. */
  _dm_zfree_dmi(dmt->dmi.v4);
  dm_free(dmt->dev_name);
  dm_free(dmt->mangled_dev_name);
  dm_free(dmt->newname);
  dm_free(dmt->message);
  dm_free(dmt->geometry);
  dm_free(dmt->uuid);
  dm_free(dmt->mangled_uuid);
  dm_free(dmt);
}
525
526
/*
527
 * Protocol Version 4 functions.
528
 */
529
530
int dm_task_get_driver_version(struct dm_task *dmt, char *version, size_t size)
531
0
{
532
0
  unsigned *v;
533
534
0
  if (!dmt->dmi.v4) {
535
0
    if (version)
536
0
      version[0] = '\0';
537
0
    return 0;
538
0
  }
539
540
0
  v = dmt->dmi.v4->version;
541
0
  _dm_version_minor = v[1];
542
0
  _dm_version_patchlevel = v[2];
543
0
  if (version &&
544
0
      (snprintf(version, size, "%u.%u.%u", v[0], v[1], v[2]) < 0)) {
545
0
    log_error("Buffer for version is to short.");
546
0
    if (size > 0)
547
0
      version[0] = '\0';
548
0
    return 0;
549
0
  }
550
551
0
  return 1;
552
0
}
553
554
/*
 * Run a DM_DEVICE_VERSION task to fetch the kernel driver version
 * string into 'version'.  If log_suppress is set, error logging is
 * muted via the _log_suppress global for the duration of the call.
 * Returns the result of dm_task_run().
 */
static int _check_version(char *version, size_t size, int log_suppress)
{
  struct dm_task *task;
  int r;

  if (!(task = dm_task_create(DM_DEVICE_VERSION))) {
    log_error("Failed to get device-mapper version");
    version[0] = '\0';
    return 0;
  }

  if (log_suppress)
    _log_suppress = 1;

  r = dm_task_run(task);
  /* Copy out whatever version info the task holds, even on failure. */
  if (!dm_task_get_driver_version(task, version, size))
    stack;
  dm_task_destroy(task);
  _log_suppress = 0;

  return r;
}
576
577
/*
578
 * Find out device-mapper's major version number the first time 
579
 * this is called and whether or not we support it.
580
 */
581
int dm_check_version(void)
582
0
{
583
0
  char libversion[64] = "", dmversion[64] = "";
584
0
  const char *compat = "";
585
586
0
  if (_version_checked)
587
0
    return _version_ok;
588
589
0
  _version_checked = 1;
590
591
0
  if (_check_version(dmversion, sizeof(dmversion), 0))
592
0
    return 1;
593
594
0
  dm_get_library_version(libversion, sizeof(libversion));
595
596
0
  log_error("Incompatible libdevmapper %s%s and kernel driver %s.",
597
0
      *libversion ? libversion : "(unknown version)", compat,
598
0
      *dmversion ? dmversion : "(unknown version)");
599
600
0
  _version_ok = 0;
601
0
  return 0;
602
0
}
603
604
int dm_cookie_supported(void)
605
0
{
606
0
  return (dm_check_version() &&
607
0
    ((_dm_version == 4) ? _dm_version_minor >= 15 : _dm_version > 4));
608
0
}
609
610
static int _dm_inactive_supported(void)
611
0
{
612
0
  int inactive_supported = 0;
613
614
0
  if (dm_check_version() && _dm_version >= 4) {
615
0
    if (_dm_version_minor >= 16)
616
0
      inactive_supported = 1; /* upstream */
617
0
    else if (_dm_version_minor == 11 &&
618
0
       (_dm_version_patchlevel >= 6 &&
619
0
        _dm_version_patchlevel <= 40)) {
620
0
      inactive_supported = 1; /* RHEL 5.7 */
621
0
    }
622
0
  }
623
624
0
  return inactive_supported;
625
0
}
626
627
int dm_message_supports_precise_timestamps(void)
628
0
{
629
  /*
630
   * 4.32.0 supports "precise_timestamps" and "histogram:" options
631
   * to @stats_create messages but lacks the ability to report
632
   * these properties via a subsequent @stats_list: require at
633
   * least 4.33.0 in order to use these features.
634
   */
635
0
  if (dm_check_version() && _dm_version >= 4)
636
0
    if (_dm_version_minor >= 33)
637
0
      return 1;
638
0
  return 0;
639
0
}
640
641
void *dm_get_next_target(struct dm_task *dmt, void *next,
642
       uint64_t *start, uint64_t *length,
643
       char **target_type, char **params)
644
0
{
645
0
  struct target *t = (struct target *) next;
646
647
0
  if (!t)
648
0
    t = dmt->head;
649
650
0
  if (!t) {
651
0
    *start = 0;
652
0
    *length = 0;
653
0
    *target_type = 0;
654
0
    *params = 0;
655
0
    return NULL;
656
0
  }
657
658
0
  *start = t->start;
659
0
  *length = t->length;
660
0
  *target_type = t->type;
661
0
  *params = t->params;
662
663
0
  return t->next;
664
0
}
665
666
/* Unmarshall the target info returned from a status call */
667
/* Unmarshall the target info returned from a status call */
static int _unmarshal_status(struct dm_task *dmt, struct dm_ioctl *dmi)
{
  char *outbuf = (char *) dmi + dmi->data_start;
  char *outptr = outbuf;
  uint32_t i;
  struct dm_target_spec *spec;

  /* Replace any previously parsed target list. */
  _dm_task_free_targets(dmt);

  for (i = 0; i < dmi->target_count; i++) {
    spec = (struct dm_target_spec *) outptr;
    /* The params string follows each spec header in the buffer. */
    if (!dm_task_add_target(dmt, spec->sector_start,
          spec->length,
          spec->target_type,
          outptr + sizeof(*spec))) {
      return 0;
    }

    /* spec->next is an offset from the start of the data area. */
    outptr = outbuf + spec->next;
  }

  return 1;
}
690
691
/* Format "major:minor" into buf; returns 0 if the buffer is too small. */
int dm_format_dev(char *buf, int bufsize, uint32_t dev_major,
      uint32_t dev_minor)
{
  int written;

  if (bufsize < 8)
    return 0;

  written = snprintf(buf, (size_t) bufsize, "%u:%u", dev_major, dev_minor);

  return (written < 0 || written > bufsize - 1) ? 0 : 1;
}
705
706
/*
 * Decode the flags and counters from the last ioctl result into *info.
 * Returns 0 if no ioctl result is present; otherwise 1 with
 * info->exists indicating whether the device exists.
 */
DM_EXPORT_NEW_SYMBOL(int, dm_task_get_info, 1_02_97)
  (struct dm_task *dmt, struct dm_info *info)
{
  if (!dmt->dmi.v4)
    return 0;

  memset(info, 0, sizeof(*info));

  info->exists = dmt->dmi.v4->flags & DM_EXISTS_FLAG ? 1 : 0;
  if (!info->exists)
    return 1;

  info->suspended = dmt->dmi.v4->flags & DM_SUSPEND_FLAG ? 1 : 0;
  info->read_only = dmt->dmi.v4->flags & DM_READONLY_FLAG ? 1 : 0;
  info->live_table = dmt->dmi.v4->flags & DM_ACTIVE_PRESENT_FLAG ? 1 : 0;
  info->inactive_table = dmt->dmi.v4->flags & DM_INACTIVE_PRESENT_FLAG ?
      1 : 0;
  /* NOTE(review): unlike the other flags this one is not normalized
   * to 0/1 - callers may rely on truthiness only; confirm before changing. */
  info->deferred_remove = dmt->dmi.v4->flags & DM_DEFERRED_REMOVE;
  info->internal_suspend = (dmt->dmi.v4->flags & DM_INTERNAL_SUSPEND_FLAG) ? 1 : 0;
  info->target_count = dmt->dmi.v4->target_count;
  info->open_count = dmt->dmi.v4->open_count;
  info->event_nr = dmt->dmi.v4->event_nr;
  info->major = MAJOR(dmt->dmi.v4->dev);
  info->minor = MINOR(dmt->dmi.v4->dev);

  return 1;
}
733
734
uint32_t dm_task_get_read_ahead(const struct dm_task *dmt, uint32_t *read_ahead)
735
0
{
736
0
  const char *dev_name;
737
738
0
  *read_ahead = 0;
739
740
0
  if (!dmt->dmi.v4 || !(dmt->dmi.v4->flags & DM_EXISTS_FLAG))
741
0
    return 0;
742
743
0
  if (*dmt->dmi.v4->name)
744
0
    dev_name = dmt->dmi.v4->name;
745
0
  else if (!(dev_name = DEV_NAME(dmt))) {
746
0
    log_error("Get read ahead request failed: device name unrecorded.");
747
0
    return 0;
748
0
  }
749
750
0
  return get_dev_node_read_ahead(dev_name, MAJOR(dmt->dmi.v4->dev),
751
0
               MINOR(dmt->dmi.v4->dev), read_ahead);
752
0
}
753
754
struct dm_deps *dm_task_get_deps(struct dm_task *dmt)
755
0
{
756
0
  if (!dmt) {
757
0
    log_error(INTERNAL_ERROR "Missing dm_task.");
758
0
    return NULL;
759
0
  }
760
761
0
  return (struct dm_deps *) (((char *) dmt->dmi.v4) +
762
0
           dmt->dmi.v4->data_start);
763
0
}
764
765
/*
766
 * Round up the ptr to an 8-byte boundary.
767
 * Follow kernel pattern.
768
 */
769
0
#define ALIGN_MASK 7
/* Round val up to the next 8-byte boundary. */
static size_t _align_val(size_t val)
{
  return (val + ALIGN_MASK) & ~((size_t) ALIGN_MASK);
}
/* Round ptr up to the next 8-byte boundary. */
static void *_align_ptr(void *ptr)
{
  return (void *) _align_val((size_t) ptr);
}
778
779
0
static int _check_has_event_nr(void) {
780
0
  static int _has_event_nr = -1;
781
782
0
  if (_has_event_nr < 0)
783
0
    _has_event_nr = dm_check_version() &&
784
0
      ((_dm_version == 4) ?  _dm_version_minor >= 38 : _dm_version > 4);
785
786
0
  return _has_event_nr;
787
0
}
788
789
struct dm_names *dm_task_get_names(struct dm_task *dmt)
790
0
{
791
0
  return (struct dm_names *) (((char *) dmt->dmi.v4) +
792
0
            dmt->dmi.v4->data_start);
793
0
}
794
795
struct dm_versions *dm_task_get_versions(struct dm_task *dmt)
796
0
{
797
0
  return (struct dm_versions *) (((char *) dmt->dmi.v4) +
798
0
               dmt->dmi.v4->data_start);
799
0
}
800
801
const char *dm_task_get_message_response(struct dm_task *dmt)
802
0
{
803
0
  const char *start, *end;
804
805
0
  if (!(dmt->dmi.v4->flags & DM_DATA_OUT_FLAG))
806
0
    return NULL;
807
808
0
  start = (const char *) dmt->dmi.v4 + dmt->dmi.v4->data_start;
809
0
  end = (const char *) dmt->dmi.v4 + dmt->dmi.v4->data_size;
810
811
0
  if (end < start) {
812
0
    log_error(INTERNAL_ERROR "Corrupted message structure returned: start %d > end %d", (int)dmt->dmi.v4->data_start, (int)dmt->dmi.v4->data_size);
813
0
    return NULL;
814
0
  }
815
816
0
  if (!memchr(start, 0, end - start)) {
817
0
    log_error(INTERNAL_ERROR "Message response doesn't contain terminating NUL character");
818
0
    return NULL;
819
0
  }
820
821
0
  return start;
822
0
}
823
824
/* Set the read-only flag on the task.  Always returns 1. */
int dm_task_set_ro(struct dm_task *dmt)
{
  dmt->read_only = 1;
  return 1;
}
829
830
/* Record the requested read-ahead value and flags on the task. */
int dm_task_set_read_ahead(struct dm_task *dmt, uint32_t read_ahead,
         uint32_t read_ahead_flags)
{
  dmt->read_ahead = read_ahead;
  dmt->read_ahead_flags = read_ahead_flags;

  return 1;
}
838
839
/* Set the suppress_identical_reload flag on the task. */
int dm_task_suppress_identical_reload(struct dm_task *dmt)
{
  dmt->suppress_identical_reload = 1;
  return 1;
}
844
845
/*
 * Choose when the device node is added (on resume or on create).
 * Rejects any other value.
 */
int dm_task_set_add_node(struct dm_task *dmt, dm_add_node_t add_node)
{
  if (add_node != DM_ADD_NODE_ON_RESUME &&
      add_node != DM_ADD_NODE_ON_CREATE) {
    log_error("Unknown add node parameter");
    return 0;
  }

  dmt->add_node = add_node;
  return 1;
}
857
858
/*
 * Set the new UUID for a rename-style operation, mangling it first if
 * name mangling is enabled.  A copy is stored in dmt->newname and
 * dmt->new_uuid distinguishes this from a plain rename.
 */
int dm_task_set_newuuid(struct dm_task *dmt, const char *newuuid)
{
  dm_string_mangling_t mangling_mode = dm_get_name_mangling_mode();
  char mangled_uuid[DM_UUID_LEN];
  int r = 0;

  if (strlen(newuuid) >= DM_UUID_LEN) {
    log_error("Uuid \"%s\" too long", newuuid);
    return 0;
  }

  if (!check_multiple_mangled_string_allowed(newuuid, "new UUID", mangling_mode))
    return_0;

  /* r < 0 is a mangling failure; r > 0 means the string changed. */
  if (mangling_mode != DM_STRING_MANGLING_NONE &&
      (r = mangle_string(newuuid, "new UUID", strlen(newuuid), mangled_uuid,
             sizeof(mangled_uuid), mangling_mode)) < 0) {
    log_error("Failed to mangle new device UUID \"%s\"", newuuid);
    return 0;
  }

  if (r) {
    log_debug_activation("New device uuid mangled [%s]: %s --> %s",
             mangling_mode == DM_STRING_MANGLING_AUTO ? "auto" : "hex",
             newuuid, mangled_uuid);
    newuuid = mangled_uuid;
  }

  /* Replace any previous pending rename/uuid change. */
  dm_free(dmt->newname);
  if (!(dmt->newname = dm_strdup(newuuid))) {
    log_error("dm_task_set_newuuid: strdup(%s) failed", newuuid);
    return 0;
  }
  dmt->new_uuid = 1;

  return 1;
}
895
896
int dm_task_set_message(struct dm_task *dmt, const char *message)
897
0
{
898
0
  dm_free(dmt->message);
899
0
  if (!(dmt->message = dm_strdup(message))) {
900
0
    log_error("dm_task_set_message: strdup failed");
901
0
    return 0;
902
0
  }
903
904
0
  return 1;
905
0
}
906
907
/* Record the sector value on the task.  Always returns 1. */
int dm_task_set_sector(struct dm_task *dmt, uint64_t sector)
{
  dmt->sector = sector;

  return 1;
}
913
914
int dm_task_set_geometry(struct dm_task *dmt, const char *cylinders, const char *heads,
915
       const char *sectors, const char *start)
916
0
{
917
0
  dm_free(dmt->geometry);
918
0
  if (dm_asprintf(&(dmt->geometry), "%s %s %s %s",
919
0
      cylinders, heads, sectors, start) < 0) {
920
0
    log_error("dm_task_set_geometry: sprintf failed");
921
0
    return 0;
922
0
  }
923
924
0
  return 1;
925
0
}
926
927
/* Set the no_flush flag on the task.  Always returns 1. */
int dm_task_no_flush(struct dm_task *dmt)
{
  dmt->no_flush = 1;

  return 1;
}
933
934
/* Set the no_open_count flag on the task.  Always returns 1. */
int dm_task_no_open_count(struct dm_task *dmt)
{
  dmt->no_open_count = 1;

  return 1;
}
940
941
/* Set the skip_lockfs flag on the task.  Always returns 1. */
int dm_task_skip_lockfs(struct dm_task *dmt)
{
  dmt->skip_lockfs = 1;

  return 1;
}
947
948
/* Set the secure_data flag on the task.  Always returns 1. */
int dm_task_secure_data(struct dm_task *dmt)
{
  dmt->secure_data = 1;

  return 1;
}
954
955
/* Set the ima_measurement flag on the task.  Always returns 1. */
int dm_task_ima_measurement(struct dm_task *dmt)
{
  dmt->ima_measurement = 1;

  return 1;
}
961
962
/* Set the retry_remove flag on the task.  Always returns 1. */
int dm_task_retry_remove(struct dm_task *dmt)
{
  dmt->retry_remove = 1;

  return 1;
}
968
969
/* Set the deferred_remove flag on the task.  Always returns 1. */
int dm_task_deferred_remove(struct dm_task *dmt)
{
  dmt->deferred_remove = 1;

  return 1;
}
975
976
/* Set the query_inactive_table flag on the task.  Always returns 1. */
int dm_task_query_inactive_table(struct dm_task *dmt)
{
  dmt->query_inactive_table = 1;

  return 1;
}
982
983
/* Record the event number on the task.  Always returns 1. */
int dm_task_set_event_nr(struct dm_task *dmt, uint32_t event_nr)
{
  dmt->event_nr = event_nr;

  return 1;
}
989
990
int dm_task_set_record_timestamp(struct dm_task *dmt)
991
0
{
992
0
  if (!_dm_ioctl_timestamp)
993
0
    _dm_ioctl_timestamp = dm_timestamp_alloc();
994
995
0
  if (!_dm_ioctl_timestamp)
996
0
    return_0;
997
998
0
  dmt->record_timestamp = 1;
999
1000
0
  return 1;
1001
0
}
1002
1003
struct dm_timestamp *dm_task_get_ioctl_timestamp(struct dm_task *dmt)
1004
0
{
1005
0
  return dmt->record_timestamp ? _dm_ioctl_timestamp : NULL;
1006
0
}
1007
1008
struct target *create_target(uint64_t start, uint64_t len, const char *type,
1009
           const char *params)
1010
0
{
1011
0
  struct target *t;
1012
1013
0
  if (strlen(type) >= DM_MAX_TYPE_NAME) {
1014
0
    log_error("Target type name %s is too long.", type);
1015
0
    return NULL;
1016
0
  }
1017
1018
0
  if (!(t = dm_zalloc(sizeof(*t)))) {
1019
0
    log_error("create_target: malloc(%" PRIsize_t ") failed",
1020
0
        sizeof(*t));
1021
0
    return NULL;
1022
0
  }
1023
1024
0
  if (!(t->params = dm_strdup(params))) {
1025
0
    log_error("create_target: strdup(params) failed");
1026
0
    goto bad;
1027
0
  }
1028
1029
0
  if (!(t->type = dm_strdup(type))) {
1030
0
    log_error("create_target: strdup(type) failed");
1031
0
    goto bad;
1032
0
  }
1033
1034
0
  t->start = start;
1035
0
  t->length = len;
1036
0
  return t;
1037
1038
0
      bad:
1039
0
  _dm_zfree_string(t->params);
1040
0
  dm_free(t->type);
1041
0
  dm_free(t);
1042
0
  return NULL;
1043
0
}
1044
1045
/*
 * Serialise one target into the ioctl buffer at 'out' (bounded by
 * 'end'): a struct dm_target_spec header followed by the params
 * string with backslashes escaped, padded to an 8-byte boundary.
 * Returns the next write position, or NULL on overflow/bad type.
 */
static char *_add_target(struct target *t, char *out, char *end)
{
  char *out_sp = out;
  struct dm_target_spec sp;
  size_t sp_size = sizeof(struct dm_target_spec);
  unsigned int backslash_count = 0;
  int len;
  char *pt;

  if (strlen(t->type) >= sizeof(sp.target_type)) {
    log_error("Target type name %s is too long.", t->type);
    return NULL;
  }

  sp.status = 0;
  sp.sector_start = t->start;
  sp.length = t->length;
  strncpy(sp.target_type, t->type, sizeof(sp.target_type) - 1);
  sp.target_type[sizeof(sp.target_type) - 1] = '\0';

  /* Params are written directly after the spec header. */
  out += sp_size;
  pt = t->params;

  /* Each backslash doubles in size when escaped below. */
  while (*pt)
    if (*pt++ == '\\')
      backslash_count++;
  len = strlen(t->params) + backslash_count;

  if ((out >= end) || (out + len + 1) >= end) {
    log_error("Ran out of memory building ioctl parameter");
    return NULL;
  }

  if (backslash_count) {
    /* replace "\" with "\\" */
    pt = t->params;
    do {
      if (*pt == '\\')
        *out++ = '\\';
      *out++ = *pt++;
    } while (*pt);
    *out++ = '\0';
  }
  else {
    strcpy(out, t->params);
    out += len + 1;
  }

  /* align next block */
  out = _align(out, ALIGNMENT);

  /* spec.next is the offset of the following spec from this one. */
  sp.next = out - out_sp;
  /* Header is written last, once sp.next is known. */
  memcpy(out_sp, &sp, sp_size);

  return out;
}
1101
1102
static int _lookup_dev_name(uint64_t dev, char *buf, size_t len)
1103
0
{
1104
0
  struct dm_names *names;
1105
0
  unsigned next = 0;
1106
0
  struct dm_task *dmt;
1107
0
  int r = 0;
1108
 
1109
0
  if (!(dmt = dm_task_create(DM_DEVICE_LIST)))
1110
0
    return 0;
1111
 
1112
0
  if (!dm_task_run(dmt))
1113
0
    goto out;
1114
1115
0
  if (!(names = dm_task_get_names(dmt)))
1116
0
    goto out;
1117
 
1118
0
  if (!names->dev)
1119
0
    goto out;
1120
 
1121
0
  do {
1122
0
    names = (struct dm_names *)((char *) names + next);
1123
0
    if (names->dev == dev) {
1124
0
      memccpy(buf, names->name, 0, len);
1125
0
      r = 1;
1126
0
      break;
1127
0
    }
1128
0
    next = names->next;
1129
0
  } while (next);
1130
1131
0
      out:
1132
0
  dm_task_destroy(dmt);
1133
0
  return r;
1134
0
}
1135
1136
static int _add_params(int type)
1137
0
{
1138
0
  switch (type) {
1139
0
  case DM_DEVICE_REMOVE_ALL:
1140
0
  case DM_DEVICE_CREATE:
1141
0
  case DM_DEVICE_REMOVE:
1142
0
  case DM_DEVICE_SUSPEND:
1143
0
  case DM_DEVICE_STATUS:
1144
0
  case DM_DEVICE_CLEAR:
1145
0
  case DM_DEVICE_ARM_POLL:
1146
0
    return 0; /* IOCTL_FLAGS_NO_PARAMS in drivers/md/dm-ioctl.c */
1147
0
  default:
1148
0
    return 1;
1149
0
  }
1150
0
}
1151
1152
/*
 * Serialise a dm_task into a single contiguous struct dm_ioctl buffer
 * suitable for passing to the kernel: header, then per-target
 * dm_target_spec records (via _add_target), then any newname, message
 * or geometry string appended after the header.
 *
 * 'repeat_count' doubles the buffer size once per previous retry, used
 * when the kernel reported the buffer was too small.
 *
 * Returns a dm_zalloc'd buffer the caller must free (_dm_zfree_dmi),
 * or NULL on invalid option combinations or allocation failure.
 */
static struct dm_ioctl *_flatten(struct dm_task *dmt, unsigned repeat_count)
{
	const size_t min_size = 16 * 1024;
	const int (*version)[3];

	struct dm_ioctl *dmi;
	struct target *t;
	struct dm_target_msg *tmsg;
	size_t len = sizeof(struct dm_ioctl);
	char *b, *e;
	int count = 0;

	/* Size the payload: one spec + params (padded for alignment) per target. */
	if (_add_params(dmt->type))
		for (t = dmt->head; t; t = t->next) {
			len += sizeof(struct dm_target_spec);
			len += strlen(t->params) + 1 + ALIGNMENT;
			count++;
		}
	else if (dmt->head)
		log_debug_activation(INTERNAL_ERROR "dm '%s' ioctl should not define parameters.",
				     _cmd_data_v4[dmt->type].name);

	/* The trailing payload area is single-purpose: targets, message,
	 * rename and geometry are mutually exclusive. */
	if (count && (dmt->sector || dmt->message)) {
		log_error("targets and message are incompatible");
		return NULL;
	}

	if (count && dmt->newname) {
		log_error("targets and rename are incompatible");
		return NULL;
	}

	if (count && dmt->geometry) {
		log_error("targets and geometry are incompatible");
		return NULL;
	}

	if (dmt->newname && (dmt->sector || dmt->message)) {
		log_error("message and rename are incompatible");
		return NULL;
	}

	if (dmt->newname && dmt->geometry) {
		log_error("geometry and rename are incompatible");
		return NULL;
	}

	if (dmt->geometry && (dmt->sector || dmt->message)) {
		log_error("geometry and message are incompatible");
		return NULL;
	}

	if (dmt->sector && !dmt->message) {
		log_error("message is required with sector");
		return NULL;
	}

	if (dmt->newname)
		len += strlen(dmt->newname) + 1;

	if (dmt->message)
		len += sizeof(struct dm_target_msg) + strlen(dmt->message) + 1;

	if (dmt->geometry)
		len += strlen(dmt->geometry) + 1;

	/*
	 * Give len a minimum size so that we have space to store
	 * dependencies or status information.
	 */
	if (len < min_size)
		len = min_size;

	/* Increase buffer size if repeating because buffer was too small */
	while (repeat_count--)
		len *= 2;

	if (!(dmi = dm_zalloc(len)))
		return NULL;

	/* Stamp the per-command minimum interface version. */
	version = &_cmd_data_v4[dmt->type].version;

	dmi->version[0] = (*version)[0];
	dmi->version[1] = (*version)[1];
	dmi->version[2] = (*version)[2];

	dmi->data_size = len;
	dmi->data_start = sizeof(struct dm_ioctl);

	if (dmt->minor >= 0) {
		/* Without multi-major support all dm devices share one major. */
		if (!_dm_multiple_major_support && dmt->allow_default_major_fallback &&
		    dmt->major != (int) _dm_device_major) {
			log_verbose("Overriding major number of %d "
				    "with %u for persistent device.",
				    dmt->major, _dm_device_major);
			dmt->major = _dm_device_major;
		}

		if (dmt->major <= 0) {
			log_error("Missing major number for persistent device.");
			goto bad;
		}

		dmi->flags |= DM_PERSISTENT_DEV_FLAG;
		dmi->dev = MKDEV(dmt->major, dmt->minor);
	}

	/* Does driver support device number referencing? */
	if (_dm_version_minor < 3 && !DEV_NAME(dmt) && !DEV_UUID(dmt) && dmi->dev) {
		/* Old kernels need a name, so look one up from the dev number. */
		if (!_lookup_dev_name(dmi->dev, dmi->name, sizeof(dmi->name))) {
			log_error("Unable to find name for device (%" PRIu32
				  ":%" PRIu32 ")", dmt->major, dmt->minor);
			goto bad;
		}
		log_verbose("device (%" PRIu32 ":%" PRIu32 ") is %s "
			    "for compatibility with old kernel",
			    dmt->major, dmt->minor, dmi->name);
	}

	/* FIXME Until resume ioctl supplies name, use dev_name for readahead */
	if (DEV_NAME(dmt) &&
	    (((dmt->type != DM_DEVICE_RESUME) &&
	      (dmt->type != DM_DEVICE_RELOAD)) ||
	     (dmt->minor < 0) || (dmt->major < 0)))
		/* When RESUME or RELOAD sets maj:min and dev_name, use just maj:min,
		 * passed dev_name is useful for better error/debug messages */
		memccpy(dmi->name, DEV_NAME(dmt), 0, sizeof(dmi->name));

	if (DEV_UUID(dmt))
		memccpy(dmi->uuid, DEV_UUID(dmt), 0, sizeof(dmi->uuid));

	/* Translate the task's option bits into kernel ioctl flags. */
	if (dmt->type == DM_DEVICE_SUSPEND)
		dmi->flags |= DM_SUSPEND_FLAG;
	if (dmt->no_flush) {
		if (_dm_version_minor < 12)
			log_verbose("No flush flag unsupported by kernel. "
				    "Buffers will be flushed.");
		else
			dmi->flags |= DM_NOFLUSH_FLAG;
	}
	if (dmt->read_only)
		dmi->flags |= DM_READONLY_FLAG;
	if (dmt->skip_lockfs)
		dmi->flags |= DM_SKIP_LOCKFS_FLAG;
	if (dmt->deferred_remove && (dmt->type == DM_DEVICE_REMOVE || dmt->type == DM_DEVICE_REMOVE_ALL))
		dmi->flags |= DM_DEFERRED_REMOVE;

	if (dmt->secure_data) {
		if (_dm_version_minor < 20)
			log_verbose("Secure data flag unsupported by kernel. "
				    "Buffers will not be wiped after use.");
		dmi->flags |= DM_SECURE_DATA_FLAG;
	}
	if (dmt->query_inactive_table) {
		if (!_dm_inactive_supported())
			log_warn("WARNING: Inactive table query unsupported "
				 "by kernel.  It will use live table.");
		dmi->flags |= DM_QUERY_INACTIVE_TABLE_FLAG;
	}
	if (dmt->new_uuid) {
		if (_dm_version_minor < 19) {
			log_error("WARNING: Setting UUID unsupported by "
				  "kernel.  Aborting operation.");
			goto bad;
		}
		dmi->flags |= DM_UUID_FLAG;
	}
	if (dmt->ima_measurement) {
		if (_dm_version_minor < 45) {
			log_error("WARNING: IMA measurement unsupported by "
				  "kernel.  Aborting operation.");
			goto bad;
		}
		dmi->flags |= DM_IMA_MEASUREMENT_FLAG;
	}

	dmi->target_count = count;
	dmi->event_nr = dmt->event_nr;

	/* Payload area starts just after the header and ends at len. */
	b = (char *) (dmi + 1);
	e = (char *) dmi + len;

	if (_add_params(dmt->type))
		for (t = dmt->head; t; t = t->next)
			if (!(b = _add_target(t, b, e)))
				goto_bad;

	if (dmt->newname)
		strcpy(b, dmt->newname);

	if (dmt->message) {
		tmsg = (struct dm_target_msg *) b;
		tmsg->sector = dmt->sector;
		strcpy(tmsg->message, dmt->message);
	}

	if (dmt->geometry)
		strcpy(b, dmt->geometry);

	return dmi;

      bad:
	_dm_zfree_dmi(dmi);
	return NULL;
}
1357
1358
static int _process_mapper_dir(struct dm_task *dmt)
1359
0
{
1360
0
  struct dirent *dirent;
1361
0
  DIR *d;
1362
0
  const char *dir;
1363
0
  int r = 1;
1364
1365
0
  dir = dm_dir();
1366
0
  if (!(d = opendir(dir))) {
1367
0
    log_sys_error("opendir", dir);
1368
0
    return 0;
1369
0
  }
1370
1371
0
  while ((dirent = readdir(d))) {
1372
0
    if (!strcmp(dirent->d_name, ".") ||
1373
0
        !strcmp(dirent->d_name, "..") ||
1374
0
        !strcmp(dirent->d_name, "control"))
1375
0
      continue;
1376
0
    if (!dm_task_set_name(dmt, dirent->d_name)) {
1377
0
      r = 0;
1378
0
      stack;
1379
0
      continue; /* try next name */
1380
0
    }
1381
0
    if (!dm_task_run(dmt)) {
1382
0
      r = 0;
1383
0
      stack;  /* keep going */
1384
0
    }
1385
0
  }
1386
1387
0
  if (closedir(d))
1388
0
    log_sys_debug("closedir", dir);
1389
1390
0
  return r;
1391
0
}
1392
1393
static int _process_all_v4(struct dm_task *dmt)
1394
0
{
1395
0
  struct dm_task *task;
1396
0
  struct dm_names *names;
1397
0
  unsigned next = 0;
1398
0
  int r = 1;
1399
1400
0
  if (!(task = dm_task_create(DM_DEVICE_LIST)))
1401
0
    return 0;
1402
1403
0
  if (!dm_task_run(task)) {
1404
0
    r = 0;
1405
0
    goto out;
1406
0
  }
1407
1408
0
  if (!(names = dm_task_get_names(task))) {
1409
0
    r = 0;
1410
0
    goto out;
1411
0
  }
1412
1413
0
  if (!names->dev)
1414
0
    goto out;
1415
1416
0
  do {
1417
0
    names = (struct dm_names *)((char *) names + next);
1418
0
    if (!dm_task_set_name(dmt, names->name)) {
1419
0
      r = 0;
1420
0
      goto out;
1421
0
    }
1422
0
    if (!dm_task_run(dmt))
1423
0
      r = 0;
1424
0
    next = names->next;
1425
0
  } while (next);
1426
1427
0
      out:
1428
0
  dm_task_destroy(task);
1429
0
  return r;
1430
0
}
1431
1432
/*
 * Run the mknodes-style task first over the entries already present in
 * the dm directory (result deliberately ignored: best effort), then over
 * every device the kernel lists, so nodes get created and stale ones
 * handled.  Returns the result of the kernel-list pass.
 */
static int _mknodes_v4(struct dm_task *dmt)
{
	(void) _process_mapper_dir(dmt);

	return _process_all_v4(dmt);
}
1438
1439
/*
1440
 * If an operation that uses a cookie fails, decrement the
1441
 * semaphore instead of udev.
1442
 */
1443
static int _udev_complete(struct dm_task *dmt)
1444
0
{
1445
0
  uint16_t base;
1446
1447
0
  if (dmt->cookie_set &&
1448
0
      (base = dmt->event_nr & ~DM_UDEV_FLAGS_MASK))
1449
    /* strip flags from the cookie and use cookie magic instead */
1450
0
    return dm_udev_complete(base | (DM_COOKIE_MAGIC <<
1451
0
            DM_UDEV_FLAGS_SHIFT));
1452
1453
0
  return 1;
1454
0
}
1455
1456
#ifdef DM_IOCTLS
1457
static int _check_uevent_generated(struct dm_ioctl *dmi)
1458
0
{
1459
0
  if (!dm_check_version() ||
1460
0
      ((_dm_version == 4) ? _dm_version_minor < 17 : _dm_version < 4))
1461
    /* can't check, assume uevent is generated */
1462
0
    return 1;
1463
1464
0
  return dmi->flags & DM_UEVENT_GENERATED_FLAG;
1465
0
}
1466
#endif
1467
1468
/*
 * Old-style "create with table" implemented as three kernel calls:
 * CREATE (fresh task), RELOAD (fresh task, borrowing dmt's target list),
 * then RESUME reusing the original dmt so its info reflects the final
 * state.  On failure after creation, the device is reverted with a
 * udev-synchronized REMOVE.  Returns 1 on success, 0 on failure (with
 * dmt->ioctl_errno carrying the first failing ioctl's errno).
 */
static int _create_and_load_v4(struct dm_task *dmt)
{
	struct dm_info info;
	struct dm_task *task;
	int r, ioctl_errno = 0;
	uint32_t cookie;

	/* Use new task struct to create the device */
	if (!(task = dm_task_create(DM_DEVICE_CREATE))) {
		_udev_complete(dmt);
		return_0;
	}

	/* Copy across relevant fields */
	if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name))
		goto_bad;

	if (dmt->uuid && !dm_task_set_uuid(task, dmt->uuid))
		goto_bad;

	task->major = dmt->major;
	task->minor = dmt->minor;
	task->uid = dmt->uid;
	task->gid = dmt->gid;
	task->mode = dmt->mode;
	/* FIXME: Just for udev_check in dm_task_run. Can we avoid this? */
	task->event_nr = dmt->event_nr & DM_UDEV_FLAGS_MASK;
	task->cookie_set = dmt->cookie_set;
	task->add_node = dmt->add_node;

	if (!dm_task_run(task)) {
		/* Preserve the create errno for the caller. */
		ioctl_errno = task->ioctl_errno;
		goto_bad;
	}

	if (!dm_task_get_info(task, &info) || !info.exists)
		goto_bad;

	dm_task_destroy(task);

	/* Next load the table */
	if (!(task = dm_task_create(DM_DEVICE_RELOAD))) {
		stack;
		_udev_complete(dmt);
		goto revert;
	}

	/* Copy across relevant fields */
	if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) {
		stack;
		dm_task_destroy(task);
		_udev_complete(dmt);
		goto revert;
	}

	/* Address the just-created device by the numbers CREATE returned. */
	task->major = info.major;
	task->minor = info.minor;
	task->read_only = dmt->read_only;
	task->head = dmt->head;
	task->tail = dmt->tail;
	task->secure_data = dmt->secure_data;
	task->ima_measurement = dmt->ima_measurement;

	r = dm_task_run(task);
	if (!r)
		ioctl_errno = task->ioctl_errno;

	/* The target list is borrowed from dmt; detach before destroy
	 * so it is not freed twice. */
	task->head = NULL;
	task->tail = NULL;
	dm_task_destroy(task);

	if (!r) {
		stack;
		_udev_complete(dmt);
		goto revert;
	}

	/* Use the original structure last so the info will be correct */
	dmt->type = DM_DEVICE_RESUME;
	dm_free(dmt->uuid);
	dmt->uuid = NULL;
	dm_free(dmt->mangled_uuid);
	dmt->mangled_uuid = NULL;
	/* coverity[double_free] recursive function call */
	_dm_task_free_targets(dmt);

	if (dm_task_run(dmt))
		return 1;

      revert:
	/* Reuse dmt as a REMOVE task to undo the partial creation. */
	dmt->type = DM_DEVICE_REMOVE;
	dm_free(dmt->uuid);
	dmt->uuid = NULL;
	dm_free(dmt->mangled_uuid);
	dmt->mangled_uuid = NULL;
	/* coverity[double_free] recursive function call */
	_dm_task_free_targets(dmt);

	/*
	 * Also udev-synchronize "remove" dm task that is a part of this revert!
	 * But only if the original dm task was supposed to be synchronized.
	 */
	if (dmt->cookie_set) {
		cookie = (dmt->event_nr & ~DM_UDEV_FLAGS_MASK) |
			 (DM_COOKIE_MAGIC << DM_UDEV_FLAGS_SHIFT);
		if (!dm_task_set_cookie(dmt, &cookie,
					(dmt->event_nr & DM_UDEV_FLAGS_MASK) >>
					DM_UDEV_FLAGS_SHIFT))
			stack; /* keep going */
	}

	if (!dm_task_run(dmt))
		log_error("Failed to revert device creation.");

	/* Report the original failure, not the revert's. */
	if (ioctl_errno != 0)
		dmt->ioctl_errno =  ioctl_errno;

	return 0;

      bad:
	dm_task_destroy(task);
	_udev_complete(dmt);

	if (ioctl_errno != 0)
		dmt->ioctl_errno =  ioctl_errno;

	return 0;
}
1596
1597
/*
 * Return the size (in sectors) of the table already loaded in the kernel,
 * as recorded by _reload_with_suppression_v4 (end of the last target:
 * start + length), or 0 if no table was seen.
 */
uint64_t dm_task_get_existing_table_size(struct dm_task *dmt)
{
	return dmt->existing_table_size;
}
1601
1602
/*
 * RELOAD that first fetches the live table (DM_DEVICE_TABLE) and, when
 * the new table is identical target-by-target (start, length, type,
 * params after trailing-space trim) and the read-only flag matches,
 * skips the kernel reload entirely, adopting the TABLE task's result
 * buffer as dmt's own.  Also records the existing table size.
 * Returns 1 on success (suppressed or reloaded), 0 on failure.
 */
static int _reload_with_suppression_v4(struct dm_task *dmt)
{
	struct dm_task *task;
	struct target *t1, *t2;
	size_t len;
	int r;

	/* New task to get existing table information */
	if (!(task = dm_task_create(DM_DEVICE_TABLE))) {
		log_error("Failed to create device-mapper task struct");
		return 0;
	}

	/* Copy across relevant fields */
	if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) {
		dm_task_destroy(task);
		return 0;
	}

	if (dmt->uuid && !dm_task_set_uuid(task, dmt->uuid)) {
		dm_task_destroy(task);
		return 0;
	}

	task->major = dmt->major;
	task->minor = dmt->minor;

	r = dm_task_run(task);

	if (!r) {
		dm_task_destroy(task);
		return r;
	}

	/* Store existing table size */
	t2 = task->head;
	while (t2 && t2->next)
		t2 = t2->next;
	dmt->existing_table_size = t2 ? t2->start + t2->length : 0;

	/* A read-only/read-write switch always requires a real reload. */
	if (((task->dmi.v4->flags & DM_READONLY_FLAG) ? 1 : 0) != dmt->read_only)
		goto no_match;

	t1 = dmt->head;
	t2 = task->head;

	while (t1 && t2) {
		/* Trim trailing spaces the kernel may append to params. */
		len = strlen(t2->params);
		while (len-- > 0 && t2->params[len] == ' ')
			t2->params[len] = '\0';
		if ((t1->start != t2->start) ||
		    (t1->length != t2->length) ||
		    (strcmp(t1->type, t2->type)) ||
		    (strcmp(t1->params, t2->params)))
			goto no_match;
		t1 = t1->next;
		t2 = t2->next;
	}

	/* Same number of identical targets: suppress the reload and hand
	 * the TABLE result buffer over to dmt. */
	if (!t1 && !t2) {
		dmt->dmi.v4 = task->dmi.v4;
		task->dmi.v4 = NULL;
		dm_task_destroy(task);
		return 1;
	}

no_match:
	dm_task_destroy(task);

	/* Now do the original reload */
	dmt->suppress_identical_reload = 0;
	r = dm_task_run(dmt);

	return r;
}
1677
1678
/*
 * Recursively walk the dependency tree (DM_DEVICE_DEPS) below the device
 * described by dmt (or by 'device', a dev_t, on recursive calls) and log
 * if any dm device underneath is already suspended — suspending on top
 * of it could trap I/O between the two devices.
 *
 * Returns 1 when the check completed (warnings are logged, not fatal),
 * 0 only on an internal failure (task creation, ioctl, info).
 */
static int _check_children_not_suspended_v4(struct dm_task *dmt, uint64_t device)
{
	struct dm_task *task;
	struct dm_info info;
	struct dm_deps *deps;
	int r = 0;
	uint32_t i;

	/* Find dependencies */
	if (!(task = dm_task_create(DM_DEVICE_DEPS)))
		return 0;

	/* Copy across or set relevant fields */
	if (device) {
		/* Recursive call: address the dependency by its dev_t. */
		task->major = MAJOR(device);
		task->minor = MINOR(device);
	} else {
		if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name))
			goto out;

		if (dmt->uuid && !dm_task_set_uuid(task, dmt->uuid))
			goto out;

		task->major = dmt->major;
		task->minor = dmt->minor;
	}

	task->uid = dmt->uid;
	task->gid = dmt->gid;
	task->mode = dmt->mode;
	/* FIXME: Just for udev_check in dm_task_run. Can we avoid this? */
	task->event_nr = dmt->event_nr & DM_UDEV_FLAGS_MASK;
	task->cookie_set = dmt->cookie_set;
	task->add_node = dmt->add_node;

	if (!(r = dm_task_run(task)))
		goto out;

	if (!dm_task_get_info(task, &info) || !info.exists)
		goto out;

	/*
	 * Warn if any of the devices this device depends upon are already
	 * suspended: I/O could become trapped between the two devices.
	 */
	if (info.suspended) {
		if (!device)
			log_debug_activation("Attempting to suspend a device that is already suspended "
					     "(%u:%u)", info.major, info.minor);
		else
			log_error(INTERNAL_ERROR "Attempt to suspend device %s%s%s%.0d%s%.0d%s%s"
				  "that uses already-suspended device (%u:%u)", 
				  DEV_NAME(dmt) ? : "", DEV_UUID(dmt) ? : "",
				  dmt->major > 0 ? "(" : "",
				  dmt->major > 0 ? dmt->major : 0,
				  dmt->major > 0 ? ":" : "",
				  dmt->minor > 0 ? dmt->minor : 0,
				  dmt->major > 0 && dmt->minor == 0 ? "0" : "",
				  dmt->major > 0 ? ") " : "",
				  info.major, info.minor);

		/* No need for further recursion */
		r = 1;
		goto out;
	}

	if (!(deps = dm_task_get_deps(task)))
		goto out;

	for (i = 0; i < deps->count; i++) {
		/* Only recurse with dm devices */
		if (MAJOR(deps->device[i]) != _dm_device_major)
			continue;

		if (!_check_children_not_suspended_v4(task, deps->device[i]))
			goto out;
	}

	r = 1;

out:
	dm_task_destroy(task);

	return r;
}
1763
1764
static int _suspend_with_validation_v4(struct dm_task *dmt)
1765
0
{
1766
  /* Avoid recursion */
1767
0
  dmt->enable_checks = 0;
1768
1769
  /*
1770
   * Ensure we can't leave any I/O trapped between suspended devices.
1771
   */
1772
0
  if (!_check_children_not_suspended_v4(dmt, 0))
1773
0
    return 0;
1774
1775
  /* Finally, perform the original suspend. */
1776
0
  return dm_task_run(dmt);
1777
0
}
1778
1779
/*
 * Return a log-safe version of an ioctl message string:
 * NULL becomes "", and anything starting with "key set" (case-insensitive,
 * as sent by cryptsetup) is reduced to just "key set" so key material is
 * never logged.  Otherwise the message is returned unchanged.
 */
static const char *_sanitise_message(char *message)
{
	if (!message)
		return "";

	/* FIXME: Check for whitespace variations. */
	/* This traps what cryptsetup sends us. */
	if (!strncasecmp(message, "key set", 7))
		return "key set";

	return message;
}
1790
1791
#ifdef DM_IOCTLS
1792
static int _do_dm_ioctl_unmangle_string(char *str, const char *str_name,
1793
          char *buf, size_t buf_size,
1794
          dm_string_mangling_t mode)
1795
0
{
1796
0
  int r;
1797
1798
0
  if (mode == DM_STRING_MANGLING_NONE)
1799
0
    return 1;
1800
1801
0
  if (!check_multiple_mangled_string_allowed(str, str_name, mode))
1802
0
    return_0;
1803
1804
0
  if ((r = unmangle_string(str, str_name, strlen(str), buf, buf_size, mode)) < 0) {
1805
0
    log_debug_activation("_do_dm_ioctl_unmangle_string: failed to "
1806
0
             "unmangle %s \"%s\"", str_name, str);
1807
0
    return 0;
1808
0
  }
1809
1810
0
  if (r)
1811
0
    memcpy(str, buf, strlen(buf) + 1);
1812
1813
0
  return 1;
1814
0
}
1815
1816
/*
 * Unmangle the device name in the ioctl result header and, for
 * DM_DEVICE_LIST, every name (and, when the kernel provides it,
 * every UUID) in the trailing dm_name_list records.
 *
 * Returns 1 only if every unmangle succeeded (results are ANDed).
 */
static int _dm_ioctl_unmangle_names(int type, struct dm_ioctl *dmi)
{
	char buf[DM_NAME_LEN];
	char buf_uuid[DM_UUID_LEN];
	struct dm_name_list *names;
	unsigned next = 0;
	char *name;
	int r = 1;
	uint32_t *event_nr;
	char *uuid_ptr;
	dm_string_mangling_t mangling_mode = dm_get_name_mangling_mode();

	if ((name = dmi->name))
		r &= _do_dm_ioctl_unmangle_string(name, "name", buf, sizeof(buf),
						  mangling_mode);

	/* LIST results carry name records after the header at data_start;
	 * dev == 0 in the first record means the list is empty. */
	if (type == DM_DEVICE_LIST &&
	    ((names = ((struct dm_name_list *) ((char *)dmi + dmi->data_start)))) &&
	    names->dev) {
		do {
			/* Records are chained by byte offsets. */
			names = (struct dm_name_list *)((char *) names + next);
			/* Event number follows the NUL-terminated name, aligned. */
			event_nr = _align_ptr(names->name + strlen(names->name) + 1);
			r &= _do_dm_ioctl_unmangle_string(names->name, "name",
							  buf, sizeof(buf), mangling_mode);
			/* Unmangle also UUID within same loop */
			if (_check_has_event_nr() &&
			    (event_nr[1] & DM_NAME_LIST_FLAG_HAS_UUID)) {
				uuid_ptr = _align_ptr(event_nr + 2);
				r &= _do_dm_ioctl_unmangle_string(uuid_ptr, "UUID", buf_uuid,
								  sizeof(buf_uuid), mangling_mode);
			}
			next = names->next;
		} while (next);
	}

	return r;
}
1853
1854
static int _dm_ioctl_unmangle_uuids(int type, struct dm_ioctl *dmi)
1855
0
{
1856
0
  char buf[DM_UUID_LEN];
1857
0
  char *uuid = dmi->uuid;
1858
1859
0
  if (uuid)
1860
0
    return _do_dm_ioctl_unmangle_string(uuid, "UUID", buf, sizeof(buf),
1861
0
                dm_get_name_mangling_mode());
1862
1863
0
  return 1;
1864
0
}
1865
#endif
1866
1867
/*
 * Build the flat ioctl buffer for 'dmt' and issue it to the control
 * device.
 *
 * buffer_repeat_count doubles the buffer per previous "too small" retry;
 * retry_repeat_count is only logged.  On EBUSY, *retryable is set so the
 * caller may retry the whole ioctl.
 *
 * Returns the kernel-filled dm_ioctl buffer (caller frees with
 * _dm_zfree_dmi) or NULL on failure, with dmt->ioctl_errno set when the
 * ioctl itself failed.  ENXIO on INFO/MKNODES/STATUS is treated as
 * "device does not exist" rather than an error.
 */
static struct dm_ioctl *_do_dm_ioctl(struct dm_task *dmt, unsigned command,
				     unsigned buffer_repeat_count,
				     unsigned retry_repeat_count,
				     int *retryable)
{
	struct dm_ioctl *dmi;
	int ioctl_with_uevent;
	int r;

	dmt->ioctl_errno = 0;

	dmi = _flatten(dmt, buffer_repeat_count);
	if (!dmi) {
		log_error("Couldn't create ioctl argument.");
		return NULL;
	}

	if (dmt->type == DM_DEVICE_TABLE)
		dmi->flags |= DM_STATUS_TABLE_FLAG;

	dmi->flags |= DM_EXISTS_FLAG;	/* FIXME */

	if (dmt->no_open_count)
		dmi->flags |= DM_SKIP_BDGET_FLAG;

	/* Commands after which the kernel emits a uevent. */
	ioctl_with_uevent = dmt->type == DM_DEVICE_RESUME ||
			    dmt->type == DM_DEVICE_REMOVE ||
			    dmt->type == DM_DEVICE_RENAME;

	if (ioctl_with_uevent && dm_cookie_supported()) {
		/*
		 * Always mark events coming from libdevmapper as
		 * "primary sourced". This is needed to distinguish
		 * any spurious events so we can act appropriately.
		 * This needs to be applied even when udev_sync is
		 * not used because udev flags could be used alone.
		 */
		dmi->event_nr |= DM_UDEV_PRIMARY_SOURCE_FLAG <<
				 DM_UDEV_FLAGS_SHIFT;

		/*
		 * Prevent udev vs. libdevmapper race when processing nodes
		 * and symlinks. This can happen when the udev rules are
		 * installed and udev synchronization code is enabled in
		 * libdevmapper but the software using libdevmapper does not
		 * make use of it (by not calling dm_task_set_cookie before).
		 * We need to instruct the udev rules not to be applied at
		 * all in this situation so we can gracefully fallback to
		 * libdevmapper's node and symlink creation code.
		 */
		if (!dmt->cookie_set && dm_udev_get_sync_support()) {
			log_debug_activation("Cookie value is not set while trying to call %s "
					     "ioctl. Please, consider using libdevmapper's udev "
					     "synchronization interface or disable it explicitly "
					     "by calling dm_udev_set_sync_support(0).",
					     dmt->type == DM_DEVICE_RESUME ? "DM_DEVICE_RESUME" :
					     dmt->type == DM_DEVICE_REMOVE ? "DM_DEVICE_REMOVE" :
								     "DM_DEVICE_RENAME");
			log_debug_activation("Switching off device-mapper and all subsystem related "
					     "udev rules. Falling back to libdevmapper node creation.");
			/*
			 * Disable general dm and subsystem rules but keep
			 * dm disk rules if not flagged out explicitly before.
			 * We need /dev/disk content for the software that expects it.
			*/
			dmi->event_nr |= (DM_UDEV_DISABLE_DM_RULES_FLAG |
					  DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG) <<
					 DM_UDEV_FLAGS_SHIFT;
		}
	}

	/* One-line trace of the full ioctl request being issued. */
	log_debug_activation("dm %s %s%s %s%s%s %s%.0d%s%.0d%s"
			     "%s[ %s%s%s%s%s%s%s%s%s%s] %.0" PRIu64 " %s [%u] (*%u)",
			     _cmd_data_v4[dmt->type].name,
			     dmt->new_uuid ? "UUID " : "",
			     dmi->name, dmi->uuid, dmt->newname ? " " : "",
			     dmt->newname ? dmt->newname : "",
			     dmt->major > 0 ? "(" : "",
			     dmt->major > 0 ? dmt->major : 0,
			     dmt->major > 0 ? ":" : "",
			     dmt->minor > 0 ? dmt->minor : 0,
			     dmt->major > 0 && dmt->minor == 0 ? "0" : "",
			     dmt->major > 0 ? ") " : "",
			     dmt->no_open_count ? "noopencount " : "opencount ",
			     dmt->no_flush ? "noflush " : "flush ",
			     dmt->read_only ? "readonly " : "",
			     dmt->skip_lockfs ? "skiplockfs " : "",
			     dmt->retry_remove ? "retryremove " : "",
			     dmt->deferred_remove ? "deferredremove " : "",
			     dmt->secure_data ? "securedata " : "",
			     dmt->ima_measurement ? "ima_measurement " : "",
			     dmt->query_inactive_table ? "inactive " : "",
			     dmt->enable_checks ? "enablechecks " : "",
			     dmt->sector, _sanitise_message(dmt->message),
			     dmi->data_size, retry_repeat_count);
#ifdef DM_IOCTLS
	r = ioctl(_control_fd, command, dmi);

	if (dmt->record_timestamp)
		if (!dm_timestamp_get(_dm_ioctl_timestamp))
			stack;

	if (r < 0 && dmt->expected_errno != errno) {
		dmt->ioctl_errno = errno;
		/* ENXIO from INFO/MKNODES/STATUS just means "no such device". */
		if (dmt->ioctl_errno == ENXIO && ((dmt->type == DM_DEVICE_INFO) ||
						  (dmt->type == DM_DEVICE_MKNODES) ||
						  (dmt->type == DM_DEVICE_STATUS)))
			dmi->flags &= ~DM_EXISTS_FLAG;	/* FIXME */
		else {
			if (_log_suppress || dmt->ioctl_errno == EINTR)
				log_verbose("device-mapper: %s ioctl on %s %s%s%.0d%s%.0d%s%s "
					    "failed: %s",
					    _cmd_data_v4[dmt->type].name,
					    dmi->name[0] ? dmi->name : DEV_NAME(dmt) ? : "",
					    dmi->uuid[0] ? dmi->uuid : DEV_UUID(dmt) ? : "",
					    dmt->major > 0 ? "(" : "",
					    dmt->major > 0 ? dmt->major : 0,
					    dmt->major > 0 ? ":" : "",
					    dmt->minor > 0 ? dmt->minor : 0,
					    dmt->major > 0 && dmt->minor == 0 ? "0" : "",
					    dmt->major > 0 ? ")" : "",
					    strerror(dmt->ioctl_errno));
			else
				log_error("device-mapper: %s ioctl on %s %s%s%.0d%s%.0d%s%s "
					  "failed: %s",
					  _cmd_data_v4[dmt->type].name,
					  dmi->name[0] ? dmi->name : DEV_NAME(dmt) ? : "",
					  dmi->uuid[0] ? dmi->uuid : DEV_UUID(dmt) ? : "",
					  dmt->major > 0 ? "(" : "",
					  dmt->major > 0 ? dmt->major : 0,
					  dmt->major > 0 ? ":" : "",
					  dmt->minor > 0 ? dmt->minor : 0,
					  dmt->major > 0 && dmt->minor == 0 ? "0" : "",
					  dmt->major > 0 ? ")" : "",
					  strerror(dmt->ioctl_errno));

			/*
			 * It's sometimes worth retrying after EBUSY in case
			 * it's a transient failure caused by an asynchronous
			 * process quickly scanning the device.
			 */
			*retryable = dmt->ioctl_errno == EBUSY;

			goto error;
		}
	}

	if (ioctl_with_uevent && dm_udev_get_sync_support() &&
	    !_check_uevent_generated(dmi)) {
		log_debug_activation("Uevent not generated! Calling udev_complete "
				     "internally to avoid process lock-up.");
		_udev_complete(dmt);
	}

	if (!_dm_ioctl_unmangle_names(dmt->type, dmi))
		goto error;

	/* REMOVE results carry no valid UUID to unmangle. */
	if (dmt->type != DM_DEVICE_REMOVE &&
	    !_dm_ioctl_unmangle_uuids(dmt->type, dmi))
		goto error;

#else /* Userspace alternative for testing */
	goto error;
#endif
	return dmi;

error:
	_dm_zfree_dmi(dmi);
	return NULL;
}
2037
2038
/*
 * Flush any queued /dev node add/remove operations (delegates to
 * update_devs()).
 */
void dm_task_update_nodes(void)
{
	update_devs();
}
2042
2043
0
#define DM_IOCTL_RETRIES 25
2044
0
#define DM_RETRY_USLEEP_DELAY 200000
2045
2046
/*
 * Return the errno captured from the last failing dm ioctl issued for
 * this task (0 when no ioctl failure was recorded).
 */
int dm_task_get_errno(struct dm_task *dmt)
{
	return dmt->ioctl_errno;
}
2050
2051
#if defined(GNU_SYMVER)
2052
/*
2053
 * Enforce new version 1_02_197 of dm_task_run() that propagates
2054
 * ioctl() errno is being linked to app.
2055
 */
2056
/* Compatibility alias exported under the library's base symbol version:
 * older binaries linked against the unversioned dm_task_run land here
 * and are forwarded to the current implementation. */
DM_EXPORT_SYMBOL_BASE(dm_task_run)
int dm_task_run_base(struct dm_task *dmt);
int dm_task_run_base(struct dm_task *dmt)
{
	return dm_task_run(dmt);
}
2062
#endif
2063
2064
/*
 * Execute the prepared device-mapper task: dispatch special-cased task
 * types to their helpers, otherwise issue the v4 ioctl (retrying REMOVE
 * on transient EBUSY and growing the result buffer when the kernel
 * reports it was too small), then perform the corresponding /dev node
 * and udev bookkeeping.  Returns 1 on success, 0 on failure.
 */
DM_EXPORT_NEW_SYMBOL(int, dm_task_run, 1_02_197)
	(struct dm_task *dmt)
{
	struct dm_ioctl *dmi;
	unsigned command;
	int check_udev;
	int rely_on_udev;
	int suspended_counter;
	unsigned ioctl_retry = 1;
	int retryable = 0;
	const char *dev_name = DEV_NAME(dmt);
	const char *dev_uuid = DEV_UUID(dmt);

	/* Reject task types outside the command table. */
	if ((unsigned) dmt->type >= DM_ARRAY_SIZE(_cmd_data_v4)) {
		log_error(INTERNAL_ERROR "unknown device-mapper task %d",
			  dmt->type);
		return 0;
	}

	command = _cmd_data_v4[dmt->type].cmd;

	/* Old-style creation had a table supplied */
	if (dmt->type == DM_DEVICE_CREATE && dmt->head)
		return _create_and_load_v4(dmt);

	/* MKNODES with no name/uuid/major means: recreate nodes for all devices. */
	if (dmt->type == DM_DEVICE_MKNODES && !dev_name &&
	    !dev_uuid && dmt->major <= 0)
		return _mknodes_v4(dmt);

	/* Optionally skip a reload whose table is identical to the live one. */
	if ((dmt->type == DM_DEVICE_RELOAD) && dmt->suppress_identical_reload)
		return _reload_with_suppression_v4(dmt);

	/* Optionally validate before suspending. */
	if ((dmt->type == DM_DEVICE_SUSPEND) && dmt->enable_checks)
		return _suspend_with_validation_v4(dmt);

	if (!_open_control()) {
		/* Must not leave a set cookie waiting forever. */
		_udev_complete(dmt);
		return_0;
	}

	/*
	 * Loading a table while other devices are suspended risks deadlock
	 * (e.g. if the load allocates memory that triggers writeback to a
	 * suspended device) - warn loudly but proceed.
	 */
	if ((suspended_counter = dm_get_suspended_counter()) &&
	    dmt->type == DM_DEVICE_RELOAD)
		log_error(INTERNAL_ERROR "Performing unsafe table load while %d device(s) "
			  "are known to be suspended: "
			  "%s%s%s %s%.0d%s%.0d%s%s",
			  suspended_counter,
			  dev_name ? : "",
			  dev_uuid ? " UUID " : "",
			  dev_uuid ? : "",
			  dmt->major > 0 ? "(" : "",
			  dmt->major > 0 ? dmt->major : 0,
			  dmt->major > 0 ? ":" : "",
			  dmt->minor > 0 ? dmt->minor : 0,
			  dmt->major > 0 && dmt->minor == 0 ? "0" : "",
			  dmt->major > 0 ? ") " : "");

	/* FIXME Detect and warn if cookie set but should not be. */
repeat_ioctl:
	if (!(dmi = _do_dm_ioctl(dmt, command, _ioctl_buffer_double_factor,
				 ioctl_retry, &retryable))) {
		/*
		 * Async udev rules that scan devices commonly cause transient
		 * failures.  Normally you'd expect the user to have made sure
		 * nothing was using the device before issuing REMOVE, so it's
		 * worth retrying in case the failure is indeed transient.
		 */
		if (retryable && dmt->type == DM_DEVICE_REMOVE &&
		    dmt->retry_remove && ++ioctl_retry <= DM_IOCTL_RETRIES) {
			usleep(DM_RETRY_USLEEP_DELAY);
			goto repeat_ioctl;
		}

		/* Release any udev cookie so waiters don't hang. */
		_udev_complete(dmt);
		return 0;
	}

	/*
	 * The kernel sets DM_BUFFER_FULL_FLAG when the reply did not fit.
	 * For query-style commands, double the buffer and retry; for the
	 * rest the truncated data cannot be recovered, so just warn.
	 */
	if (dmi->flags & DM_BUFFER_FULL_FLAG) {
		switch (dmt->type) {
		case DM_DEVICE_LIST_VERSIONS:
		case DM_DEVICE_LIST:
		case DM_DEVICE_DEPS:
		case DM_DEVICE_STATUS:
		case DM_DEVICE_TABLE:
		case DM_DEVICE_WAITEVENT:
		case DM_DEVICE_TARGET_MSG:
			_ioctl_buffer_double_factor++;
			_dm_zfree_dmi(dmi);
			goto repeat_ioctl;
		default:
			log_error("WARNING: libdevmapper buffer too small for data");
		}
	}

	/*
	 * Are we expecting a udev operation to occur that we need to check for?
	 */
	check_udev = dmt->cookie_set &&
		     !(dmt->event_nr >> DM_UDEV_FLAGS_SHIFT &
		       DM_UDEV_DISABLE_DM_RULES_FLAG);

	/* When set, udev handles the /dev nodes and we skip the fallback. */
	rely_on_udev = dmt->cookie_set ? (dmt->event_nr >> DM_UDEV_FLAGS_SHIFT &
					  DM_UDEV_DISABLE_LIBRARY_FALLBACK) : 0;

	/* Post-ioctl /dev node maintenance per task type. */
	switch (dmt->type) {
	case DM_DEVICE_CREATE:
		if ((dmt->add_node == DM_ADD_NODE_ON_CREATE) &&
		    dev_name && *dev_name && !rely_on_udev)
			add_dev_node(dev_name, MAJOR(dmi->dev),
				     MINOR(dmi->dev), dmt->uid, dmt->gid,
				     dmt->mode, check_udev, rely_on_udev);
		break;
	case DM_DEVICE_REMOVE:
		/* FIXME Kernel needs to fill in dmi->name */
		if (dev_name && !rely_on_udev)
			rm_dev_node(dev_name, check_udev, rely_on_udev);
		break;

	case DM_DEVICE_RENAME:
		/* FIXME Kernel needs to fill in dmi->name */
		if (!dmt->new_uuid && dev_name)
			rename_dev_node(dev_name, dmt->newname,
					check_udev, rely_on_udev);
		break;

	case DM_DEVICE_RESUME:
		if ((dmt->add_node == DM_ADD_NODE_ON_RESUME) &&
		    dev_name && *dev_name)
			add_dev_node(dev_name, MAJOR(dmi->dev),
				     MINOR(dmi->dev), dmt->uid, dmt->gid,
				     dmt->mode, check_udev, rely_on_udev);
		/* FIXME Kernel needs to fill in dmi->name */
		set_dev_node_read_ahead(dev_name,
					MAJOR(dmi->dev), MINOR(dmi->dev),
					dmt->read_ahead, dmt->read_ahead_flags);
		break;

	case DM_DEVICE_MKNODES:
		if (dmi->flags & DM_EXISTS_FLAG)
			add_dev_node(dmi->name, MAJOR(dmi->dev),
				     MINOR(dmi->dev), dmt->uid,
				     dmt->gid, dmt->mode, 0, rely_on_udev);
		else if (dev_name)
			rm_dev_node(dev_name, 0, rely_on_udev);
		break;

	case DM_DEVICE_STATUS:
	case DM_DEVICE_TABLE:
	case DM_DEVICE_WAITEVENT:
		if (!_unmarshal_status(dmt, dmi))
			goto bad;
		break;
	}

	/* Was structure reused? */
	_dm_zfree_dmi(dmt->dmi.v4);

	dmt->dmi.v4 = dmi;
	return 1;

      bad:
	_dm_zfree_dmi(dmi);
	return 0;
}
2226
2227
void dm_hold_control_dev(int hold_open)
2228
0
{
2229
0
  _hold_control_fd_open = hold_open ? 1 : 0;
2230
2231
0
  log_debug("Hold of control device is now %sset.",
2232
0
      _hold_control_fd_open ? "" : "un");
2233
0
}
2234
2235
/*
 * Release per-invocation library state: close the control device
 * (unless a hold was requested via dm_hold_control_dev()), destroy
 * the ioctl timestamp, and flush queued /dev node operations.
 */
void dm_lib_release(void)
{
	if (!_hold_control_fd_open)
		_close_control_fd();

	dm_timestamp_destroy(_dm_ioctl_timestamp);
	_dm_ioctl_timestamp = NULL;

	update_devs();
}
2243
2244
void dm_pools_check_leaks(void);
2245
2246
void dm_lib_exit(void)
2247
0
{
2248
0
  int suspended_counter;
2249
0
  static unsigned _exited = 0;
2250
2251
0
  if (_exited++)
2252
0
    return;
2253
2254
0
  if ((suspended_counter = dm_get_suspended_counter()))
2255
0
    log_error("libdevmapper exiting with %d device(s) still suspended.", suspended_counter);
2256
2257
0
  dm_lib_release();
2258
0
  selinux_release();
2259
0
  if (_dm_bitset)
2260
0
    dm_bitset_destroy(_dm_bitset);
2261
0
  _dm_bitset = NULL;
2262
0
  dm_pools_check_leaks();
2263
0
  dm_dump_memory();
2264
0
  _version_ok = 1;
2265
0
  _version_checked = 0;
2266
0
}
2267
2268
#if defined(GNU_SYMVER)
2269
/*
2270
 * Maintain binary backward compatibility.
2271
 * Version script mechanism works with 'gcc' compatible compilers only.
2272
 */
2273
2274
/*
2275
 * This following code is here to retain ABI compatibility after adding
2276
 * the field deferred_remove to struct dm_info in version 1.02.89.
2277
 *
2278
 * Binaries linked against version 1.02.88 of libdevmapper or earlier
2279
 * will use this function that returns dm_info without the
2280
 * deferred_remove field.
2281
 *
2282
 * Binaries compiled against version 1.02.89 onwards will use
2283
 * the new function dm_task_get_info_with_deferred_remove due to the
2284
 * #define.
2285
 *
2286
 * N.B. Keep this function at the end of the file to make sure that
2287
 * no code in this file accidentally calls it.
2288
 */
2289
2290
DM_EXPORT_SYMBOL_BASE(dm_task_get_info)
2291
int dm_task_get_info_base(struct dm_task *dmt, struct dm_info *info);
2292
int dm_task_get_info_base(struct dm_task *dmt, struct dm_info *info)
2293
0
{
2294
0
  struct dm_info new_info;
2295
2296
0
  if (!dm_task_get_info(dmt, &new_info))
2297
0
    return 0;
2298
2299
0
  memcpy(info, &new_info, offsetof(struct dm_info, deferred_remove));
2300
2301
0
  return 1;
2302
0
}
2303
2304
#endif
2305
2306
int dm_task_get_info_with_deferred_remove(struct dm_task *dmt, struct dm_info *info);
2307
int dm_task_get_info_with_deferred_remove(struct dm_task *dmt, struct dm_info *info)
2308
0
{
2309
0
  struct dm_info new_info;
2310
2311
0
  if (!dm_task_get_info(dmt, &new_info))
2312
0
    return 0;
2313
2314
0
  memcpy(info, &new_info, offsetof(struct dm_info, internal_suspend));
2315
2316
0
  return 1;
2317
0
}