/src/lvm2/libdm/ioctl/libdm-iface.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved. |
3 | | * Copyright (C) 2004-2013 Red Hat, Inc. All rights reserved. |
4 | | * |
5 | | * This file is part of the device-mapper userspace tools. |
6 | | * |
7 | | * This copyrighted material is made available to anyone wishing to use, |
8 | | * modify, copy, or redistribute it subject to the terms and conditions |
9 | | * of the GNU Lesser General Public License v.2.1. |
10 | | * |
11 | | * You should have received a copy of the GNU Lesser General Public License |
12 | | * along with this program; if not, write to the Free Software Foundation, |
13 | | * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
14 | | */ |
15 | | |
16 | | #include "libdm/misc/dmlib.h" |
17 | | #include "libdm-targets.h" |
18 | | #include "libdm/libdm-common.h" |
19 | | |
20 | | #include <stddef.h> |
21 | | #include <fcntl.h> |
22 | | #include <dirent.h> |
23 | | #include <sys/ioctl.h> |
24 | | #include <sys/utsname.h> |
25 | | #include <limits.h> |
26 | | |
27 | | #ifdef __linux__ |
28 | | # include "libdm/misc/kdev_t.h" |
29 | | # include <linux/limits.h> |
30 | | #else |
31 | | # define MAJOR(x) major((x)) |
32 | | # define MINOR(x) minor((x)) |
33 | | # define MKDEV(x,y) makedev(((dev_t)x),((dev_t)y)) |
34 | | #endif |
35 | | |
36 | | #include "libdm/misc/dm-ioctl.h" |
37 | | |
38 | | /* |
39 | | * Ensure build compatibility. |
40 | | * The hard-coded versions here are the highest present |
41 | | * in the _cmd_data arrays. |
42 | | */ |
43 | | |
44 | | #if !((DM_VERSION_MAJOR == 4 && DM_VERSION_MINOR >= 6)) |
45 | | #error The version of dm-ioctl.h included is incompatible. |
46 | | #endif |
47 | | |
48 | | /* FIXME This should be exported in device-mapper.h */ |
49 | 0 | #define DM_NAME "device-mapper" |
50 | | |
51 | 0 | #define PROC_MISC "/proc/misc" |
52 | 0 | #define PROC_DEVICES "/proc/devices" |
53 | 0 | #define MISC_NAME "misc" |
54 | | |
55 | 0 | #define NUMBER_OF_MAJORS 4096 |
56 | | |
57 | | /* |
58 | | * Static minor number assigned since kernel version 2.6.36. |
59 | | * The original definition is in kernel's include/linux/miscdevice.h. |
60 | | * This number is also visible in modules.devname exported by depmod |
61 | | * utility (support included in module-init-tools version >= 3.12). |
62 | | */ |
63 | 0 | #define MAPPER_CTRL_MINOR 236 |
64 | 0 | #define MISC_MAJOR 10 |
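| | /* |
| | * For illustration (roughly what depmod emits; exact text may differ): |
| | * modules.devname typically carries an entry such as |
| | * |
| | *     dm_mod mapper/control c10:236 |
| | * |
| | * so the control node can be created before dm-mod is actually loaded. |
| | */ |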
65 | | |
66 | | /* dm major version no for running kernel */ |
67 | | static unsigned _dm_version = DM_VERSION_MAJOR; |
68 | | static unsigned _dm_version_minor = 0; |
69 | | static unsigned _dm_version_patchlevel = 0; |
70 | | static int _log_suppress = 0; |
71 | | static struct dm_timestamp *_dm_ioctl_timestamp = NULL; |
72 | | static int _dm_warn_inactive_suppress = 0; |
73 | | |
74 | | /* |
75 | | * If the kernel dm driver only supports one major number |
76 | | * we store it in _dm_device_major. Otherwise we indicate |
77 | | * which major numbers have been claimed by device-mapper |
78 | | * in _dm_bitset. |
79 | | */ |
80 | | static unsigned _dm_multiple_major_support = 1; |
81 | | static dm_bitset_t _dm_bitset = NULL; |
82 | | static uint32_t _dm_device_major = 0; |
83 | | |
84 | | static int _control_fd = -1; |
85 | | static int _hold_control_fd_open = 0; |
86 | | static int _version_checked = 0; |
87 | | static int _version_ok = 1; |
88 | | static unsigned _ioctl_buffer_double_factor = 0; |
89 | | |
90 | | /* *INDENT-OFF* */ |
91 | | static const struct cmd_data _cmd_data_v4[] = { |
92 | | {"create", DM_DEV_CREATE, {4, 0, 0}}, |
93 | | {"reload", DM_TABLE_LOAD, {4, 0, 0}}, |
94 | | {"remove", DM_DEV_REMOVE, {4, 0, 0}}, |
95 | | {"remove_all", DM_REMOVE_ALL, {4, 0, 0}}, |
96 | | {"suspend", DM_DEV_SUSPEND, {4, 0, 0}}, |
97 | | {"resume", DM_DEV_SUSPEND, {4, 0, 0}}, |
98 | | {"info", DM_DEV_STATUS, {4, 0, 0}}, |
99 | | {"deps", DM_TABLE_DEPS, {4, 0, 0}}, |
100 | | {"rename", DM_DEV_RENAME, {4, 0, 0}}, |
101 | | {"version", DM_VERSION, {4, 0, 0}}, |
102 | | {"status", DM_TABLE_STATUS, {4, 0, 0}}, |
103 | | {"table", DM_TABLE_STATUS, {4, 0, 0}}, |
104 | | {"waitevent", DM_DEV_WAIT, {4, 0, 0}}, |
105 | | {"names", DM_LIST_DEVICES, {4, 0, 0}}, |
106 | | {"clear", DM_TABLE_CLEAR, {4, 0, 0}}, |
107 | | {"mknodes", DM_DEV_STATUS, {4, 0, 0}}, |
108 | | #ifdef DM_LIST_VERSIONS |
109 | | {"versions", DM_LIST_VERSIONS, {4, 1, 0}}, |
110 | | #endif |
111 | | #ifdef DM_TARGET_MSG |
112 | | {"message", DM_TARGET_MSG, {4, 2, 0}}, |
113 | | #endif |
114 | | #ifdef DM_DEV_SET_GEOMETRY |
115 | | {"setgeometry", DM_DEV_SET_GEOMETRY, {4, 6, 0}}, |
116 | | #endif |
117 | | #ifdef DM_DEV_ARM_POLL |
118 | | {"armpoll", DM_DEV_ARM_POLL, {4, 36, 0}}, |
119 | | #endif |
120 | | #ifdef DM_GET_TARGET_VERSION |
121 | | {"target-version", DM_GET_TARGET_VERSION, {4, 41, 0}}, |
122 | | #endif |
123 | | }; |
124 | | /* *INDENT-ON* */ |
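| | /* |
| | * Each entry above pairs a command name with its ioctl number and the |
| | * minimum dm-ioctl interface version providing it; that version triple is |
| | * copied into struct dm_ioctl's version[] field by _flatten() below when |
| | * the command is issued. |
| | */ |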
125 | | |
126 | 0 | #define ALIGNMENT 8 |
127 | | |
128 | | /* FIXME Rejig library to record & use errno instead */ |
129 | | #ifndef DM_EXISTS_FLAG |
130 | 0 | # define DM_EXISTS_FLAG 0x00000004 |
131 | | #endif |
132 | | |
133 | | static char *_align(char *ptr, unsigned int a) |
134 | 0 | { |
135 | 0 | register unsigned long agn = --a; |
136 | |
|
137 | 0 | return (char *) (((unsigned long) ptr + agn) & ~agn); |
138 | 0 | } |
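| | /* |
| | * Worked example: with ALIGNMENT == 8, _align() rounds a pointer up to the |
| | * next 8-byte boundary: (0x1003 + 7) & ~7 == 0x1008, while an already |
| | * aligned 0x1008 stays 0x1008. |
| | */ |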
139 | | |
140 | | static unsigned _kernel_major = 0; |
141 | | static unsigned _kernel_minor = 0; |
142 | | static unsigned _kernel_release = 0; |
143 | | |
144 | | static int _uname(void) |
145 | 0 | { |
146 | 0 | static int _uts_set = 0; |
147 | 0 | struct utsname _uts; |
148 | 0 | int parts; |
149 | |
|
150 | 0 | if (_uts_set) |
151 | 0 | return 1; |
152 | | |
153 | 0 | if (uname(&_uts)) { |
154 | 0 | log_error("uname failed: %s", strerror(errno)); |
155 | 0 | return 0; |
156 | 0 | } |
157 | | |
158 | 0 | parts = sscanf(_uts.release, "%u.%u.%u", |
159 | 0 | &_kernel_major, &_kernel_minor, &_kernel_release); |
160 | | |
161 | | /* Kernels with a major number of 2 always had 3 parts. */ |
162 | 0 | if (parts < 1 || (_kernel_major < 3 && parts < 3)) { |
163 | 0 | log_error("Could not determine kernel version used."); |
164 | 0 | return 0; |
165 | 0 | } |
166 | | |
167 | 0 | _uts_set = 1; |
168 | 0 | return 1; |
169 | 0 | } |
170 | | |
171 | | int get_uname_version(unsigned *major, unsigned *minor, unsigned *release) |
172 | 0 | { |
173 | 0 | if (!_uname()) |
174 | 0 | return_0; |
175 | | |
176 | 0 | *major = _kernel_major; |
177 | 0 | *minor = _kernel_minor; |
178 | 0 | *release = _kernel_release; |
179 | |
|
180 | 0 | return 1; |
181 | 0 | } |
182 | | |
183 | | #ifdef DM_IOCTLS |
184 | | |
185 | | /* |
186 | | * Set number to NULL to populate _dm_bitset - otherwise first |
187 | | * match is returned. |
188 | | * Returns: |
189 | | * 0 - error |
190 | | * 1 - success - number found |
191 | | * 2 - success - number not found (only if require_module_loaded=0) |
192 | | */ |
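| | /* |
| | * Illustrative input (contents vary by system): /proc/devices lists a |
| | * "Character devices:" section (e.g. " 10 misc") followed by a |
| | * "Block devices:" section (e.g. "253 device-mapper"), while /proc/misc |
| | * holds "<minor> <name>" pairs such as "236 device-mapper"; matching is |
| | * done on the name column of those "<number> <name>" lines. |
| | */ |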
193 | | static int _get_proc_number(const char *file, const char *name, |
194 | | uint32_t *number, int require_module_loaded) |
195 | 0 | { |
196 | 0 | FILE *fl; |
197 | 0 | char nm[256]; |
198 | 0 | char *line = NULL; |
199 | 0 | size_t len = 0; |
200 | 0 | uint32_t num; |
201 | 0 | unsigned blocksection = (strcmp(file, PROC_DEVICES) == 0) ? 0 : 1; |
202 | |
|
203 | 0 | if (!(fl = fopen(file, "r"))) { |
204 | 0 | log_sys_error("fopen", file); |
205 | 0 | return 0; |
206 | 0 | } |
207 | | |
208 | 0 | while (getline(&line, &len, fl) != -1) { |
209 | 0 | if (!blocksection && (line[0] == 'B')) |
210 | 0 | blocksection = 1; |
211 | 0 | else if (sscanf(line, "%u %255s\n", &num, &nm[0]) == 2) { |
212 | 0 | if (!strcmp(name, nm)) { |
213 | 0 | if (number) { |
214 | 0 | *number = num; |
215 | 0 | if (fclose(fl)) |
216 | 0 | log_sys_error("fclose", file); |
217 | 0 | free(line); |
218 | 0 | return 1; |
219 | 0 | } |
220 | 0 | dm_bit_set(_dm_bitset, num); |
221 | 0 | } |
222 | 0 | } |
223 | 0 | } |
224 | 0 | if (fclose(fl)) |
225 | 0 | log_sys_error("fclose", file); |
226 | 0 | free(line); |
227 | |
|
228 | 0 | if (number) { |
229 | 0 | if (require_module_loaded) { |
230 | 0 | log_error("%s: No entry for %s found", file, name); |
231 | 0 | return 0; |
232 | 0 | } |
233 | | |
234 | 0 | return 2; |
235 | 0 | } |
236 | | |
237 | 0 | return 1; |
238 | 0 | } |
239 | | |
240 | | static int _control_device_number(uint32_t *major, uint32_t *minor) |
241 | 0 | { |
242 | 0 | if (!_get_proc_number(PROC_DEVICES, MISC_NAME, major, 1) || |
243 | 0 | !_get_proc_number(PROC_MISC, DM_NAME, minor, 1)) { |
244 | 0 | *major = 0; |
245 | 0 | return 0; |
246 | 0 | } |
247 | | |
248 | 0 | return 1; |
249 | 0 | } |
250 | | |
251 | | static int _control_unlink(const char *control) |
252 | 0 | { |
253 | 0 | if (unlink(control) && (errno != ENOENT)) { |
254 | 0 | log_sys_error("unlink", control); |
255 | 0 | return -1; |
256 | 0 | } |
257 | | |
258 | 0 | return 0; |
259 | 0 | } |
260 | | |
261 | | /* |
262 | | * Returns 1 if it exists on returning; 0 if it doesn't; -1 if it's wrong. |
263 | | */ |
264 | | static int _control_exists(const char *control, uint32_t major, uint32_t minor) |
265 | 0 | { |
266 | 0 | struct stat buf; |
267 | |
|
268 | 0 | if (stat(control, &buf) < 0) { |
269 | 0 | if (errno != ENOENT) |
270 | 0 | log_sys_error("stat", control); |
271 | 0 | return 0; |
272 | 0 | } |
273 | | |
274 | 0 | if (!S_ISCHR(buf.st_mode)) { |
275 | 0 | log_verbose("%s: Wrong inode type", control); |
276 | 0 | return _control_unlink(control); |
277 | 0 | } |
278 | | |
279 | 0 | if (major && buf.st_rdev != MKDEV(major, minor)) { |
280 | 0 | log_verbose("%s: Wrong device number: (%u, %u) instead of " |
281 | 0 | "(%u, %u)", control, |
282 | 0 | MAJOR(buf.st_rdev), MINOR(buf.st_rdev), |
283 | 0 | major, minor); |
284 | 0 | return _control_unlink(control); |
285 | 0 | } |
286 | | |
287 | 0 | return 1; |
288 | 0 | } |
289 | | |
290 | | static int _create_control(const char *control, uint32_t major, uint32_t minor) |
291 | 0 | { |
292 | 0 | int ret; |
293 | 0 | mode_t old_umask; |
294 | | |
295 | | /* |
296 | | * Return if the control already exists with intended major/minor |
297 | | * or there's an error unlinking an apparently incorrect one. |
298 | | */ |
299 | 0 | ret = _control_exists(control, major, minor); |
300 | 0 | if (ret == -1) |
301 | 0 | return_0; /* Failed to unlink existing incorrect node */ |
302 | 0 | if (ret) |
303 | 0 | return 1; /* Already exists and correct */ |
304 | | |
305 | 0 | (void) dm_prepare_selinux_context(dm_dir(), S_IFDIR); |
306 | 0 | old_umask = umask(DM_DEV_DIR_UMASK); |
307 | 0 | ret = dm_create_dir(dm_dir()); |
308 | 0 | umask(old_umask); |
309 | 0 | (void) dm_prepare_selinux_context(NULL, 0); |
310 | |
|
311 | 0 | if (!ret) |
312 | 0 | return_0; |
313 | | |
314 | 0 | log_verbose("Creating device %s (%u, %u)", control, major, minor); |
315 | |
|
316 | 0 | (void) dm_prepare_selinux_context(control, S_IFCHR); |
317 | 0 | old_umask = umask(DM_CONTROL_NODE_UMASK); |
318 | 0 | if (mknod(control, S_IFCHR | S_IRUSR | S_IWUSR, |
319 | 0 | MKDEV(major, minor)) < 0) { |
320 | 0 | if (errno != EEXIST) { |
321 | 0 | log_sys_error("mknod", control); |
322 | 0 | ret = 0; |
323 | 0 | } else if (_control_exists(control, major, minor) != 1) { |
324 | 0 | stack; /* Invalid control node created by parallel command ? */ |
325 | 0 | ret = 0; |
326 | 0 | } |
327 | 0 | } |
328 | 0 | umask(old_umask); |
329 | 0 | (void) dm_prepare_selinux_context(NULL, 0); |
330 | |
|
331 | 0 | return ret; |
332 | 0 | } |
333 | | #endif |
334 | | |
335 | | /* |
336 | | * FIXME Update bitset in long-running process if dm claims new major numbers. |
337 | | */ |
338 | | /* |
339 | | * If require_module_loaded=0, the caller is responsible for checking |
340 | | * whether _dm_device_major or _dm_bitset was really set. If |
341 | | * neither was, the module is not loaded. |
342 | | */ |
343 | | static int _create_dm_bitset(int require_module_loaded) |
344 | 0 | { |
345 | 0 | int r; |
346 | |
|
347 | 0 | #ifdef DM_IOCTLS |
348 | 0 | if (_dm_bitset || _dm_device_major) |
349 | 0 | return 1; |
350 | | |
351 | 0 | if (!_uname()) |
352 | 0 | return 0; |
353 | | |
354 | | /* |
355 | | * 2.6 kernels are limited to one major number. |
356 | | * Assume 2.4 kernels are patched not to. |
357 | | * FIXME Check _dm_version and _dm_version_minor if 2.6 changes this. |
358 | | */ |
359 | 0 | if (KERNEL_VERSION(_kernel_major, _kernel_minor, _kernel_release) >= |
360 | 0 | KERNEL_VERSION(2, 6, 0)) |
361 | 0 | _dm_multiple_major_support = 0; |
362 | |
|
363 | 0 | if (!_dm_multiple_major_support) { |
364 | 0 | if (!_get_proc_number(PROC_DEVICES, DM_NAME, &_dm_device_major, |
365 | 0 | require_module_loaded)) |
366 | 0 | return 0; |
367 | 0 | return 1; |
368 | 0 | } |
369 | | |
370 | | /* Multiple major numbers supported */ |
371 | 0 | if (!(_dm_bitset = dm_bitset_create(NULL, NUMBER_OF_MAJORS))) |
372 | 0 | return 0; |
373 | | |
374 | 0 | r = _get_proc_number(PROC_DEVICES, DM_NAME, NULL, require_module_loaded); |
375 | 0 | if (!r || r == 2) { |
376 | 0 | dm_bitset_destroy(_dm_bitset); |
377 | 0 | _dm_bitset = NULL; |
378 | | /* |
379 | | * It's not an error if we didn't find anything and we |
380 | | * didn't require module to be loaded at the same time. |
381 | | */ |
382 | 0 | return r == 2; |
383 | 0 | } |
384 | | |
385 | 0 | return 1; |
386 | | #else |
387 | | return 0; |
388 | | #endif |
389 | 0 | } |
390 | | |
391 | | int dm_is_dm_major(uint32_t major) |
392 | 0 | { |
393 | 0 | if (!_create_dm_bitset(0)) |
394 | 0 | return 0; |
395 | | |
396 | 0 | if (_dm_multiple_major_support) { |
397 | 0 | if (!_dm_bitset) |
398 | 0 | return 0; |
399 | 0 | return dm_bit(_dm_bitset, major) ? 1 : 0; |
400 | 0 | } |
401 | | |
402 | 0 | if (!_dm_device_major) |
403 | 0 | return 0; |
404 | | |
405 | 0 | return (major == _dm_device_major) ? 1 : 0; |
406 | 0 | } |
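| | /* |
| | * Usage sketch: a caller holding a struct stat for a block device can ask |
| | * dm_is_dm_major(MAJOR(st.st_rdev)) whether that node is backed by |
| | * device-mapper; the call is safe even when the dm module is not loaded, |
| | * in which case it simply returns 0. |
| | */ |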
407 | | |
408 | | static void _close_control_fd(void) |
409 | 6.43k | { |
410 | 6.43k | if (_control_fd != -1) { |
411 | 0 | if (close(_control_fd) < 0) |
412 | 0 | log_sys_debug("close", "_control_fd"); |
413 | 0 | _control_fd = -1; |
414 | 0 | } |
415 | 6.43k | } |
416 | | |
417 | | #ifdef DM_IOCTLS |
418 | | static int _open_and_assign_control_fd(const char *control) |
419 | 0 | { |
420 | 0 | if ((_control_fd = open(control, O_RDWR)) < 0) { |
421 | 0 | log_sys_error("open", control); |
422 | 0 | return 0; |
423 | 0 | } |
424 | | |
425 | 0 | return 1; |
426 | 0 | } |
427 | | #endif |
428 | | |
429 | | static int _open_control(void) |
430 | 0 | { |
431 | 0 | #ifdef DM_IOCTLS |
432 | 0 | char control[PATH_MAX]; |
433 | 0 | uint32_t major = MISC_MAJOR; |
434 | 0 | uint32_t minor = MAPPER_CTRL_MINOR; |
435 | |
|
436 | 0 | if (_control_fd != -1) |
437 | 0 | return 1; |
438 | | |
439 | 0 | if (!_uname()) |
440 | 0 | return 0; |
441 | | |
442 | 0 | if (dm_snprintf(control, sizeof(control), "%s/%s", dm_dir(), DM_CONTROL_NODE) < 0) |
443 | 0 | goto_bad; |
444 | | |
445 | | /* |
446 | | * Prior to 2.6.36 the minor number should be looked up in /proc. |
447 | | */ |
448 | 0 | if ((KERNEL_VERSION(_kernel_major, _kernel_minor, _kernel_release) < |
449 | 0 | KERNEL_VERSION(2, 6, 36)) && |
450 | 0 | !_control_device_number(&major, &minor)) |
451 | 0 | goto_bad; |
452 | | |
453 | | /* |
454 | | * Create the node with correct major and minor if not already done. |
455 | | * Udev may already have created /dev/mapper/control |
456 | | * from the modules.devname file generated by depmod. |
457 | | */ |
458 | 0 | if (!_create_control(control, major, minor)) |
459 | 0 | goto_bad; |
460 | | |
461 | | /* |
462 | | * As of 2.6.36 kernels, the open can trigger autoloading dm-mod. |
463 | | */ |
464 | 0 | if (!_open_and_assign_control_fd(control)) |
465 | 0 | goto_bad; |
466 | | |
467 | 0 | if (!_create_dm_bitset(1)) { |
468 | 0 | log_error("Failed to set up list of device-mapper major numbers"); |
469 | 0 | return 0; |
470 | 0 | } |
471 | | |
472 | 0 | return 1; |
473 | | |
474 | 0 | bad: |
475 | 0 | log_error("Failure to communicate with kernel device-mapper driver."); |
476 | 0 | if (!geteuid()) |
477 | 0 | log_error("Check that device-mapper is available in the kernel."); |
478 | 0 | return 0; |
479 | | #else |
480 | | return 1; |
481 | | #endif |
482 | 0 | } |
483 | | |
484 | | static void _dm_zfree_string(char *string) |
485 | 0 | { |
486 | 0 | if (string) { |
487 | 0 | memset(string, 0, strlen(string)); |
488 | 0 | __asm__ volatile ("" ::: "memory"); /* Compiler barrier. */ |
489 | 0 | dm_free(string); |
490 | 0 | } |
491 | 0 | } |
492 | | |
493 | | static void _dm_zfree_dmi(struct dm_ioctl *dmi) |
494 | 0 | { |
495 | 0 | if (dmi) { |
496 | 0 | memset(dmi, 0, dmi->data_size); |
497 | 0 | __asm__ volatile ("" ::: "memory"); /* Compiler barrier. */ |
498 | 0 | dm_free(dmi); |
499 | 0 | } |
500 | 0 | } |
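| | /* |
| | * The empty asm statements above act as compiler barriers so the memset() |
| | * of soon-to-be-freed memory cannot be optimized away; this keeps secure |
| | * data (e.g. key material passed through messages) from lingering in |
| | * freed heap blocks. |
| | */ |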
501 | | |
502 | | static void _dm_task_free_targets(struct dm_task *dmt) |
503 | 0 | { |
504 | 0 | struct target *t, *n; |
505 | |
|
506 | 0 | for (t = dmt->head; t; t = n) { |
507 | 0 | n = t->next; |
508 | 0 | _dm_zfree_string(t->params); |
509 | 0 | dm_free(t->type); |
510 | 0 | dm_free(t); |
511 | 0 | } |
512 | |
|
513 | 0 | dmt->head = dmt->tail = NULL; |
514 | 0 | } |
515 | | |
516 | | void dm_task_destroy(struct dm_task *dmt) |
517 | 0 | { |
518 | 0 | _dm_task_free_targets(dmt); |
519 | 0 | _dm_zfree_dmi(dmt->dmi.v4); |
520 | 0 | dm_free(dmt->dev_name); |
521 | 0 | dm_free(dmt->mangled_dev_name); |
522 | 0 | dm_free(dmt->newname); |
523 | 0 | dm_free(dmt->message); |
524 | 0 | dm_free(dmt->geometry); |
525 | 0 | dm_free(dmt->uuid); |
526 | 0 | dm_free(dmt->mangled_uuid); |
527 | 0 | dm_free(dmt); |
528 | 0 | } |
529 | | |
530 | | /* |
531 | | * Protocol Version 4 functions. |
532 | | */ |
533 | | |
534 | | int dm_task_get_driver_version(struct dm_task *dmt, char *version, size_t size) |
535 | 0 | { |
536 | 0 | unsigned *v; |
537 | |
|
538 | 0 | if (!dmt->dmi.v4) { |
539 | 0 | if (version) |
540 | 0 | version[0] = '\0'; |
541 | 0 | return 0; |
542 | 0 | } |
543 | | |
544 | 0 | v = dmt->dmi.v4->version; |
545 | 0 | _dm_version_minor = v[1]; |
546 | 0 | _dm_version_patchlevel = v[2]; |
547 | 0 | if (version && |
548 | 0 | ((unsigned) snprintf(version, size, "%u.%u.%u", v[0], v[1], v[2]) >= size)) { |
549 | 0 | log_error("Buffer for version is to short."); |
550 | 0 | if (size > 0) |
551 | 0 | version[0] = '\0'; |
552 | 0 | return 0; |
553 | 0 | } |
554 | | |
555 | 0 | return 1; |
556 | 0 | } |
557 | | |
558 | | static int _check_version(char *version, size_t size, int log_suppress) |
559 | 0 | { |
560 | 0 | struct dm_task *task; |
561 | 0 | int r; |
562 | |
|
563 | 0 | if (!(task = dm_task_create(DM_DEVICE_VERSION))) { |
564 | 0 | log_error("Failed to get device-mapper version"); |
565 | 0 | version[0] = '\0'; |
566 | 0 | return 0; |
567 | 0 | } |
568 | | |
569 | 0 | if (log_suppress) |
570 | 0 | _log_suppress = 1; |
571 | |
|
572 | 0 | r = dm_task_run(task); |
573 | 0 | if (!dm_task_get_driver_version(task, version, size)) |
574 | 0 | stack; |
575 | 0 | dm_task_destroy(task); |
576 | 0 | _log_suppress = 0; |
577 | |
|
578 | 0 | return r; |
579 | 0 | } |
580 | | |
581 | | /* |
582 | | * Find out device-mapper's major version number the first time |
583 | | * this is called and whether or not we support it. |
584 | | */ |
585 | | int dm_check_version(void) |
586 | 0 | { |
587 | 0 | char libversion[64] = "", dmversion[64] = ""; |
588 | 0 | const char *compat = ""; |
589 | |
|
590 | 0 | if (_version_checked) |
591 | 0 | return _version_ok; |
592 | | |
593 | 0 | _version_checked = 1; |
594 | |
|
595 | 0 | if (_check_version(dmversion, sizeof(dmversion), 0)) |
596 | 0 | return 1; |
597 | | |
598 | 0 | dm_get_library_version(libversion, sizeof(libversion)); |
599 | |
|
600 | 0 | log_error("Incompatible libdevmapper %s%s and kernel driver %s.", |
601 | 0 | *libversion ? libversion : "(unknown version)", compat, |
602 | 0 | *dmversion ? dmversion : "(unknown version)"); |
603 | |
|
604 | 0 | _version_ok = 0; |
605 | 0 | return 0; |
606 | 0 | } |
607 | | |
608 | | int dm_cookie_supported(void) |
609 | 0 | { |
610 | 0 | return (dm_check_version() && |
611 | 0 | ((_dm_version == 4) ? _dm_version_minor >= 15 : _dm_version > 4)); |
612 | 0 | } |
613 | | |
614 | | static int _dm_inactive_supported(void) |
615 | 0 | { |
616 | 0 | int inactive_supported = 0; |
617 | |
|
618 | 0 | if (dm_check_version() && _dm_version >= 4) { |
619 | 0 | if (_dm_version_minor >= 16) |
620 | 0 | inactive_supported = 1; /* upstream */ |
621 | 0 | else if (_dm_version_minor == 11 && |
622 | 0 | (_dm_version_patchlevel >= 6 && |
623 | 0 | _dm_version_patchlevel <= 40)) { |
624 | 0 | inactive_supported = 1; /* RHEL 5.7 */ |
625 | 0 | } |
626 | 0 | } |
627 | |
|
628 | 0 | return inactive_supported; |
629 | 0 | } |
630 | | |
631 | | int dm_message_supports_precise_timestamps(void) |
632 | 0 | { |
633 | | /* |
634 | | * 4.32.0 supports "precise_timestamps" and "histogram:" options |
635 | | * to @stats_create messages but lacks the ability to report |
636 | | * these properties via a subsequent @stats_list: require at |
637 | | * least 4.33.0 in order to use these features. |
638 | | */ |
639 | 0 | if (dm_check_version() && _dm_version >= 4) |
640 | 0 | if (_dm_version_minor >= 33) |
641 | 0 | return 1; |
642 | 0 | return 0; |
643 | 0 | } |
644 | | |
645 | | void *dm_get_next_target(struct dm_task *dmt, void *next, |
646 | | uint64_t *start, uint64_t *length, |
647 | | char **target_type, char **params) |
648 | 0 | { |
649 | 0 | struct target *t = (struct target *) next; |
650 | |
|
651 | 0 | if (!t) |
652 | 0 | t = dmt->head; |
653 | |
|
654 | 0 | if (!t) { |
655 | 0 | *start = 0; |
656 | 0 | *length = 0; |
657 | 0 | *target_type = 0; |
658 | 0 | *params = 0; |
659 | 0 | return NULL; |
660 | 0 | } |
661 | | |
662 | 0 | *start = t->start; |
663 | 0 | *length = t->length; |
664 | 0 | *target_type = t->type; |
665 | 0 | *params = t->params; |
666 | |
|
667 | 0 | return t->next; |
668 | 0 | } |
669 | | |
670 | | /* Unmarshal the target info returned from a status call */ |
671 | | static int _unmarshal_status(struct dm_task *dmt, struct dm_ioctl *dmi) |
672 | 0 | { |
673 | 0 | char *outbuf = (char *) dmi + dmi->data_start; |
674 | 0 | char *outptr = outbuf; |
675 | 0 | uint32_t i; |
676 | 0 | struct dm_target_spec *spec; |
677 | |
|
678 | 0 | _dm_task_free_targets(dmt); |
679 | |
|
680 | 0 | for (i = 0; i < dmi->target_count; i++) { |
681 | 0 | spec = (struct dm_target_spec *) outptr; |
682 | 0 | if (!dm_task_add_target(dmt, spec->sector_start, |
683 | 0 | spec->length, |
684 | 0 | spec->target_type, |
685 | 0 | outptr + sizeof(*spec))) { |
686 | 0 | return 0; |
687 | 0 | } |
688 | | |
689 | 0 | outptr = outbuf + spec->next; |
690 | 0 | } |
691 | | |
692 | 0 | return 1; |
693 | 0 | } |
694 | | |
695 | | int dm_format_dev(char *buf, int bufsize, uint32_t dev_major, |
696 | | uint32_t dev_minor) |
697 | 0 | { |
698 | 0 | int r; |
699 | |
|
700 | 0 | if (bufsize < 8) |
701 | 0 | return 0; |
702 | | |
703 | 0 | r = snprintf(buf, (size_t) bufsize, "%u:%u", dev_major, dev_minor); |
704 | 0 | if (r < 0 || r > bufsize - 1) |
705 | 0 | return 0; |
706 | | |
707 | 0 | return 1; |
708 | 0 | } |
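| | /* |
| | * Usage sketch (illustrative values): dm_format_dev(buf, sizeof(buf), 253, 2) |
| | * writes "253:2" into buf and returns 1; it returns 0 if bufsize < 8 or the |
| | * formatted string would not fit. |
| | */ |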
709 | | |
710 | | DM_EXPORT_NEW_SYMBOL(int, dm_task_get_info, 1_02_97) |
711 | | (struct dm_task *dmt, struct dm_info *info) |
712 | 0 | { |
713 | 0 | if (!dmt->dmi.v4) |
714 | 0 | return 0; |
715 | | |
716 | 0 | memset(info, 0, sizeof(*info)); |
717 | |
|
718 | 0 | info->exists = dmt->dmi.v4->flags & DM_EXISTS_FLAG ? 1 : 0; |
719 | 0 | if (!info->exists) |
720 | 0 | return 1; |
721 | | |
722 | 0 | info->suspended = dmt->dmi.v4->flags & DM_SUSPEND_FLAG ? 1 : 0; |
723 | 0 | info->read_only = dmt->dmi.v4->flags & DM_READONLY_FLAG ? 1 : 0; |
724 | 0 | info->live_table = dmt->dmi.v4->flags & DM_ACTIVE_PRESENT_FLAG ? 1 : 0; |
725 | 0 | info->inactive_table = dmt->dmi.v4->flags & DM_INACTIVE_PRESENT_FLAG ? |
726 | 0 | 1 : 0; |
727 | 0 | info->deferred_remove = dmt->dmi.v4->flags & DM_DEFERRED_REMOVE; |
728 | 0 | info->internal_suspend = (dmt->dmi.v4->flags & DM_INTERNAL_SUSPEND_FLAG) ? 1 : 0; |
729 | 0 | info->target_count = dmt->dmi.v4->target_count; |
730 | 0 | info->open_count = dmt->dmi.v4->open_count; |
731 | 0 | info->event_nr = dmt->dmi.v4->event_nr; |
732 | 0 | info->major = MAJOR(dmt->dmi.v4->dev); |
733 | 0 | info->minor = MINOR(dmt->dmi.v4->dev); |
734 | |
|
735 | 0 | return 1; |
736 | 0 | } |
737 | | |
738 | | uint32_t dm_task_get_read_ahead(const struct dm_task *dmt, uint32_t *read_ahead) |
739 | 0 | { |
740 | 0 | const char *dev_name; |
741 | |
|
742 | 0 | *read_ahead = 0; |
743 | |
|
744 | 0 | if (!dmt->dmi.v4 || !(dmt->dmi.v4->flags & DM_EXISTS_FLAG)) |
745 | 0 | return 0; |
746 | | |
747 | 0 | if (*dmt->dmi.v4->name) |
748 | 0 | dev_name = dmt->dmi.v4->name; |
749 | 0 | else if (!(dev_name = DEV_NAME(dmt))) { |
750 | 0 | log_error("Get read ahead request failed: device name unrecorded."); |
751 | 0 | return 0; |
752 | 0 | } |
753 | | |
754 | 0 | return get_dev_node_read_ahead(dev_name, MAJOR(dmt->dmi.v4->dev), |
755 | 0 | MINOR(dmt->dmi.v4->dev), read_ahead); |
756 | 0 | } |
757 | | |
758 | | struct dm_deps *dm_task_get_deps(struct dm_task *dmt) |
759 | 0 | { |
760 | 0 | if (!dmt) { |
761 | 0 | log_error(INTERNAL_ERROR "Missing dm_task."); |
762 | 0 | return NULL; |
763 | 0 | } |
764 | | |
765 | 0 | return (struct dm_deps *) (((char *) dmt->dmi.v4) + |
766 | 0 | dmt->dmi.v4->data_start); |
767 | 0 | } |
768 | | |
769 | | /* |
770 | | * Round up the ptr to an 8-byte boundary. |
771 | | * Follow kernel pattern. |
772 | | */ |
773 | 0 | #define ALIGN_MASK 7 |
774 | | static size_t _align_val(size_t val) |
775 | 0 | { |
776 | 0 | return (val + ALIGN_MASK) & ~ALIGN_MASK; |
777 | 0 | } |
778 | | static void *_align_ptr(void *ptr) |
779 | 0 | { |
780 | 0 | return (void *)_align_val((size_t)ptr); |
781 | 0 | } |
782 | | |
783 | 0 | static int _check_has_event_nr(void) { |
784 | 0 | static int _has_event_nr = -1; |
785 | |
|
786 | 0 | if (_has_event_nr < 0) |
787 | 0 | _has_event_nr = dm_check_version() && |
788 | 0 | ((_dm_version == 4) ? _dm_version_minor >= 38 : _dm_version > 4); |
789 | |
|
790 | 0 | return _has_event_nr; |
791 | 0 | } |
792 | | |
793 | | struct dm_names *dm_task_get_names(struct dm_task *dmt) |
794 | 0 | { |
795 | 0 | return (struct dm_names *) (((char *) dmt->dmi.v4) + |
796 | 0 | dmt->dmi.v4->data_start); |
797 | 0 | } |
798 | | |
799 | | struct dm_versions *dm_task_get_versions(struct dm_task *dmt) |
800 | 0 | { |
801 | 0 | return (struct dm_versions *) (((char *) dmt->dmi.v4) + |
802 | 0 | dmt->dmi.v4->data_start); |
803 | 0 | } |
804 | | |
805 | | const char *dm_task_get_message_response(struct dm_task *dmt) |
806 | 0 | { |
807 | 0 | const char *start, *end; |
808 | |
|
809 | 0 | if (!(dmt->dmi.v4->flags & DM_DATA_OUT_FLAG)) |
810 | 0 | return NULL; |
811 | | |
812 | 0 | start = (const char *) dmt->dmi.v4 + dmt->dmi.v4->data_start; |
813 | 0 | end = (const char *) dmt->dmi.v4 + dmt->dmi.v4->data_size; |
814 | |
|
815 | 0 | if (end < start) { |
816 | 0 | log_error(INTERNAL_ERROR "Corrupted message structure returned: start %d > end %d", (int)dmt->dmi.v4->data_start, (int)dmt->dmi.v4->data_size); |
817 | 0 | return NULL; |
818 | 0 | } |
819 | | |
820 | 0 | if (!memchr(start, 0, end - start)) { |
821 | 0 | log_error(INTERNAL_ERROR "Message response doesn't contain terminating NUL character"); |
822 | 0 | return NULL; |
823 | 0 | } |
824 | | |
825 | 0 | return start; |
826 | 0 | } |
827 | | |
828 | | int dm_task_set_ro(struct dm_task *dmt) |
829 | 0 | { |
830 | 0 | dmt->read_only = 1; |
831 | 0 | return 1; |
832 | 0 | } |
833 | | |
834 | | int dm_task_set_read_ahead(struct dm_task *dmt, uint32_t read_ahead, |
835 | | uint32_t read_ahead_flags) |
836 | 0 | { |
837 | 0 | dmt->read_ahead = read_ahead; |
838 | 0 | dmt->read_ahead_flags = read_ahead_flags; |
839 | |
|
840 | 0 | return 1; |
841 | 0 | } |
842 | | |
843 | | int dm_task_suppress_identical_reload(struct dm_task *dmt) |
844 | 0 | { |
845 | 0 | dmt->suppress_identical_reload = 1; |
846 | 0 | return 1; |
847 | 0 | } |
848 | | |
849 | | int dm_task_set_add_node(struct dm_task *dmt, dm_add_node_t add_node) |
850 | 0 | { |
851 | 0 | switch (add_node) { |
852 | 0 | case DM_ADD_NODE_ON_RESUME: |
853 | 0 | case DM_ADD_NODE_ON_CREATE: |
854 | 0 | dmt->add_node = add_node; |
855 | 0 | return 1; |
856 | 0 | default: |
857 | 0 | log_error("Unknown add node parameter"); |
858 | 0 | return 0; |
859 | 0 | } |
860 | 0 | } |
861 | | |
862 | | int dm_task_set_newuuid(struct dm_task *dmt, const char *newuuid) |
863 | 0 | { |
864 | 0 | dm_string_mangling_t mangling_mode = dm_get_name_mangling_mode(); |
865 | 0 | char mangled_uuid[DM_UUID_LEN]; |
866 | 0 | int r = 0; |
867 | |
|
868 | 0 | if (strlen(newuuid) >= DM_UUID_LEN) { |
869 | 0 | log_error("Uuid \"%s\" too long", newuuid); |
870 | 0 | return 0; |
871 | 0 | } |
872 | | |
873 | 0 | if (!check_multiple_mangled_string_allowed(newuuid, "new UUID", mangling_mode)) |
874 | 0 | return_0; |
875 | | |
876 | 0 | if (mangling_mode != DM_STRING_MANGLING_NONE && |
877 | 0 | (r = mangle_string(newuuid, "new UUID", strlen(newuuid), mangled_uuid, |
878 | 0 | sizeof(mangled_uuid), mangling_mode)) < 0) { |
879 | 0 | log_error("Failed to mangle new device UUID \"%s\"", newuuid); |
880 | 0 | return 0; |
881 | 0 | } |
882 | | |
883 | 0 | if (r) { |
884 | 0 | log_debug_activation("New device uuid mangled [%s]: %s --> %s", |
885 | 0 | mangling_mode == DM_STRING_MANGLING_AUTO ? "auto" : "hex", |
886 | 0 | newuuid, mangled_uuid); |
887 | 0 | newuuid = mangled_uuid; |
888 | 0 | } |
889 | |
|
890 | 0 | dm_free(dmt->newname); |
891 | 0 | if (!(dmt->newname = dm_strdup(newuuid))) { |
892 | 0 | log_error("dm_task_set_newuuid: strdup(%s) failed", newuuid); |
893 | 0 | return 0; |
894 | 0 | } |
895 | 0 | dmt->new_uuid = 1; |
896 | |
|
897 | 0 | return 1; |
898 | 0 | } |
899 | | |
900 | | int dm_task_set_message(struct dm_task *dmt, const char *message) |
901 | 0 | { |
902 | 0 | dm_free(dmt->message); |
903 | 0 | if (!(dmt->message = dm_strdup(message))) { |
904 | 0 | log_error("dm_task_set_message: strdup failed"); |
905 | 0 | return 0; |
906 | 0 | } |
907 | | |
908 | 0 | return 1; |
909 | 0 | } |
910 | | |
911 | | int dm_task_set_sector(struct dm_task *dmt, uint64_t sector) |
912 | 0 | { |
913 | 0 | dmt->sector = sector; |
914 | |
|
915 | 0 | return 1; |
916 | 0 | } |
917 | | |
918 | | int dm_task_set_geometry(struct dm_task *dmt, const char *cylinders, const char *heads, |
919 | | const char *sectors, const char *start) |
920 | 0 | { |
921 | 0 | dm_free(dmt->geometry); |
922 | 0 | if (dm_asprintf(&(dmt->geometry), "%s %s %s %s", |
923 | 0 | cylinders, heads, sectors, start) < 0) { |
924 | 0 | log_error("dm_task_set_geometry: sprintf failed"); |
925 | 0 | return 0; |
926 | 0 | } |
927 | | |
928 | 0 | return 1; |
929 | 0 | } |
930 | | |
931 | | int dm_task_no_flush(struct dm_task *dmt) |
932 | 0 | { |
933 | 0 | dmt->no_flush = 1; |
934 | |
|
935 | 0 | return 1; |
936 | 0 | } |
937 | | |
938 | | int dm_task_no_open_count(struct dm_task *dmt) |
939 | 0 | { |
940 | 0 | dmt->no_open_count = 1; |
941 | |
|
942 | 0 | return 1; |
943 | 0 | } |
944 | | |
945 | | int dm_task_skip_lockfs(struct dm_task *dmt) |
946 | 0 | { |
947 | 0 | dmt->skip_lockfs = 1; |
948 | |
|
949 | 0 | return 1; |
950 | 0 | } |
951 | | |
952 | | int dm_task_secure_data(struct dm_task *dmt) |
953 | 0 | { |
954 | 0 | dmt->secure_data = 1; |
955 | |
|
956 | 0 | return 1; |
957 | 0 | } |
958 | | |
959 | | int dm_task_ima_measurement(struct dm_task *dmt) |
960 | 0 | { |
961 | 0 | dmt->ima_measurement = 1; |
962 | |
|
963 | 0 | return 1; |
964 | 0 | } |
965 | | |
966 | | int dm_task_retry_remove(struct dm_task *dmt) |
967 | 0 | { |
968 | 0 | dmt->retry_remove = 1; |
969 | |
|
970 | 0 | return 1; |
971 | 0 | } |
972 | | |
973 | | int dm_task_deferred_remove(struct dm_task *dmt) |
974 | 0 | { |
975 | 0 | dmt->deferred_remove = 1; |
976 | |
|
977 | 0 | return 1; |
978 | 0 | } |
979 | | |
980 | | int dm_task_query_inactive_table(struct dm_task *dmt) |
981 | 0 | { |
982 | 0 | dmt->query_inactive_table = 1; |
983 | |
|
984 | 0 | return 1; |
985 | 0 | } |
986 | | |
987 | | int dm_task_set_event_nr(struct dm_task *dmt, uint32_t event_nr) |
988 | 0 | { |
989 | 0 | dmt->event_nr = event_nr; |
990 | |
|
991 | 0 | return 1; |
992 | 0 | } |
993 | | |
994 | | int dm_task_set_record_timestamp(struct dm_task *dmt) |
995 | 0 | { |
996 | 0 | if (!_dm_ioctl_timestamp) |
997 | 0 | _dm_ioctl_timestamp = dm_timestamp_alloc(); |
998 | |
|
999 | 0 | if (!_dm_ioctl_timestamp) |
1000 | 0 | return_0; |
1001 | | |
1002 | 0 | dmt->record_timestamp = 1; |
1003 | |
|
1004 | 0 | return 1; |
1005 | 0 | } |
1006 | | |
1007 | | struct dm_timestamp *dm_task_get_ioctl_timestamp(struct dm_task *dmt) |
1008 | 0 | { |
1009 | 0 | return dmt->record_timestamp ? _dm_ioctl_timestamp : NULL; |
1010 | 0 | } |
1011 | | |
1012 | | struct target *create_target(uint64_t start, uint64_t len, const char *type, |
1013 | | const char *params) |
1014 | 0 | { |
1015 | 0 | struct target *t; |
1016 | |
|
1017 | 0 | if (strlen(type) >= DM_MAX_TYPE_NAME) { |
1018 | 0 | log_error("Target type name %s is too long.", type); |
1019 | 0 | return NULL; |
1020 | 0 | } |
1021 | | |
1022 | 0 | if (!(t = dm_zalloc(sizeof(*t)))) { |
1023 | 0 | log_error("create_target: malloc(%" PRIsize_t ") failed", |
1024 | 0 | sizeof(*t)); |
1025 | 0 | return NULL; |
1026 | 0 | } |
1027 | | |
1028 | 0 | if (!(t->params = dm_strdup(params))) { |
1029 | 0 | log_error("create_target: strdup(params) failed"); |
1030 | 0 | goto bad; |
1031 | 0 | } |
1032 | | |
1033 | 0 | if (!(t->type = dm_strdup(type))) { |
1034 | 0 | log_error("create_target: strdup(type) failed"); |
1035 | 0 | goto bad; |
1036 | 0 | } |
1037 | | |
1038 | 0 | t->start = start; |
1039 | 0 | t->length = len; |
1040 | 0 | return t; |
1041 | | |
1042 | 0 | bad: |
1043 | 0 | _dm_zfree_string(t->params); |
1044 | 0 | dm_free(t->type); |
1045 | 0 | dm_free(t); |
1046 | 0 | return NULL; |
1047 | 0 | } |
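| | /* |
| | * Usage sketch (callers normally reach this through dm_task_add_target()): |
| | * create_target(0, 2048, "linear", "253:1 0") describes a 1 MiB linear |
| | * mapping onto device 253:1 at offset 0; start and length are in 512-byte |
| | * sectors and the params string follows the target's own table syntax. |
| | */ |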
1048 | | |
1049 | | static char *_add_target(struct target *t, char *out, char *end) |
1050 | 0 | { |
1051 | 0 | char *out_sp = out; |
1052 | 0 | struct dm_target_spec sp; |
1053 | 0 | size_t sp_size = sizeof(struct dm_target_spec); |
1054 | 0 | unsigned int backslash_count = 0; |
1055 | 0 | int len; |
1056 | 0 | char *pt; |
1057 | |
|
1058 | 0 | if (strlen(t->type) >= sizeof(sp.target_type)) { |
1059 | 0 | log_error("Target type name %s is too long.", t->type); |
1060 | 0 | return NULL; |
1061 | 0 | } |
1062 | | |
1063 | 0 | sp.status = 0; |
1064 | 0 | sp.sector_start = t->start; |
1065 | 0 | sp.length = t->length; |
1066 | 0 | strncpy(sp.target_type, t->type, sizeof(sp.target_type) - 1); |
1067 | 0 | sp.target_type[sizeof(sp.target_type) - 1] = '\0'; |
1068 | |
|
1069 | 0 | out += sp_size; |
1070 | 0 | pt = t->params; |
1071 | |
|
1072 | 0 | while (*pt) |
1073 | 0 | if (*pt++ == '\\') |
1074 | 0 | backslash_count++; |
1075 | 0 | len = strlen(t->params) + backslash_count; |
1076 | |
|
1077 | 0 | if ((out >= end) || (out + len + 1) >= end) { |
1078 | 0 | log_error("Ran out of memory building ioctl parameter"); |
1079 | 0 | return NULL; |
1080 | 0 | } |
1081 | | |
1082 | 0 | if (backslash_count) { |
1083 | | /* replace "\" with "\\" */ |
1084 | 0 | pt = t->params; |
1085 | 0 | do { |
1086 | 0 | if (*pt == '\\') |
1087 | 0 | *out++ = '\\'; |
1088 | 0 | *out++ = *pt++; |
1089 | 0 | } while (*pt); |
1090 | 0 | *out++ = '\0'; |
1091 | 0 | } |
1092 | 0 | else { |
1093 | 0 | strcpy(out, t->params); |
1094 | 0 | out += len + 1; |
1095 | 0 | } |
1096 | | |
1097 | | /* align next block */ |
1098 | 0 | out = _align(out, ALIGNMENT); |
1099 | |
|
1100 | 0 | sp.next = out - out_sp; |
1101 | 0 | memcpy(out_sp, &sp, sp_size); |
1102 | |
|
1103 | 0 | return out; |
1104 | 0 | } |
1105 | | |
1106 | | static int _lookup_dev_name(uint64_t dev, char *buf, size_t len) |
1107 | 0 | { |
1108 | 0 | struct dm_names *names; |
1109 | 0 | unsigned next = 0; |
1110 | 0 | struct dm_task *dmt; |
1111 | 0 | int r = 0; |
1112 | | |
1113 | 0 | if (!(dmt = dm_task_create(DM_DEVICE_LIST))) |
1114 | 0 | return 0; |
1115 | | |
1116 | 0 | if (!dm_task_run(dmt)) |
1117 | 0 | goto out; |
1118 | | |
1119 | 0 | if (!(names = dm_task_get_names(dmt))) |
1120 | 0 | goto out; |
1121 | | |
1122 | 0 | if (!names->dev) |
1123 | 0 | goto out; |
1124 | | |
1125 | 0 | do { |
1126 | 0 | names = (struct dm_names *)((char *) names + next); |
1127 | 0 | if (names->dev == dev) { |
1128 | 0 | memccpy(buf, names->name, 0, len); |
1129 | 0 | r = 1; |
1130 | 0 | break; |
1131 | 0 | } |
1132 | 0 | next = names->next; |
1133 | 0 | } while (next); |
1134 | | |
1135 | 0 | out: |
1136 | 0 | dm_task_destroy(dmt); |
1137 | 0 | return r; |
1138 | 0 | } |
1139 | | |
1140 | | static int _add_params(int type) |
1141 | 0 | { |
1142 | 0 | switch (type) { |
1143 | 0 | case DM_DEVICE_REMOVE_ALL: |
1144 | 0 | case DM_DEVICE_CREATE: |
1145 | 0 | case DM_DEVICE_REMOVE: |
1146 | 0 | case DM_DEVICE_SUSPEND: |
1147 | 0 | case DM_DEVICE_STATUS: |
1148 | 0 | case DM_DEVICE_CLEAR: |
1149 | 0 | case DM_DEVICE_ARM_POLL: |
1150 | 0 | return 0; /* IOCTL_FLAGS_NO_PARAMS in drivers/md/dm-ioctl.c */ |
1151 | 0 | default: |
1152 | 0 | return 1; |
1153 | 0 | } |
1154 | 0 | } |
1155 | | |
1156 | | static struct dm_ioctl *_flatten(struct dm_task *dmt, unsigned repeat_count) |
1157 | 0 | { |
1158 | 0 | const size_t min_size = 16 * 1024; |
1159 | 0 | const int (*version)[3]; |
1160 | |
|
1161 | 0 | struct dm_ioctl *dmi; |
1162 | 0 | struct target *t; |
1163 | 0 | struct dm_target_msg *tmsg; |
1164 | 0 | size_t len = sizeof(struct dm_ioctl); |
1165 | 0 | char *b, *e; |
1166 | 0 | int count = 0; |
1167 | |
|
1168 | 0 | if (_add_params(dmt->type)) |
1169 | 0 | for (t = dmt->head; t; t = t->next) { |
1170 | 0 | len += sizeof(struct dm_target_spec); |
1171 | 0 | len += strlen(t->params) + 1 + ALIGNMENT; |
1172 | 0 | count++; |
1173 | 0 | } |
1174 | 0 | else if (dmt->head) |
1175 | 0 | log_debug_activation(INTERNAL_ERROR "dm '%s' ioctl should not define parameters.", |
1176 | 0 | _cmd_data_v4[dmt->type].name); |
1177 | |
|
1178 | 0 | if (count && (dmt->sector || dmt->message)) { |
1179 | 0 | log_error("targets and message are incompatible"); |
1180 | 0 | return NULL; |
1181 | 0 | } |
1182 | | |
1183 | 0 | if (count && dmt->newname) { |
1184 | 0 | log_error("targets and rename are incompatible"); |
1185 | 0 | return NULL; |
1186 | 0 | } |
1187 | | |
1188 | 0 | if (count && dmt->geometry) { |
1189 | 0 | log_error("targets and geometry are incompatible"); |
1190 | 0 | return NULL; |
1191 | 0 | } |
1192 | | |
1193 | 0 | if (dmt->newname && (dmt->sector || dmt->message)) { |
1194 | 0 | log_error("message and rename are incompatible"); |
1195 | 0 | return NULL; |
1196 | 0 | } |
1197 | | |
1198 | 0 | if (dmt->newname && dmt->geometry) { |
1199 | 0 | log_error("geometry and rename are incompatible"); |
1200 | 0 | return NULL; |
1201 | 0 | } |
1202 | | |
1203 | 0 | if (dmt->geometry && (dmt->sector || dmt->message)) { |
1204 | 0 | log_error("geometry and message are incompatible"); |
1205 | 0 | return NULL; |
1206 | 0 | } |
1207 | | |
1208 | 0 | if (dmt->sector && !dmt->message) { |
1209 | 0 | log_error("message is required with sector"); |
1210 | 0 | return NULL; |
1211 | 0 | } |
1212 | | |
1213 | 0 | if (dmt->newname) |
1214 | 0 | len += strlen(dmt->newname) + 1; |
1215 | |
|
1216 | 0 | if (dmt->message) |
1217 | 0 | len += sizeof(struct dm_target_msg) + strlen(dmt->message) + 1; |
1218 | |
|
1219 | 0 | if (dmt->geometry) |
1220 | 0 | len += strlen(dmt->geometry) + 1; |
1221 | | |
1222 | | /* |
1223 | | * Give len a minimum size so that we have space to store |
1224 | | * dependencies or status information. |
1225 | | */ |
1226 | 0 | if (len < min_size) |
1227 | 0 | len = min_size; |
1228 | | |
1229 | | /* Increase buffer size if repeating because buffer was too small */ |
1230 | 0 | while (repeat_count--) |
1231 | 0 | len *= 2; |
1232 | |
|
1233 | 0 | if (!(dmi = dm_zalloc(len))) |
1234 | 0 | return NULL; |
1235 | | |
1236 | 0 | version = &_cmd_data_v4[dmt->type].version; |
1237 | |
|
1238 | 0 | dmi->version[0] = (*version)[0]; |
1239 | 0 | dmi->version[1] = (*version)[1]; |
1240 | 0 | dmi->version[2] = (*version)[2]; |
1241 | |
|
1242 | 0 | dmi->data_size = len; |
1243 | 0 | dmi->data_start = sizeof(struct dm_ioctl); |
1244 | |
|
1245 | 0 | if (dmt->minor >= 0) { |
1246 | 0 | if (!_dm_multiple_major_support && dmt->allow_default_major_fallback && |
1247 | 0 | dmt->major != (int) _dm_device_major) { |
1248 | 0 | log_verbose("Overriding major number of %d " |
1249 | 0 | "with %u for persistent device.", |
1250 | 0 | dmt->major, _dm_device_major); |
1251 | 0 | dmt->major = _dm_device_major; |
1252 | 0 | } |
1253 | |
|
1254 | 0 | if (dmt->major <= 0) { |
1255 | 0 | log_error("Missing major number for persistent device."); |
1256 | 0 | goto bad; |
1257 | 0 | } |
1258 | | |
1259 | 0 | dmi->flags |= DM_PERSISTENT_DEV_FLAG; |
1260 | 0 | dmi->dev = MKDEV(dmt->major, dmt->minor); |
1261 | 0 | } |
1262 | | |
1263 | | /* Does driver support device number referencing? */ |
1264 | 0 | if (_dm_version_minor < 3 && !DEV_NAME(dmt) && !DEV_UUID(dmt) && dmi->dev) { |
1265 | 0 | if (!_lookup_dev_name(dmi->dev, dmi->name, sizeof(dmi->name))) { |
1266 | 0 | log_error("Unable to find name for device (%" PRIu32 |
1267 | 0 | ":%" PRIu32 ")", dmt->major, dmt->minor); |
1268 | 0 | goto bad; |
1269 | 0 | } |
1270 | 0 | log_verbose("device (%" PRIu32 ":%" PRIu32 ") is %s " |
1271 | 0 | "for compatibility with old kernel", |
1272 | 0 | dmt->major, dmt->minor, dmi->name); |
1273 | 0 | } |
1274 | | |
1275 | | /* FIXME Until resume ioctl supplies name, use dev_name for readahead */ |
1276 | 0 | if (DEV_NAME(dmt) && |
1277 | 0 | (((dmt->type != DM_DEVICE_RESUME) && |
1278 | 0 | (dmt->type != DM_DEVICE_RELOAD)) || |
1279 | 0 | (dmt->minor < 0) || (dmt->major < 0))) |
1280 | | /* When RESUME or RELOAD sets both maj:min and dev_name, use just maj:min; |
1281 | | * the dev_name passed in is still useful for better error/debug messages */ |
1282 | 0 | memccpy(dmi->name, DEV_NAME(dmt), 0, sizeof(dmi->name)); |
1283 | |
|
1284 | 0 | if (DEV_UUID(dmt)) |
1285 | 0 | memccpy(dmi->uuid, DEV_UUID(dmt), 0, sizeof(dmi->uuid)); |
1286 | |
|
1287 | 0 | if (dmt->type == DM_DEVICE_SUSPEND) |
1288 | 0 | dmi->flags |= DM_SUSPEND_FLAG; |
1289 | 0 | if (dmt->no_flush) { |
1290 | 0 | if (_dm_version_minor < 12) |
1291 | 0 | log_verbose("No flush flag unsupported by kernel. " |
1292 | 0 | "Buffers will be flushed."); |
1293 | 0 | else |
1294 | 0 | dmi->flags |= DM_NOFLUSH_FLAG; |
1295 | 0 | } |
1296 | 0 | if (dmt->read_only) |
1297 | 0 | dmi->flags |= DM_READONLY_FLAG; |
1298 | 0 | if (dmt->skip_lockfs) |
1299 | 0 | dmi->flags |= DM_SKIP_LOCKFS_FLAG; |
1300 | 0 | if (dmt->deferred_remove && (dmt->type == DM_DEVICE_REMOVE || dmt->type == DM_DEVICE_REMOVE_ALL)) |
1301 | 0 | dmi->flags |= DM_DEFERRED_REMOVE; |
1302 | |
|
1303 | 0 | if (dmt->secure_data) { |
1304 | 0 | if (_dm_version_minor < 20) |
1305 | 0 | log_verbose("Secure data flag unsupported by kernel. " |
1306 | 0 | "Buffers will not be wiped after use."); |
1307 | 0 | dmi->flags |= DM_SECURE_DATA_FLAG; |
1308 | 0 | } |
1309 | 0 | if (dmt->query_inactive_table) { |
1310 | 0 | if (!_dm_inactive_supported()) |
1311 | 0 | log_warn_suppress(_dm_warn_inactive_suppress++, |
1312 | 0 | "WARNING: Inactive table query unsupported by kernel. " |
1313 | 0 | "It will use live table."); |
1314 | 0 | dmi->flags |= DM_QUERY_INACTIVE_TABLE_FLAG; |
1315 | 0 | } |
1316 | 0 | if (dmt->new_uuid) { |
1317 | 0 | if (_dm_version_minor < 19) { |
1318 | 0 | log_error("Setting UUID unsupported by kernel. " |
1319 | 0 | "Aborting operation."); |
1320 | 0 | goto bad; |
1321 | 0 | } |
1322 | 0 | dmi->flags |= DM_UUID_FLAG; |
1323 | 0 | } |
1324 | 0 | if (dmt->ima_measurement) { |
1325 | 0 | if (_dm_version_minor < 45) { |
1326 | 0 | log_error("IMA measurement unsupported by kernel. " |
1327 | 0 | "Aborting operation."); |
1328 | 0 | goto bad; |
1329 | 0 | } |
1330 | 0 | dmi->flags |= DM_IMA_MEASUREMENT_FLAG; |
1331 | 0 | } |
1332 | | |
1333 | 0 | dmi->target_count = count; |
1334 | 0 | dmi->event_nr = dmt->event_nr; |
1335 | |
|
1336 | 0 | b = (char *) (dmi + 1); |
1337 | 0 | e = (char *) dmi + len; |
1338 | |
|
1339 | 0 | if (_add_params(dmt->type)) |
1340 | 0 | for (t = dmt->head; t; t = t->next) |
1341 | 0 | if (!(b = _add_target(t, b, e))) |
1342 | 0 | goto_bad; |
1343 | | |
1344 | 0 | if (dmt->newname) |
1345 | 0 | strcpy(b, dmt->newname); |
1346 | |
|
1347 | 0 | if (dmt->message) { |
1348 | 0 | tmsg = (struct dm_target_msg *) b; |
1349 | 0 | tmsg->sector = dmt->sector; |
1350 | 0 | strcpy(tmsg->message, dmt->message); |
1351 | 0 | } |
1352 | |
|
1353 | 0 | if (dmt->geometry) |
1354 | 0 | strcpy(b, dmt->geometry); |
1355 | |
|
1356 | 0 | return dmi; |
1357 | | |
1358 | 0 | bad: |
1359 | 0 | _dm_zfree_dmi(dmi); |
1360 | 0 | return NULL; |
1361 | 0 | } |
1362 | | |
1363 | | static int _process_mapper_dir(struct dm_task *dmt) |
1364 | 0 | { |
1365 | 0 | struct dirent *dirent; |
1366 | 0 | DIR *d; |
1367 | 0 | const char *dir; |
1368 | 0 | int r = 1; |
1369 | |
|
1370 | 0 | dir = dm_dir(); |
1371 | 0 | if (!(d = opendir(dir))) { |
1372 | 0 | log_sys_error("opendir", dir); |
1373 | 0 | return 0; |
1374 | 0 | } |
1375 | | |
1376 | 0 | while ((dirent = readdir(d))) { |
1377 | 0 | if (!strcmp(dirent->d_name, ".") || |
1378 | 0 | !strcmp(dirent->d_name, "..") || |
1379 | 0 | !strcmp(dirent->d_name, "control")) |
1380 | 0 | continue; |
1381 | 0 | if (!dm_task_set_name(dmt, dirent->d_name)) { |
1382 | 0 | r = 0; |
1383 | 0 | stack; |
1384 | 0 | continue; /* try next name */ |
1385 | 0 | } |
1386 | 0 | if (!dm_task_run(dmt)) { |
1387 | 0 | r = 0; |
1388 | 0 | stack; /* keep going */ |
1389 | 0 | } |
1390 | 0 | } |
1391 | |
|
1392 | 0 | if (closedir(d)) |
1393 | 0 | log_sys_debug("closedir", dir); |
1394 | |
|
1395 | 0 | return r; |
1396 | 0 | } |
1397 | | |
1398 | | static int _process_all_v4(struct dm_task *dmt) |
1399 | 0 | { |
1400 | 0 | struct dm_task *task; |
1401 | 0 | struct dm_names *names; |
1402 | 0 | unsigned next = 0; |
1403 | 0 | int r = 1; |
1404 | |
|
1405 | 0 | if (!(task = dm_task_create(DM_DEVICE_LIST))) |
1406 | 0 | return 0; |
1407 | | |
1408 | 0 | if (!dm_task_run(task)) { |
1409 | 0 | r = 0; |
1410 | 0 | goto out; |
1411 | 0 | } |
1412 | | |
1413 | 0 | if (!(names = dm_task_get_names(task))) { |
1414 | 0 | r = 0; |
1415 | 0 | goto out; |
1416 | 0 | } |
1417 | | |
1418 | 0 | if (!names->dev) |
1419 | 0 | goto out; |
1420 | | |
1421 | 0 | do { |
1422 | 0 | names = (struct dm_names *)((char *) names + next); |
1423 | 0 | if (!dm_task_set_name(dmt, names->name)) { |
1424 | 0 | r = 0; |
1425 | 0 | goto out; |
1426 | 0 | } |
1427 | 0 | if (!dm_task_run(dmt)) |
1428 | 0 | r = 0; |
1429 | 0 | next = names->next; |
1430 | 0 | } while (next); |
1431 | | |
1432 | 0 | out: |
1433 | 0 | dm_task_destroy(task); |
1434 | 0 | return r; |
1435 | 0 | } |
1436 | | |
1437 | | static int _mknodes_v4(struct dm_task *dmt) |
1438 | 0 | { |
1439 | 0 | (void) _process_mapper_dir(dmt); |
1440 | |
|
1441 | 0 | return _process_all_v4(dmt); |
1442 | 0 | } |
1443 | | |
1444 | | /* |
1445 | | * If an operation that uses a cookie fails, decrement the |
1446 | | * semaphore ourselves instead of leaving it to udev. |
1447 | | */ |
1448 | | static int _udev_complete(struct dm_task *dmt) |
1449 | 0 | { |
1450 | 0 | uint16_t base; |
1451 | |
|
1452 | 0 | if (dmt->cookie_set && |
1453 | 0 | (base = dmt->event_nr & ~DM_UDEV_FLAGS_MASK)) |
1454 | | /* strip flags from the cookie and use cookie magic instead */ |
1455 | 0 | return dm_udev_complete(base | (DM_COOKIE_MAGIC << |
1456 | 0 | DM_UDEV_FLAGS_SHIFT)); |
1457 | | |
1458 | 0 | return 1; |
1459 | 0 | } |
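| | /* |
| | * Note: dmt->event_nr packs the udev flags (DM_UDEV_FLAGS_MASK) together |
| | * with the cookie base.  Stripping the flags and re-adding DM_COOKIE_MAGIC |
| | * rebuilds the value dm_udev_complete() expects, so the synchronization |
| | * semaphore is decremented here rather than by a udev rule that will never |
| | * fire for the failed ioctl. |
| | */ |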
1460 | | |
1461 | | #ifdef DM_IOCTLS |
1462 | | static int _check_uevent_generated(struct dm_ioctl *dmi) |
1463 | 0 | { |
1464 | 0 | if (!dm_check_version() || |
1465 | 0 | ((_dm_version == 4) ? _dm_version_minor < 17 : _dm_version < 4)) |
1466 | | /* can't check, assume uevent is generated */ |
1467 | 0 | return 1; |
1468 | | |
1469 | 0 | return dmi->flags & DM_UEVENT_GENERATED_FLAG; |
1470 | 0 | } |
1471 | | #endif |
1472 | | |
1473 | | static int _create_and_load_v4(struct dm_task *dmt) |
1474 | 0 | { |
1475 | 0 | struct dm_info info; |
1476 | 0 | struct dm_task *task; |
1477 | 0 | int r, ioctl_errno = 0; |
1478 | 0 | uint32_t cookie; |
1479 | | |
1480 | | /* Use new task struct to create the device */ |
1481 | 0 | if (!(task = dm_task_create(DM_DEVICE_CREATE))) { |
1482 | 0 | _udev_complete(dmt); |
1483 | 0 | return_0; |
1484 | 0 | } |
1485 | | |
1486 | | /* Copy across relevant fields */ |
1487 | 0 | if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) |
1488 | 0 | goto_bad; |
1489 | | |
1490 | 0 | if (dmt->uuid && !dm_task_set_uuid(task, dmt->uuid)) |
1491 | 0 | goto_bad; |
1492 | | |
1493 | 0 | task->major = dmt->major; |
1494 | 0 | task->minor = dmt->minor; |
1495 | 0 | task->uid = dmt->uid; |
1496 | 0 | task->gid = dmt->gid; |
1497 | 0 | task->mode = dmt->mode; |
1498 | | /* FIXME: Just for udev_check in dm_task_run. Can we avoid this? */ |
1499 | 0 | task->event_nr = dmt->event_nr & DM_UDEV_FLAGS_MASK; |
1500 | 0 | task->cookie_set = dmt->cookie_set; |
1501 | 0 | task->add_node = dmt->add_node; |
1502 | |
|
1503 | 0 | if (!dm_task_run(task)) { |
1504 | 0 | ioctl_errno = task->ioctl_errno; |
1505 | 0 | goto_bad; |
1506 | 0 | } |
1507 | | |
1508 | 0 | if (!dm_task_get_info(task, &info) || !info.exists) |
1509 | 0 | goto_bad; |
1510 | | |
1511 | 0 | dm_task_destroy(task); |
1512 | | |
1513 | | /* Next load the table */ |
1514 | 0 | if (!(task = dm_task_create(DM_DEVICE_RELOAD))) { |
1515 | 0 | stack; |
1516 | 0 | _udev_complete(dmt); |
1517 | 0 | goto revert; |
1518 | 0 | } |
1519 | | |
1520 | | /* Copy across relevant fields */ |
1521 | 0 | if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) { |
1522 | 0 | stack; |
1523 | 0 | dm_task_destroy(task); |
1524 | 0 | _udev_complete(dmt); |
1525 | 0 | goto revert; |
1526 | 0 | } |
1527 | | |
1528 | 0 | task->major = info.major; |
1529 | 0 | task->minor = info.minor; |
1530 | 0 | task->read_only = dmt->read_only; |
1531 | 0 | task->head = dmt->head; |
1532 | 0 | task->tail = dmt->tail; |
1533 | 0 | task->secure_data = dmt->secure_data; |
1534 | 0 | task->ima_measurement = dmt->ima_measurement; |
1535 | |
|
1536 | 0 | r = dm_task_run(task); |
1537 | 0 | if (!r) |
1538 | 0 | ioctl_errno = task->ioctl_errno; |
1539 | |
|
1540 | 0 | task->head = NULL; |
1541 | 0 | task->tail = NULL; |
1542 | 0 | dm_task_destroy(task); |
1543 | |
|
1544 | 0 | if (!r) { |
1545 | 0 | stack; |
1546 | 0 | _udev_complete(dmt); |
1547 | 0 | goto revert; |
1548 | 0 | } |
1549 | | |
1550 | | /* Use the original structure last so the info will be correct */ |
1551 | 0 | dmt->type = DM_DEVICE_RESUME; |
1552 | 0 | dm_free(dmt->uuid); |
1553 | 0 | dmt->uuid = NULL; |
1554 | 0 | dm_free(dmt->mangled_uuid); |
1555 | 0 | dmt->mangled_uuid = NULL; |
1556 | | /* coverity[double_free] recursive function call */ |
1557 | 0 | _dm_task_free_targets(dmt); |
1558 | |
|
1559 | 0 | if (dm_task_run(dmt)) |
1560 | 0 | return 1; |
1561 | | |
1562 | 0 | revert: |
1563 | 0 | dmt->type = DM_DEVICE_REMOVE; |
1564 | 0 | dm_free(dmt->uuid); |
1565 | 0 | dmt->uuid = NULL; |
1566 | 0 | dm_free(dmt->mangled_uuid); |
1567 | 0 | dmt->mangled_uuid = NULL; |
1568 | | /* coverity[double_free] recursive function call */ |
1569 | 0 | _dm_task_free_targets(dmt); |
1570 | | |
1571 | | /* |
1572 | | * Also udev-synchronize "remove" dm task that is a part of this revert! |
1573 | | * But only if the original dm task was supposed to be synchronized. |
1574 | | */ |
1575 | 0 | if (dmt->cookie_set) { |
1576 | 0 | cookie = (dmt->event_nr & ~DM_UDEV_FLAGS_MASK) | |
1577 | 0 | (DM_COOKIE_MAGIC << DM_UDEV_FLAGS_SHIFT); |
1578 | 0 | if (!dm_task_set_cookie(dmt, &cookie, |
1579 | 0 | (dmt->event_nr & DM_UDEV_FLAGS_MASK) >> |
1580 | 0 | DM_UDEV_FLAGS_SHIFT)) |
1581 | 0 | stack; /* keep going */ |
1582 | 0 | } |
1583 | |
|
1584 | 0 | if (!dm_task_run(dmt)) |
1585 | 0 | log_error("Failed to revert device creation."); |
1586 | |
|
1587 | 0 | if (ioctl_errno != 0) |
1588 | 0 | dmt->ioctl_errno = ioctl_errno; |
1589 | |
|
1590 | 0 | return 0; |
1591 | | |
1592 | 0 | bad: |
1593 | 0 | dm_task_destroy(task); |
1594 | 0 | _udev_complete(dmt); |
1595 | |
|
1596 | 0 | if (ioctl_errno != 0) |
1597 | 0 | dmt->ioctl_errno = ioctl_errno; |
1598 | |
|
1599 | 0 | return 0; |
1600 | 0 | } |
1601 | | |
1602 | | uint64_t dm_task_get_existing_table_size(struct dm_task *dmt) |
1603 | 0 | { |
1604 | 0 | return dmt->existing_table_size; |
1605 | 0 | } |
1606 | | |
1607 | | static int _reload_with_suppression_v4(struct dm_task *dmt) |
1608 | 0 | { |
1609 | 0 | struct dm_task *task; |
1610 | 0 | struct target *t1, *t2; |
1611 | 0 | size_t len; |
1612 | 0 | int r; |
1613 | | |
1614 | | /* New task to get existing table information */ |
1615 | 0 | if (!(task = dm_task_create(DM_DEVICE_TABLE))) { |
1616 | 0 | log_error("Failed to create device-mapper task struct"); |
1617 | 0 | return 0; |
1618 | 0 | } |
1619 | | |
1620 | | /* Copy across relevant fields */ |
1621 | 0 | if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) { |
1622 | 0 | dm_task_destroy(task); |
1623 | 0 | return 0; |
1624 | 0 | } |
1625 | | |
1626 | 0 | if (dmt->uuid && !dm_task_set_uuid(task, dmt->uuid)) { |
1627 | 0 | dm_task_destroy(task); |
1628 | 0 | return 0; |
1629 | 0 | } |
1630 | | |
1631 | 0 | task->major = dmt->major; |
1632 | 0 | task->minor = dmt->minor; |
1633 | |
|
1634 | 0 | r = dm_task_run(task); |
1635 | |
|
1636 | 0 | if (!r) { |
1637 | 0 | dm_task_destroy(task); |
1638 | 0 | return r; |
1639 | 0 | } |
1640 | | |
1641 | | /* Store existing table size */ |
1642 | 0 | t2 = task->head; |
1643 | 0 | while (t2 && t2->next) |
1644 | 0 | t2 = t2->next; |
1645 | 0 | dmt->existing_table_size = t2 ? t2->start + t2->length : 0; |
1646 | |
|
1647 | 0 | if (((task->dmi.v4->flags & DM_READONLY_FLAG) ? 1 : 0) != dmt->read_only) |
1648 | 0 | goto no_match; |
1649 | | |
1650 | 0 | t1 = dmt->head; |
1651 | 0 | t2 = task->head; |
1652 | |
|
1653 | 0 | while (t1 && t2) { |
1654 | 0 | len = strlen(t2->params); |
1655 | 0 | while (len-- > 0 && t2->params[len] == ' ') |
1656 | 0 | t2->params[len] = '\0'; |
1657 | 0 | if ((t1->start != t2->start) || |
1658 | 0 | (t1->length != t2->length) || |
1659 | 0 | (strcmp(t1->type, t2->type)) || |
1660 | 0 | (strcmp(t1->params, t2->params))) |
1661 | 0 | goto no_match; |
1662 | 0 | t1 = t1->next; |
1663 | 0 | t2 = t2->next; |
1664 | 0 | } |
1665 | | |
1666 | 0 | if (!t1 && !t2) { |
1667 | 0 | dmt->dmi.v4 = task->dmi.v4; |
1668 | 0 | task->dmi.v4 = NULL; |
1669 | 0 | dm_task_destroy(task); |
1670 | 0 | return 1; |
1671 | 0 | } |
1672 | | |
1673 | 0 | no_match: |
1674 | 0 | dm_task_destroy(task); |
1675 | | |
1676 | | /* Now do the original reload */ |
1677 | 0 | dmt->suppress_identical_reload = 0; |
1678 | 0 | r = dm_task_run(dmt); |
1679 | |
|
1680 | 0 | return r; |
1681 | 0 | } |
1682 | | |
1683 | | static int _check_children_not_suspended_v4(struct dm_task *dmt, uint64_t device) |
1684 | 0 | { |
1685 | 0 | struct dm_task *task; |
1686 | 0 | struct dm_info info; |
1687 | 0 | struct dm_deps *deps; |
1688 | 0 | int r = 0; |
1689 | 0 | uint32_t i; |
1690 | | |
1691 | | /* Find dependencies */ |
1692 | 0 | if (!(task = dm_task_create(DM_DEVICE_DEPS))) |
1693 | 0 | return 0; |
1694 | | |
1695 | | /* Copy across or set relevant fields */ |
1696 | 0 | if (device) { |
1697 | 0 | task->major = MAJOR(device); |
1698 | 0 | task->minor = MINOR(device); |
1699 | 0 | } else { |
1700 | 0 | if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) |
1701 | 0 | goto out; |
1702 | | |
1703 | 0 | if (dmt->uuid && !dm_task_set_uuid(task, dmt->uuid)) |
1704 | 0 | goto out; |
1705 | | |
1706 | 0 | task->major = dmt->major; |
1707 | 0 | task->minor = dmt->minor; |
1708 | 0 | } |
1709 | | |
1710 | 0 | task->uid = dmt->uid; |
1711 | 0 | task->gid = dmt->gid; |
1712 | 0 | task->mode = dmt->mode; |
1713 | | /* FIXME: Just for udev_check in dm_task_run. Can we avoid this? */ |
1714 | 0 | task->event_nr = dmt->event_nr & DM_UDEV_FLAGS_MASK; |
1715 | 0 | task->cookie_set = dmt->cookie_set; |
1716 | 0 | task->add_node = dmt->add_node; |
1717 | | |
1718 | 0 | if (!(r = dm_task_run(task))) |
1719 | 0 | goto out; |
1720 | | |
1721 | 0 | if (!dm_task_get_info(task, &info) || !info.exists) |
1722 | 0 | goto out; |
1723 | | |
1724 | | /* |
1725 | | * Warn if any of the devices this device depends upon are already |
1726 | | * suspended: I/O could become trapped between the two devices. |
1727 | | */ |
1728 | 0 | if (info.suspended) { |
1729 | 0 | if (!device) |
1730 | 0 | log_debug_activation("Attempting to suspend a device that is already suspended " |
1731 | 0 | "(%u:%u)", info.major, info.minor); |
1732 | 0 | else |
1733 | 0 | log_error(INTERNAL_ERROR "Attempt to suspend device %s%s%s%.0d%s%.0d%s%s" |
1734 | 0 | "that uses already-suspended device (%u:%u)", |
1735 | 0 | DEV_NAME(dmt) ? : "", DEV_UUID(dmt) ? : "", |
1736 | 0 | dmt->major > 0 ? "(" : "", |
1737 | 0 | dmt->major > 0 ? dmt->major : 0, |
1738 | 0 | dmt->major > 0 ? ":" : "", |
1739 | 0 | dmt->minor > 0 ? dmt->minor : 0, |
1740 | 0 | dmt->major > 0 && dmt->minor == 0 ? "0" : "", |
1741 | 0 | dmt->major > 0 ? ") " : "", |
1742 | 0 | info.major, info.minor); |
1743 | | |
1744 | | /* No need for further recursion */ |
1745 | 0 | r = 1; |
1746 | 0 | goto out; |
1747 | 0 | } |
1748 | | |
1749 | 0 | if (!(deps = dm_task_get_deps(task))) |
1750 | 0 | goto out; |
1751 | | |
1752 | 0 | for (i = 0; i < deps->count; i++) { |
1753 | | /* Only recurse with dm devices */ |
1754 | 0 | if (MAJOR(deps->device[i]) != _dm_device_major) |
1755 | 0 | continue; |
1756 | | |
1757 | 0 | if (!_check_children_not_suspended_v4(task, deps->device[i])) |
1758 | 0 | goto out; |
1759 | 0 | } |
1760 | | |
1761 | 0 | r = 1; |
1762 | |
1763 | 0 | out: |
1764 | 0 | dm_task_destroy(task); |
1765 | |
1766 | 0 | return r; |
1767 | 0 | } |
1768 | | |
1769 | | static int _suspend_with_validation_v4(struct dm_task *dmt) |
1770 | 0 | { |
1771 | | /* Avoid recursion */ |
1772 | 0 | dmt->enable_checks = 0; |
1773 | | |
1774 | | /* |
1775 | | * Ensure we can't leave any I/O trapped between suspended devices. |
1776 | | */ |
1777 | 0 | if (!_check_children_not_suspended_v4(dmt, 0)) |
1778 | 0 | return 0; |
1779 | | |
1780 | | /* Finally, perform the original suspend. */ |
1781 | 0 | return dm_task_run(dmt); |
1782 | 0 | } |
1783 | | |
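For context, a minimal caller-side sketch (not part of this file; it assumes only the public libdevmapper.h API, including dm_task_enable_checks()) of how the validation path above is reached: when checks are enabled on a DM_DEVICE_SUSPEND task, dm_task_run() detours through _suspend_with_validation_v4() before performing the real suspend.

/* Sketch only: suspend a device with the extra child-suspension checks. */
#include <libdevmapper.h>

static int suspend_with_checks(const char *name)
{
	struct dm_task *dmt;
	int r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_SUSPEND)))
		return 0;

	if (dm_task_set_name(dmt, name) &&
	    dm_task_enable_checks(dmt))	/* routes through the validation path above */
		r = dm_task_run(dmt);

	dm_task_destroy(dmt);
	return r;
}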
1784 | | static const char *_sanitise_message(char *message) |
1785 | 0 | { |
1786 | 0 | const char *sanitised_message = message ?: ""; |
1787 | | |
1788 | | /* FIXME: Check for whitespace variations. */ |
1789 | | /* This traps what cryptsetup sends us. */ |
1790 | 0 | if (message && !strncasecmp(message, "key set", 7)) |
1791 | 0 | sanitised_message = "key set"; |
1792 | |
1793 | 0 | return sanitised_message; |
1794 | 0 | } |
1795 | | |
1796 | | #ifdef DM_IOCTLS |
1797 | | static int _do_dm_ioctl_unmangle_string(char *str, const char *str_name, |
1798 | | char *buf, size_t buf_size, |
1799 | | dm_string_mangling_t mode) |
1800 | 0 | { |
1801 | 0 | int r; |
1802 | |
1803 | 0 | if (mode == DM_STRING_MANGLING_NONE) |
1804 | 0 | return 1; |
1805 | | |
1806 | 0 | if (!check_multiple_mangled_string_allowed(str, str_name, mode)) |
1807 | 0 | return_0; |
1808 | | |
1809 | 0 | if ((r = unmangle_string(str, str_name, strlen(str), buf, buf_size, mode)) < 0) { |
1810 | 0 | log_debug_activation("_do_dm_ioctl_unmangle_string: failed to " |
1811 | 0 | "unmangle %s \"%s\"", str_name, str); |
1812 | 0 | return 0; |
1813 | 0 | } |
1814 | | |
1815 | 0 | if (r) |
1816 | 0 | memcpy(str, buf, strlen(buf) + 1); |
1817 | |
1818 | 0 | return 1; |
1819 | 0 | } |
1820 | | |
1821 | | static int _dm_ioctl_unmangle_names(int type, struct dm_ioctl *dmi) |
1822 | 0 | { |
1823 | 0 | char buf[DM_NAME_LEN]; |
1824 | 0 | char buf_uuid[DM_UUID_LEN]; |
1825 | 0 | struct dm_name_list *names; |
1826 | 0 | unsigned next = 0; |
1827 | 0 | char *name; |
1828 | 0 | int r = 1; |
1829 | 0 | uint32_t *event_nr; |
1830 | 0 | char *uuid_ptr; |
1831 | 0 | dm_string_mangling_t mangling_mode = dm_get_name_mangling_mode(); |
1832 | |
1833 | 0 | if ((name = dmi->name)) |
1834 | 0 | r &= _do_dm_ioctl_unmangle_string(name, "name", buf, sizeof(buf), |
1835 | 0 | mangling_mode); |
1836 | |
1837 | 0 | if (type == DM_DEVICE_LIST && |
1838 | 0 | ((names = ((struct dm_name_list *) ((char *)dmi + dmi->data_start)))) && |
1839 | 0 | names->dev) { |
1840 | 0 | do { |
1841 | 0 | names = (struct dm_name_list *)((char *) names + next); |
1842 | 0 | event_nr = _align_ptr(names->name + strlen(names->name) + 1); |
1843 | 0 | r &= _do_dm_ioctl_unmangle_string(names->name, "name", |
1844 | 0 | buf, sizeof(buf), mangling_mode); |
1845 | | /* Unmangle also UUID within same loop */ |
1846 | 0 | if (_check_has_event_nr() && |
1847 | 0 | (event_nr[1] & DM_NAME_LIST_FLAG_HAS_UUID)) { |
1848 | 0 | uuid_ptr = _align_ptr(event_nr + 2); |
1849 | 0 | r &= _do_dm_ioctl_unmangle_string(uuid_ptr, "UUID", buf_uuid, |
1850 | 0 | sizeof(buf_uuid), mangling_mode); |
1851 | 0 | } |
1852 | 0 | next = names->next; |
1853 | 0 | } while (next); |
1854 | 0 | } |
1855 | |
1856 | 0 | return r; |
1857 | 0 | } |
1858 | | |
1859 | | static int _dm_ioctl_unmangle_uuids(int type, struct dm_ioctl *dmi) |
1860 | 0 | { |
1861 | 0 | char buf[DM_UUID_LEN]; |
1862 | 0 | char *uuid = dmi->uuid; |
1863 | |
1864 | 0 | if (uuid) |
1865 | 0 | return _do_dm_ioctl_unmangle_string(uuid, "UUID", buf, sizeof(buf), |
1866 | 0 | dm_get_name_mangling_mode()); |
1867 | | |
1868 | 0 | return 1; |
1869 | 0 | } |
1870 | | #endif |
1871 | | |
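The unmangling helpers above are governed by the library-wide name-mangling mode. A hedged caller-side sketch (public API only; dm_set_name_mangling_mode(), dm_task_get_names() and struct dm_names are assumed from libdevmapper.h) of a listing whose results pass through _dm_ioctl_unmangle_names():

/* Sketch only: list device names; the ioctl layer unmangles them
 * according to the mode set here before they reach the caller. */
#include <stdio.h>
#include <libdevmapper.h>

static void list_dm_devices(void)
{
	struct dm_task *dmt;
	struct dm_names *names;
	unsigned next = 0;

	dm_set_name_mangling_mode(DM_STRING_MANGLING_AUTO);

	if (!(dmt = dm_task_create(DM_DEVICE_LIST)))
		return;

	if (dm_task_run(dmt) && (names = dm_task_get_names(dmt)) && names->dev)
		do {
			names = (struct dm_names *) ((char *) names + next);
			printf("%s\n", names->name);
			next = names->next;
		} while (next);

	dm_task_destroy(dmt);
}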
1872 | | static struct dm_ioctl *_do_dm_ioctl(struct dm_task *dmt, unsigned command, |
1873 | | unsigned buffer_repeat_count, |
1874 | | unsigned retry_repeat_count, |
1875 | | int *retryable) |
1876 | 0 | { |
1877 | 0 | struct dm_ioctl *dmi; |
1878 | 0 | int ioctl_with_uevent; |
1879 | 0 | int r; |
1880 | |
1881 | 0 | dmt->ioctl_errno = 0; |
1882 | |
1883 | 0 | dmi = _flatten(dmt, buffer_repeat_count); |
1884 | 0 | if (!dmi) { |
1885 | 0 | log_error("Couldn't create ioctl argument."); |
1886 | 0 | return NULL; |
1887 | 0 | } |
1888 | | |
1889 | 0 | if (dmt->type == DM_DEVICE_TABLE) |
1890 | 0 | dmi->flags |= DM_STATUS_TABLE_FLAG; |
1891 | |
1892 | 0 | dmi->flags |= DM_EXISTS_FLAG; /* FIXME */ |
1893 | |
1894 | 0 | if (dmt->no_open_count) |
1895 | 0 | dmi->flags |= DM_SKIP_BDGET_FLAG; |
1896 | |
1897 | 0 | ioctl_with_uevent = dmt->type == DM_DEVICE_RESUME || |
1898 | 0 | dmt->type == DM_DEVICE_REMOVE || |
1899 | 0 | dmt->type == DM_DEVICE_RENAME; |
1900 | |
1901 | 0 | if (ioctl_with_uevent && dm_cookie_supported()) { |
1902 | | /* |
1903 | | * Always mark events coming from libdevmapper as |
1904 | | * "primary sourced". This is needed to distinguish |
1905 | | * any spurious events so we can act appropriately. |
1906 | | * This needs to be applied even when udev_sync is |
1907 | | * not used because udev flags could be used alone. |
1908 | | */ |
1909 | 0 | dmi->event_nr |= DM_UDEV_PRIMARY_SOURCE_FLAG << |
1910 | 0 | DM_UDEV_FLAGS_SHIFT; |
1911 | | |
1912 | | /* |
1913 | | * Prevent udev vs. libdevmapper race when processing nodes |
1914 | | * and symlinks. This can happen when the udev rules are |
1915 | | * installed and udev synchronization code is enabled in |
1916 | | * libdevmapper but the software using libdevmapper does not |
1917 | | * make use of it (by not calling dm_task_set_cookie before). |
1918 | | * We need to instruct the udev rules not to be applied at |
1919 | | * all in this situation so we can gracefully fall back to
1920 | | * libdevmapper's node and symlink creation code. |
1921 | | */ |
1922 | 0 | if (!dmt->cookie_set && dm_udev_get_sync_support()) { |
1923 | 0 | log_debug_activation("Cookie value is not set while trying to call %s " |
1924 | 0 | "ioctl. Please, consider using libdevmapper's udev " |
1925 | 0 | "synchronization interface or disable it explicitly " |
1926 | 0 | "by calling dm_udev_set_sync_support(0).", |
1927 | 0 | dmt->type == DM_DEVICE_RESUME ? "DM_DEVICE_RESUME" : |
1928 | 0 | dmt->type == DM_DEVICE_REMOVE ? "DM_DEVICE_REMOVE" : |
1929 | 0 | "DM_DEVICE_RENAME"); |
1930 | 0 | log_debug_activation("Switching off device-mapper and all subsystem related " |
1931 | 0 | "udev rules. Falling back to libdevmapper node creation."); |
1932 | | /* |
1933 | | * Disable general dm and subsystem rules but keep |
1934 | | * dm disk rules if not flagged out explicitly before. |
1935 | | * We need /dev/disk content for the software that expects it. |
1936 | | */ |
1937 | 0 | dmi->event_nr |= (DM_UDEV_DISABLE_DM_RULES_FLAG | |
1938 | 0 | DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG) << |
1939 | 0 | DM_UDEV_FLAGS_SHIFT; |
1940 | 0 | } |
1941 | 0 | } |
1942 | |
1943 | 0 | log_debug_activation("dm %s %s%s %s%s%s %s%.0d%s%.0d%s" |
1944 | 0 | "%s[ %s%s%s%s%s%s%s%s%s%s] %.0" PRIu64 " %s [%u] (*%u)", |
1945 | 0 | _cmd_data_v4[dmt->type].name, |
1946 | 0 | dmt->new_uuid ? "UUID " : "", |
1947 | 0 | dmi->name, dmi->uuid, dmt->newname ? " " : "", |
1948 | 0 | dmt->newname ? dmt->newname : "", |
1949 | 0 | dmt->major > 0 ? "(" : "", |
1950 | 0 | dmt->major > 0 ? dmt->major : 0, |
1951 | 0 | dmt->major > 0 ? ":" : "", |
1952 | 0 | dmt->minor > 0 ? dmt->minor : 0, |
1953 | 0 | dmt->major > 0 && dmt->minor == 0 ? "0" : "", |
1954 | 0 | dmt->major > 0 ? ") " : "", |
1955 | 0 | dmt->no_open_count ? "noopencount " : "opencount ", |
1956 | 0 | dmt->no_flush ? "noflush " : "flush ", |
1957 | 0 | dmt->read_only ? "readonly " : "", |
1958 | 0 | dmt->skip_lockfs ? "skiplockfs " : "", |
1959 | 0 | dmt->retry_remove ? "retryremove " : "", |
1960 | 0 | dmt->deferred_remove ? "deferredremove " : "", |
1961 | 0 | dmt->secure_data ? "securedata " : "", |
1962 | 0 | dmt->ima_measurement ? "ima_measurement " : "", |
1963 | 0 | dmt->query_inactive_table ? "inactive " : "", |
1964 | 0 | dmt->enable_checks ? "enablechecks " : "", |
1965 | 0 | dmt->sector, _sanitise_message(dmt->message), |
1966 | 0 | dmi->data_size, retry_repeat_count); |
1967 | 0 | #ifdef DM_IOCTLS |
1968 | 0 | r = ioctl(_control_fd, command, dmi); |
1969 | |
1970 | 0 | if (dmt->record_timestamp) |
1971 | 0 | if (!dm_timestamp_get(_dm_ioctl_timestamp)) |
1972 | 0 | stack; |
1973 | |
1974 | 0 | if (r < 0 && dmt->expected_errno != errno) { |
1975 | 0 | dmt->ioctl_errno = errno; |
1976 | 0 | if (dmt->ioctl_errno == ENXIO && ((dmt->type == DM_DEVICE_INFO) || |
1977 | 0 | (dmt->type == DM_DEVICE_MKNODES) || |
1978 | 0 | (dmt->type == DM_DEVICE_STATUS))) |
1979 | 0 | dmi->flags &= ~DM_EXISTS_FLAG; /* FIXME */ |
1980 | 0 | else { |
1981 | 0 | if (_log_suppress || dmt->ioctl_errno == EINTR) |
1982 | 0 | log_verbose("device-mapper: %s ioctl on %s %s%s%.0d%s%.0d%s%s " |
1983 | 0 | "failed: %s", |
1984 | 0 | _cmd_data_v4[dmt->type].name, |
1985 | 0 | dmi->name[0] ? dmi->name : DEV_NAME(dmt) ? : "", |
1986 | 0 | dmi->uuid[0] ? dmi->uuid : DEV_UUID(dmt) ? : "", |
1987 | 0 | dmt->major > 0 ? "(" : "", |
1988 | 0 | dmt->major > 0 ? dmt->major : 0, |
1989 | 0 | dmt->major > 0 ? ":" : "", |
1990 | 0 | dmt->minor > 0 ? dmt->minor : 0, |
1991 | 0 | dmt->major > 0 && dmt->minor == 0 ? "0" : "", |
1992 | 0 | dmt->major > 0 ? ")" : "", |
1993 | 0 | strerror(dmt->ioctl_errno)); |
1994 | 0 | else |
1995 | 0 | log_error("device-mapper: %s ioctl on %s %s%s%.0d%s%.0d%s%s " |
1996 | 0 | "failed: %s", |
1997 | 0 | _cmd_data_v4[dmt->type].name, |
1998 | 0 | dmi->name[0] ? dmi->name : DEV_NAME(dmt) ? : "", |
1999 | 0 | dmi->uuid[0] ? dmi->uuid : DEV_UUID(dmt) ? : "", |
2000 | 0 | dmt->major > 0 ? "(" : "", |
2001 | 0 | dmt->major > 0 ? dmt->major : 0, |
2002 | 0 | dmt->major > 0 ? ":" : "", |
2003 | 0 | dmt->minor > 0 ? dmt->minor : 0, |
2004 | 0 | dmt->major > 0 && dmt->minor == 0 ? "0" : "", |
2005 | 0 | dmt->major > 0 ? ")" : "", |
2006 | 0 | strerror(dmt->ioctl_errno)); |
2007 | | |
2008 | | /* |
2009 | | * It's sometimes worth retrying after EBUSY in case |
2010 | | * it's a transient failure caused by an asynchronous |
2011 | | * process quickly scanning the device. |
2012 | | */ |
2013 | 0 | *retryable = dmt->ioctl_errno == EBUSY; |
2014 | |
2015 | 0 | goto error; |
2016 | 0 | } |
2017 | 0 | } |
2018 | | |
2019 | 0 | if (ioctl_with_uevent && dm_udev_get_sync_support() && |
2020 | 0 | !_check_uevent_generated(dmi)) { |
2021 | 0 | log_debug_activation("Uevent not generated! Calling udev_complete " |
2022 | 0 | "internally to avoid process lock-up."); |
2023 | 0 | _udev_complete(dmt); |
2024 | 0 | } |
2025 | |
2026 | 0 | if (!_dm_ioctl_unmangle_names(dmt->type, dmi)) |
2027 | 0 | goto error; |
2028 | | |
2029 | 0 | if (dmt->type != DM_DEVICE_REMOVE && |
2030 | 0 | !_dm_ioctl_unmangle_uuids(dmt->type, dmi)) |
2031 | 0 | goto error; |
2032 | | |
2033 | | #else /* Userspace alternative for testing */ |
2034 | | goto error; |
2035 | | #endif |
2036 | 0 | return dmi; |
2037 | | |
2038 | 0 | error: |
2039 | 0 | _dm_zfree_dmi(dmi); |
2040 | 0 | return NULL; |
2041 | 0 | } |
2042 | | |
2043 | | void dm_task_update_nodes(void) |
2044 | 0 | { |
2045 | 0 | update_devs(); |
2046 | 0 | } |
2047 | | |
2048 | 0 | #define DM_IOCTL_RETRIES 25 |
2049 | 0 | #define DM_RETRY_USLEEP_DELAY 200000 |
2050 | | |
2051 | | int dm_task_get_errno(struct dm_task *dmt) |
2052 | 0 | { |
2053 | 0 | return dmt->ioctl_errno; |
2054 | 0 | } |
2055 | | |
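A short usage sketch (not from this file) of dm_task_get_errno(): the errno recorded by _do_dm_ioctl() lets a caller tell a transient EBUSY on remove apart from other failures; dm_task_retry_remove() is assumed from the public API.

/* Sketch only: remove a device, letting the library retry EBUSY. */
#include <errno.h>
#include <stdio.h>
#include <libdevmapper.h>

static int remove_dm_device(const char *name)
{
	struct dm_task *dmt;
	int r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_REMOVE)))
		return 0;

	if (dm_task_set_name(dmt, name)) {
		/* Makes the retry loop in dm_task_run() below (bounded by
		 * DM_IOCTL_RETRIES, sleeping DM_RETRY_USLEEP_DELAY between
		 * attempts) apply to transient EBUSY failures. */
		(void) dm_task_retry_remove(dmt);
		r = dm_task_run(dmt);
		if (!r && dm_task_get_errno(dmt) == EBUSY)
			fprintf(stderr, "%s is still in use.\n", name);
	}

	dm_task_destroy(dmt);
	return r;
}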
2056 | | #if defined(GNU_SYMVER) |
2057 | | /* |
2058 | | * Enforce that the new version 1_02_197 of dm_task_run(), which
2059 | | * propagates the ioctl() errno, is the version linked into the application.
2060 | | */ |
2061 | | DM_EXPORT_SYMBOL_BASE(dm_task_run) |
2062 | | int dm_task_run_base(struct dm_task *dmt); |
2063 | | int dm_task_run_base(struct dm_task *dmt) |
2064 | 0 | { |
2065 | 0 | return dm_task_run(dmt); |
2066 | 0 | } |
2067 | | #endif |
2068 | | |
2069 | | DM_EXPORT_NEW_SYMBOL(int, dm_task_run, 1_02_197) |
2070 | | (struct dm_task *dmt) |
2071 | 0 | { |
2072 | 0 | struct dm_ioctl *dmi; |
2073 | 0 | unsigned command; |
2074 | 0 | int check_udev; |
2075 | 0 | int rely_on_udev; |
2076 | 0 | int suspended_counter; |
2077 | 0 | unsigned ioctl_retry = 1; |
2078 | 0 | int retryable = 0; |
2079 | 0 | const char *dev_name = DEV_NAME(dmt); |
2080 | 0 | const char *dev_uuid = DEV_UUID(dmt); |
2081 | |
2082 | 0 | if ((unsigned) dmt->type >= DM_ARRAY_SIZE(_cmd_data_v4)) { |
2083 | 0 | log_error(INTERNAL_ERROR "unknown device-mapper task %d", |
2084 | 0 | dmt->type); |
2085 | 0 | return 0; |
2086 | 0 | } |
2087 | | |
2088 | 0 | command = _cmd_data_v4[dmt->type].cmd; |
2089 | | |
2090 | | /* Old-style creation had a table supplied */ |
2091 | 0 | if (dmt->type == DM_DEVICE_CREATE && dmt->head) |
2092 | 0 | return _create_and_load_v4(dmt); |
2093 | | |
2094 | 0 | if (dmt->type == DM_DEVICE_MKNODES && !dev_name && |
2095 | 0 | !dev_uuid && dmt->major <= 0) |
2096 | 0 | return _mknodes_v4(dmt); |
2097 | | |
2098 | 0 | if ((dmt->type == DM_DEVICE_RELOAD) && dmt->suppress_identical_reload) |
2099 | 0 | return _reload_with_suppression_v4(dmt); |
2100 | | |
2101 | 0 | if ((dmt->type == DM_DEVICE_SUSPEND) && dmt->enable_checks) |
2102 | 0 | return _suspend_with_validation_v4(dmt); |
2103 | | |
2104 | 0 | if (!_open_control()) { |
2105 | 0 | _udev_complete(dmt); |
2106 | 0 | return_0; |
2107 | 0 | } |
2108 | | |
2109 | 0 | if ((suspended_counter = dm_get_suspended_counter()) && |
2110 | 0 | dmt->type == DM_DEVICE_RELOAD) |
2111 | 0 | log_error(INTERNAL_ERROR "Performing unsafe table load while %d device(s) " |
2112 | 0 | "are known to be suspended: " |
2113 | 0 | "%s%s%s %s%.0d%s%.0d%s%s", |
2114 | 0 | suspended_counter, |
2115 | 0 | dev_name ? : "", |
2116 | 0 | dev_uuid ? " UUID " : "", |
2117 | 0 | dev_uuid ? : "", |
2118 | 0 | dmt->major > 0 ? "(" : "", |
2119 | 0 | dmt->major > 0 ? dmt->major : 0, |
2120 | 0 | dmt->major > 0 ? ":" : "", |
2121 | 0 | dmt->minor > 0 ? dmt->minor : 0, |
2122 | 0 | dmt->major > 0 && dmt->minor == 0 ? "0" : "", |
2123 | 0 | dmt->major > 0 ? ") " : ""); |
2124 | | |
2125 | | /* FIXME Detect and warn if cookie set but should not be. */ |
2126 | 0 | repeat_ioctl: |
2127 | 0 | if (!(dmi = _do_dm_ioctl(dmt, command, _ioctl_buffer_double_factor, |
2128 | 0 | ioctl_retry, &retryable))) { |
2129 | | /* |
2130 | | * Async udev rules that scan devices commonly cause transient |
2131 | | * failures. Normally you'd expect the user to have made sure |
2132 | | * nothing was using the device before issuing REMOVE, so it's |
2133 | | * worth retrying in case the failure is indeed transient. |
2134 | | */ |
2135 | 0 | if (retryable && dmt->type == DM_DEVICE_REMOVE && |
2136 | 0 | dmt->retry_remove && ++ioctl_retry <= DM_IOCTL_RETRIES) { |
2137 | 0 | usleep(DM_RETRY_USLEEP_DELAY); |
2138 | 0 | goto repeat_ioctl; |
2139 | 0 | } |
2140 | | |
2141 | 0 | _udev_complete(dmt); |
2142 | 0 | return 0; |
2143 | 0 | } |
2144 | | |
2145 | 0 | if (dmi->flags & DM_BUFFER_FULL_FLAG) { |
2146 | 0 | switch (dmt->type) { |
2147 | 0 | case DM_DEVICE_LIST_VERSIONS: |
2148 | 0 | case DM_DEVICE_LIST: |
2149 | 0 | case DM_DEVICE_DEPS: |
2150 | 0 | case DM_DEVICE_STATUS: |
2151 | 0 | case DM_DEVICE_TABLE: |
2152 | 0 | case DM_DEVICE_WAITEVENT: |
2153 | 0 | case DM_DEVICE_TARGET_MSG: |
2154 | 0 | _ioctl_buffer_double_factor++; |
2155 | 0 | _dm_zfree_dmi(dmi); |
2156 | 0 | goto repeat_ioctl; |
2157 | 0 | default: |
2158 | 0 | log_error("WARNING: libdevmapper buffer too small for data"); |
2159 | 0 | } |
2160 | 0 | } |
2161 | | |
2162 | | /* |
2163 | | * Are we expecting a udev operation to occur that we need to check for? |
2164 | | */ |
2165 | 0 | check_udev = dmt->cookie_set && |
2166 | 0 | !(dmt->event_nr >> DM_UDEV_FLAGS_SHIFT & |
2167 | 0 | DM_UDEV_DISABLE_DM_RULES_FLAG); |
2168 | |
2169 | 0 | rely_on_udev = dmt->cookie_set ? (dmt->event_nr >> DM_UDEV_FLAGS_SHIFT & |
2170 | 0 | DM_UDEV_DISABLE_LIBRARY_FALLBACK) : 0; |
2171 | |
2172 | 0 | switch (dmt->type) { |
2173 | 0 | case DM_DEVICE_CREATE: |
2174 | 0 | if ((dmt->add_node == DM_ADD_NODE_ON_CREATE) && |
2175 | 0 | dev_name && *dev_name && !rely_on_udev) |
2176 | 0 | add_dev_node(dev_name, MAJOR(dmi->dev), |
2177 | 0 | MINOR(dmi->dev), dmt->uid, dmt->gid, |
2178 | 0 | dmt->mode, check_udev, rely_on_udev); |
2179 | 0 | break; |
2180 | 0 | case DM_DEVICE_REMOVE: |
2181 | | /* FIXME Kernel needs to fill in dmi->name */ |
2182 | 0 | if (dev_name && !rely_on_udev) |
2183 | 0 | rm_dev_node(dev_name, check_udev, rely_on_udev); |
2184 | 0 | break; |
2185 | | |
2186 | 0 | case DM_DEVICE_RENAME: |
2187 | | /* FIXME Kernel needs to fill in dmi->name */ |
2188 | 0 | if (!dmt->new_uuid && dev_name) |
2189 | 0 | rename_dev_node(dev_name, dmt->newname, |
2190 | 0 | check_udev, rely_on_udev); |
2191 | 0 | break; |
2192 | | |
2193 | 0 | case DM_DEVICE_RESUME: |
2194 | 0 | if ((dmt->add_node == DM_ADD_NODE_ON_RESUME) && |
2195 | 0 | dev_name && *dev_name) |
2196 | 0 | add_dev_node(dev_name, MAJOR(dmi->dev), |
2197 | 0 | MINOR(dmi->dev), dmt->uid, dmt->gid, |
2198 | 0 | dmt->mode, check_udev, rely_on_udev); |
2199 | | /* FIXME Kernel needs to fill in dmi->name */ |
2200 | 0 | set_dev_node_read_ahead(dev_name, |
2201 | 0 | MAJOR(dmi->dev), MINOR(dmi->dev), |
2202 | 0 | dmt->read_ahead, dmt->read_ahead_flags); |
2203 | 0 | break; |
2204 | | |
2205 | 0 | case DM_DEVICE_MKNODES: |
2206 | 0 | if (dmi->flags & DM_EXISTS_FLAG) |
2207 | 0 | add_dev_node(dmi->name, MAJOR(dmi->dev), |
2208 | 0 | MINOR(dmi->dev), dmt->uid, |
2209 | 0 | dmt->gid, dmt->mode, 0, rely_on_udev); |
2210 | 0 | else if (dev_name) |
2211 | 0 | rm_dev_node(dev_name, 0, rely_on_udev); |
2212 | 0 | break; |
2213 | | |
2214 | 0 | case DM_DEVICE_STATUS: |
2215 | 0 | case DM_DEVICE_TABLE: |
2216 | 0 | case DM_DEVICE_WAITEVENT: |
2217 | 0 | if (!_unmarshal_status(dmt, dmi)) |
2218 | 0 | goto bad; |
2219 | 0 | break; |
2220 | 0 | } |
2221 | | |
2222 | | /* Was structure reused? */ |
2223 | 0 | _dm_zfree_dmi(dmt->dmi.v4); |
2224 | 0 | dmt->dmi.v4 = dmi; |
2225 | 0 | return 1; |
2226 | | |
2227 | 0 | bad: |
2228 | 0 | _dm_zfree_dmi(dmi); |
2229 | 0 | return 0; |
2230 | 0 | } |
2231 | | |
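Caller-side sketch (public API only; dm_task_set_cookie() and dm_udev_wait() are assumed from libdevmapper.h) of the udev synchronization the RESUME/REMOVE/RENAME handling above expects; without a cookie, _do_dm_ioctl() logs a warning and falls back to libdevmapper's own node creation.

/* Sketch only: resume a device with udev synchronization. */
#include <stdint.h>
#include <libdevmapper.h>

static int resume_with_udev_sync(const char *name)
{
	struct dm_task *dmt;
	uint32_t cookie = 0;
	int r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_RESUME)))
		return 0;

	if (dm_task_set_name(dmt, name) &&
	    dm_task_set_cookie(dmt, &cookie, 0))	/* obtain a synchronization cookie */
		r = dm_task_run(dmt);

	dm_task_destroy(dmt);

	if (cookie)
		(void) dm_udev_wait(cookie);	/* wait for udev rule processing */

	return r;
}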
2232 | | void dm_hold_control_dev(int hold_open) |
2233 | 0 | { |
2234 | 0 | _hold_control_fd_open = hold_open ? 1 : 0; |
2235 | |
2236 | 0 | log_debug("Hold of control device is now %sset.", |
2237 | 0 | _hold_control_fd_open ? "" : "un"); |
2238 | 0 | } |
2239 | | |
2240 | | void dm_lib_release(void) |
2241 | 6.43k | { |
2242 | 6.43k | if (!_hold_control_fd_open) |
2243 | 6.43k | _close_control_fd(); |
2244 | 6.43k | dm_timestamp_destroy(_dm_ioctl_timestamp); |
2245 | 6.43k | _dm_ioctl_timestamp = NULL; |
2246 | 6.43k | update_devs(); |
2247 | 6.43k | } |
2248 | | |
2249 | | void dm_pools_check_leaks(void); |
2250 | | |
2251 | | void dm_lib_exit(void) |
2252 | 0 | { |
2253 | 0 | int suspended_counter; |
2254 | 0 | static unsigned _exited = 0; |
2255 | |
|
2256 | 0 | if (_exited++) |
2257 | 0 | return; |
2258 | | |
2259 | 0 | if ((suspended_counter = dm_get_suspended_counter())) |
2260 | 0 | log_error("libdevmapper exiting with %d device(s) still suspended.", suspended_counter); |
2261 | |
2262 | 0 | dm_lib_release(); |
2263 | 0 | selinux_release(); |
2264 | 0 | if (_dm_bitset) |
2265 | 0 | dm_bitset_destroy(_dm_bitset); |
2266 | 0 | _dm_bitset = NULL; |
2267 | 0 | dm_pools_check_leaks(); |
2268 | 0 | dm_dump_memory(); |
2269 | 0 | _version_ok = 1; |
2270 | 0 | _version_checked = 0; |
2271 | 0 | } |
2272 | | |
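A brief sketch (an assumption, using only the public entry points shown in this file) of how a long-running caller typically combines these lifecycle helpers.

/* Sketch only: keep the control fd open across a batch of ioctls,
 * then release everything on shutdown. */
#include <libdevmapper.h>

static void dm_batch_example(void)
{
	dm_hold_control_dev(1);		/* keep /dev/mapper/control open */

	/* ... any number of dm_task_run() calls ... */

	dm_hold_control_dev(0);
	dm_lib_release();		/* closes the control fd and updates dev nodes */
	dm_lib_exit();			/* final cleanup; warns about devices left suspended */
}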
2273 | | #if defined(GNU_SYMVER) |
2274 | | /* |
2275 | | * Maintain binary backward compatibility. |
2276 | | * The version script mechanism works only with 'gcc'-compatible compilers.
2277 | | */ |
2278 | | |
2279 | | /* |
2280 | | * The following code is here to retain ABI compatibility after adding
2281 | | * the field deferred_remove to struct dm_info in version 1.02.89. |
2282 | | * |
2283 | | * Binaries linked against version 1.02.88 of libdevmapper or earlier |
2284 | | * will use this function that returns dm_info without the |
2285 | | * deferred_remove field. |
2286 | | * |
2287 | | * Binaries compiled against version 1.02.89 onwards will use |
2288 | | * the new function dm_task_get_info_with_deferred_remove due to the |
2289 | | * #define. |
2290 | | * |
2291 | | * N.B. Keep this function at the end of the file to make sure that |
2292 | | * no code in this file accidentally calls it. |
2293 | | */ |
2294 | | |
2295 | | DM_EXPORT_SYMBOL_BASE(dm_task_get_info) |
2296 | | int dm_task_get_info_base(struct dm_task *dmt, struct dm_info *info); |
2297 | | int dm_task_get_info_base(struct dm_task *dmt, struct dm_info *info) |
2298 | 0 | { |
2299 | 0 | struct dm_info new_info; |
2300 | |
2301 | 0 | if (!dm_task_get_info(dmt, &new_info)) |
2302 | 0 | return 0; |
2303 | | |
2304 | 0 | memcpy(info, &new_info, offsetof(struct dm_info, deferred_remove)); |
2305 | |
2306 | 0 | return 1; |
2307 | 0 | } |
2308 | | |
2309 | | #endif |
2310 | | |
2311 | | int dm_task_get_info_with_deferred_remove(struct dm_task *dmt, struct dm_info *info); |
2312 | | int dm_task_get_info_with_deferred_remove(struct dm_task *dmt, struct dm_info *info) |
2313 | 0 | { |
2314 | 0 | struct dm_info new_info; |
2315 | |
2316 | 0 | if (!dm_task_get_info(dmt, &new_info)) |
2317 | 0 | return 0; |
2318 | | |
2319 | 0 | memcpy(info, &new_info, offsetof(struct dm_info, internal_suspend)); |
2320 | |
2321 | 0 | return 1; |
2322 | 0 | } |
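Finally, a usage sketch (not part of this file) of the dm_task_get_info() call that both compatibility wrappers above forward to.

/* Sketch only: query a device's dm_info, including the deferred_remove
 * field that the versioned wrappers exist to protect. */
#include <stdio.h>
#include <libdevmapper.h>

static int show_info(const char *name)
{
	struct dm_task *dmt;
	struct dm_info info;
	int r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
		return 0;

	if (dm_task_set_name(dmt, name) && dm_task_run(dmt) &&
	    dm_task_get_info(dmt, &info) && info.exists) {
		printf("%s: %u:%u open=%d suspended=%d deferred_remove=%d\n",
		       name, info.major, info.minor, (int) info.open_count,
		       info.suspended, info.deferred_remove);
		r = 1;
	}

	dm_task_destroy(dmt);
	return r;
}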