/src/lvm2/libdm/libdevmapper.h
Line  | Count  | Source (jump to first uncovered line)  | 
1  |  | /*  | 
2  |  |  * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.  | 
3  |  |  * Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.  | 
4  |  |  * Copyright (C) 2006 Rackable Systems All rights reserved.  | 
5  |  |  *  | 
6  |  |  * This file is part of the device-mapper userspace tools.  | 
7  |  |  *  | 
8  |  |  * This copyrighted material is made available to anyone wishing to use,  | 
9  |  |  * modify, copy, or redistribute it subject to the terms and conditions  | 
10  |  |  * of the GNU Lesser General Public License v.2.1.  | 
11  |  |  *  | 
12  |  |  * You should have received a copy of the GNU Lesser General Public License  | 
13  |  |  * along with this program; if not, write to the Free Software Foundation,  | 
14  |  |  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.  | 
15  |  |  */  | 
16  |  |  | 
17  |  | #ifndef LIB_DEVICE_MAPPER_H  | 
18  |  | #define LIB_DEVICE_MAPPER_H  | 
19  |  |  | 
20  |  | #include <inttypes.h>  | 
21  |  | #include <stdarg.h>  | 
22  |  | #include <sys/types.h>  | 
23  |  | #include <sys/stat.h>  | 
24  |  |  | 
25  |  | #ifdef __linux__  | 
26  |  | #  include <linux/types.h>  | 
27  |  | #endif  | 
28  |  |  | 
29  |  | #include <limits.h>  | 
30  |  | #include <string.h>  | 
31  |  | #include <stdlib.h>  | 
32  |  | #include <stdio.h>  | 
33  |  | #include <stddef.h> /* offsetof */  | 
34  |  |  | 
35  |  | #ifndef __GNUC__  | 
36  |  | # define __typeof__ typeof  | 
37  |  | #endif  | 
38  |  |  | 
39  |  | /* Macros to make string defines */  | 
40  |  | #define DM_TO_STRING_EXP(A) #A  | 
41  |  | #define DM_TO_STRING(A) DM_TO_STRING_EXP(A)  | 
42  |  |  | 
43  | 0  | #define DM_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))  | 
44  |  |  | 
45  |  | #ifdef __cplusplus  | 
46  |  | extern "C" { | 
47  |  | #endif  | 
48  |  |  | 
49  |  | /*****************************************************************  | 
50  |  |  * The first section of this file provides direct access to the  | 
51  |  |  * individual device-mapper ioctls.  Since it is quite laborious to  | 
52  |  |  * build the ioctl arguments for the device-mapper, people are  | 
53  |  |  * encouraged to use this library.  | 
54  |  |  ****************************************************************/  | 
55  |  |  | 
56  |  | /*  | 
57  |  |  * The library user may wish to register their own  | 
58  |  |  * logging function.  By default errors go to stderr.  | 
59  |  |  * Use dm_log_with_errno_init(NULL) to restore the default log fn.  | 
60  |  |  * Error messages may have a non-zero errno.  | 
61  |  |  * Debug messages may have a non-zero class.  | 
62  |  |  * Aborts on internal error when env DM_ABORT_ON_INTERNAL_ERRORS is 1  | 
63  |  |  */  | 
64  |  |  | 
65  |  | typedef void (*dm_log_with_errno_fn) (int level, const char *file, int line,  | 
66  |  |               int dm_errno_or_class, const char *f, ...)  | 
67  |  |     __attribute__ ((format(printf, 5, 6)));  | 
68  |  |  | 
69  |  | void dm_log_with_errno_init(dm_log_with_errno_fn fn);  | 
70  |  | void dm_log_init_verbose(int level);  | 
71  |  |  | 
72  |  | /*  | 
73  |  |  * Original version of this function.  | 
74  |  |  * dm_errno is set to 0.  | 
75  |  |  *  | 
76  |  |  * Deprecated: Use the _with_errno_ versions above instead.  | 
77  |  |  */  | 
78  |  | typedef void (*dm_log_fn) (int level, const char *file, int line,  | 
79  |  |          const char *f, ...)  | 
80  |  |     __attribute__ ((format(printf, 4, 5)));  | 
81  |  |  | 
82  |  | void dm_log_init(dm_log_fn fn);  | 
83  |  | /*  | 
84  |  |  * For backward-compatibility, indicate that dm_log_init() was used  | 
85  |  |  * to set a non-default value of dm_log().  | 
86  |  |  */  | 
87  |  | int dm_log_is_non_default(void);  | 
88  |  |  | 
89  |  | /*  | 
90  |  |  * Number of devices currently in suspended state (via the library).  | 
91  |  |  */  | 
92  |  | int dm_get_suspended_counter(void);  | 
93  |  |  | 
94  |  | enum { | 
95  |  |   DM_DEVICE_CREATE,  | 
96  |  |   DM_DEVICE_RELOAD,  | 
97  |  |   DM_DEVICE_REMOVE,  | 
98  |  |   DM_DEVICE_REMOVE_ALL,  | 
99  |  |  | 
100  |  |   DM_DEVICE_SUSPEND,  | 
101  |  |   DM_DEVICE_RESUME,  | 
102  |  |  | 
103  |  |   DM_DEVICE_INFO,  | 
104  |  |   DM_DEVICE_DEPS,  | 
105  |  |   DM_DEVICE_RENAME,  | 
106  |  |  | 
107  |  |   DM_DEVICE_VERSION,  | 
108  |  |  | 
109  |  |   DM_DEVICE_STATUS,  | 
110  |  |   DM_DEVICE_TABLE,  | 
111  |  |   DM_DEVICE_WAITEVENT,  | 
112  |  |  | 
113  |  |   DM_DEVICE_LIST,  | 
114  |  |  | 
115  |  |   DM_DEVICE_CLEAR,  | 
116  |  |  | 
117  |  |   DM_DEVICE_MKNODES,  | 
118  |  |  | 
119  |  |   DM_DEVICE_LIST_VERSIONS,  | 
120  |  |     | 
121  |  |   DM_DEVICE_TARGET_MSG,  | 
122  |  |  | 
123  |  |   DM_DEVICE_SET_GEOMETRY,  | 
124  |  |  | 
125  |  |   DM_DEVICE_ARM_POLL,  | 
126  |  |  | 
127  |  |   DM_DEVICE_GET_TARGET_VERSION  | 
128  |  | };  | 
129  |  |  | 
130  |  | /*  | 
131  |  |  * You will need to build a struct dm_task for  | 
132  |  |  * each ioctl command you want to execute.  | 
133  |  |  */  | 
134  |  |  | 
135  |  | struct dm_pool;  | 
136  |  | struct dm_task;  | 
137  |  | struct dm_timestamp;  | 
138  |  |  | 
139  |  | struct dm_task *dm_task_create(int type);  | 
140  |  | void dm_task_destroy(struct dm_task *dmt);  | 
141  |  |  | 
142  |  | int dm_task_set_name(struct dm_task *dmt, const char *name);  | 
143  |  | int dm_task_set_uuid(struct dm_task *dmt, const char *uuid);  | 
144  |  |  | 
145  |  | /*  | 
146  |  |  * Retrieve attributes after an info.  | 
147  |  |  */  | 
148  |  | struct dm_info { | 
149  |  |   int exists;  | 
150  |  |   int suspended;  | 
151  |  |   int live_table;  | 
152  |  |   int inactive_table;  | 
153  |  |   int32_t open_count;  | 
154  |  |   uint32_t event_nr;  | 
155  |  |   uint32_t major;  | 
156  |  |   uint32_t minor;   /* minor device number */  | 
157  |  |   int read_only;    /* 0:read-write; 1:read-only */  | 
158  |  |  | 
159  |  |   int32_t target_count;  | 
160  |  |  | 
161  |  |   int deferred_remove;  | 
162  |  |   int internal_suspend;  | 
163  |  | };  | 
164  |  |  | 
165  |  | struct dm_deps { | 
166  |  |   uint32_t count;  | 
167  |  |   uint32_t filler;  | 
168  |  |   uint64_t device[];  | 
169  |  | };  | 
170  |  |  | 
171  |  | struct dm_names { | 
172  |  |   uint64_t dev;  | 
173  |  |   uint32_t next;    /* Offset to next struct from start of this struct */  | 
174  |  |   char name[];  | 
175  |  | };  | 
176  |  |  | 
177  |  | struct dm_versions { | 
178  |  |   uint32_t next;    /* Offset to next struct from start of this struct */  | 
179  |  |   uint32_t version[3];  | 
180  |  |  | 
181  |  |   char name[];  | 
182  |  | };  | 
183  |  |  | 
184  |  | int dm_get_library_version(char *version, size_t size);  | 
185  |  | int dm_task_get_driver_version(struct dm_task *dmt, char *version, size_t size);  | 
186  |  | int dm_task_get_info(struct dm_task *dmt, struct dm_info *info);  | 
187  |  |  | 
188  |  | /*  | 
189  |  |  * This function returns dm device's UUID based on the value  | 
190  |  |  * of the mangling mode set during preceding dm_task_run call:  | 
191  |  |  *   - unmangled UUID for DM_STRING_MANGLING_{AUTO, HEX}, | 
192  |  |  *   - UUID without any changes for DM_STRING_MANGLING_NONE.  | 
193  |  |  *  | 
194  |  |  * To get mangled or unmangled form of the UUID directly, use  | 
195  |  |  * dm_task_get_uuid_mangled or dm_task_get_uuid_unmangled function.  | 
196  |  |  */  | 
197  |  | const char *dm_task_get_uuid(const struct dm_task *dmt);  | 
198  |  |  | 
199  |  | struct dm_deps *dm_task_get_deps(struct dm_task *dmt);  | 
200  |  | struct dm_versions *dm_task_get_versions(struct dm_task *dmt);  | 
201  |  | const char *dm_task_get_message_response(struct dm_task *dmt);  | 
202  |  |  | 
203  |  | /*  | 
204  |  |  * These functions return device-mapper names based on the value  | 
205  |  |  * of the mangling mode set during preceding dm_task_run call:  | 
206  |  |  *   - unmangled name for DM_STRING_MANGLING_{AUTO, HEX}, | 
207  |  |  *   - name without any changes for DM_STRING_MANGLING_NONE.  | 
208  |  |  *  | 
209  |  |  * To get mangled or unmangled form of the name directly, use  | 
210  |  |  * dm_task_get_name_mangled or dm_task_get_name_unmangled function.  | 
211  |  |  */  | 
212  |  | const char *dm_task_get_name(const struct dm_task *dmt);  | 
213  |  | struct dm_names *dm_task_get_names(struct dm_task *dmt);  | 
214  |  |  | 
215  |  | int dm_task_set_ro(struct dm_task *dmt);  | 
216  |  | int dm_task_set_newname(struct dm_task *dmt, const char *newname);  | 
217  |  | int dm_task_set_newuuid(struct dm_task *dmt, const char *newuuid);  | 
218  |  | int dm_task_set_minor(struct dm_task *dmt, int minor);  | 
219  |  | int dm_task_set_major(struct dm_task *dmt, int major);  | 
220  |  | int dm_task_set_major_minor(struct dm_task *dmt, int major, int minor, int allow_default_major_fallback);  | 
221  |  | int dm_task_set_uid(struct dm_task *dmt, uid_t uid);  | 
222  |  | int dm_task_set_gid(struct dm_task *dmt, gid_t gid);  | 
223  |  | int dm_task_set_mode(struct dm_task *dmt, mode_t mode);  | 
224  |  | /* See also description for DM_UDEV_DISABLE_LIBRARY_FALLBACK flag! */  | 
225  |  | int dm_task_set_cookie(struct dm_task *dmt, uint32_t *cookie, uint16_t flags);  | 
226  |  | int dm_task_set_event_nr(struct dm_task *dmt, uint32_t event_nr);  | 
227  |  | int dm_task_set_geometry(struct dm_task *dmt, const char *cylinders, const char *heads, const char *sectors, const char *start);  | 
228  |  | int dm_task_set_message(struct dm_task *dmt, const char *message);  | 
229  |  | int dm_task_set_sector(struct dm_task *dmt, uint64_t sector);  | 
230  |  | int dm_task_no_flush(struct dm_task *dmt);  | 
231  |  | int dm_task_no_open_count(struct dm_task *dmt);  | 
232  |  | int dm_task_skip_lockfs(struct dm_task *dmt);  | 
233  |  | int dm_task_query_inactive_table(struct dm_task *dmt);  | 
234  |  | int dm_task_suppress_identical_reload(struct dm_task *dmt);  | 
235  |  | int dm_task_secure_data(struct dm_task *dmt);  | 
236  |  | int dm_task_retry_remove(struct dm_task *dmt);  | 
237  |  | int dm_task_deferred_remove(struct dm_task *dmt);  | 
238  |  | int dm_task_ima_measurement(struct dm_task *dmt);  | 
239  |  |  | 
240  |  | /*  | 
241  |  |  * Record timestamp immediately after the ioctl returns.  | 
242  |  |  */  | 
243  |  | int dm_task_set_record_timestamp(struct dm_task *dmt);  | 
244  |  | struct dm_timestamp *dm_task_get_ioctl_timestamp(struct dm_task *dmt);  | 
245  |  |  | 
246  |  | /*  | 
247  |  |  * Enable checks for common mistakes such as issuing ioctls in an unsafe order.  | 
248  |  |  */  | 
249  |  | int dm_task_enable_checks(struct dm_task *dmt);  | 
250  |  |  | 
251  |  | typedef enum dm_add_node_e { | 
252  |  |   DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */  | 
253  |  |   DM_ADD_NODE_ON_CREATE  /* add /dev/mapper node with dmsetup create */  | 
254  |  | } dm_add_node_t;  | 
255  |  | int dm_task_set_add_node(struct dm_task *dmt, dm_add_node_t add_node);  | 
256  |  |  | 
257  |  | /*  | 
258  |  |  * Control read_ahead.  | 
259  |  |  */  | 
260  | 0  | #define DM_READ_AHEAD_AUTO UINT32_MAX  /* Use kernel default readahead */  | 
261  | 0  | #define DM_READ_AHEAD_NONE 0    /* Disable readahead */  | 
262  |  |  | 
263  | 0  | #define DM_READ_AHEAD_MINIMUM_FLAG  0x1  /* Value supplied is minimum */  | 
264  |  |  | 
265  |  | /*  | 
266  |  |  * Read ahead is set with DM_DEVICE_CREATE with a table or DM_DEVICE_RESUME.  | 
267  |  |  */  | 
268  |  | int dm_task_set_read_ahead(struct dm_task *dmt, uint32_t read_ahead,  | 
269  |  |          uint32_t read_ahead_flags);  | 
270  |  | uint32_t dm_task_get_read_ahead(const struct dm_task *dmt,  | 
271  |  |         uint32_t *read_ahead);  | 
272  |  |  | 
273  |  | /*  | 
274  |  |  * Use these to prepare for a create or reload.  | 
275  |  |  */  | 
276  |  | int dm_task_add_target(struct dm_task *dmt,  | 
277  |  |            uint64_t start,  | 
278  |  |            uint64_t size, const char *ttype, const char *params);  | 
279  |  |  | 
280  |  | /*  | 
281  |  |  * Format major/minor numbers correctly for input to driver.  | 
282  |  |  */  | 
283  |  | #define DM_FORMAT_DEV_BUFSIZE 13  /* Minimum bufsize to handle worst case. */  | 
284  |  | int dm_format_dev(char *buf, int bufsize, uint32_t dev_major, uint32_t dev_minor);  | 
285  |  |  | 
286  |  | /* Use this to retrieve target information returned from a STATUS call */  | 
287  |  | void *dm_get_next_target(struct dm_task *dmt,  | 
288  |  |        void *next, uint64_t *start, uint64_t *length,  | 
289  |  |        char **target_type, char **params);  | 
290  |  |  | 
291  |  | /*  | 
292  |  |  * Following dm_get_status_* functions will allocate appropriate status structure  | 
293  |  |  * from passed mempool together with the necessary character arrays.  | 
294  |  |  * Destroying the mempool will release all associated allocation.  | 
295  |  |  */  | 
296  |  |  | 
297  |  | /* Parse params from STATUS call for mirror target */  | 
298  |  | typedef enum dm_status_mirror_health_e { | 
299  |  |   DM_STATUS_MIRROR_ALIVE        = 'A',/* No failures */  | 
300  |  |   DM_STATUS_MIRROR_FLUSH_FAILED = 'F',/* Mirror out-of-sync */  | 
301  |  |   DM_STATUS_MIRROR_WRITE_FAILED = 'D',/* Mirror out-of-sync */  | 
302  |  |   DM_STATUS_MIRROR_SYNC_FAILED  = 'S',/* Mirror out-of-sync */  | 
303  |  |   DM_STATUS_MIRROR_READ_FAILED  = 'R',/* Mirror data unaffected */  | 
304  |  |   DM_STATUS_MIRROR_UNCLASSIFIED = 'U' /* Bug */  | 
305  |  | } dm_status_mirror_health_t;  | 
306  |  |  | 
307  |  | struct dm_status_mirror { | 
308  |  |   uint64_t total_regions;  | 
309  |  |   uint64_t insync_regions;  | 
310  |  |   uint32_t dev_count;             /* # of devs[] elements (<= 8) */  | 
311  |  |   struct dm_dev_leg_health_s { | 
312  |  |     dm_status_mirror_health_t health;  | 
313  |  |     uint32_t major;  | 
314  |  |     uint32_t minor;  | 
315  |  |   } *devs;                        /* array with individual legs */  | 
316  |  |   const char *log_type;           /* core, disk,.... */  | 
317  |  |   uint32_t log_count;   /* # of logs[] elements */  | 
318  |  |   struct dm_dev_log_health_s { | 
319  |  |     dm_status_mirror_health_t health;  | 
320  |  |     uint32_t major;  | 
321  |  |     uint32_t minor;  | 
322  |  |   } *logs;      /* array with individual logs */  | 
323  |  | };  | 
324  |  |  | 
325  |  | int dm_get_status_mirror(struct dm_pool *mem, const char *params,  | 
326  |  |        struct dm_status_mirror **status);  | 
327  |  |  | 
328  |  | /* Parse params from STATUS call for raid target */  | 
329  |  | struct dm_status_raid { | 
330  |  |   uint64_t reserved;  | 
331  |  |   uint64_t total_regions;   /* sectors */  | 
332  |  |   uint64_t insync_regions;  /* sectors */  | 
333  |  |   uint64_t mismatch_count;  | 
334  |  |   uint32_t dev_count;  | 
335  |  |   char *raid_type;  | 
336  |  |   /* A - alive,  a - alive not in-sync,  D - dead/failed */  | 
337  |  |   char *dev_health;  | 
338  |  |   /* idle, frozen, resync, recover, check, repair */  | 
339  |  |   char *sync_action;  | 
340  |  |   uint64_t data_offset; /* RAID out-of-place reshaping */  | 
341  |  | };  | 
342  |  |  | 
343  |  | int dm_get_status_raid(struct dm_pool *mem, const char *params,  | 
344  |  |            struct dm_status_raid **status);  | 
345  |  |  | 
346  |  | /* Parse params from STATUS call for cache target */  | 
347  |  | struct dm_status_cache { | 
348  |  |   uint64_t version;  /* zero for now */  | 
349  |  |  | 
350  |  |   uint32_t metadata_block_size;   /* in 512B sectors */  | 
351  |  |   uint32_t block_size;            /* AKA 'chunk_size' */  | 
352  |  |  | 
353  |  |   uint64_t metadata_used_blocks;  | 
354  |  |   uint64_t metadata_total_blocks;  | 
355  |  |  | 
356  |  |   uint64_t used_blocks;  | 
357  |  |   uint64_t dirty_blocks;  | 
358  |  |   uint64_t total_blocks;  | 
359  |  |  | 
360  |  |   uint64_t read_hits;  | 
361  |  |   uint64_t read_misses;  | 
362  |  |   uint64_t write_hits;  | 
363  |  |   uint64_t write_misses;  | 
364  |  |  | 
365  |  |   uint64_t demotions;  | 
366  |  |   uint64_t promotions;  | 
367  |  |  | 
368  |  |   uint64_t feature_flags;   /* DM_CACHE_FEATURE_? */  | 
369  |  |  | 
370  |  |   int core_argc;  | 
371  |  |   char **core_argv;  | 
372  |  |  | 
373  |  |   char *policy_name;  | 
374  |  |   int policy_argc;  | 
375  |  |   char **policy_argv;  | 
376  |  |  | 
377  |  |   unsigned error : 1;   /* detected error (switches to fail soon) */  | 
378  |  |   unsigned fail : 1;    /* all I/O fails */  | 
379  |  |   unsigned needs_check : 1; /* metadata needs check */  | 
380  |  |   unsigned read_only : 1;   /* metadata may not be changed */  | 
381  |  |   uint32_t reserved : 28;  | 
382  |  | };  | 
383  |  |  | 
384  |  | int dm_get_status_cache(struct dm_pool *mem, const char *params,  | 
385  |  |       struct dm_status_cache **status);  | 
386  |  |  | 
387  |  | /*  | 
388  |  |  * Parse params from STATUS call for snapshot target  | 
389  |  |  *  | 
390  |  |  * Snapshot target's format:  | 
391  |  |  * <= 1.7.0: <used_sectors>/<total_sectors>  | 
392  |  |  * >= 1.8.0: <used_sectors>/<total_sectors> <metadata_sectors>  | 
393  |  |  */  | 
394  |  | struct dm_status_snapshot { | 
395  |  |   uint64_t used_sectors;          /* in 512b units */  | 
396  |  |   uint64_t total_sectors;  | 
397  |  |   uint64_t metadata_sectors;  | 
398  |  |   unsigned has_metadata_sectors : 1; /* set when metadata_sectors is present */  | 
399  |  |   unsigned invalid : 1;   /* set when snapshot is invalidated */  | 
400  |  |   unsigned merge_failed : 1;  /* set when snapshot merge failed */  | 
401  |  |   unsigned overflow : 1;    /* set when snapshot overflows */  | 
402  |  | };  | 
403  |  |  | 
404  |  | int dm_get_status_snapshot(struct dm_pool *mem, const char *params,  | 
405  |  |          struct dm_status_snapshot **status);  | 
406  |  |  | 
407  |  | /* Parse params from STATUS call for thin_pool target */  | 
408  |  | typedef enum dm_thin_discards_e { | 
409  |  |   DM_THIN_DISCARDS_IGNORE,  | 
410  |  |   DM_THIN_DISCARDS_NO_PASSDOWN,  | 
411  |  |   DM_THIN_DISCARDS_PASSDOWN  | 
412  |  | } dm_thin_discards_t;  | 
413  |  |  | 
414  |  | struct dm_status_thin_pool { | 
415  |  |   uint64_t transaction_id;  | 
416  |  |   uint64_t used_metadata_blocks;  | 
417  |  |   uint64_t total_metadata_blocks;  | 
418  |  |   uint64_t used_data_blocks;  | 
419  |  |   uint64_t total_data_blocks;  | 
420  |  |   uint64_t held_metadata_root;  | 
421  |  |   uint32_t read_only;   /* metadata may not be changed */  | 
422  |  |   dm_thin_discards_t discards;  | 
423  |  |   uint32_t fail : 1;    /* all I/O fails */  | 
424  |  |   uint32_t error_if_no_space : 1; /* otherwise queue_if_no_space */  | 
425  |  |   uint32_t out_of_data_space : 1; /* metadata may be changed, but data may not be allocated (no rw) */  | 
426  |  |   uint32_t needs_check : 1; /* metadata needs check */  | 
427  |  |   uint32_t error : 1;   /* detected error (switches to fail soon) */  | 
428  |  |   uint32_t reserved : 27;  | 
429  |  | };  | 
430  |  |  | 
431  |  | int dm_get_status_thin_pool(struct dm_pool *mem, const char *params,  | 
432  |  |           struct dm_status_thin_pool **status);  | 
433  |  |  | 
434  |  | /* Parse params from STATUS call for thin target */  | 
435  |  | struct dm_status_thin { | 
436  |  |   uint64_t mapped_sectors;  | 
437  |  |   uint64_t highest_mapped_sector;  | 
438  |  |   uint32_t fail : 1;              /* Thin volume fails I/O */  | 
439  |  |   uint32_t reserved : 31;  | 
440  |  | };  | 
441  |  |  | 
442  |  | int dm_get_status_thin(struct dm_pool *mem, const char *params,  | 
443  |  |            struct dm_status_thin **status);  | 
444  |  |  | 
445  |  | /*  | 
446  |  |  * device-mapper statistics support  | 
447  |  |  */  | 
448  |  |  | 
449  |  | /*  | 
450  |  |  * Statistics handle.  | 
451  |  |  *  | 
452  |  |  * Operations on dm_stats objects include managing statistics regions  | 
453  |  |  * and obtaining and manipulating current counter values from the  | 
454  |  |  * kernel. Methods are provided to return basic count values and to  | 
455  |  |  * derive time-based metrics when a suitable interval estimate is  | 
456  |  |  * provided.  | 
457  |  |  *  | 
458  |  |  * Internally the dm_stats handle contains a pointer to a table of one  | 
459  |  |  * or more dm_stats_region objects representing the regions registered  | 
460  |  |  * with the dm_stats_create_region() method. These in turn point to a  | 
461  |  |  * table of one or more dm_stats_counters objects containing the  | 
462  |  |  * counter sets for each defined area within the region:  | 
463  |  |  *  | 
464  |  |  * dm_stats->dm_stats_region[nr_regions]->dm_stats_counters[nr_areas]  | 
465  |  |  *  | 
466  |  |  * This structure is private to the library and may change in future  | 
467  |  |  * versions: all users should make use of the public interface and treat  | 
468  |  |  * the dm_stats type as an opaque handle.  | 
469  |  |  *  | 
470  |  |  * Regions and counter sets are stored in order of increasing region_id.  | 
471  |  |  * Depending on region specifications and the sequence of create and  | 
472  |  |  * delete operations this may not correspond to increasing sector  | 
473  |  |  * number: users of the library should not assume that this is the case  | 
474  |  |  * unless region creation is deliberately managed to ensure this (by  | 
475  |  |  * always creating regions in strict order of ascending sector address).  | 
476  |  |  *  | 
477  |  |  * Regions may also overlap so the same sector range may be included in  | 
478  |  |  * more than one region or area: applications should be prepared to deal  | 
479  |  |  * with this or manage regions such that it does not occur.  | 
480  |  |  */  | 
481  |  | struct dm_stats;  | 
482  |  |  | 
483  |  | /*  | 
484  |  |  * Histogram handle.  | 
485  |  |  *  | 
486  |  |  * A histogram object represents the latency histogram values and bin  | 
487  |  |  * boundaries of the histogram associated with a particular area.  | 
488  |  |  *  | 
489  |  |  * Operations on the handle allow the number of bins, bin boundaries,  | 
490  |  |  * counts and relative proportions to be obtained as well as the  | 
491  |  |  * conversion of a histogram or its bounds to a compact string  | 
492  |  |  * representation.  | 
493  |  |  */  | 
494  |  | struct dm_histogram;  | 
495  |  |  | 
496  |  | /*  | 
497  |  |  * Allocate a dm_stats handle to use for subsequent device-mapper  | 
498  |  |  * statistics operations. A program_id may be specified and will be  | 
499  |  |  * used by default for subsequent operations on this handle.  | 
500  |  |  *  | 
501  |  |  * If program_id is NULL or the empty string a program_id will be  | 
502  |  |  * automatically set to the value contained in /proc/self/comm.  | 
503  |  |  */  | 
504  |  | struct dm_stats *dm_stats_create(const char *program_id);  | 
505  |  |  | 
506  |  | /*  | 
507  |  |  * Bind a dm_stats handle to the specified device major and minor  | 
508  |  |  * values. Any previous binding is cleared and any preexisting counter  | 
509  |  |  * data contained in the handle is released.  | 
510  |  |  */  | 
511  |  | int dm_stats_bind_devno(struct dm_stats *dms, int major, int minor);  | 
512  |  |  | 
513  |  | /*  | 
514  |  |  * Bind a dm_stats handle to the specified device name.  | 
515  |  |  * Any previous binding is cleared and any preexisting counter  | 
516  |  |  * data contained in the handle is released.  | 
517  |  |  */  | 
518  |  | int dm_stats_bind_name(struct dm_stats *dms, const char *name);  | 
519  |  |  | 
520  |  | /*  | 
521  |  |  * Bind a dm_stats handle to the specified device UUID.  | 
522  |  |  * Any previous binding is cleared and any preexisting counter  | 
523  |  |  * data contained in the handle is released.  | 
524  |  |  */  | 
525  |  | int dm_stats_bind_uuid(struct dm_stats *dms, const char *uuid);  | 
526  |  |  | 
527  |  | /*  | 
528  |  |  * Bind a dm_stats handle to the device backing the file referenced  | 
529  |  |  * by the specified file descriptor.  | 
530  |  |  *  | 
531  |  |  * File descriptor fd must reference a regular file, open for reading,  | 
532  |  |  * in a local file system, backed by a device-mapper device, that  | 
533  |  |  * supports the FIEMAP ioctl, and that returns data describing the  | 
534  |  |  * physical location of extents.  | 
535  |  |  */  | 
536  |  | int dm_stats_bind_from_fd(struct dm_stats *dms, int fd);  | 
537  |  | /*  | 
538  |  |  * Test whether the running kernel supports the precise_timestamps  | 
539  |  |  * feature. Presence of this feature also implies histogram support.  | 
540  |  |  * The library will check this call internally and fails any attempt  | 
541  |  |  * to use nanosecond counters or histograms on kernels that fail to  | 
542  |  |  * meet this check.  | 
543  |  |  */  | 
544  |  | int dm_message_supports_precise_timestamps(void);  | 
545  |  |  | 
546  |  | /*  | 
547  |  |  * Precise timestamps and histogram support.  | 
548  |  |  *   | 
549  |  |  * Test for the presence of precise_timestamps and histogram support.  | 
550  |  |  */  | 
551  |  | int dm_stats_driver_supports_precise(void);  | 
552  |  | int dm_stats_driver_supports_histogram(void);  | 
553  |  |  | 
554  |  | /*  | 
555  |  |  * Returns 1 if the specified region has the precise_timestamps feature  | 
556  |  |  * enabled (i.e. produces nanosecond-precision counter values) or 0 for  | 
557  |  |  * a region using the default millisecond precision.  | 
558  |  |  */  | 
559  |  | int dm_stats_get_region_precise_timestamps(const struct dm_stats *dms,  | 
560  |  |              uint64_t region_id);  | 
561  |  |  | 
562  |  | /*  | 
563  |  |  * Returns 1 if the region at the current cursor location has the  | 
564  |  |  * precise_timestamps feature enabled (i.e. produces  | 
565  |  |  * nanosecond-precision counter values) or 0 for a region using the  | 
566  |  |  * default millisecond precision.  | 
567  |  |  */  | 
568  |  | int dm_stats_get_current_region_precise_timestamps(const struct dm_stats *dms);  | 
569  |  |  | 
570  |  | #define DM_STATS_ALL_PROGRAMS ""  | 
571  |  | /*  | 
572  |  |  * Parse the response from a @stats_list message. dm_stats_list will  | 
573  |  |  * allocate the necessary dm_stats and dm_stats region structures from  | 
574  |  |  * the embedded dm_pool. No counter data will be obtained (the counters  | 
575  |  |  * members of dm_stats_region objects are set to NULL).  | 
576  |  |  *  | 
577  |  |  * A program_id may optionally be supplied; if the argument is non-NULL  | 
578  |  |  * only regions with a matching program_id value will be considered. If  | 
579  |  |  * the argument is NULL then the default program_id associated with the  | 
580  |  |  * dm_stats handle will be used. Passing the special value  | 
581  |  |  * DM_STATS_ALL_PROGRAMS will cause all regions to be queried  | 
582  |  |  * regardless of region program_id.  | 
583  |  |  */  | 
584  |  | int dm_stats_list(struct dm_stats *dms, const char *program_id);  | 
585  |  |  | 
586  |  | #define DM_STATS_REGIONS_ALL UINT64_MAX  | 
587  |  | /*  | 
588  |  |  * Populate a dm_stats object with statistics for one or more regions of  | 
589  |  |  * the specified device.  | 
590  |  |  *  | 
591  |  |  * A program_id may optionally be supplied; if the argument is non-NULL  | 
592  |  |  * only regions with a matching program_id value will be considered. If  | 
593  |  |  * the argument is NULL then the default program_id associated with the  | 
594  |  |  * dm_stats handle will be used. Passing the special value  | 
595  |  |  * DM_STATS_ALL_PROGRAMS will cause all regions to be queried  | 
596  |  |  * regardless of region program_id.  | 
597  |  |  *  | 
598  |  |  * Passing the special value DM_STATS_REGIONS_ALL as the region_id  | 
599  |  |  * argument will attempt to retrieve all regions selected by the  | 
600  |  |  * program_id argument.  | 
601  |  |  *  | 
602  |  |  * If region_id is used to request a single region_id to be populated  | 
603  |  |  * the program_id is ignored.  | 
604  |  |  */  | 
605  |  | int dm_stats_populate(struct dm_stats *dms, const char *program_id,  | 
606  |  |           uint64_t region_id);  | 
607  |  |  | 
608  |  | /*  | 
609  |  |  * Create a new statistics region on the device bound to dms.  | 
610  |  |  *  | 
611  |  |  * start and len specify the region start and length in 512b sectors.  | 
612  |  |  * Passing zero for both start and len will create a region spanning  | 
613  |  |  * the entire device.  | 
614  |  |  *  | 
615  |  |  * Step determines how to subdivide the region into discrete counter  | 
616  |  |  * sets: a positive value specifies the size of areas into which the  | 
617  |  |  * region should be split while a negative value will split the region  | 
618  |  |  * into a number of areas equal to the absolute value of step:  | 
619  |  |  *  | 
620  |  |  * - a region with one area spanning the entire device:  | 
621  |  |  *  | 
622  |  |  *   dm_stats_create_region(dms, 0, 0, -1, p, a);  | 
623  |  |  *  | 
624  |  |  * - a region with areas of 1MiB:  | 
625  |  |  *  | 
626  |  |  *   dm_stats_create_region(dms, 0, 0, 1 << 11, p, a);  | 
627  |  |  *  | 
628  |  |  * - one 1MiB region starting at 1024 sectors with two areas:  | 
629  |  |  *  | 
630  |  |  *   dm_stats_create_region(dms, 1024, 1 << 11, -2, p, a);  | 
631  |  |  *  | 
632  |  |  * If precise is non-zero attempt to create a region with nanosecond  | 
633  |  |  * precision counters using the kernel precise_timestamps feature.  | 
634  |  |  *  | 
635  |  |  * precise - A flag to request nanosecond precision counters  | 
636  |  |  * to be used for this region.  | 
637  |  |  *  | 
638  |  |  * histogram_bounds - specify the boundaries of a latency histogram to  | 
639  |  |  * be tracked for the region. The values are expressed as an array of  | 
640  |  |  * uint64_t terminated with a zero. Values must be in order of ascending  | 
641  |  |  * magnitude and specify the upper bounds of successive histogram bins  | 
642  |  |  * in nanoseconds (with an implicit lower bound of zero on the first bin  | 
643  |  |  * and an implicit upper bound of infinity on the final bin). For  | 
644  |  |  * example:  | 
645  |  |  *  | 
646  |  |  *   uint64_t bounds_ary[] = { 1000, 2000, 3000, 0 }; | 
647  |  |  *  | 
648  |  |  * Specifies a histogram with four bins: 0-1000ns, 1000-2000ns,  | 
649  |  |  * 2000-3000ns and >3000ns.  | 
650  |  |  *  | 
651  |  |  * The smallest latency value that can be tracked for a region not using  | 
652  |  |  * precise_timestamps is 1ms: attempting to create a region with  | 
653  |  |  * histogram boundaries < 1ms will cause the precise_timestamps feature  | 
654  |  |  * to be enabled for that region automatically if it was not requested  | 
655  |  |  * explicitly.  | 
656  |  |  *  | 
657  |  |  * program_id is an optional string argument that identifies the  | 
658  |  |  * program creating the region. If program_id is NULL or the empty  | 
659  |  |  * string the default program_id stored in the handle will be used.  | 
660  |  |  *  | 
661  |  |  * user_data is an optional string argument that is added to the  | 
662  |  |  * content of the aux_data field stored with the statistics region by  | 
663  |  |  * the kernel.  | 
664  |  |  *  | 
665  |  |  * The library may also use this space internally, for example, to  | 
666  |  |  * store a group descriptor or other metadata: in this case the  | 
667  |  |  * library will strip any internal data fields from the value before  | 
668  |  |  * it is returned via a call to dm_stats_get_region_aux_data().  | 
669  |  |  *  | 
670  |  |  * The user data stored is not accessed by the library or kernel and  | 
671  |  |  * may be used to store an arbitrary data word (embedded whitespace is  | 
672  |  |  * not permitted).  | 
673  |  |  *  | 
674  |  |  * An application using both the library and direct access to the  | 
675  |  |  * @stats_list device-mapper message may see the internal values stored  | 
676  |  |  * in this field by the library. In such cases any string up to and  | 
677  |  |  * including the first '#' in the field must be treated as an opaque  | 
678  |  |  * value and preserved across any external modification of aux_data.  | 
679  |  |  *  | 
680  |  |  * The region_id of the newly-created region is returned in *region_id  | 
681  |  |  * if it is non-NULL.  | 
682  |  |  */  | 
683  |  | int dm_stats_create_region(struct dm_stats *dms, uint64_t *region_id,  | 
684  |  |          uint64_t start, uint64_t len, int64_t step,  | 
685  |  |          int precise, struct dm_histogram *bounds,  | 
686  |  |          const char *program_id, const char *user_data);  | 
687  |  |  | 
688  |  | /*  | 
689  |  |  * Delete the specified statistics region. This will also mark the  | 
690  |  |  * region as not-present and discard any existing statistics data.  | 
691  |  |  */  | 
692  |  | int dm_stats_delete_region(struct dm_stats *dms, uint64_t region_id);  | 
693  |  |  | 
694  |  | /*  | 
695  |  |  * Clear the specified statistics region. This requests the kernel to  | 
696  |  |  * zero all counter values (except in-flight I/O). Note that this  | 
697  |  |  * operation is not atomic with respect to reads of the counters; any IO  | 
698  |  |  * events occurring between the last print operation and the clear will  | 
699  |  |  * be lost. This can be avoided by using the atomic print-and-clear  | 
700  |  |  * function of the dm_stats_print_region() call or by using the higher  | 
701  |  |  * level dm_stats_populate() interface.  | 
702  |  |  */  | 
703  |  | int dm_stats_clear_region(struct dm_stats *dms, uint64_t region_id);  | 
704  |  |  | 
705  |  | /*  | 
706  |  |  * Print the current counter values for the specified statistics region  | 
707  |  |  * and return them as a string. The memory for the string buffer will  | 
708  |  |  * be allocated from the dm_stats handle's private pool and should be  | 
709  |  |  * returned by calling dm_stats_buffer_destroy() when no longer  | 
710  |  |  * required. The pointer will become invalid following any call that  | 
711  |  |  * clears or reinitializes the handle (destroy, list, populate, bind).  | 
712  |  |  *  | 
713  |  |  * This allows applications that wish to access the raw message response  | 
714  |  |  * to obtain it via a dm_stats handle; no parsing of the textual counter  | 
715  |  |  * data is carried out by this function.  | 
716  |  |  *  | 
717  |  |  * Most users are recommended to use the dm_stats_populate() call  | 
718  |  |  * instead since this will automatically parse the statistics data into  | 
719  |  |  * numeric form accessible via the dm_stats_get_*() counter access  | 
720  |  |  * methods.  | 
721  |  |  *  | 
722  |  |  * A subset of the data lines may be requested by setting the  | 
723  |  |  * start_line and num_lines parameters. If both are zero all data  | 
724  |  |  * lines are returned.  | 
725  |  |  *  | 
726  |  |  * If the clear parameter is non-zero the operation will also  | 
727  |  |  * atomically reset all counter values to zero (except in-flight IO).  | 
728  |  |  */  | 
729  |  | char *dm_stats_print_region(struct dm_stats *dms, uint64_t region_id,  | 
730  |  |           unsigned start_line, unsigned num_lines,  | 
731  |  |           unsigned clear);  | 
732  |  |  | 
733  |  | /*  | 
734  |  |  * Destroy a statistics response buffer obtained from a call to  | 
735  |  |  * dm_stats_print_region().  | 
736  |  |  */  | 
737  |  | void dm_stats_buffer_destroy(struct dm_stats *dms, char *buffer);  | 
738  |  |  | 
739  |  | /*  | 
740  |  |  * Determine the number of regions contained in a dm_stats handle  | 
741  |  |  * following a dm_stats_list() or dm_stats_populate() call.  | 
742  |  |  *  | 
743  |  |  * The value returned is the number of registered regions visible with the  | 
744  |  |  * program_id value used for the list or populate operation and may not be  | 
745  |  |  * equal to the highest present region_id (either due to program_id  | 
746  |  |  * filtering or gaps in the sequence of region_id values).  | 
747  |  |  *  | 
748  |  |  * Always returns zero on an empty handle.  | 
749  |  |  */  | 
750  |  | uint64_t dm_stats_get_nr_regions(const struct dm_stats *dms);  | 
751  |  |  | 
752  |  | /*  | 
753  |  |  * Determine the number of groups contained in a dm_stats handle  | 
754  |  |  * following a dm_stats_list() or dm_stats_populate() call.  | 
755  |  |  *  | 
756  |  |  * The value returned is the number of registered groups visible with the  | 
757  |  |  * program_id value used for the list or populate operation and may not be  | 
758  |  |  * equal to the highest present group_id (either due to program_id  | 
759  |  |  * filtering or gaps in the sequence of group_id values).  | 
760  |  |  *  | 
761  |  |  * Always returns zero on an empty handle.  | 
762  |  |  */  | 
763  |  | uint64_t dm_stats_get_nr_groups(const struct dm_stats *dms);  | 
764  |  |  | 
765  |  | /*  | 
766  |  |  * Test whether region_id is present in this dm_stats handle.  | 
767  |  |  */  | 
768  |  | int dm_stats_region_present(const struct dm_stats *dms, uint64_t region_id);  | 
769  |  |  | 
770  |  | /*  | 
771  |  |  * Returns the number of areas (counter sets) contained in the specified  | 
772  |  |  * region_id of the supplied dm_stats handle.  | 
773  |  |  */  | 
774  |  | uint64_t dm_stats_get_region_nr_areas(const struct dm_stats *dms,  | 
775  |  |               uint64_t region_id);  | 
776  |  |  | 
777  |  | /*  | 
778  |  |  * Returns the total number of areas (counter sets) in all regions of the  | 
779  |  |  * given dm_stats object.  | 
780  |  |  */  | 
781  |  | uint64_t dm_stats_get_nr_areas(const struct dm_stats *dms);  | 
782  |  |  | 
783  |  | /*  | 
784  |  |  * Test whether group_id is present in this dm_stats handle.  | 
785  |  |  */  | 
786  |  | int dm_stats_group_present(const struct dm_stats *dms, uint64_t group_id);  | 
787  |  |  | 
788  |  | /*  | 
789  |  |  * Return the number of bins in the histogram configuration for the  | 
790  |  |  * specified region or zero if no histogram specification is configured.  | 
791  |  |  * Valid following a dm_stats_list() or dm_stats_populate() operation.  | 
792  |  |  */  | 
793  |  | int dm_stats_get_region_nr_histogram_bins(const struct dm_stats *dms,  | 
794  |  |             uint64_t region_id);  | 
795  |  |  | 
796  |  | /*  | 
797  |  |  * Parse a histogram string with optional unit suffixes into a  | 
798  |  |  * dm_histogram bounds description.  | 
799  |  |  *  | 
800  |  |  * A histogram string is a string of numbers "n1,n2,n3,..." that  | 
801  |  |  * represent the boundaries of a histogram. The first and final bins  | 
802  |  |  * have implicit lower and upper bounds of zero and infinity  | 
803  |  |  * respectively and boundary values must occur in order of ascending  | 
804  |  |  * magnitude.  Unless a unit suffix is given all values are specified in  | 
805  |  |  * nanoseconds.  | 
806  |  |  *  | 
807  |  |  * For example, if bounds_str="300,600,900", the region will be created  | 
808  |  |  * with a histogram containing four bins. Each report will include four  | 
809  |  |  * numbers a:b:c:d. a is the number of requests that took between 0 and  | 
810  |  |  * 300ns to complete, b is the number of requests that took 300-600ns to  | 
811  |  |  * complete, c is the number of requests that took 600-900ns to complete  | 
812  |  |  * and d is the number of requests that took more than 900ns to  | 
813  |  |  * complete.  | 
814  |  |  *  | 
815  |  |  * An optional unit suffix of 's', 'ms', 'us', or 'ns' may be used to  | 
816  |  |  * specify units of seconds, milliseconds, microseconds, or nanoseconds:  | 
817  |  |  *  | 
818  |  |  *   bounds_str="1ns,1us,1ms,1s"  | 
819  |  |  *   bounds_str="500us,1ms,1500us,2ms"  | 
820  |  |  *   bounds_str="200ms,400ms,600ms,800ms,1s"  | 
821  |  |  *  | 
822  |  |  * The smallest valid unit of time for a histogram specification depends  | 
823  |  |  * on whether the region uses precise timestamps: for a region with the  | 
824  |  |  * default millisecond precision the smallest possible histogram boundary  | 
825  |  |  * magnitude is one millisecond: attempting to use a histogram with a  | 
826  |  |  * boundary less than one millisecond when creating a region will cause  | 
827  |  |  * the region to be created with the precise_timestamps feature enabled.  | 
828  |  |  *  | 
829  |  |  * On success a pointer to the struct dm_histogram representing the  | 
830  |  |  * bounds values is returned, or NULL in the case of error. The returned  | 
831  |  |  * pointer should be freed using dm_free() when no longer required.  | 
832  |  |  */  | 
833  |  | struct dm_histogram *dm_histogram_bounds_from_string(const char *bounds_str);  | 
834  |  |  | 
835  |  | /*  | 
836  |  |  * Parse a zero terminated array of uint64_t into a dm_histogram bounds  | 
837  |  |  * description.  | 
838  |  |  *  | 
839  |  |  * Each value in the array specifies the upper bound of a bin in the  | 
840  |  |  * latency histogram in nanoseconds. Values must appear in ascending  | 
841  |  |  * order of magnitude.  | 
842  |  |  *  | 
843  |  |  * The smallest valid unit of time for a histogram specification depends  | 
844  |  |  * on whether the region uses precise timestamps: for a region with the  | 
845  |  |  * default millisecond precision the smallest possible histogram boundary  | 
846  |  |  * magnitude is one millisecond: attempting to use a histogram with a  | 
847  |  |  * boundary less than one millisecond when creating a region will cause  | 
848  |  |  * the region to be created with the precise_timestamps feature enabled.  | 
849  |  |  */  | 
850  |  | struct dm_histogram *dm_histogram_bounds_from_uint64(const uint64_t *bounds);  | 
851  |  |  | 
852  |  | /*  | 
853  |  |  * Destroy the histogram bounds array obtained from a call to  | 
854  |  |  * dm_histogram_bounds_from_string().  | 
855  |  |  */  | 
856  |  | void dm_histogram_bounds_destroy(struct dm_histogram *bounds);  | 
857  |  |  | 
858  |  | /*  | 
859  |  |  * Destroy a dm_stats object and all associated regions, counter  | 
860  |  |  * sets and histograms.  | 
861  |  |  */  | 
862  |  | void dm_stats_destroy(struct dm_stats *dms);  | 
863  |  |  | 
864  |  | /*  | 
865  |  |  * Counter sampling interval  | 
866  |  |  */  | 
867  |  |  | 
868  |  | /*  | 
869  |  |  * Set the sampling interval for counter data to the specified value in  | 
870  |  |  * either nanoseconds or milliseconds.  | 
871  |  |  *  | 
872  |  |  * The interval is used to calculate time-based metrics from the basic  | 
873  |  |  * counter data: an interval must be set before calling any of the  | 
874  |  |  * metric methods.  | 
875  |  |  *  | 
876  |  |  * For best accuracy the duration should be measured and updated at the  | 
877  |  |  * end of each interval.  | 
878  |  |  *  | 
879  |  |  * All values are stored internally with nanosecond precision and are  | 
880  |  |  * converted to or from ms when the millisecond interfaces are used.  | 
881  |  |  */  | 
882  |  | void dm_stats_set_sampling_interval_ns(struct dm_stats *dms,  | 
883  |  |                uint64_t interval_ns);  | 
884  |  |  | 
885  |  | void dm_stats_set_sampling_interval_ms(struct dm_stats *dms,  | 
886  |  |                uint64_t interval_ms);  | 
887  |  |  | 
888  |  | /*  | 
889  |  |  * Retrieve the configured sampling interval in either nanoseconds or  | 
890  |  |  * milliseconds.  | 
891  |  |  */  | 
892  |  | uint64_t dm_stats_get_sampling_interval_ns(const struct dm_stats *dms);  | 
893  |  | uint64_t dm_stats_get_sampling_interval_ms(const struct dm_stats *dms);  | 
894  |  |  | 
895  |  | /*  | 
896  |  |  * Override program_id. This may be used to change the default  | 
897  |  |  * program_id value for an existing handle. If the allow_empty argument  | 
898  |  |  * is non-zero a NULL or empty program_id is permitted.  | 
899  |  |  *  | 
900  |  |  * Use with caution! Most users of the library should set a valid,  | 
901  |  |  * non-NULL program_id for every statistics region created. Failing to  | 
902  |  |  * do so may result in confusing state when multiple programs are  | 
903  |  |  * creating and managing statistics regions.  | 
904  |  |  *  | 
905  |  |  * All users of the library are encouraged to choose an unambiguous,  | 
906  |  |  * unique program_id: this could be based on PID (for programs that  | 
907  |  |  * create, report, and delete regions in a single process), session id,  | 
908  |  |  * executable name, or some other distinguishing string.  | 
909  |  |  *  | 
910  |  |  * Use of the empty string as a program_id does not simplify use of the  | 
911  |  |  * library or the command line tools and use of this value is strongly  | 
912  |  |  * discouraged.  | 
913  |  |  */  | 
914  |  | int dm_stats_set_program_id(struct dm_stats *dms, int allow_empty,  | 
915  |  |           const char *program_id);  | 
916  |  |  | 
917  |  | /*  | 
918  |  |  * Region properties: size, length & area_len.  | 
919  |  |  *  | 
920  |  |  * Region start and length are returned in units of 512b as specified  | 
921  |  |  * at region creation time. The area_len value gives the size of areas  | 
922  |  |  * into which the region has been subdivided. For regions with a single  | 
923  |  |  * area spanning the range this value is equal to the region length.  | 
924  |  |  *  | 
925  |  |  * For regions created with a specified number of areas the value  | 
926  |  |  * represents the size of the areas into which the kernel divided the  | 
927  |  |  * region excluding any rounding of the last area size. The number of  | 
928  |  |  * areas may be obtained using the dm_stats_nr_areas_region() call.  | 
929  |  |  *  | 
930  |  |  * All values are returned in units of 512b sectors.  | 
931  |  |  */  | 
932  |  | int dm_stats_get_region_start(const struct dm_stats *dms, uint64_t *start,  | 
933  |  |             uint64_t region_id);  | 
934  |  |  | 
935  |  | int dm_stats_get_region_len(const struct dm_stats *dms, uint64_t *len,  | 
936  |  |           uint64_t region_id);  | 
937  |  |  | 
938  |  | int dm_stats_get_region_area_len(const struct dm_stats *dms,  | 
939  |  |          uint64_t *len, uint64_t region_id);  | 
940  |  |  | 
941  |  | /*  | 
942  |  |  * Area properties: start, offset and length.  | 
943  |  |  *  | 
944  |  |  * The area length is always equal to the area length of the region  | 
945  |  |  * that contains it and is obtained from dm_stats_get_region_area_len().  | 
946  |  |  *  | 
947  |  |  * The start of an area is a function of the area_id and the containing  | 
948  |  |  * region's start and area length: it gives the absolute offset into the  | 
949  |  |  * containing device of the beginning of the area.  | 
950  |  |  *  | 
951  |  |  * The offset expresses the area's relative offset into the current  | 
952  |  |  * region. I.e. the area start minus the start offset of the containing  | 
953  |  |  * region.  | 
954  |  |  *  | 
955  |  |  * All values are returned in units of 512b sectors.  | 
956  |  |  */  | 
957  |  | int dm_stats_get_area_start(const struct dm_stats *dms, uint64_t *start,  | 
958  |  |           uint64_t region_id, uint64_t area_id);  | 
959  |  |  | 
960  |  | int dm_stats_get_area_offset(const struct dm_stats *dms, uint64_t *offset,  | 
961  |  |            uint64_t region_id, uint64_t area_id);  | 
962  |  |  | 
963  |  | /*  | 
964  |  |  * Retrieve program_id and user aux_data for a specific region.  | 
965  |  |  *  | 
966  |  |  * Only valid following a call to dm_stats_list().  | 
967  |  |  */  | 
968  |  |  | 
969  |  | /*  | 
970  |  |  * Retrieve program_id for the specified region.  | 
971  |  |  *  | 
972  |  |  * The returned pointer does not need to be freed separately from the  | 
973  |  |  * dm_stats handle but will become invalid after a dm_stats_destroy(),  | 
974  |  |  * dm_stats_list(), dm_stats_populate(), or dm_stats_bind*() of the  | 
975  |  |  * handle from which it was obtained.  | 
976  |  |  */  | 
977  |  | const char *dm_stats_get_region_program_id(const struct dm_stats *dms,  | 
978  |  |              uint64_t region_id);  | 
979  |  |  | 
980  |  | /*  | 
981  |  |  * Retrieve user aux_data set for the specified region. This function  | 
982  |  |  * will return any stored user aux_data as a string in the memory  | 
983  |  |  * pointed to by the aux_data argument.  | 
984  |  |  *  | 
985  |  |  * Any library internal aux_data fields, such as DMS_GROUP descriptors,  | 
986  |  |  * are stripped before the value is returned.  | 
987  |  |  *  | 
988  |  |  * The returned pointer does not need to be freed separately from the  | 
989  |  |  * dm_stats handle but will become invalid after a dm_stats_destroy(),  | 
990  |  |  * dm_stats_list(), dm_stats_populate(), or dm_stats_bind*() of the  | 
991  |  |  * handle from which it was obtained.  | 
992  |  |  */  | 
993  |  | const char *dm_stats_get_region_aux_data(const struct dm_stats *dms,  | 
994  |  |            uint64_t region_id);  | 
995  |  |  | 
996  |  | typedef enum dm_stats_obj_type_e { | 
997  |  |   DM_STATS_OBJECT_TYPE_NONE,  | 
998  |  |   DM_STATS_OBJECT_TYPE_AREA,  | 
999  |  |   DM_STATS_OBJECT_TYPE_REGION,  | 
1000  |  |   DM_STATS_OBJECT_TYPE_GROUP  | 
1001  |  | } dm_stats_obj_type_t;  | 
1002  |  |  | 
1003  |  | /*  | 
1004  |  |  * Statistics cursor  | 
1005  |  |  *  | 
1006  |  |  * A dm_stats handle maintains an optional cursor into the statistics  | 
1007  |  |  * tables that it stores. Iterators are provided to visit each region,  | 
1008  |  |  * area, or group in a handle and accessor methods are provided to  | 
1009  |  |  * obtain properties and values for the object at the current cursor  | 
1010  |  |  * position.  | 
1011  |  |  *  | 
1012  |  |  * Using the cursor simplifies walking all regions or groups when  | 
1013  |  |  * the tables are sparse (i.e. contains some present and some  | 
1014  |  |  * non-present region_id or group_id values either due to program_id  | 
1015  |  |  * filtering or the ordering of region and group creation and deletion).  | 
1016  |  |  *  | 
1017  |  |  * Simple macros are provided to visit each area, region, or group,  | 
1018  |  |  * contained in a handle and applications are encouraged to use these  | 
1019  |  |  * where possible.  | 
1020  |  |  */  | 
1021  |  |  | 
1022  |  | /*  | 
1023  |  |  * Walk flags are used to initialise a dm_stats handle's cursor control  | 
1024  |  |  * and to select region or group aggregation when calling a metric or  | 
1025  |  |  * counter property method with immediate group, region, and area ID  | 
1026  |  |  * values.  | 
1027  |  |  *  | 
1028  |  |  * Walk flags are stored in the uppermost word of a uint64_t so that  | 
1029  |  |  * a region_id or group_id may be encoded in the lower bits. This  | 
1030  |  |  * allows an aggregate region_id or group_id to be specified when  | 
1031  |  |  * retrieving counter or metric values.  | 
1032  |  |  *  | 
1033  |  |  * Flags may be ORred together when used to initialise a dm_stats_walk:  | 
1034  |  |  * the resulting walk will visit instance of each type specified by  | 
1035  |  |  * the flag combination.  | 
1036  |  |  */  | 
1037  |  | #define DM_STATS_WALK_AREA   0x1000000000000ULL  | 
1038  |  | #define DM_STATS_WALK_REGION 0x2000000000000ULL  | 
1039  |  | #define DM_STATS_WALK_GROUP  0x4000000000000ULL  | 
1040  |  |  | 
1041  |  | #define DM_STATS_WALK_ALL    0x7000000000000ULL  | 
1042  |  | #define DM_STATS_WALK_DEFAULT (DM_STATS_WALK_AREA | DM_STATS_WALK_REGION)  | 
1043  |  |  | 
1044  |  | /*  | 
1045  |  |  * Skip regions from a DM_STATS_WALK_REGION that contain only a single  | 
1046  |  |  * area: in this case the region's aggregate values are identical to  | 
1047  |  |  * the values of the single contained area. Setting this flag will  | 
1048  |  |  * suppress these duplicate entries during a dm_stats_walk_* with the  | 
1049  |  |  * DM_STATS_WALK_REGION flag set.  | 
1050  |  |  */  | 
1051  |  | #define DM_STATS_WALK_SKIP_SINGLE_AREA   0x8000000000000ULL  | 
1052  |  |  | 
1053  |  | /*  | 
1054  |  |  * Initialise the cursor control of a dm_stats handle for the specified  | 
1055  |  |  * walk type(s). Including a walk flag in the flags argument will cause  | 
1056  |  |  * any subsequent walk to visit that type of object (until the next  | 
1057  |  |  * call to dm_stats_walk_init()).  | 
1058  |  |  */  | 
1059  |  | int dm_stats_walk_init(struct dm_stats *dms, uint64_t flags);  | 
1060  |  |  | 
1061  |  | /*  | 
1062  |  |  * Set the cursor of a dm_stats handle to address the first present  | 
1063  |  |  * group, region, or area of the currently configured walk. It is  | 
1064  |  |  * valid to attempt to walk a NULL stats handle or a handle containing  | 
1065  |  |  * no present regions; in this case any call to dm_stats_walk_next()  | 
1066  |  |  * becomes a no-op and all calls to dm_stats_walk_end() return true.  | 
1067  |  |  */  | 
1068  |  | void dm_stats_walk_start(struct dm_stats *dms);  | 
1069  |  |  | 
1070  |  | /*  | 
1071  |  |  * Advance the statistics cursor to the next area, or to the next  | 
1072  |  |  * present region if at the end of the current region. If the end of  | 
1073  |  |  * the region, area, or group tables is reached a subsequent call to  | 
1074  |  |  * dm_stats_walk_end() will return 1 and dm_stats_object_type() called  | 
1075  |  |  * on the location will return DM_STATS_OBJECT_TYPE_NONE,  | 
1076  |  |  */  | 
1077  |  | void dm_stats_walk_next(struct dm_stats *dms);  | 
1078  |  |  | 
1079  |  | /*  | 
1080  |  |  * Force the statistics cursor to advance to the next region. This will  | 
1081  |  |  * stop any in-progress area walk (by clearing DM_STATS_WALK_AREA) and  | 
1082  |  |  * advance the cursor to the next present region, the first present  | 
1083  |  |  * group (if DM_STATS_GROUP_WALK is set), or to the end. In this case a  | 
1084  |  |  * subsequent call to dm_stats_walk_end() will return 1 and a call to  | 
1085  |  |  * dm_stats_object_type() for the location will return  | 
1086  |  |  * DM_STATS_OBJECT_TYPE_NONE.  | 
1087  |  |  */  | 
1088  |  | void dm_stats_walk_next_region(struct dm_stats *dms);  | 
1089  |  |  | 
1090  |  | /*  | 
1091  |  |  * Test whether the end of a statistics walk has been reached.  | 
1092  |  |  */  | 
1093  |  | int dm_stats_walk_end(struct dm_stats *dms);  | 
1094  |  |  | 
1095  |  | /*  | 
1096  |  |  * Return the type of object at the location specified by region_id  | 
1097  |  |  * and area_id. If either region_id or area_id uses one of the special  | 
1098  |  |  * values DM_STATS_REGION_CURRENT or DM_STATS_AREA_CURRENT the  | 
1099  |  |  * corresponding region or area identifier will be taken from the  | 
1100  |  |  * current cursor location. If the cursor location or the value encoded  | 
1101  |  |  * by region_id and area_id indicates an aggregate region or group,  | 
1102  |  |  * this will be reflected in the value returned.  | 
1103  |  |  */  | 
1104  |  | dm_stats_obj_type_t dm_stats_object_type(const struct dm_stats *dms,  | 
1105  |  |            uint64_t region_id,  | 
1106  |  |            uint64_t area_id);  | 
1107  |  |  | 
1108  |  | /*  | 
1109  |  |  * Return the type of object at the current stats cursor location.  | 
1110  |  |  */  | 
1111  |  | dm_stats_obj_type_t dm_stats_current_object_type(const struct dm_stats *dms);  | 
1112  |  |  | 
1113  |  | /*  | 
1114  |  |  * Stats iterators  | 
1115  |  |  *  | 
1116  |  |  * C 'for' and 'do'/'while' style iterators for dm_stats data.  | 
1117  |  |  *  | 
1118  |  |  * It is not safe to call any function that modifies the region table  | 
1119  |  |  * within the loop body (i.e. dm_stats_list(), dm_stats_populate(),  | 
1120  |  |  * dm_stats_init(), or dm_stats_destroy()).  | 
1121  |  |  *  | 
1122  |  |  * All counter and property (dm_stats_get_*) access methods, as well as  | 
1123  |  |  * dm_stats_populate_region() can be safely called from loops.  | 
1124  |  |  *  | 
1125  |  |  */  | 
1126  |  |  | 
1127  |  | /*  | 
1128  |  |  * Iterate over the regions table visiting each region.  | 
1129  |  |  *  | 
1130  |  |  * If the region table is empty or unpopulated the loop body will not be  | 
1131  |  |  * executed.  | 
1132  |  |  */  | 
1133  |  | #define dm_stats_foreach_region(dms)        \  | 
1134  |  | for (dm_stats_walk_init((dms), DM_STATS_WALK_REGION),   \  | 
1135  |  |      dm_stats_walk_start((dms));        \  | 
1136  |  |      !dm_stats_walk_end((dms)); dm_stats_walk_next_region((dms)))  | 
1137  |  |  | 
1138  |  | /*  | 
1139  |  |  * Iterate over the regions table visiting each area.  | 
1140  |  |  *  | 
1141  |  |  * If the region table is empty or unpopulated the loop body will not  | 
1142  |  |  * be executed.  | 
1143  |  |  */  | 
1144  |  | #define dm_stats_foreach_area(dms)        \  | 
1145  |  | for (dm_stats_walk_init((dms), DM_STATS_WALK_AREA),   \  | 
1146  |  |      dm_stats_walk_start((dms));        \  | 
1147  |  |      !dm_stats_walk_end((dms)); dm_stats_walk_next((dms)))  | 
1148  |  |  | 
1149  |  | /*  | 
1150  |  |  * Iterate over the regions table visiting each group. Metric and  | 
1151  |  |  * counter methods will return values for the group.  | 
1152  |  |  *  | 
1153  |  |  * If the group table is empty or unpopulated the loop body will not  | 
1154  |  |  * be executed.  | 
1155  |  |  */  | 
1156  |  | #define dm_stats_foreach_group(dms)       \  | 
1157  |  | for (dm_stats_walk_init((dms), DM_STATS_WALK_GROUP),    \  | 
1158  |  |      dm_stats_walk_start(dms);          \  | 
1159  |  |      !dm_stats_walk_end(dms);         \  | 
1160  |  |      dm_stats_walk_next(dms))  | 
1161  |  |  | 
1162  |  | /*  | 
1163  |  |  * Start a walk iterating over the regions contained in dm_stats handle  | 
1164  |  |  * 'dms'.  | 
1165  |  |  *  | 
1166  |  |  * The body of the loop should call dm_stats_walk_next() or  | 
1167  |  |  * dm_stats_walk_next_region() to advance to the next element.  | 
1168  |  |  *  | 
1169  |  |  * The loop body is executed at least once even if the stats handle is  | 
1170  |  |  * empty.  | 
1171  |  |  */  | 
1172  |  | #define dm_stats_walk_do(dms)         \  | 
1173  |  | do {                \ | 
1174  |  |   dm_stats_walk_start((dms));       \  | 
1175  |  |   do  | 
1176  |  |  | 
1177  |  | /*  | 
1178  |  |  * Start a 'while' style loop or end a 'do..while' loop iterating over the  | 
1179  |  |  * regions contained in dm_stats handle 'dms'.  | 
1180  |  |  */  | 
1181  |  | #define dm_stats_walk_while(dms)        \  | 
1182  |  |   while(!dm_stats_walk_end((dms)));     \  | 
1183  |  | } while (0)  | 
1184  |  |  | 
1185  |  | /*  | 
1186  |  |  * Cursor relative property methods  | 
1187  |  |  *  | 
1188  |  |  * Calls with the prefix dm_stats_get_current_* operate relative to the  | 
1189  |  |  * current cursor location, returning properties for the current region  | 
1190  |  |  * or area of the supplied dm_stats handle.  | 
1191  |  |  *  | 
1192  |  |  */  | 
1193  |  |  | 
1194  |  | /*  | 
1195  |  |  * Returns the number of areas (counter sets) contained in the current  | 
1196  |  |  * region of the supplied dm_stats handle.  | 
1197  |  |  */  | 
1198  |  | uint64_t dm_stats_get_current_nr_areas(const struct dm_stats *dms);  | 
1199  |  |  | 
1200  |  | /*  | 
1201  |  |  * Retrieve the current values of the stats cursor.  | 
1202  |  |  */  | 
1203  |  | uint64_t dm_stats_get_current_region(const struct dm_stats *dms);  | 
1204  |  | uint64_t dm_stats_get_current_area(const struct dm_stats *dms);  | 
1205  |  |  | 
1206  |  | /*  | 
1207  |  |  * Current region properties: size, length & area_len.  | 
1208  |  |  *  | 
1209  |  |  * See the comments for the equivalent dm_stats_get_* versions for a  | 
1210  |  |  * complete description of these methods.  | 
1211  |  |  *  | 
1212  |  |  * All values are returned in units of 512b sectors.  | 
1213  |  |  */  | 
1214  |  | int dm_stats_get_current_region_start(const struct dm_stats *dms,  | 
1215  |  |               uint64_t *start);  | 
1216  |  |  | 
1217  |  | int dm_stats_get_current_region_len(const struct dm_stats *dms,  | 
1218  |  |             uint64_t *len);  | 
1219  |  |  | 
1220  |  | int dm_stats_get_current_region_area_len(const struct dm_stats *dms,  | 
1221  |  |            uint64_t *area_len);  | 
1222  |  |  | 
1223  |  | /*  | 
1224  |  |  * Current area properties: start and length.  | 
1225  |  |  *  | 
1226  |  |  * See the comments for the equivalent dm_stats_get_* versions for a  | 
1227  |  |  * complete description of these methods.  | 
1228  |  |  *  | 
1229  |  |  * All values are returned in units of 512b sectors.  | 
1230  |  |  */  | 
1231  |  | int dm_stats_get_current_area_start(const struct dm_stats *dms,  | 
1232  |  |             uint64_t *start);  | 
1233  |  |  | 
1234  |  | int dm_stats_get_current_area_offset(const struct dm_stats *dms,  | 
1235  |  |              uint64_t *offset);  | 
1236  |  |  | 
1237  |  | int dm_stats_get_current_area_len(const struct dm_stats *dms,  | 
1238  |  |                uint64_t *len);  | 
1239  |  |  | 
1240  |  | /*  | 
1241  |  |  * Return a pointer to the program_id string for region at the current  | 
1242  |  |  * cursor location.  | 
1243  |  |  */  | 
1244  |  | const char *dm_stats_get_current_region_program_id(const struct dm_stats *dms);  | 
1245  |  |  | 
1246  |  | /*  | 
1247  |  |  * Return a pointer to the user aux_data string for the region at the  | 
1248  |  |  * current cursor location.  | 
1249  |  |  */  | 
1250  |  | const char *dm_stats_get_current_region_aux_data(const struct dm_stats *dms);  | 
1251  |  |  | 
1252  |  | /*  | 
1253  |  |  * Statistics groups and data aggregation.  | 
1254  |  |  */  | 
1255  |  |  | 
1256  |  | /*  | 
1257  |  |  * Create a new group in stats handle dms from the group descriptor  | 
1258  |  |  * passed in group. The group descriptor is a string containing a list  | 
1259  |  |  * of region_id values that will be included in the group. The first  | 
1260  |  |  * region_id found will be the group leader. Ranges of identifiers may  | 
1261  |  |  * be expressed as "M-N", where M and N are the start and end region_id  | 
1262  |  |  * values for the range.  | 
1263  |  |  */  | 
1264  |  | int dm_stats_create_group(struct dm_stats *dms, const char *members,  | 
1265  |  |         const char *alias, uint64_t *group_id);  | 
1266  |  |  | 
1267  |  | /*  | 
1268  |  |  * Remove the specified group_id. If the remove argument is zero the  | 
1269  |  |  * group will be removed but the regions that it contained will remain.  | 
1270  |  |  * If remove is non-zero then all regions that belong to the group will  | 
1271  |  |  * also be removed.  | 
1272  |  |  */  | 
1273  |  | int dm_stats_delete_group(struct dm_stats *dms, uint64_t group_id, int remove);  | 
1274  |  |  | 
1275  |  | /*  | 
1276  |  |  * Set an alias for this group or region. The alias will be returned  | 
1277  |  |  * instead of the normal dm-stats name for this region or group.  | 
1278  |  |  */  | 
1279  |  | int dm_stats_set_alias(struct dm_stats *dms, uint64_t group_id,  | 
1280  |  |            const char *alias);  | 
1281  |  |  | 
1282  |  | /*  | 
1283  |  |  * Returns a pointer to the currently configured alias for id, or the  | 
1284  |  |  * name of the dm device the handle is bound to if no alias has been  | 
1285  |  |  * set. The pointer will be freed automatically when a new alias is set  | 
1286  |  |  * or when the stats handle is cleared.  | 
1287  |  |  */  | 
1288  |  | const char *dm_stats_get_alias(const struct dm_stats *dms, uint64_t id);  | 
1289  |  |  | 
1290  |  | #define DM_STATS_GROUP_NONE UINT64_MAX  | 
1291  |  | /*  | 
1292  |  |  * Return the group_id that the specified region_id belongs to, or the  | 
1293  |  |  * special value DM_STATS_GROUP_NONE if the region does not belong  | 
1294  |  |  * to any group.  | 
1295  |  |  */  | 
1296  |  | uint64_t dm_stats_get_group_id(const struct dm_stats *dms, uint64_t region_id);  | 
1297  |  |  | 
1298  |  | /*  | 
1299  |  |  * Store a pointer to a string describing the regions that are members  | 
1300  |  |  * of the group specified by group_id in the memory pointed to by buf.  | 
1301  |  |  * The string is in the same format as the 'group' argument to  | 
1302  |  |  * dm_stats_create_group().  | 
1303  |  |  *  | 
1304  |  |  * The pointer does not need to be freed explicitly by the caller: it  | 
1305  |  |  * will become invalid following a subsequent dm_stats_list(),  | 
1306  |  |  * dm_stats_populate() or dm_stats_destroy() of the corresponding  | 
1307  |  |  * dm_stats handle.  | 
1308  |  |  */  | 
1309  |  | int dm_stats_get_group_descriptor(const struct dm_stats *dms,  | 
1310  |  |           uint64_t group_id, char **buf);  | 
1311  |  |  | 
1312  |  | /*  | 
1313  |  |  * Create regions that correspond to the extents of a file in the  | 
1314  |  |  * filesystem and optionally place them into a group.  | 
1315  |  |  *  | 
1316  |  |  * File descriptor fd must reference a regular file, open for reading,  | 
1317  |  |  * in a local file system that supports the FIEMAP ioctl, and that  | 
1318  |  |  * returns data describing the physical location of extents.  | 
1319  |  |  *  | 
1320  |  |  * The file descriptor can be closed by the caller following the call  | 
1321  |  |  * to dm_stats_create_regions_from_fd().  | 
1322  |  |  *  | 
1323  |  |  * Unless nogroup is non-zero the regions will be placed into a group  | 
1324  |  |  * and the group alias set to the value supplied (if alias is NULL no  | 
1325  |  |  * group alias will be assigned).  | 
1326  |  |  *  | 
1327  |  |  * On success the function returns a pointer to an array of uint64_t  | 
1328  |  |  * containing the IDs of the newly created regions. The region_id  | 
1329  |  |  * array is terminated by the value DM_STATS_REGION_NOT_PRESENT and  | 
1330  |  |  * should be freed using dm_free() when no longer required.  | 
1331  |  |  *  | 
1332  |  |  * On error NULL is returned.  | 
1333  |  |  *  | 
1334  |  |  * Following a call to dm_stats_create_regions_from_fd() the handle  | 
1335  |  |  * is guaranteed to be in a listed state, and to contain any region  | 
1336  |  |  * and group identifiers created by the operation.  | 
1337  |  |  *  | 
1338  |  |  * The group_id for the new group is equal to the region_id value in  | 
1339  |  |  * the first array element.  | 
1340  |  |  */  | 
1341  |  | uint64_t *dm_stats_create_regions_from_fd(struct dm_stats *dms, int fd,  | 
1342  |  |             int group, int precise,  | 
1343  |  |             struct dm_histogram *bounds,  | 
1344  |  |             const char *alias);  | 
1345  |  | /*  | 
1346  |  |  * Update a group of regions that correspond to the extents of a file  | 
1347  |  |  * in the filesystem, adding and removing regions to account for  | 
1348  |  |  * allocation changes in the underlying file.  | 
1349  |  |  *  | 
1350  |  |  * File descriptor fd must reference a regular file, open for reading,  | 
1351  |  |  * in a local file system that supports the FIEMAP ioctl, and that  | 
1352  |  |  * returns data describing the physical location of extents.  | 
1353  |  |  *  | 
1354  |  |  * The file descriptor can be closed by the caller following the call  | 
1355  |  |  * to dm_stats_update_regions_from_fd().  | 
1356  |  |  *  | 
1357  |  |  * On success the function returns a pointer to an array of uint64_t  | 
1358  |  |  * containing the IDs of the updated regions (including any existing  | 
1359  |  |  * regions that were not modified by the call).  | 
1360  |  |  *  | 
1361  |  |  * The region_id array is terminated by the special value  | 
1362  |  |  * DM_STATS_REGION_NOT_PRESENT and should be freed using dm_free()  | 
1363  |  |  * when no longer required.  | 
1364  |  |  *  | 
1365  |  |  * On error NULL is returned.  | 
1366  |  |  *  | 
1367  |  |  * Following a call to dm_stats_update_regions_from_fd() the handle  | 
1368  |  |  * is guaranteed to be in a listed state, and to contain any region  | 
1369  |  |  * and group identifiers created by the operation.  | 
1370  |  |  *  | 
1371  |  |  * This function cannot be used with file mapped regions that are  | 
1372  |  |  * not members of a group: either group the regions, or remove them  | 
1373  |  |  * and re-map them with dm_stats_create_regions_from_fd().  | 
1374  |  |  */  | 
1375  |  | uint64_t *dm_stats_update_regions_from_fd(struct dm_stats *dms, int fd,  | 
1376  |  |             uint64_t group_id);  | 
1377  |  |  | 
1378  |  |  | 
1379  |  | /*  | 
1380  |  |  * The file map monitoring daemon can monitor files in two distinct  | 
1381  |  |  * ways: the mode affects the behaviour of the daemon when a file  | 
1382  |  |  * under monitoring is renamed or unlinked, and the conditions which  | 
1383  |  |  * cause the daemon to terminate.  | 
1384  |  |  *  | 
1385  |  |  * In both modes, the daemon will always shut down when the group  | 
1386  |  |  * being monitored is deleted.  | 
1387  |  |  *  | 
1388  |  |  * Follow inode:  | 
1389  |  |  * The daemon follows the inode of the file, as it was at the time the  | 
1390  |  |  * daemon started. The file descriptor referencing the file is kept  | 
1391  |  |  * open at all times, and the daemon will exit when it detects that  | 
1392  |  |  * the file has been unlinked and it is the last holder of a reference  | 
1393  |  |  * to the file.  | 
1394  |  |  *  | 
1395  |  |  * This mode is useful if the file is expected to be renamed, or moved  | 
1396  |  |  * within the file system, while it is being monitored.  | 
1397  |  |  *  | 
1398  |  |  * Follow path:  | 
1399  |  |  * The daemon follows the path that was given on the daemon command  | 
1400  |  |  * line. The file descriptor referencing the file is re-opened on each  | 
1401  |  |  * iteration of the daemon, and the daemon will exit if no file exists  | 
1402  |  |  * at this location (a tolerance is allowed so that a brief delay  | 
1403  |  |  * between unlink() and creat() is permitted).  | 
1404  |  |  *  | 
1405  |  |  * This mode is useful if the file is updated by unlinking the original  | 
1406  |  |  * and placing a new file at the same path.  | 
1407  |  |  */  | 
1408  |  |  | 
1409  |  | typedef enum dm_filemapd_mode_e { | 
1410  |  |   DM_FILEMAPD_FOLLOW_INODE,  | 
1411  |  |   DM_FILEMAPD_FOLLOW_PATH,  | 
1412  |  |   DM_FILEMAPD_FOLLOW_NONE  | 
1413  |  | } dm_filemapd_mode_t;  | 
1414  |  |  | 
1415  |  | /*  | 
1416  |  |  * Parse a string representation of a dmfilemapd mode.  | 
1417  |  |  *  | 
1418  |  |  * Returns a valid dm_filemapd_mode_t value on success, or  | 
1419  |  |  * DM_FILEMAPD_FOLLOW_NONE on error.  | 
1420  |  |  */  | 
1421  |  | dm_filemapd_mode_t dm_filemapd_mode_from_string(const char *mode_str);  | 
1422  |  |  | 
1423  |  | /*  | 
1424  |  |  * Start the dmfilemapd filemap monitoring daemon for the specified  | 
1425  |  |  * file descriptor, group, and file system path. The daemon will  | 
1426  |  |  * monitor the file for allocation changes, and when a change is  | 
1427  |  |  * detected, call dm_stats_update_regions_from_fd() to update the  | 
1428  |  |  * mapped regions for the file.  | 
1429  |  |  *  | 
1430  |  |  * The path provided to dm_stats_start_filemapd() must be an absolute  | 
1431  |  |  * path, and should reflect the path of 'fd' at the time that it was  | 
1432  |  |  * opened.  | 
1433  |  |  *  | 
1434  |  |  * The mode parameter controls the behaviour of the daemon when the  | 
1435  |  |  * file being monitored is unlinked or moved: see the comments for  | 
1436  |  |  * dm_filemapd_mode_t for a full description and possible values.  | 
1437  |  |  *  | 
1438  |  |  * The daemon can be stopped at any time by sending SIGTERM to the  | 
1439  |  |  * daemon pid.  | 
1440  |  |  */  | 
1441  |  | int dm_stats_start_filemapd(int fd, uint64_t group_id, const char *path,  | 
1442  |  |           dm_filemapd_mode_t mode, unsigned foreground,  | 
1443  |  |           unsigned verbose);  | 
1444  |  |  | 
1445  |  | /*  | 
1446  |  |  * Call this to actually run the ioctl.  | 
1447  |  |  */  | 
1448  |  | int dm_task_run(struct dm_task *dmt);  | 
1449  |  |  | 
1450  |  | /*  | 
1451  |  |  * The errno from the last device-mapper ioctl performed by dm_task_run.  | 
1452  |  |  */  | 
1453  |  | int dm_task_get_errno(struct dm_task *dmt);  | 
1454  |  |  | 
1455  |  | /*  | 
1456  |  |  * Call this to make or remove the device nodes associated with previously  | 
1457  |  |  * issued commands.  | 
1458  |  |  */  | 
1459  |  | void dm_task_update_nodes(void);  | 
1460  |  |  | 
1461  |  | /*  | 
1462  |  |  * Mangling support  | 
1463  |  |  *  | 
1464  |  |  * Character whitelist: 0-9, A-Z, a-z, #+-.:=@_  | 
1465  |  |  * HEX mangling format: \xNN, NN being the hex value of the character.  | 
1466  |  |  * (whitelist and format supported by udev)  | 
1467  |  | */  | 
1468  |  | typedef enum dm_string_mangling_e { | 
1469  |  |   DM_STRING_MANGLING_NONE, /* do not mangle at all */  | 
1470  |  |   DM_STRING_MANGLING_AUTO, /* mangle only if not already mangled with hex, error when mixed */  | 
1471  |  |   DM_STRING_MANGLING_HEX   /* always mangle with hex encoding, no matter what the input is */  | 
1472  |  | } dm_string_mangling_t;  | 
1473  |  |  | 
1474  |  | /*  | 
1475  |  |  * Set/get mangling mode used for device-mapper names and uuids.  | 
1476  |  |  */  | 
1477  |  | int dm_set_name_mangling_mode(dm_string_mangling_t name_mangling);  | 
1478  |  | dm_string_mangling_t dm_get_name_mangling_mode(void);  | 
1479  |  |  | 
1480  |  | /*  | 
1481  |  |  * Get mangled/unmangled form of the device-mapper name or uuid  | 
1482  |  |  * irrespective of the global setting (set by dm_set_name_mangling_mode).  | 
1483  |  |  * The name or uuid returned needs to be freed after use by calling dm_free!  | 
1484  |  |  */  | 
1485  |  | char *dm_task_get_name_mangled(const struct dm_task *dmt);  | 
1486  |  | char *dm_task_get_name_unmangled(const struct dm_task *dmt);  | 
1487  |  | char *dm_task_get_uuid_mangled(const struct dm_task *dmt);  | 
1488  |  | char *dm_task_get_uuid_unmangled(const struct dm_task *dmt);  | 
1489  |  |  | 
1490  |  | /*  | 
1491  |  |  * Configure the device-mapper directory  | 
1492  |  |  */  | 
1493  |  | int dm_set_dev_dir(const char *dir);  | 
1494  |  | const char *dm_dir(void);  | 
1495  |  |  | 
1496  |  | /*  | 
1497  |  |  * Configure sysfs directory, /sys by default  | 
1498  |  |  */  | 
1499  |  | int dm_set_sysfs_dir(const char *dir);  | 
1500  |  | const char *dm_sysfs_dir(void);  | 
1501  |  |  | 
1502  |  | /*  | 
1503  |  |  * Configure default UUID prefix string.  | 
1504  |  |  * Conventionally this is a short capitalized prefix indicating the subsystem  | 
1505  |  |  * that is managing the devices, e.g. "LVM-" or "MPATH-".  | 
1506  |  |  * To support stacks of devices from different subsystems, recursive functions  | 
1507  |  |  * stop recursing if they reach a device with a different prefix.  | 
1508  |  |  */  | 
1509  |  | int dm_set_uuid_prefix(const char *uuid_prefix);  | 
1510  |  | const char *dm_uuid_prefix(void);  | 
1511  |  |  | 
1512  |  | /*  | 
1513  |  |  * Determine whether a major number belongs to device-mapper or not.  | 
1514  |  |  */  | 
1515  |  | int dm_is_dm_major(uint32_t major);  | 
1516  |  |  | 
1517  |  | /*  | 
1518  |  |  * Get associated device name for given major and minor number by reading  | 
1519  |  |  * the sysfs content. If this is a dm device, get associated dm name, the one  | 
1520  |  |  * that appears in /dev/mapper. DM names could be resolved this way only if  | 
1521  |  |  * kernel used >= 2.6.29, kernel name is found otherwise (e.g. dm-0).  | 
1522  |  |  * If prefer_kernel_name is set, the kernel name is always preferred over  | 
1523  |  |  * device-mapper name for dm devices no matter what the kernel version is.  | 
1524  |  |  * For non-dm devices, we always get associated kernel name, e.g sda, md0 etc.  | 
1525  |  |  * Returns 0 on error or if sysfs is not used (or configured incorrectly),  | 
1526  |  |  * otherwise returns 1 and the supplied buffer holds the device name.  | 
1527  |  |  */  | 
1528  |  | int dm_device_get_name(uint32_t major, uint32_t minor,  | 
1529  |  |            int prefer_kernel_name,  | 
1530  |  |            char *buf, size_t buf_size);  | 
1531  |  |  | 
1532  |  | /*  | 
1533  |  |  * Determine whether a device has any holders (devices  | 
1534  |  |  * using this device). If sysfs is not used (or configured  | 
1535  |  |  * incorrectly), returns 0.  | 
1536  |  |  */  | 
1537  |  | int dm_device_has_holders(uint32_t major, uint32_t minor);  | 
1538  |  |  | 
1539  |  | /*  | 
1540  |  |  * Determine whether a device contains mounted filesystem.  | 
1541  |  |  * If sysfs is not used (or configured incorrectly), returns 0.  | 
1542  |  |  */  | 
1543  |  | int dm_device_has_mounted_fs(uint32_t major, uint32_t minor);  | 
1544  |  |  | 
1545  |  |  | 
1546  |  | /*  | 
1547  |  |  * Callback is invoked for individual mountinfo lines,  | 
1548  |  |  * minor, major and mount target are parsed and unmangled.  | 
1549  |  |  */  | 
1550  |  | typedef int (*dm_mountinfo_line_callback_fn) (char *line, unsigned maj, unsigned min,  | 
1551  |  |                 char *target, void *cb_data);  | 
1552  |  |  | 
1553  |  | /*  | 
1554  |  |  * Read all lines from /proc/self/mountinfo,  | 
1555  |  |  * for each line calls read_fn callback.  | 
1556  |  |  */  | 
1557  |  | int dm_mountinfo_read(dm_mountinfo_line_callback_fn read_fn, void *cb_data);  | 
1558  |  |  | 
1559  |  | /*  | 
1560  |  |  * Initialise library  | 
1561  |  |  */  | 
1562  |  | void dm_lib_init(void) __attribute__((constructor));  | 
1563  |  |  | 
1564  |  | /*  | 
1565  |  |  * Release library resources  | 
1566  |  |  */  | 
1567  |  | void dm_lib_release(void);  | 
1568  |  | void dm_lib_exit(void) __attribute__((destructor));  | 
1569  |  |  | 
1570  |  | /* An optimisation for clients making repeated calls involving dm ioctls */  | 
1571  |  | void dm_hold_control_dev(int hold_open);  | 
1572  |  |  | 
1573  |  | /*  | 
1574  |  |  * Use NULL for all devices.  | 
1575  |  |  */  | 
1576  |  | int dm_mknodes(const char *name);  | 
1577  |  | int dm_driver_version(char *version, size_t size);  | 
1578  |  |  | 
1579  |  | /******************************************************  | 
1580  |  |  * Functions to build and manipulate trees of devices *  | 
1581  |  |  ******************************************************/  | 
1582  |  | struct dm_tree;  | 
1583  |  | struct dm_tree_node;  | 
1584  |  |  | 
1585  |  | /*  | 
1586  |  |  * Initialise an empty dependency tree.  | 
1587  |  |  *  | 
1588  |  |  * The tree consists of a root node together with one node for each mapped  | 
1589  |  |  * device which has child nodes for each device referenced in its table.  | 
1590  |  |  *  | 
1591  |  |  * Every node in the tree has one or more children and one or more parents.  | 
1592  |  |  *  | 
1593  |  |  * The root node is the parent/child of every node that doesn't have other  | 
1594  |  |  * parents/children.  | 
1595  |  |  */  | 
1596  |  | struct dm_tree *dm_tree_create(void);  | 
1597  |  | void dm_tree_free(struct dm_tree *tree);  | 
1598  |  |  | 
1599  |  | /*  | 
1600  |  |  * List of suffixes to be ignored when matching uuids against existing devices.  | 
1601  |  |  */  | 
1602  |  | void dm_tree_set_optional_uuid_suffixes(struct dm_tree *dtree, const char **optional_uuid_suffixes);  | 
1603  |  |  | 
1604  |  | /*  | 
1605  |  |  * Add nodes to the tree for a given device and all the devices it uses.  | 
1606  |  |  */  | 
1607  |  | int dm_tree_add_dev(struct dm_tree *tree, uint32_t major, uint32_t minor);  | 
1608  |  | int dm_tree_add_dev_with_udev_flags(struct dm_tree *tree, uint32_t major,  | 
1609  |  |             uint32_t minor, uint16_t udev_flags);  | 
1610  |  |  | 
1611  |  | /*  | 
1612  |  |  * Add a new node to the tree if it doesn't already exist.  | 
1613  |  |  */  | 
1614  |  | struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *tree,  | 
1615  |  |            const char *name,  | 
1616  |  |            const char *uuid,  | 
1617  |  |            uint32_t major, uint32_t minor,  | 
1618  |  |            int read_only,  | 
1619  |  |            int clear_inactive,  | 
1620  |  |            void *context);  | 
1621  |  | struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *tree,  | 
1622  |  |                const char *name,  | 
1623  |  |                const char *uuid,  | 
1624  |  |                uint32_t major,  | 
1625  |  |                uint32_t minor,  | 
1626  |  |                int read_only,  | 
1627  |  |                int clear_inactive,  | 
1628  |  |                void *context,  | 
1629  |  |                uint16_t udev_flags);  | 
1630  |  |  | 
1631  |  | /*  | 
1632  |  |  * Search for a node in the tree.  | 
1633  |  |  * Set major and minor to 0 or uuid to NULL to get the root node.  | 
1634  |  |  */  | 
1635  |  | struct dm_tree_node *dm_tree_find_node(struct dm_tree *tree,  | 
1636  |  |                uint32_t major,  | 
1637  |  |                uint32_t minor);  | 
1638  |  | struct dm_tree_node *dm_tree_find_node_by_uuid(struct dm_tree *tree,  | 
1639  |  |                  const char *uuid);  | 
1640  |  |  | 
1641  |  | /*  | 
1642  |  |  * Use this to walk through all children of a given node.  | 
1643  |  |  * Set handle to NULL in first call.  | 
1644  |  |  * Returns NULL after the last child.  | 
1645  |  |  * Set inverted to use inverted tree.  | 
1646  |  |  */  | 
1647  |  | struct dm_tree_node *dm_tree_next_child(void **handle,  | 
1648  |  |           const struct dm_tree_node *parent,  | 
1649  |  |           uint32_t inverted);  | 
1650  |  |  | 
1651  |  | /*  | 
1652  |  |  * Get properties of a node.  | 
1653  |  |  */  | 
1654  |  | const char *dm_tree_node_get_name(const struct dm_tree_node *node);  | 
1655  |  | const char *dm_tree_node_get_uuid(const struct dm_tree_node *node);  | 
1656  |  | const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node);  | 
1657  |  | void *dm_tree_node_get_context(const struct dm_tree_node *node);  | 
1658  |  | /*  | 
1659  |  |  * Returns  0 when node size and its children is unchanged.  | 
1660  |  |  * Returns  1 when node or any of its children has increased size.  | 
1661  |  |  * Returns -1 when node or any of its children has reduced size.  | 
1662  |  |  */  | 
1663  |  | int dm_tree_node_size_changed(const struct dm_tree_node *dnode);  | 
1664  |  |  | 
1665  |  | /*  | 
1666  |  |  * Returns the number of children of the given node (excluding the root node).  | 
1667  |  |  * Set inverted for the number of parents.  | 
1668  |  |  */  | 
1669  |  | int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted);  | 
1670  |  |  | 
1671  |  | /*  | 
1672  |  |  * Deactivate a device plus all dependencies.  | 
1673  |  |  * Ignores devices that don't have a uuid starting with uuid_prefix.  | 
1674  |  |  */  | 
1675  |  | int dm_tree_deactivate_children(struct dm_tree_node *dnode,  | 
1676  |  |         const char *uuid_prefix,  | 
1677  |  |         size_t uuid_prefix_len);  | 
1678  |  | /*  | 
1679  |  |  * Preload/create a device plus all dependencies.  | 
1680  |  |  * Ignores devices that don't have a uuid starting with uuid_prefix.  | 
1681  |  |  */  | 
1682  |  | int dm_tree_preload_children(struct dm_tree_node *dnode,  | 
1683  |  |            const char *uuid_prefix,  | 
1684  |  |            size_t uuid_prefix_len);  | 
1685  |  |  | 
1686  |  | /*  | 
1687  |  |  * Resume a device plus all dependencies.  | 
1688  |  |  * Ignores devices that don't have a uuid starting with uuid_prefix.  | 
1689  |  |  */  | 
1690  |  | int dm_tree_activate_children(struct dm_tree_node *dnode,  | 
1691  |  |             const char *uuid_prefix,  | 
1692  |  |             size_t uuid_prefix_len);  | 
1693  |  |  | 
1694  |  | /*  | 
1695  |  |  * Suspend a device plus all dependencies.  | 
1696  |  |  * Ignores devices that don't have a uuid starting with uuid_prefix.  | 
1697  |  |  */  | 
1698  |  | int dm_tree_suspend_children(struct dm_tree_node *dnode,  | 
1699  |  |            const char *uuid_prefix,  | 
1700  |  |            size_t uuid_prefix_len);  | 
1701  |  |  | 
1702  |  | /*  | 
1703  |  |  * Skip the filesystem sync when suspending.  | 
1704  |  |  * Does nothing with other functions.  | 
1705  |  |  * Use this when no snapshots are involved.  | 
1706  |  |  */  | 
1707  |  | void dm_tree_skip_lockfs(struct dm_tree_node *dnode);  | 
1708  |  |  | 
1709  |  | /*  | 
1710  |  |  * Set the 'noflush' flag when suspending devices.  | 
1711  |  |  * If the kernel supports it, instead of erroring outstanding I/O that  | 
1712  |  |  * cannot be completed, the I/O is queued and resubmitted when the  | 
1713  |  |  * device is resumed.  This affects multipath devices when all paths  | 
1714  |  |  * have failed and queue_if_no_path is set, and mirror devices when  | 
1715  |  |  * block_on_error is set and the mirror log has failed.  | 
1716  |  |  */  | 
1717  |  | void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode);  | 
1718  |  |  | 
1719  |  | /*  | 
1720  |  |  * Retry removal of each device if not successful.  | 
1721  |  |  */  | 
1722  |  | void dm_tree_retry_remove(struct dm_tree_node *dnode);  | 
1723  |  |  | 
1724  |  | /*  | 
1725  |  |  * Is the uuid prefix present in the tree?  | 
1726  |  |  * Only returns 0 if every node was checked successfully.  | 
1727  |  |  * Returns 1 if the tree walk has to be aborted.  | 
1728  |  |  */  | 
1729  |  | int dm_tree_children_use_uuid(struct dm_tree_node *dnode,  | 
1730  |  |             const char *uuid_prefix,  | 
1731  |  |             size_t uuid_prefix_len);  | 
1732  |  |  | 
1733  |  | /*  | 
1734  |  |  * Construct tables for new nodes before activating them.  | 
1735  |  |  */  | 
1736  |  | int dm_tree_node_add_snapshot_origin_target(struct dm_tree_node *dnode,  | 
1737  |  |               uint64_t size,  | 
1738  |  |               const char *origin_uuid);  | 
1739  |  | int dm_tree_node_add_snapshot_target(struct dm_tree_node *node,  | 
1740  |  |              uint64_t size,  | 
1741  |  |              const char *origin_uuid,  | 
1742  |  |              const char *cow_uuid,  | 
1743  |  |              int persistent,  | 
1744  |  |              uint32_t chunk_size);  | 
1745  |  | int dm_tree_node_add_snapshot_merge_target(struct dm_tree_node *node,  | 
1746  |  |              uint64_t size,  | 
1747  |  |              const char *origin_uuid,  | 
1748  |  |              const char *cow_uuid,  | 
1749  |  |              const char *merge_uuid,  | 
1750  |  |              uint32_t chunk_size);  | 
1751  |  | int dm_tree_node_add_error_target(struct dm_tree_node *node,  | 
1752  |  |           uint64_t size);  | 
1753  |  | int dm_tree_node_add_zero_target(struct dm_tree_node *node,  | 
1754  |  |          uint64_t size);  | 
1755  |  | int dm_tree_node_add_linear_target(struct dm_tree_node *node,  | 
1756  |  |            uint64_t size);  | 
1757  |  | int dm_tree_node_add_striped_target(struct dm_tree_node *node,  | 
1758  |  |             uint64_t size,  | 
1759  |  |             uint32_t stripe_size);  | 
1760  |  |  | 
1761  |  | #define DM_CRYPT_IV_DEFAULT UINT64_C(-1)  /* iv_offset == seg offset */  | 
1762  |  | /*  | 
1763  |  |  * Function accepts one string in cipher specification  | 
1764  |  |  * (chainmode and iv should be NULL because included in cipher string)  | 
1765  |  |  *   or  | 
1766  |  |  * separate arguments which will be joined to "cipher-chainmode-iv"  | 
1767  |  |  */  | 
1768  |  | int dm_tree_node_add_crypt_target(struct dm_tree_node *node,  | 
1769  |  |           uint64_t size,  | 
1770  |  |           const char *cipher,  | 
1771  |  |           const char *chainmode,  | 
1772  |  |           const char *iv,  | 
1773  |  |           uint64_t iv_offset,  | 
1774  |  |           const char *key);  | 
1775  |  | int dm_tree_node_add_mirror_target(struct dm_tree_node *node,  | 
1776  |  |            uint64_t size);  | 
1777  |  |  | 
1778  |  | /* Mirror log flags */  | 
1779  |  | #define DM_NOSYNC   0x00000001  /* Known already in sync */  | 
1780  |  | #define DM_FORCESYNC    0x00000002  /* Force resync */  | 
1781  |  | #define DM_BLOCK_ON_ERROR 0x00000004  /* On error, suspend I/O */  | 
1782  |  | #define DM_CORELOG    0x00000008  /* In-memory log */  | 
1783  |  |  | 
1784  |  | int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,  | 
1785  |  |                uint32_t region_size,  | 
1786  |  |                unsigned clustered,  | 
1787  |  |                const char *log_uuid,  | 
1788  |  |                unsigned area_count,  | 
1789  |  |                uint32_t flags);  | 
1790  |  |  | 
1791  |  | int dm_tree_node_add_raid_target(struct dm_tree_node *node,  | 
1792  |  |          uint64_t size,  | 
1793  |  |          const char *raid_type,  | 
1794  |  |          uint32_t region_size,  | 
1795  |  |          uint32_t stripe_size,  | 
1796  |  |          uint64_t rebuilds,  | 
1797  |  |          uint64_t flags);  | 
1798  |  |  | 
1799  |  | /*  | 
1800  |  |  * Defines below are based on kernel's dm-cache.c defines  | 
1801  |  |  * DM_CACHE_MIN_DATA_BLOCK_SIZE (32 * 1024 >> SECTOR_SHIFT)  | 
1802  |  |  * DM_CACHE_MAX_DATA_BLOCK_SIZE (1024 * 1024 * 1024 >> SECTOR_SHIFT)  | 
1803  |  |  */  | 
1804  |  | #define DM_CACHE_MIN_DATA_BLOCK_SIZE (UINT32_C(64))  | 
1805  |  | #define DM_CACHE_MAX_DATA_BLOCK_SIZE (UINT32_C(2097152))  | 
1806  |  | /*  | 
1807  |  |  * Max supported size for cache pool metadata device.  | 
1808  |  |  * Limitation is hardcoded into the kernel and bigger device sizes  | 
1809  |  |  * are not accepted.  | 
1810  |  |  *  | 
1811  |  |  * Limit defined in drivers/md/dm-cache-metadata.h  | 
1812  |  |  */  | 
1813  |  | #define DM_CACHE_METADATA_MAX_SECTORS DM_THIN_METADATA_MAX_SECTORS  | 
1814  |  |  | 
1815  |  | /*  | 
1816  |  |  * Define number of elements in rebuild and writemostly arrays  | 
1817  |  |  * 'of struct dm_tree_node_raid_params'.  | 
1818  |  |  */  | 
1819  |  |  | 
1820  |  | struct dm_tree_node_raid_params { | 
1821  |  |   const char *raid_type;  | 
1822  |  |  | 
1823  |  |   uint32_t stripes;  | 
1824  |  |   uint32_t mirrors;  | 
1825  |  |   uint32_t region_size;  | 
1826  |  |   uint32_t stripe_size;  | 
1827  |  |  | 
1828  |  |   /*  | 
1829  |  |    * 'rebuilds' and 'writemostly' are bitfields that signify  | 
1830  |  |    * which devices in the array are to be rebuilt or marked  | 
1831  |  |    * writemostly.  The kernel supports up to 253 legs.  | 
1832  |  |    * We limit ourselves by choosing a lower value  | 
1833  |  |    * for DEFAULT_RAID{1}_MAX_IMAGES in defaults.h. | 
1834  |  |    */  | 
1835  |  |   uint64_t rebuilds;  | 
1836  |  |   uint64_t writemostly;  | 
1837  |  |   uint32_t writebehind;     /* I/Os (kernel default COUNTER_MAX / 2) */  | 
1838  |  |   uint32_t sync_daemon_sleep; /* ms (kernel default = 5sec) */  | 
1839  |  |   uint32_t max_recovery_rate; /* kB/sec/disk */  | 
1840  |  |   uint32_t min_recovery_rate; /* kB/sec/disk */  | 
1841  |  |   uint32_t stripe_cache;      /* sectors */  | 
1842  |  |  | 
1843  |  |   uint64_t flags;             /* [no]sync */  | 
1844  |  |   uint32_t reserved2;  | 
1845  |  | };  | 
1846  |  |  | 
1847  |  | /*  | 
1848  |  |  * Version 2 of above node raid params struct to keep API compatibility.  | 
1849  |  |  *  | 
1850  |  |  * Extended for more than 64 legs (max 253 in the MD kernel runtime!),  | 
1851  |  |  * delta_disks for disk add/remove reshaping,  | 
1852  |  |  * data_offset for out-of-place reshaping  | 
1853  |  |  * and data_copies for odd number of raid10 legs.  | 
1854  |  |  */  | 
1855  |  | #define RAID_BITMAP_SIZE 4 /* 4 * 64 bit elements in rebuilds/writemostly arrays */  | 
1856  |  | struct dm_tree_node_raid_params_v2 { | 
1857  |  |   const char *raid_type;  | 
1858  |  |  | 
1859  |  |   uint32_t stripes;  | 
1860  |  |   uint32_t mirrors;  | 
1861  |  |   uint32_t region_size;  | 
1862  |  |   uint32_t stripe_size;  | 
1863  |  |  | 
1864  |  |   int delta_disks; /* +/- number of disks to add/remove (reshaping) */  | 
1865  |  |   int data_offset; /* data offset to set (out-of-place reshaping) */  | 
1866  |  |  | 
1867  |  |   /*  | 
1868  |  |    * 'rebuilds' and 'writemostly' are bitfields that signify  | 
1869  |  |    * which devices in the array are to be rebuilt or marked  | 
1870  |  |    * writemostly.  The kernel supports up to 253 legs.  | 
1871  |  |    * We limit ourselves by choosing a lower value  | 
1872  |  |    * for DEFAULT_RAID_MAX_IMAGES.  | 
1873  |  |    */  | 
1874  |  |   uint64_t rebuilds[RAID_BITMAP_SIZE];  | 
1875  |  |   uint64_t writemostly[RAID_BITMAP_SIZE];  | 
1876  |  |   uint32_t writebehind;     /* I/Os (kernel default COUNTER_MAX / 2) */  | 
1877  |  |   uint32_t data_copies;     /* RAID # of data copies */  | 
1878  |  |   uint32_t sync_daemon_sleep; /* ms (kernel default = 5sec) */  | 
1879  |  |   uint32_t max_recovery_rate; /* kB/sec/disk */  | 
1880  |  |   uint32_t min_recovery_rate; /* kB/sec/disk */  | 
1881  |  |   uint32_t stripe_cache;      /* sectors */  | 
1882  |  |  | 
1883  |  |   uint64_t flags;             /* [no]sync */  | 
1884  |  | };  | 
1885  |  |  | 
1886  |  | int dm_tree_node_add_raid_target_with_params(struct dm_tree_node *node,  | 
1887  |  |                uint64_t size,  | 
1888  |  |                const struct dm_tree_node_raid_params *p);  | 
1889  |  |  | 
1890  |  | /* Version 2 API function taking dm_tree_node_raid_params_v2 for aforementioned extensions. */  | 
1891  |  | int dm_tree_node_add_raid_target_with_params_v2(struct dm_tree_node *node,  | 
1892  |  |             uint64_t size,  | 
1893  |  |             const struct dm_tree_node_raid_params_v2 *p);  | 
1894  |  |  | 
1895  |  | /* Cache feature_flags */  | 
1896  |  | #define DM_CACHE_FEATURE_WRITEBACK    0x00000001  | 
1897  |  | #define DM_CACHE_FEATURE_WRITETHROUGH 0x00000002  | 
1898  |  | #define DM_CACHE_FEATURE_PASSTHROUGH  0x00000004  | 
1899  |  | #define DM_CACHE_FEATURE_METADATA2    0x00000008 /* cache v1.10 */  | 
1900  |  | #define DM_CACHE_FEATURE_NO_DISCARD_PASSDOWN 0x00000010  | 
1901  |  |  | 
1902  |  | struct dm_config_node;  | 
1903  |  | /*  | 
1904  |  |  * Use for passing cache policy and all its args e.g.:  | 
1905  |  |  *  | 
1906  |  |  * policy_settings { | 
1907  |  |  *    migration_threshold=2048  | 
1908  |  |  *    sequential_threshold=100  | 
1909  |  |  *    ...  | 
1910  |  |  * }  | 
1911  |  |  *  | 
1912  |  |  * For policy without any parameters use NULL.  | 
1913  |  |  */  | 
1914  |  | int dm_tree_node_add_cache_target(struct dm_tree_node *node,  | 
1915  |  |           uint64_t size,  | 
1916  |  |           uint64_t feature_flags, /* DM_CACHE_FEATURE_* */  | 
1917  |  |           const char *metadata_uuid,  | 
1918  |  |           const char *data_uuid,  | 
1919  |  |           const char *origin_uuid,  | 
1920  |  |           const char *policy_name,  | 
1921  |  |           const struct dm_config_node *policy_settings,  | 
1922  |  |           uint32_t data_block_size);  | 
1923  |  |  | 
1924  |  | /*  | 
1925  |  |  * FIXME Add individual cache policy pairs  <key> = value, like:  | 
1926  |  |  * int dm_tree_node_add_cache_policy_arg(struct dm_tree_node *dnode,  | 
1927  |  |  *              const char *key, uint64_t value);  | 
1928  |  |  */  | 
1929  |  |  | 
1930  |  | /*  | 
1931  |  |  * Replicator operation mode  | 
1932  |  |  * Note: API for Replicator is not yet stable  | 
1933  |  |  */  | 
1934  |  | typedef enum dm_replicator_mode_e { | 
1935  |  |   DM_REPLICATOR_SYNC,     /* Synchronous replication */  | 
1936  |  |   DM_REPLICATOR_ASYNC_WARN,   /* Warn if async replicator is slow */  | 
1937  |  |   DM_REPLICATOR_ASYNC_STALL,    /* Stall replicator if not fast enough */  | 
1938  |  |   DM_REPLICATOR_ASYNC_DROP,   /* Drop sites out of sync */  | 
1939  |  |   DM_REPLICATOR_ASYNC_FAIL,   /* Fail replicator if slow */  | 
1940  |  |   NUM_DM_REPLICATOR_MODES  | 
1941  |  | } dm_replicator_mode_t;  | 
1942  |  |  | 
1943  |  | int dm_tree_node_add_replicator_target(struct dm_tree_node *node,  | 
1944  |  |                uint64_t size,  | 
1945  |  |                const char *rlog_uuid,  | 
1946  |  |                const char *rlog_type,  | 
1947  |  |                unsigned rsite_index,  | 
1948  |  |                dm_replicator_mode_t mode,  | 
1949  |  |                uint32_t async_timeout,  | 
1950  |  |                uint64_t fall_behind_data,  | 
1951  |  |                uint32_t fall_behind_ios);  | 
1952  |  |  | 
1953  |  | int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,  | 
1954  |  |              uint64_t size,  | 
1955  |  |              const char *replicator_uuid, /* Replicator control device */  | 
1956  |  |              uint64_t rdevice_index,  | 
1957  |  |              const char *rdev_uuid, /* Rimage device name/uuid */  | 
1958  |  |              unsigned rsite_index,  | 
1959  |  |              const char *slog_uuid,  | 
1960  |  |              uint32_t slog_flags,   /* Mirror log flags */  | 
1961  |  |              uint32_t slog_region_size);  | 
1962  |  | /* End of Replicator API */  | 
1963  |  |  | 
1964  |  | /*  | 
1965  |  |  * FIXME: Defines below are based on kernel's dm-thin.c defines  | 
1966  |  |  * DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)  | 
1967  |  |  * DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)  | 
1968  |  |  */  | 
1969  |  | #define DM_THIN_MIN_DATA_BLOCK_SIZE (UINT32_C(128))  | 
1970  |  | #define DM_THIN_MAX_DATA_BLOCK_SIZE (UINT32_C(2097152))  | 
1971  |  | /*  | 
1972  |  |  * Max supported size for thin pool metadata device (17045913600 bytes)  | 
1973  |  |  * drivers/md/dm-thin-metadata.h THIN_METADATA_MAX_SECTORS  | 
1974  |  |  * But here DM_THIN_MAX_METADATA_SIZE got defined incorrectly  | 
1975  |  |  * Correct size is (UINT64_C(255) * ((1 << 14) - 64) * (4096 / (1 << 9)))  | 
1976  |  |  */  | 
1977  |  | #define DM_THIN_MAX_METADATA_SIZE   (UINT64_C(255) * (1 << 14) * (4096 / (1 << 9)) - 256 * 1024)  | 
1978  |  |  | 
1979  |  | int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,  | 
1980  |  |               uint64_t size,  | 
1981  |  |               uint64_t transaction_id,  | 
1982  |  |               const char *metadata_uuid,  | 
1983  |  |               const char *pool_uuid,  | 
1984  |  |               uint32_t data_block_size,  | 
1985  |  |               uint64_t low_water_mark,  | 
1986  |  |               unsigned skip_block_zeroing);  | 
1987  |  |  | 
1988  |  | int dm_tree_node_add_thin_pool_target_v1(struct dm_tree_node *node,  | 
1989  |  |            uint64_t size,  | 
1990  |  |            uint64_t transaction_id,  | 
1991  |  |            const char *metadata_uuid,  | 
1992  |  |            const char *pool_uuid,  | 
1993  |  |            uint32_t data_block_size,  | 
1994  |  |            uint64_t low_water_mark,  | 
1995  |  |            unsigned skip_block_zeroing,  | 
1996  |  |            unsigned crop_metadata);  | 
1997  |  |  | 
1998  |  | /* Supported messages for thin provision target */  | 
1999  |  | typedef enum dm_thin_message_e { | 
2000  |  |   DM_THIN_MESSAGE_CREATE_SNAP,    /* device_id, origin_id */  | 
2001  |  |   DM_THIN_MESSAGE_CREATE_THIN,    /* device_id */  | 
2002  |  |   DM_THIN_MESSAGE_DELETE,     /* device_id */  | 
2003  |  |   DM_THIN_MESSAGE_SET_TRANSACTION_ID, /* current_id, new_id */  | 
2004  |  |   DM_THIN_MESSAGE_RESERVE_METADATA_SNAP,  /* target version >= 1.1 */  | 
2005  |  |   DM_THIN_MESSAGE_RELEASE_METADATA_SNAP,  /* target version >= 1.1 */  | 
2006  |  | } dm_thin_message_t;  | 
2007  |  |  | 
2008  |  | int dm_tree_node_add_thin_pool_message(struct dm_tree_node *node,  | 
2009  |  |                dm_thin_message_t type,  | 
2010  |  |                uint64_t id1, uint64_t id2);  | 
2011  |  |  | 
2012  |  | /*  | 
2013  |  |  * Set thin pool discard features  | 
2014  |  |  *   ignore      - Disable support for discards  | 
2015  |  |  *   no_passdown - Don't pass discards down to underlying data device,  | 
2016  |  |  *                 just remove the mapping  | 
2017  |  |  * Feature is available since version 1.1 of the thin target.  | 
2018  |  |  */  | 
2019  |  | int dm_tree_node_set_thin_pool_discard(struct dm_tree_node *node,  | 
2020  |  |                unsigned ignore,  | 
2021  |  |                unsigned no_passdown);  | 
2022  |  | /*  | 
2023  |  |  * Set error if no space, instead of queueing for thin pool.  | 
2024  |  |  */  | 
2025  |  | int dm_tree_node_set_thin_pool_error_if_no_space(struct dm_tree_node *node,  | 
2026  |  |              unsigned error_if_no_space);  | 
2027  |  | /* Start thin pool with metadata in read-only mode */  | 
2028  |  | int dm_tree_node_set_thin_pool_read_only(struct dm_tree_node *node,  | 
2029  |  |            unsigned read_only);  | 
2030  |  | /*  | 
2031  |  |  * FIXME: Defines below are based on kernel's dm-thin.c defines  | 
2032  |  |  * MAX_DEV_ID ((1 << 24) - 1)  | 
2033  |  |  */  | 
2034  |  | #define DM_THIN_MAX_DEVICE_ID (UINT32_C((1 << 24) - 1))  | 
2035  |  | int dm_tree_node_add_thin_target(struct dm_tree_node *node,  | 
2036  |  |          uint64_t size,  | 
2037  |  |          const char *pool_uuid,  | 
2038  |  |          uint32_t device_id);  | 
2039  |  |  | 
2040  |  | int dm_tree_node_set_thin_external_origin(struct dm_tree_node *node,  | 
2041  |  |             const char *external_uuid);  | 
2042  |  |  | 
2043  |  | void dm_tree_node_set_udev_flags(struct dm_tree_node *node, uint16_t udev_flags);  | 
2044  |  |  | 
2045  |  | void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,  | 
2046  |  |               struct dm_tree_node *presuspend_node);  | 
2047  |  |  | 
2048  |  | int dm_tree_node_add_target_area(struct dm_tree_node *node,  | 
2049  |  |          const char *dev_name,  | 
2050  |  |          const char *uuid,  | 
2051  |  |          uint64_t offset);  | 
2052  |  |  | 
2053  |  | /*  | 
2054  |  |  * Only for temporarily-missing raid devices where changes are tracked.  | 
2055  |  |  */  | 
2056  |  | int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset);  | 
2057  |  |  | 
2058  |  | /*  | 
2059  |  |  * Set readahead (in sectors) after loading the node.  | 
2060  |  |  */  | 
2061  |  | void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,  | 
2062  |  |          uint32_t read_ahead,  | 
2063  |  |          uint32_t read_ahead_flags);  | 
2064  |  |  | 
2065  |  | /*  | 
2066  |  |  * Set node callback hook before de/activation.  | 
2067  |  |  * Callback is called before 'activation' of node for activation tree,  | 
2068  |  |  * or 'deactivation' of node for deactivation tree.  | 
2069  |  |  */  | 
2070  |  | typedef enum dm_node_callback_e { | 
2071  |  |   DM_NODE_CALLBACK_PRELOADED,   /* Node has preload deps */  | 
2072  |  |   DM_NODE_CALLBACK_DEACTIVATED, /* Node is deactivated */  | 
2073  |  | } dm_node_callback_t;  | 
2074  |  | typedef int (*dm_node_callback_fn) (struct dm_tree_node *node,  | 
2075  |  |             dm_node_callback_t type, void *cb_data);  | 
2076  |  | void dm_tree_node_set_callback(struct dm_tree_node *node,  | 
2077  |  |              dm_node_callback_fn cb, void *cb_data);  | 
2078  |  |  | 
2079  |  | void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie);  | 
2080  |  | uint32_t dm_tree_get_cookie(struct dm_tree_node *node);  | 
2081  |  |  | 
2082  |  | /*****************************************************************************  | 
2083  |  |  * Library functions  | 
2084  |  |  *****************************************************************************/  | 
2085  |  |  | 
2086  |  | /*******************  | 
2087  |  |  * Memory management  | 
2088  |  |  *******************/  | 
2089  |  |  | 
2090  |  | /*  | 
2091  |  |  * Never use these functions directly - use the macros following instead.  | 
2092  |  |  */  | 
2093  |  | void *dm_malloc_wrapper(size_t s, const char *file, int line)  | 
2094  |  |   __attribute__((__malloc__)) __attribute__((__warn_unused_result__));  | 
2095  |  | void *dm_malloc_aligned_wrapper(size_t s, size_t a, const char *file, int line)  | 
2096  |  |   __attribute__((__malloc__)) __attribute__((__warn_unused_result__));  | 
2097  |  | void *dm_zalloc_wrapper(size_t s, const char *file, int line)  | 
2098  |  |   __attribute__((__malloc__)) __attribute__((__warn_unused_result__));  | 
2099  |  | void *dm_realloc_wrapper(void *p, unsigned int s, const char *file, int line)  | 
2100  |  |   __attribute__((__warn_unused_result__));  | 
2101  |  | void dm_free_wrapper(void *ptr);  | 
2102  |  | char *dm_strdup_wrapper(const char *s, const char *file, int line)  | 
2103  |  |   __attribute__((__warn_unused_result__));  | 
2104  |  | int dm_dump_memory_wrapper(void);  | 
2105  |  | void dm_bounds_check_wrapper(void);  | 
2106  |  |  | 
2107  | 0  | #define dm_malloc(s) dm_malloc_wrapper((s), __FILE__, __LINE__)  | 
2108  |  | #define dm_malloc_aligned(s, a) dm_malloc_aligned_wrapper((s), (a),  __FILE__, __LINE__)  | 
2109  | 0  | #define dm_zalloc(s) dm_zalloc_wrapper((s), __FILE__, __LINE__)  | 
2110  | 0  | #define dm_strdup(s) dm_strdup_wrapper((s), __FILE__, __LINE__)  | 
2111  | 11.2k  | #define dm_free(p) dm_free_wrapper(p)  | 
2112  |  | #define dm_realloc(p, s) dm_realloc_wrapper((p), (s), __FILE__, __LINE__)  | 
2113  | 0  | #define dm_dump_memory() dm_dump_memory_wrapper()  | 
2114  | 0  | #define dm_bounds_check() dm_bounds_check_wrapper()  | 
2115  |  |  | 
2116  |  | /*  | 
2117  |  |  * The pool allocator is useful when you are going to allocate  | 
2118  |  |  * lots of memory, use the memory for a bit, and then free the  | 
2119  |  |  * memory in one go.  A surprising amount of code has this usage  | 
2120  |  |  * profile.  | 
2121  |  |  *  | 
2122  |  |  * You should think of the pool as an infinite, contiguous chunk  | 
2123  |  |  * of memory.  The front of this chunk of memory contains  | 
2124  |  |  * allocated objects, the second half is free.  dm_pool_alloc grabs  | 
2125  |  |  * the next 'size' bytes from the free half, in effect moving it  | 
2126  |  |  * into the allocated half.  This operation is very efficient.  | 
2127  |  |  *  | 
2128  |  |  * dm_pool_free frees the allocated object *and* all objects  | 
2129  |  |  * allocated after it.  It is important to note this semantic  | 
2130  |  |  * difference from malloc/free.  This is also extremely  | 
2131  |  |  * efficient, since a single dm_pool_free can dispose of a large  | 
2132  |  |  * complex object.  | 
2133  |  |  *  | 
2134  |  |  * dm_pool_destroy frees all allocated memory.  | 
2135  |  |  *  | 
2136  |  |  * eg, If you are building a binary tree in your program, and  | 
2137  |  |  * know that you are only ever going to insert into your tree,  | 
2138  |  |  * and not delete (eg, maintaining a symbol table for a  | 
2139  |  |  * compiler).  You can create yourself a pool, allocate the nodes  | 
2140  |  |  * from it, and when the tree becomes redundant call dm_pool_destroy  | 
2141  |  |  * (no nasty iterating through the tree to free nodes).  | 
2142  |  |  *  | 
2143  |  |  * eg, On the other hand if you wanted to repeatedly insert and  | 
2144  |  |  * remove objects into the tree, you would be better off  | 
2145  |  |  * allocating the nodes from a free list; you cannot free a  | 
2146  |  |  * single arbitrary node with pool.  | 
2147  |  |  */  | 
2148  |  |  | 
2149  |  | struct dm_pool;  | 
2150  |  |  | 
2151  |  | /* constructor and destructor */  | 
2152  |  | struct dm_pool *dm_pool_create(const char *name, size_t chunk_hint)  | 
2153  |  |   __attribute__((__warn_unused_result__));  | 
2154  |  | void dm_pool_destroy(struct dm_pool *p);  | 
2155  |  |  | 
2156  |  | /* simple allocation/free routines */  | 
2157  |  | void *dm_pool_alloc(struct dm_pool *p, size_t s)  | 
2158  |  |   __attribute__((__warn_unused_result__));  | 
2159  |  | void *dm_pool_alloc_aligned(struct dm_pool *p, size_t s, unsigned alignment)  | 
2160  |  |   __attribute__((__warn_unused_result__));  | 
2161  |  | void dm_pool_empty(struct dm_pool *p);  | 
2162  |  | void dm_pool_free(struct dm_pool *p, void *ptr);  | 
2163  |  |  | 
2164  |  | /*  | 
2165  |  |  * To aid debugging, a pool can be locked. Any modifications made  | 
2166  |  |  * to the content of the pool while it is locked can be detected.  | 
2167  |  |  * Default compilation is using a crc checksum to notice modifications.  | 
2168  |  |  * The pool locking is using the mprotect with the compilation flag  | 
2169  |  |  * DEBUG_ENFORCE_POOL_LOCKING to enforce the memory protection.  | 
2170  |  |  */  | 
2171  |  | /* query pool lock status */  | 
2172  |  | int dm_pool_locked(struct dm_pool *p);  | 
2173  |  | /* mark pool as locked */  | 
2174  |  | int dm_pool_lock(struct dm_pool *p, int crc)  | 
2175  |  |   __attribute__((__warn_unused_result__));  | 
2176  |  | /* mark pool as unlocked */  | 
2177  |  | int dm_pool_unlock(struct dm_pool *p, int crc)  | 
2178  |  |   __attribute__((__warn_unused_result__));  | 
2179  |  |  | 
2180  |  | /*  | 
2181  |  |  * Object building routines:  | 
2182  |  |  *  | 
2183  |  |  * These allow you to 'grow' an object, useful for  | 
2184  |  |  * building strings, or filling in dynamic  | 
2185  |  |  * arrays.  | 
2186  |  |  *  | 
2187  |  |  * It's probably best explained with an example:  | 
2188  |  |  *  | 
2189  |  |  * char *build_string(struct dm_pool *mem)  | 
2190  |  |  * { | 
2191  |  |  *      int i;  | 
2192  |  |  *      char buffer[16];  | 
2193  |  |  *  | 
2194  |  |  *      if (!dm_pool_begin_object(mem, 128))  | 
2195  |  |  *              return NULL;  | 
2196  |  |  *  | 
2197  |  |  *      for (i = 0; i < 50; i++) { | 
2198  |  |  *              snprintf(buffer, sizeof(buffer), "%d, ", i);  | 
2199  |  |  *              if (!dm_pool_grow_object(mem, buffer, 0))  | 
2200  |  |  *                      goto bad;  | 
2201  |  |  *      }  | 
2202  |  |  *  | 
2203  |  |  *  // add null  | 
2204  |  |  *      if (!dm_pool_grow_object(mem, "\0", 1))  | 
2205  |  |  *              goto bad;  | 
2206  |  |  *  | 
2207  |  |  *      return dm_pool_end_object(mem);  | 
2208  |  |  *  | 
2209  |  |  * bad:  | 
2210  |  |  *  | 
2211  |  |  *      dm_pool_abandon_object(mem);  | 
2212  |  |  *      return NULL;  | 
2213  |  |  *}  | 
2214  |  |  *  | 
2215  |  |  * So start an object by calling dm_pool_begin_object  | 
2216  |  |  * with a guess at the final object size - if in  | 
2217  |  |  * doubt make the guess too small.  | 
2218  |  |  *  | 
2219  |  |  * Then append chunks of data to your object with  | 
2220  |  |  * dm_pool_grow_object.  Finally get your object with  | 
2221  |  |  * a call to dm_pool_end_object.  | 
2222  |  |  *  | 
2223  |  |  * Setting delta to 0 means it will use strlen(extra).  | 
2224  |  |  */  | 
2225  |  | int dm_pool_begin_object(struct dm_pool *p, size_t hint);  | 
2226  |  | int dm_pool_grow_object(struct dm_pool *p, const void *extra, size_t delta);  | 
2227  |  | void *dm_pool_end_object(struct dm_pool *p);  | 
2228  |  | void dm_pool_abandon_object(struct dm_pool *p);  | 
2229  |  |  | 
2230  |  | /* utilities */  | 
2231  |  | char *dm_pool_strdup(struct dm_pool *p, const char *str)  | 
2232  |  |   __attribute__((__warn_unused_result__));  | 
2233  |  | char *dm_pool_strndup(struct dm_pool *p, const char *str, size_t n)  | 
2234  |  |   __attribute__((__warn_unused_result__));  | 
2235  |  | void *dm_pool_zalloc(struct dm_pool *p, size_t s)  | 
2236  |  |   __attribute__((__warn_unused_result__));  | 
2237  |  |  | 
2238  |  | /******************  | 
2239  |  |  * bitset functions  | 
2240  |  |  ******************/  | 
2241  |  |  | 
2242  |  | typedef uint32_t *dm_bitset_t;  | 
2243  |  |  | 
2244  |  | dm_bitset_t dm_bitset_create(struct dm_pool *mem, unsigned num_bits);  | 
2245  |  | void dm_bitset_destroy(dm_bitset_t bs);  | 
2246  |  |  | 
2247  |  | int dm_bitset_equal(dm_bitset_t in1, dm_bitset_t in2);  | 
2248  |  |  | 
2249  |  | void dm_bit_and(dm_bitset_t out, dm_bitset_t in1, dm_bitset_t in2);  | 
2250  |  | void dm_bit_union(dm_bitset_t out, dm_bitset_t in1, dm_bitset_t in2);  | 
2251  |  | int dm_bit_get_first(dm_bitset_t bs);  | 
2252  |  | int dm_bit_get_next(dm_bitset_t bs, int last_bit);  | 
2253  |  | int dm_bit_get_last(dm_bitset_t bs);  | 
2254  |  | int dm_bit_get_prev(dm_bitset_t bs, int last_bit);  | 
2255  |  |  | 
2256  | 0  | #define DM_BITS_PER_INT ((unsigned)sizeof(int) * CHAR_BIT)  | 
2257  |  |  | 
2258  |  | #define dm_bit(bs, i) \  | 
2259  | 0  |    ((bs)[((i) / DM_BITS_PER_INT) + 1] & (0x1 << ((i) & (DM_BITS_PER_INT - 1))))  | 
2260  |  |  | 
2261  |  | #define dm_bit_set(bs, i) \  | 
2262  | 0  |    ((bs)[((i) / DM_BITS_PER_INT) + 1] |= (0x1 << ((i) & (DM_BITS_PER_INT - 1))))  | 
2263  |  |  | 
2264  |  | #define dm_bit_clear(bs, i) \  | 
2265  |  |    ((bs)[((i) / DM_BITS_PER_INT) + 1] &= ~(0x1 << ((i) & (DM_BITS_PER_INT - 1))))  | 
2266  |  |  | 
2267  |  | #define dm_bit_set_all(bs) \  | 
2268  |  |    memset((bs) + 1, -1, ((*(bs) / DM_BITS_PER_INT) + 1) * sizeof(int))  | 
2269  |  |  | 
2270  |  | #define dm_bit_clear_all(bs) \  | 
2271  |  |    memset((bs) + 1, 0, ((*(bs) / DM_BITS_PER_INT) + 1) * sizeof(int))  | 
2272  |  |  | 
2273  |  | #define dm_bit_copy(bs1, bs2) \  | 
2274  |  |    memcpy((bs1) + 1, (bs2) + 1, ((*(bs2) / DM_BITS_PER_INT) + 1) * sizeof(int))  | 
2275  |  |  | 
2276  |  | /*  | 
2277  |  |  * Parse a string representation of a bitset into a dm_bitset_t. The  | 
2278  |  |  * notation used is identical to the kernel bitmap parser (cpuset etc.)  | 
2279  |  |  * and supports both lists ("1,2,3") and ranges ("1-2,5-8"). If the mem | 
2280  |  |  * parameter is NULL memory for the bitset will be allocated using  | 
2281  |  |  * dm_malloc(). Otherwise the bitset will be allocated using the supplied  | 
2282  |  |  * dm_pool.  | 
2283  |  |  */  | 
2284  |  | dm_bitset_t dm_bitset_parse_list(const char *str, struct dm_pool *mem,  | 
2285  |  |          size_t min_num_bits);  | 
2286  |  |  | 
2287  |  | /* Returns number of set bits */  | 
2288  |  | static inline unsigned hweight32(uint32_t i)  | 
2289  | 0  | { | 
2290  | 0  |   unsigned r = (i & 0x55555555) + ((i >> 1) & 0x55555555);  | 
2291  | 0  | 
  | 
2292  | 0  |   r =    (r & 0x33333333) + ((r >>  2) & 0x33333333);  | 
2293  | 0  |   r =    (r & 0x0F0F0F0F) + ((r >>  4) & 0x0F0F0F0F);  | 
2294  | 0  |   r =    (r & 0x00FF00FF) + ((r >>  8) & 0x00FF00FF);  | 
2295  | 0  |   return (r & 0x0000FFFF) + ((r >> 16) & 0x0000FFFF);  | 
2296  | 0  | } Unexecuted instantiation: libdm-common.c:hweight32 Unexecuted instantiation: libdm-file.c:hweight32 Unexecuted instantiation: libdm-string.c:hweight32 Unexecuted instantiation: dbg_malloc.c:hweight32 Unexecuted instantiation: pool.c:hweight32 Unexecuted instantiation: libdm-iface.c:hweight32 Unexecuted instantiation: bitset.c:hweight32 Unexecuted instantiation: list.c:hweight32 Unexecuted instantiation: libdm-timestamp.c:hweight32  | 
2297  |  |  | 
2298  |  | /****************  | 
2299  |  |  * hash functions  | 
2300  |  |  ****************/  | 
2301  |  |  | 
2302  |  | struct dm_hash_table;  | 
2303  |  | struct dm_hash_node;  | 
2304  |  |  | 
2305  |  | typedef void (*dm_hash_iterate_fn) (void *data);  | 
2306  |  |  | 
2307  |  | struct dm_hash_table *dm_hash_create(unsigned size_hint)  | 
2308  |  |   __attribute__((__warn_unused_result__));  | 
2309  |  | void dm_hash_destroy(struct dm_hash_table *t);  | 
2310  |  | void dm_hash_wipe(struct dm_hash_table *t);  | 
2311  |  |  | 
2312  |  | void *dm_hash_lookup(struct dm_hash_table *t, const char *key);  | 
2313  |  | int dm_hash_insert(struct dm_hash_table *t, const char *key, void *data);  | 
2314  |  | void dm_hash_remove(struct dm_hash_table *t, const char *key);  | 
2315  |  |  | 
2316  |  | void *dm_hash_lookup_binary(struct dm_hash_table *t, const void *key, uint32_t len);  | 
2317  |  | int dm_hash_insert_binary(struct dm_hash_table *t, const void *key, uint32_t len,  | 
2318  |  |         void *data);  | 
2319  |  | void dm_hash_remove_binary(struct dm_hash_table *t, const void *key, uint32_t len);  | 
2320  |  |  | 
2321  |  | unsigned dm_hash_get_num_entries(struct dm_hash_table *t);  | 
2322  |  | void dm_hash_iter(struct dm_hash_table *t, dm_hash_iterate_fn f);  | 
2323  |  |  | 
2324  |  | char *dm_hash_get_key(struct dm_hash_table *t, struct dm_hash_node *n);  | 
2325  |  | void *dm_hash_get_data(struct dm_hash_table *t, struct dm_hash_node *n);  | 
2326  |  | struct dm_hash_node *dm_hash_get_first(struct dm_hash_table *t);  | 
2327  |  | struct dm_hash_node *dm_hash_get_next(struct dm_hash_table *t, struct dm_hash_node *n);  | 
2328  |  |  | 
2329  |  | /*  | 
2330  |  |  * dm_hash_insert() replaces the value of an existing  | 
2331  |  |  * entry with a matching key if one exists.  Otherwise  | 
2332  |  |  * it adds a new entry.  | 
2333  |  |  *  | 
2334  |  |  * dm_hash_insert_with_val() inserts a new entry if  | 
2335  |  |  * another entry with the same key already exists.  | 
2336  |  |  * val_len is the size of the data being inserted.  | 
2337  |  |  *  | 
2338  |  |  * If two entries with the same key exist,  | 
2339  |  |  * (added using dm_hash_insert_allow_multiple), then:  | 
2340  |  |  * . dm_hash_lookup() returns the first one it finds, and  | 
2341  |  |  *   dm_hash_lookup_with_val() returns the one with a matching  | 
2342  |  |  *   val_len/val.  | 
2343  |  |  * . dm_hash_remove() removes the first one it finds, and  | 
2344  |  |  *   dm_hash_remove_with_val() removes the one with a matching  | 
2345  |  |  *   val_len/val.  | 
2346  |  |  *  | 
2347  |  |  * If a single entry with a given key exists, and it has  | 
2348  |  |  * zero val_len, then:  | 
2349  |  |  * . dm_hash_lookup() returns it  | 
2350  |  |  * . dm_hash_lookup_with_val(val_len=0) returns it  | 
2351  |  |  * . dm_hash_remove() removes it  | 
2352  |  |  * . dm_hash_remove_with_val(val_len=0) removes it  | 
2353  |  |  *  | 
2354  |  |  * dm_hash_lookup_with_count() is a single call that will  | 
2355  |  |  * both lookup a key's value and check if there is more  | 
2356  |  |  * than one entry with the given key.  | 
2357  |  |  *  | 
2358  |  |  * (It is not meant to retrieve all the entries with the  | 
2359  |  |  * given key.  In the common case where a single entry exists  | 
2360  |  |  * for the key, it is useful to have a single call that will  | 
2361  |  |  * both look up the value and indicate if multiple values  | 
2362  |  |  * exist for the key.)  | 
2363  |  |  *  | 
2364  |  |  * dm_hash_lookup_with_count:  | 
2365  |  |  * . If no entries exist, the function returns NULL, and  | 
2366  |  |  *   the count is set to 0.  | 
2367  |  |  * . If only one entry exists, the value of that entry is  | 
2368  |  |  *   returned and count is set to 1.  | 
2369  |  |  * . If N entries exists, the value of the first entry is  | 
2370  |  |  *   returned and count is set to N.  | 
2371  |  |  */  | 
2372  |  |  | 
2373  |  | void *dm_hash_lookup_with_val(struct dm_hash_table *t, const char *key,  | 
2374  |  |                               const void *val, uint32_t val_len);  | 
2375  |  | void dm_hash_remove_with_val(struct dm_hash_table *t, const char *key,  | 
2376  |  |                              const void *val, uint32_t val_len);  | 
2377  |  | int dm_hash_insert_allow_multiple(struct dm_hash_table *t, const char *key,  | 
2378  |  |                                   const void *val, uint32_t val_len);  | 
2379  |  | void *dm_hash_lookup_with_count(struct dm_hash_table *t, const char *key, int *count);  | 
2380  |  |  | 
2381  |  |  | 
2382  |  | #define dm_hash_iterate(v, h) \  | 
2383  |  |   for (v = dm_hash_get_first((h)); v; \  | 
2384  |  |        v = dm_hash_get_next((h), v))  | 
2385  |  |  | 
2386  |  | /****************  | 
2387  |  |  * list functions  | 
2388  |  |  ****************/  | 
2389  |  |  | 
2390  |  | /*  | 
2391  |  |  * A list consists of a list head plus elements.  | 
2392  |  |  * Each element has 'next' and 'previous' pointers.  | 
2393  |  |  * The list head's pointers point to the first and the last element.  | 
2394  |  |  */  | 
2395  |  |  | 
2396  |  | struct dm_list { | 
2397  |  |   struct dm_list *n, *p;  | 
2398  |  | };  | 
2399  |  |  | 
2400  |  | /*  | 
2401  |  |  * String list.  | 
2402  |  |  */  | 
2403  |  | struct dm_str_list { | 
2404  |  |   struct dm_list list;  | 
2405  |  |   const char *str;  | 
2406  |  | };  | 
2407  |  |  | 
2408  |  | /*  | 
2409  |  |  * Initialise a list before use.  | 
2410  |  |  * The list head's next and previous pointers point back to itself.  | 
2411  |  |  */  | 
2412  |  | #define DM_LIST_HEAD_INIT(name)  { &(name), &(name) } | 
2413  |  | #define DM_LIST_INIT(name)  struct dm_list name = DM_LIST_HEAD_INIT(name)  | 
2414  |  | void dm_list_init(struct dm_list *head);  | 
2415  |  |  | 
2416  |  | /*  | 
2417  |  |  * Insert an element before 'head'.  | 
2418  |  |  * If 'head' is the list head, this adds an element to the end of the list.  | 
2419  |  |  */  | 
2420  |  | void dm_list_add(struct dm_list *head, struct dm_list *elem);  | 
2421  |  |  | 
2422  |  | /*  | 
2423  |  |  * Insert an element after 'head'.  | 
2424  |  |  * If 'head' is the list head, this adds an element to the front of the list.  | 
2425  |  |  */  | 
2426  |  | void dm_list_add_h(struct dm_list *head, struct dm_list *elem);  | 
2427  |  |  | 
2428  |  | /*  | 
2429  |  |  * Delete an element from its list.  | 
2430  |  |  * Note that this doesn't change the element itself - it may still be safe  | 
2431  |  |  * to follow its pointers.  | 
2432  |  |  */  | 
2433  |  | void dm_list_del(struct dm_list *elem);  | 
2434  |  |  | 
2435  |  | /*  | 
2436  |  |  * Remove an element from existing list and insert before 'head'.  | 
2437  |  |  */  | 
2438  |  | void dm_list_move(struct dm_list *head, struct dm_list *elem);  | 
2439  |  |  | 
2440  |  | /*  | 
2441  |  |  * Join 'head1' to the end of 'head'.  | 
2442  |  |  */  | 
2443  |  | void dm_list_splice(struct dm_list *head, struct dm_list *head1);  | 
2444  |  |  | 
2445  |  | /*  | 
2446  |  |  * Is the list empty?  | 
2447  |  |  */  | 
2448  |  | int dm_list_empty(const struct dm_list *head);  | 
2449  |  |  | 
2450  |  | /*  | 
2451  |  |  * Is this the first element of the list?  | 
2452  |  |  */  | 
2453  |  | int dm_list_start(const struct dm_list *head, const struct dm_list *elem);  | 
2454  |  |  | 
2455  |  | /*  | 
2456  |  |  * Is this the last element of the list?  | 
2457  |  |  */  | 
2458  |  | int dm_list_end(const struct dm_list *head, const struct dm_list *elem);  | 
2459  |  |  | 
2460  |  | /*  | 
2461  |  |  * Return first element of the list or NULL if empty  | 
2462  |  |  */  | 
2463  |  | struct dm_list *dm_list_first(const struct dm_list *head);  | 
2464  |  |  | 
2465  |  | /*  | 
2466  |  |  * Return last element of the list or NULL if empty  | 
2467  |  |  */  | 
2468  |  | struct dm_list *dm_list_last(const struct dm_list *head);  | 
2469  |  |  | 
2470  |  | /*  | 
2471  |  |  * Return the previous element of the list, or NULL if we've reached the start.  | 
2472  |  |  */  | 
2473  |  | struct dm_list *dm_list_prev(const struct dm_list *head, const struct dm_list *elem);  | 
2474  |  |  | 
2475  |  | /*  | 
2476  |  |  * Return the next element of the list, or NULL if we've reached the end.  | 
2477  |  |  */  | 
2478  |  | struct dm_list *dm_list_next(const struct dm_list *head, const struct dm_list *elem);  | 
2479  |  |  | 
2480  |  | /*  | 
2481  |  |  * Given the address v of an instance of 'struct dm_list' called 'head'  | 
2482  |  |  * contained in a structure of type t, return the containing structure.  | 
2483  |  |  */  | 
2484  |  | #define dm_list_struct_base(v, t, head) \  | 
2485  | 0  |     ((t *)((char *)(v) - offsetof(t, head)))  | 
2486  |  |  | 
2487  |  | /*  | 
2488  |  |  * Given the address v of an instance of 'struct dm_list list' contained in  | 
2489  |  |  * a structure of type t, return the containing structure.  | 
2490  |  |  */  | 
2491  | 0  | #define dm_list_item(v, t) dm_list_struct_base((v), t, list)  | 
2492  |  |  | 
2493  |  | /*  | 
2494  |  |  * Given the address v of one known element e in a known structure of type t,  | 
2495  |  |  * return another element f.  | 
2496  |  |  */  | 
2497  |  | #define dm_struct_field(v, t, e, f) \  | 
2498  |  |     (((t *)((uintptr_t)(v) - offsetof(t, e))->f)  | 
2499  |  |  | 
2500  |  | /*  | 
2501  |  |  * Given the address v of a known element e in a known structure of type t,  | 
2502  |  |  * return the list head 'list'  | 
2503  |  |  */  | 
2504  |  | #define dm_list_head(v, t, e) dm_struct_field(v, t, e, list)  | 
2505  |  |  | 
2506  |  | /*  | 
2507  |  |  * Set v to each element of a list in turn.  | 
2508  |  |  */  | 
2509  |  | #define dm_list_iterate(v, head) \  | 
2510  | 0  |   for (v = (head)->n; v != head; v = v->n)  | 
2511  |  |  | 
2512  |  | /*  | 
2513  |  |  * Set v to each element in a list in turn, starting from the element  | 
2514  |  |  * in front of 'start'.  | 
2515  |  |  * You can use this to 'unwind' a list_iterate and back out actions on  | 
2516  |  |  * already-processed elements.  | 
2517  |  |  * If 'start' is 'head' it walks the list backwards.  | 
2518  |  |  */  | 
2519  |  | #define dm_list_uniterate(v, head, start) \  | 
2520  |  |   for (v = (start)->p; v != head; v = v->p)  | 
2521  |  |  | 
2522  |  | /*  | 
2523  |  |  * A safe way to walk a list and delete and free some elements along  | 
2524  |  |  * the way.  | 
2525  |  |  * t must be defined as a temporary variable of the same type as v.  | 
2526  |  |  */  | 
2527  |  | #define dm_list_iterate_safe(v, t, head) \  | 
2528  | 11.2k  |   for (v = (head)->n, t = v->n; v != head; v = t, t = v->n)  | 
2529  |  |  | 
2530  |  | /*  | 
2531  |  |  * Walk a list, setting 'v' in turn to the containing structure of each item.  | 
2532  |  |  * The containing structure should be the same type as 'v'.  | 
2533  |  |  * The 'struct dm_list' variable within the containing structure is 'field'.  | 
2534  |  |  */  | 
2535  |  | #define dm_list_iterate_items_gen(v, head, field) \  | 
2536  | 0  |   for (v = dm_list_struct_base((head)->n, __typeof__(*v), field); \  | 
2537  | 0  |        &v->field != (head); \  | 
2538  | 0  |        v = dm_list_struct_base(v->field.n, __typeof__(*v), field))  | 
2539  |  |  | 
2540  |  | /*  | 
2541  |  |  * Walk a list, setting 'v' in turn to the containing structure of each item.  | 
2542  |  |  * The containing structure should be the same type as 'v'.  | 
2543  |  |  * The list should be 'struct dm_list list' within the containing structure.  | 
2544  |  |  */  | 
2545  | 0  | #define dm_list_iterate_items(v, head) dm_list_iterate_items_gen(v, (head), list)  | 
2546  |  |  | 
2547  |  | /*  | 
2548  |  |  * Walk a list, setting 'v' in turn to the containing structure of each item.  | 
2549  |  |  * The containing structure should be the same type as 'v'.  | 
2550  |  |  * The 'struct dm_list' variable within the containing structure is 'field'.  | 
2551  |  |  * t must be defined as a temporary variable of the same type as v.  | 
2552  |  |  */  | 
2553  |  | #define dm_list_iterate_items_gen_safe(v, t, head, field) \  | 
2554  |  |   for (v = dm_list_struct_base((head)->n, __typeof__(*v), field), \  | 
2555  |  |        t = dm_list_struct_base(v->field.n, __typeof__(*v), field); \  | 
2556  |  |        &v->field != (head); \  | 
2557  |  |        v = t, t = dm_list_struct_base(v->field.n, __typeof__(*v), field))  | 
2558  |  | /*  | 
2559  |  |  * Walk a list, setting 'v' in turn to the containing structure of each item.  | 
2560  |  |  * The containing structure should be the same type as 'v'.  | 
2561  |  |  * The list should be 'struct dm_list list' within the containing structure.  | 
2562  |  |  * t must be defined as a temporary variable of the same type as v.  | 
2563  |  |  */  | 
2564  |  | #define dm_list_iterate_items_safe(v, t, head) \  | 
2565  |  |   dm_list_iterate_items_gen_safe(v, t, (head), list)  | 
2566  |  |  | 
2567  |  | /*  | 
2568  |  |  * Walk a list backwards, setting 'v' in turn to the containing structure  | 
2569  |  |  * of each item.  | 
2570  |  |  * The containing structure should be the same type as 'v'.  | 
2571  |  |  * The 'struct dm_list' variable within the containing structure is 'field'.  | 
2572  |  |  */  | 
2573  |  | #define dm_list_iterate_back_items_gen(v, head, field) \  | 
2574  |  |   for (v = dm_list_struct_base((head)->p, __typeof__(*v), field); \  | 
2575  |  |        &v->field != (head); \  | 
2576  |  |        v = dm_list_struct_base(v->field.p, __typeof__(*v), field))  | 
2577  |  |  | 
2578  |  | /*  | 
2579  |  |  * Walk a list backwards, setting 'v' in turn to the containing structure  | 
2580  |  |  * of each item.  | 
2581  |  |  * The containing structure should be the same type as 'v'.  | 
2582  |  |  * The list should be 'struct dm_list list' within the containing structure.  | 
2583  |  |  */  | 
2584  |  | #define dm_list_iterate_back_items(v, head) dm_list_iterate_back_items_gen(v, (head), list)  | 
2585  |  |  | 
2586  |  | /*  | 
2587  |  |  * Return the number of elements in a list by walking it.  | 
2588  |  |  */  | 
2589  |  | unsigned int dm_list_size(const struct dm_list *head);  | 
2590  |  |  | 
2591  |  | /*********  | 
2592  |  |  * selinux  | 
2593  |  |  *********/  | 
2594  |  |  | 
2595  |  | /*  | 
2596  |  |  * Obtain SELinux security context assigned for the path and set this  | 
2597  |  |  * context for creating a new file system object. This security context  | 
2598  |  |  * is global and it is used until reset to default policy behaviour  | 
2599  |  |  * by calling 'dm_prepare_selinux_context(NULL, 0)'.  | 
2600  |  |  */  | 
2601  |  | int dm_prepare_selinux_context(const char *path, mode_t mode);  | 
2602  |  | /*  | 
2603  |  |  * Set SELinux context for existing file system object.  | 
2604  |  |  */  | 
2605  |  | int dm_set_selinux_context(const char *path, mode_t mode);  | 
2606  |  |  | 
2607  |  | /*********************  | 
2608  |  |  * string manipulation  | 
2609  |  |  *********************/  | 
2610  |  |  | 
2611  |  | /*  | 
2612  |  |  * Break up the name of a mapped device into its constituent  | 
2613  |  |  * Volume Group, Logical Volume and Layer (if present).  | 
2614  |  |  * If mem is supplied, the result is allocated from the mempool.  | 
2615  |  |  * Otherwise the strings are changed in situ.  | 
2616  |  |  */  | 
2617  |  | int dm_split_lvm_name(struct dm_pool *mem, const char *dmname,  | 
2618  |  |           char **vgname, char **lvname, char **layer);  | 
2619  |  |  | 
2620  |  | /*  | 
2621  |  |  * Destructively split buffer into NULL-separated words in argv.  | 
2622  |  |  * Returns number of words.  | 
2623  |  |  */  | 
2624  |  | int dm_split_words(char *buffer, unsigned max,  | 
2625  |  |        unsigned ignore_comments, /* Not implemented */  | 
2626  |  |        char **argv);  | 
2627  |  |  | 
2628  |  | /*  | 
2629  |  |  * Returns -1 if buffer too small  | 
2630  |  |  */  | 
2631  |  | int dm_snprintf(char *buf, size_t bufsize, const char *format, ...)  | 
2632  |  |     __attribute__ ((format(printf, 3, 4)));  | 
2633  |  |  | 
2634  |  | /*  | 
2635  |  |  * Returns pointer to the last component of the path.  | 
2636  |  |  */  | 
2637  |  | const char *dm_basename(const char *path);  | 
2638  |  |  | 
2639  |  | /*  | 
2640  |  |  * Returns number of occurrences of 'c' in 'str' of length 'size'.  | 
2641  |  |  */  | 
2642  |  | unsigned dm_count_chars(const char *str, size_t len, const int c);  | 
2643  |  |  | 
2644  |  | /*  | 
2645  |  |  * Length of string after escaping double quotes and backslashes.  | 
2646  |  |  */  | 
2647  |  | size_t dm_escaped_len(const char *str);  | 
2648  |  |  | 
2649  |  | /*  | 
2650  |  |  * <vg>-<lv>-<layer> or if !layer just <vg>-<lv>.  | 
2651  |  |  */  | 
2652  |  | char *dm_build_dm_name(struct dm_pool *mem, const char *vgname,  | 
2653  |  |            const char *lvname, const char *layer);  | 
2654  |  | char *dm_build_dm_uuid(struct dm_pool *mem, const char *prefix, const char *lvid, const char *layer);  | 
2655  |  |  | 
2656  |  | /*  | 
2657  |  |  * Copies a string, quoting double quotes with backslashes.  | 
2658  |  |  */  | 
2659  |  | char *dm_escape_double_quotes(char *out, const char *src);  | 
2660  |  |  | 
2661  |  | /*  | 
2662  |  |  * Undo quoting in situ.  | 
2663  |  |  */  | 
2664  |  | void dm_unescape_double_quotes(char *src);  | 
2665  |  |  | 
2666  |  | /*  | 
2667  |  |  * Unescape colons and "at" signs in situ and save the substrings  | 
2668  |  |  * starting at the position of the first unescaped colon and the  | 
2669  |  |  * first unescaped "at" sign. This is normally used to unescape  | 
2670  |  |  * device names used as PVs.  | 
2671  |  |  */  | 
2672  |  | void dm_unescape_colons_and_at_signs(char *src,  | 
2673  |  |              char **substr_first_unquoted_colon,  | 
2674  |  |              char **substr_first_unquoted_at_sign);  | 
2675  |  |  | 
2676  |  | /*  | 
2677  |  |  * Replacement for strncpy() function.  | 
2678  |  |  *  | 
2679  |  |  * Copies no more than n bytes from string pointed by src to the buffer  | 
2680  |  |  * pointed by dest and ensure string is finished with '\0'.  | 
2681  |  |  * Returns 0 if the whole string does not fit.  | 
2682  |  |  */  | 
2683  |  | int dm_strncpy(char *dest, const char *src, size_t n);  | 
2684  |  |  | 
2685  |  | /*  | 
2686  |  |  * Recognize unit specifier in the 'units' arg and return a factor  | 
2687  |  |  * representing that unit. If the 'units' contains a prefix with digits,  | 
2688  |  |  * the 'units' is considered to be a custom unit.  | 
2689  |  |  *  | 
2690  |  |  * Also, set 'unit_type' output arg to the character that represents  | 
2691  |  |  * the unit specified. The 'unit_type' character equals to the unit  | 
2692  |  |  * character itself recognized in the 'units' arg for canonical units.  | 
2693  |  |  * Otherwise, the 'unit_type' character is set to 'U' for custom unit.  | 
2694  |  |  *  | 
2695  |  |  * An example for k/K canonical units and 8k/8K custom units:  | 
2696  |  |  *  | 
2697  |  |  *   units  unit_type  return value (factor)  | 
2698  |  |  *   k      k          1024  | 
2699  |  |  *   K      K          1000  | 
2700  |  |  *   8k     U          1024*8  | 
2701  |  |  *   8K     U          1000*8  | 
2702  |  |  *   etc...  | 
2703  |  |  *  | 
2704  |  |  * Recognized units:  | 
2705  |  |  *  | 
2706  |  |  *   h/H - human readable (returns 1 for both)  | 
2707  |  |  *   b/B - byte (returns 1 for both)  | 
2708  |  |  *   s/S - sector (returns 512 for both)  | 
2709  |  |  *   k/K - kilo (returns 1024/1000 respectively)  | 
2710  |  |  *   m/M - mega (returns 1024^2/1000^2 respectively)  | 
2711  |  |  *   g/G - giga (returns 1024^3/1000^3 respectively)  | 
2712  |  |  *   t/T - tera (returns 1024^4/1000^4 respectively)  | 
2713  |  |  *   p/P - peta (returns 1024^5/1000^5 respectively)  | 
2714  |  |  *   e/E - exa (returns 1024^6/1000^6 respectively)  | 
2715  |  |  *  | 
2716  |  |  * Only one units character is allowed in the 'units' arg  | 
2717  |  |  * if strict mode is enabled by 'strict' arg.  | 
2718  |  |  *  | 
2719  |  |  * The 'endptr' output arg, if not NULL, saves the pointer  | 
2720  |  |  * in the 'units' string which follows the unit specifier  | 
2721  |  |  * recognized (IOW the position where the parsing of the  | 
2722  |  |  * unit specifier stopped).  | 
2723  |  |  *  | 
2724  |  |  * Returns the unit factor or 0 if no unit is recognized.  | 
2725  |  |  */  | 
2726  |  | uint64_t dm_units_to_factor(const char *units, char *unit_type,  | 
2727  |  |           int strict, const char **endptr);  | 
2728  |  |  | 
2729  |  | /*  | 
2730  |  |  * Type of unit specifier used by dm_size_to_string().  | 
2731  |  |  */  | 
2732  |  | typedef enum dm_size_suffix_e { | 
2733  |  |   DM_SIZE_LONG = 0, /* Megabyte */  | 
2734  |  |   DM_SIZE_SHORT = 1,  /* MB or MiB */  | 
2735  |  |   DM_SIZE_UNIT = 2  /* M or m */  | 
2736  |  | } dm_size_suffix_t;  | 
2737  |  |  | 
2738  |  | /*  | 
2739  |  |  * Convert a size (in 512-byte sectors) into a printable string using units of unit_type.  | 
2740  |  |  * An upper-case unit_type indicates output units based on powers of 1000 are  | 
2741  |  |  * required; a lower-case unit_type indicates powers of 1024.  | 
2742  |  |  * For correct operation, unit_factor must be one of:  | 
2743  |  |  *  0 - the correct value will be calculated internally;  | 
2744  |  |  *   or the output from dm_units_to_factor() corresponding to unit_type;  | 
2745  |  |  *   or 'u' or 'U', an arbitrary number of bytes to use as the power base.  | 
2746  |  |  * Set include_suffix to 1 to include a suffix of suffix_type.  | 
2747  |  |  * Set use_si_units to 0 for suffixes that don't distinguish between 1000 and 1024.  | 
2748  |  |  * Set use_si_units to 1 for a suffix that does distinguish.  | 
2749  |  |  */  | 
2750  |  | const char *dm_size_to_string(struct dm_pool *mem, uint64_t size,  | 
2751  |  |             char unit_type, int use_si_units,  | 
2752  |  |             uint64_t unit_factor, int include_suffix,  | 
2753  |  |             dm_size_suffix_t suffix_type);  | 
2754  |  |  | 
2755  |  | /**************************  | 
2756  |  |  * file/stream manipulation  | 
2757  |  |  **************************/  | 
2758  |  |  | 
2759  |  | /*  | 
2760  |  |  * Create a directory (with parent directories if necessary).  | 
2761  |  |  * Returns 1 on success, 0 on failure.  | 
2762  |  |  */  | 
2763  |  | int dm_create_dir(const char *dir);  | 
2764  |  |  | 
2765  |  | int dm_is_empty_dir(const char *dir);  | 
2766  |  |  | 
2767  |  | /*  | 
2768  |  |  * Close a stream, with nicer error checking than fclose's.  | 
2769  |  |  * Derived from gnulib's close-stream.c.  | 
2770  |  |  *  | 
2771  |  |  * Close "stream".  Return 0 if successful, and EOF (setting errno)  | 
2772  |  |  * otherwise.  Upon failure, set errno to 0 if the error number  | 
2773  |  |  * cannot be determined.  Useful mainly for writable streams.  | 
2774  |  |  */  | 
2775  |  | int dm_fclose(FILE *stream);  | 
2776  |  |  | 
2777  |  | /*  | 
2778  |  |  * Returns size of a buffer which is allocated with dm_malloc.  | 
2779  |  |  * Pointer to the buffer is stored in *buf.  | 
2780  |  |  * Returns -1 on failure leaving buf undefined.  | 
2781  |  |  */  | 
2782  |  | int dm_asprintf(char **result, const char *format, ...)  | 
2783  |  |     __attribute__ ((format(printf, 2, 3)));  | 
2784  |  | int dm_vasprintf(char **result, const char *format, va_list aq)  | 
2785  |  |     __attribute__ ((format(printf, 2, 0)));  | 
2786  |  |  | 
2787  |  | /*  | 
2788  |  |  * create lockfile (pidfile) - create and lock a lock file  | 
2789  |  |  * @lockfile: location of lock file  | 
2790  |  |  *  | 
2791  |  |  * Returns: 1 on success, 0 otherwise, errno is handled internally  | 
2792  |  |  */  | 
2793  |  | int dm_create_lockfile(const char* lockfile);  | 
2794  |  |  | 
2795  |  | /*  | 
2796  |  |  * Query whether a daemon is running based on its lockfile  | 
2797  |  |  *  | 
2798  |  |  * Returns: 1 if running, 0 if not  | 
2799  |  |  */  | 
2800  |  | int dm_daemon_is_running(const char* lockfile);  | 
2801  |  |  | 
2802  |  | /*********************  | 
2803  |  |  * regular expressions  | 
2804  |  |  *********************/  | 
2805  |  | struct dm_regex;  | 
2806  |  |  | 
2807  |  | /*  | 
2808  |  |  * Initialise an array of num patterns for matching.  | 
2809  |  |  * Uses memory from mem.  | 
2810  |  |  */  | 
2811  |  | struct dm_regex *dm_regex_create(struct dm_pool *mem, const char * const *patterns,  | 
2812  |  |          unsigned num_patterns);  | 
2813  |  |  | 
2814  |  | /*  | 
2815  |  |  * Match string s against the patterns.  | 
2816  |  |  * Returns the index of the highest pattern in the array that matches,  | 
2817  |  |  * or -1 if none match.  | 
2818  |  |  */  | 
2819  |  | int dm_regex_match(struct dm_regex *regex, const char *s);  | 
2820  |  |  | 
2821  |  | /*  | 
2822  |  |  * This is useful for regression testing only.  The idea is if two  | 
2823  |  |  * fingerprints are different, then the two dfas are certainly not  | 
2824  |  |  * isomorphic.  If two fingerprints _are_ the same then it's very likely  | 
2825  |  |  * that the dfas are isomorphic.  | 
2826  |  |  *  | 
2827  |  |  * This function must be called before any matching is done.  | 
2828  |  |  */  | 
2829  |  | uint32_t dm_regex_fingerprint(struct dm_regex *regex);  | 
2830  |  |  | 
2831  |  | /******************  | 
2832  |  |  * percent handling  | 
2833  |  |  ******************/  | 
2834  |  | /*  | 
2835  |  |  * A fixed-point representation of percent values. One percent equals to  | 
2836  |  |  * DM_PERCENT_1 as defined below. Values that are not multiples of DM_PERCENT_1  | 
2837  |  |  * represent fractions, with precision of 1/1000000 of a percent. See  | 
2838  |  |  * dm_percent_to_float for a conversion to a floating-point representation.  | 
2839  |  |  *  | 
2840  |  |  * You should always use dm_make_percent when building dm_percent_t values. The  | 
2841  |  |  * implementation of dm_make_percent is biased towards the middle: it ensures that  | 
2842  |  |  * the result is DM_PERCENT_0 or DM_PERCENT_100 if and only if this is the actual  | 
2843  |  |  * value -- it never rounds any intermediate value (> 0 or < 100) to either 0  | 
2844  |  |  * or 100.  | 
2845  |  | */  | 
2846  |  | #define DM_PERCENT_CHAR '%'  | 
2847  |  |  | 
2848  |  | typedef enum dm_percent_range_e { | 
2849  |  |   DM_PERCENT_0 = 0,  | 
2850  |  |   DM_PERCENT_1 = 1000000,  | 
2851  |  |   DM_PERCENT_100 = 100 * DM_PERCENT_1,  | 
2852  |  |   DM_PERCENT_INVALID = -1,  | 
2853  |  |   DM_PERCENT_FAILED = -2  | 
2854  |  | } dm_percent_range_t;  | 
2855  |  |  | 
2856  |  | typedef int32_t dm_percent_t;  | 
2857  |  |  | 
2858  |  | float dm_percent_to_float(dm_percent_t percent);  | 
2859  |  | /*  | 
2860  |  |  * Return adjusted/rounded float for better percent value printing.  | 
2861  |  |  * Function ensures for given precision of digits:  | 
2862  |  |  * 100.0% returns only when the value is DM_PERCENT_100  | 
2863  |  |  *        for close smaller values rounds to nearest smaller value  | 
2864  |  |  * 0.0% returns only for value DM_PERCENT_0  | 
2865  |  |  *        for close bigger values rounds to nearest bigger value  | 
2866  |  |  * In all other cases returns same value as dm_percent_to_float()  | 
2867  |  |  */  | 
2868  |  | float dm_percent_to_round_float(dm_percent_t percent, unsigned digits);  | 
2869  |  | dm_percent_t dm_make_percent(uint64_t numerator, uint64_t denominator);  | 
2870  |  |  | 
2871  |  | /********************  | 
2872  |  |  * timestamp handling  | 
2873  |  |  ********************/  | 
2874  |  |  | 
2875  |  | /*  | 
2876  |  |  * Create a dm_timestamp object to use with dm_timestamp_get.  | 
2877  |  |  */  | 
2878  |  | struct dm_timestamp *dm_timestamp_alloc(void);  | 
2879  |  |  | 
2880  |  | /*  | 
2881  |  |  * Update dm_timestamp object to represent the current time.  | 
2882  |  |  */  | 
2883  |  | int dm_timestamp_get(struct dm_timestamp *ts);  | 
2884  |  |  | 
2885  |  | /*  | 
2886  |  |  * Copy a timestamp from ts_old to ts_new.  | 
2887  |  |  */  | 
2888  |  | void dm_timestamp_copy(struct dm_timestamp *ts_new, struct dm_timestamp *ts_old);  | 
2889  |  |  | 
2890  |  | /*  | 
2891  |  |  * Compare two timestamps.  | 
2892  |  |  *  | 
2893  |  |  * Return: -1 if ts1 is less than ts2  | 
2894  |  |  *        0 if ts1 is equal to ts2  | 
2895  |  |  *          1 if ts1 is greater than ts2  | 
2896  |  |  */  | 
2897  |  | int dm_timestamp_compare(struct dm_timestamp *ts1, struct dm_timestamp *ts2);  | 
2898  |  |  | 
2899  |  | /*  | 
2900  |  |  * Return the absolute difference in nanoseconds between  | 
2901  |  |  * the dm_timestamp objects ts1 and ts2.  | 
2902  |  |  *  | 
2903  |  |  * Callers that need to know whether ts1 is before, equal to, or after ts2  | 
2904  |  |  * in addition to the magnitude should use dm_timestamp_compare.  | 
2905  |  |  */  | 
2906  |  | uint64_t dm_timestamp_delta(struct dm_timestamp *ts1, struct dm_timestamp *ts2);  | 
2907  |  |  | 
2908  |  | /*  | 
2909  |  |  * Destroy a dm_timestamp object.  | 
2910  |  |  */  | 
2911  |  | void dm_timestamp_destroy(struct dm_timestamp *ts);  | 
2912  |  |  | 
2913  |  | /*********************  | 
2914  |  |  * reporting functions  | 
2915  |  |  *********************/  | 
2916  |  |  | 
2917  |  | struct dm_report_object_type { | 
2918  |  |   uint32_t id;      /* Powers of 2 */  | 
2919  |  |   const char *desc;  | 
2920  |  |   const char *prefix;   /* field id string prefix (optional) */  | 
2921  |  |   /* FIXME: convert to proper usage of const pointers here */  | 
2922  |  |   void *(*data_fn)(void *object); /* callback from report_object() */  | 
2923  |  | };  | 
2924  |  |  | 
2925  |  | struct dm_report_field;  | 
2926  |  |  | 
2927  |  | /*  | 
2928  |  |  * dm_report_field_type flags  | 
2929  |  |  */  | 
2930  |  | #define DM_REPORT_FIELD_MASK        0x00000FFF  | 
2931  |  | #define DM_REPORT_FIELD_ALIGN_MASK      0x0000000F  | 
2932  |  | #define DM_REPORT_FIELD_ALIGN_LEFT      0x00000001  | 
2933  |  | #define DM_REPORT_FIELD_ALIGN_RIGHT     0x00000002  | 
2934  |  | #define DM_REPORT_FIELD_TYPE_MASK     0x00000FF0  | 
2935  |  | #define DM_REPORT_FIELD_TYPE_NONE     0x00000000  | 
2936  |  | #define DM_REPORT_FIELD_TYPE_STRING     0x00000010  | 
2937  |  | #define DM_REPORT_FIELD_TYPE_NUMBER     0x00000020  | 
2938  |  | #define DM_REPORT_FIELD_TYPE_SIZE     0x00000040  | 
2939  |  | #define DM_REPORT_FIELD_TYPE_PERCENT      0x00000080  | 
2940  |  | #define DM_REPORT_FIELD_TYPE_STRING_LIST    0x00000100  | 
2941  |  | #define DM_REPORT_FIELD_TYPE_TIME     0x00000200  | 
2942  |  |  | 
2943  |  | /* For use with reserved values only! */  | 
2944  |  | #define DM_REPORT_FIELD_RESERVED_VALUE_MASK   0x0000000F  | 
2945  |  | #define DM_REPORT_FIELD_RESERVED_VALUE_NAMED    0x00000001 /* only named value, less strict form of reservation */  | 
2946  |  | #define DM_REPORT_FIELD_RESERVED_VALUE_RANGE    0x00000002 /* value is range - low and high value defined */  | 
2947  |  | #define DM_REPORT_FIELD_RESERVED_VALUE_DYNAMIC_VALUE  0x00000004 /* value is computed in runtime */  | 
2948  |  | #define DM_REPORT_FIELD_RESERVED_VALUE_FUZZY_NAMES  0x00000008 /* value names are recognized in runtime */  | 
2949  |  |  | 
2950  |  | #define DM_REPORT_FIELD_TYPE_ID_LEN 32  | 
2951  |  | #define DM_REPORT_FIELD_TYPE_HEADING_LEN 32  | 
2952  |  |  | 
2953  |  | struct dm_report;  | 
2954  |  | struct dm_report_field_type { | 
2955  |  |   uint32_t type;    /* object type id */  | 
2956  |  |   uint32_t flags;   /* DM_REPORT_FIELD_* */  | 
2957  |  |   uint32_t offset;  /* byte offset in the object */  | 
2958  |  |   int32_t width;    /* default width */  | 
2959  |  |   /* string used to specify the field */  | 
2960  |  |   const char id[DM_REPORT_FIELD_TYPE_ID_LEN];  | 
2961  |  |   /* string printed in header */  | 
2962  |  |   const char heading[DM_REPORT_FIELD_TYPE_HEADING_LEN];  | 
2963  |  |   int (*report_fn)(struct dm_report *rh, struct dm_pool *mem,  | 
2964  |  |        struct dm_report_field *field, const void *data,  | 
2965  |  |        void *private_data);  | 
2966  |  |   const char *desc; /* description of the field */  | 
2967  |  | };  | 
2968  |  |  | 
2969  |  | /*  | 
2970  |  |  * Per-field reserved value.  | 
2971  |  |  */  | 
2972  |  | struct dm_report_field_reserved_value { | 
2973  |  |   /* field_num is the position of the field in 'fields'  | 
2974  |  |      array passed to dm_report_init_with_selection */  | 
2975  |  |   uint32_t field_num;  | 
2976  |  |   /* the value is of the same type as the field  | 
2977  |  |      identified by field_num */  | 
2978  |  |   const void *value;  | 
2979  |  | };  | 
2980  |  |  | 
2981  |  | /*  | 
2982  |  |  * Reserved value is a 'value' that is used directly if any of the 'names' is hit  | 
2983  |  |  * or in case of fuzzy names, if such fuzzy name matches.  | 
2984  |  |  *  | 
2985  |  |  * If type is any of DM_REPORT_FIELD_TYPE_*, the reserved value is recognized  | 
2986  |  |  * for all fields of that type.  | 
2987  |  |  *  | 
2988  |  |  * If type is DM_REPORT_FIELD_TYPE_NONE, the reserved value is recognized  | 
2989  |  |  * for the exact field specified - hence the type of the value is automatically  | 
2990  |  |  * the same as the type of the field itself.  | 
2991  |  |  *  | 
2992  |  |  * The array of reserved values is used to initialize reporting with  | 
2993  |  |  * selection enabled (see also dm_report_init_with_selection function).  | 
2994  |  |  */  | 
2995  |  | struct dm_report_reserved_value { | 
2996  |  |   const uint32_t type;    /* DM_REPORT_FIELD_RESERVED_VALUE_* and DM_REPORT_FIELD_TYPE_*  */  | 
2997  |  |   const void *value;    /* reserved value:  | 
2998  |  |             uint64_t for DM_REPORT_FIELD_TYPE_NUMBER  | 
2999  |  |             uint64_t for DM_REPORT_FIELD_TYPE_SIZE (number of 512-byte sectors)  | 
3000  |  |             uint64_t for DM_REPORT_FIELD_TYPE_PERCENT  | 
3001  |  |             const char* for DM_REPORT_FIELD_TYPE_STRING  | 
3002  |  |             struct dm_report_field_reserved_value for DM_REPORT_FIELD_TYPE_NONE  | 
3003  |  |             dm_report_reserved_handler* if DM_REPORT_FIELD_RESERVED_VALUE_{DYNAMIC_VALUE,FUZZY_NAMES} is used */ | 
3004  |  |   const char **names;   /* null-terminated array of static names for this reserved value */  | 
3005  |  |   const char *description;  /* description of the reserved value */  | 
3006  |  | };  | 
3007  |  |  | 
3008  |  | /*  | 
3009  |  |  * Available actions for dm_report_reserved_value_handler.  | 
3010  |  |  */  | 
3011  |  | typedef enum dm_report_reserved_action_e { | 
3012  |  |   DM_REPORT_RESERVED_PARSE_FUZZY_NAME,  | 
3013  |  |   DM_REPORT_RESERVED_GET_DYNAMIC_VALUE,  | 
3014  |  | } dm_report_reserved_action_t;  | 
3015  |  |  | 
3016  |  | /*  | 
3017  |  |  * Generic reserved value handler to process reserved value names and/or values.  | 
3018  |  |  *  | 
3019  |  |  * Actions and their input/output:  | 
3020  |  |  *  | 
3021  |  |  *  DM_REPORT_RESERVED_PARSE_FUZZY_NAME  | 
3022  |  |  *    data_in:  const char *fuzzy_name  | 
3023  |  |  *    data_out: const char *canonical_name, NULL if fuzzy_name not recognized  | 
3024  |  |  *  | 
3025  |  |  *  DM_REPORT_RESERVED_GET_DYNAMIC_VALUE  | 
3026  |  |  *    data_in:  const char *canonical_name  | 
3027  |  |  *    data_out: void *value, NULL if canonical_name not recognized  | 
3028  |  |  *  | 
3029  |  |  * All actions return:  | 
3030  |  |  *  | 
3031  |  |  *  -1 if action not implemented  | 
3032  |  |  *  0 on error  | 
3033  |  |  *  1 on success  | 
3034  |  |  */  | 
3035  |  | typedef int (*dm_report_reserved_handler) (struct dm_report *rh,  | 
3036  |  |              struct dm_pool *mem,  | 
3037  |  |              uint32_t field_num,  | 
3038  |  |              dm_report_reserved_action_t action,  | 
3039  |  |              const void *data_in,  | 
3040  |  |              const void **data_out);  | 
3041  |  |  | 
3042  |  | /*  | 
3043  |  |  * The dm_report_value_cache_{set,get} are helper functions to store and retrieve | 
3044  |  |  * various values used during reporting (dm_report_field_type.report_fn) and/or  | 
3045  |  |  * selection processing (dm_report_reserved_handler instances) to avoid  | 
3046  |  |  * recalculation of these values or to share values among calls.  | 
3047  |  |  */  | 
3048  |  | int dm_report_value_cache_set(struct dm_report *rh, const char *name, const void *data);  | 
3049  |  | const void *dm_report_value_cache_get(struct dm_report *rh, const char *name);  | 
3050  |  | /*  | 
3051  |  |  * dm_report_init output_flags  | 
3052  |  |  */  | 
3053  |  | #define DM_REPORT_OUTPUT_MASK     0x000000FF  | 
3054  |  | #define DM_REPORT_OUTPUT_ALIGNED    0x00000001  | 
3055  |  | #define DM_REPORT_OUTPUT_BUFFERED   0x00000002  | 
3056  |  | #define DM_REPORT_OUTPUT_HEADINGS   0x00000004  | 
3057  |  | #define DM_REPORT_OUTPUT_FIELD_NAME_PREFIX  0x00000008  | 
3058  |  | #define DM_REPORT_OUTPUT_FIELD_UNQUOTED   0x00000010  | 
3059  |  | #define DM_REPORT_OUTPUT_COLUMNS_AS_ROWS  0x00000020  | 
3060  |  | #define DM_REPORT_OUTPUT_MULTIPLE_TIMES   0x00000040  | 
3061  |  | #define DM_REPORT_OUTPUT_FIELD_IDS_IN_HEADINGS  0x00000080  | 
3062  |  |  | 
3063  |  | struct dm_report *dm_report_init(uint32_t *report_types,  | 
3064  |  |          const struct dm_report_object_type *types,  | 
3065  |  |          const struct dm_report_field_type *fields,  | 
3066  |  |          const char *output_fields,  | 
3067  |  |          const char *output_separator,  | 
3068  |  |          uint32_t output_flags,  | 
3069  |  |          const char *sort_keys,  | 
3070  |  |          void *private_data);  | 
3071  |  | struct dm_report *dm_report_init_with_selection(uint32_t *report_types,  | 
3072  |  |             const struct dm_report_object_type *types,  | 
3073  |  |             const struct dm_report_field_type *fields,  | 
3074  |  |             const char *output_fields,  | 
3075  |  |             const char *output_separator,  | 
3076  |  |             uint32_t output_flags,  | 
3077  |  |             const char *sort_keys,  | 
3078  |  |             const char *selection,  | 
3079  |  |             const struct dm_report_reserved_value reserved_values[],  | 
3080  |  |             void *private_data);  | 
3081  |  | /*  | 
3082  |  |  * Report an object, pass it through the selection criteria if they  | 
3083  |  |  * are present and display the result on output if it passes the criteria.  | 
3084  |  |  */  | 
3085  |  | int dm_report_object(struct dm_report *rh, void *object);  | 
3086  |  | /*  | 
3087  |  |  * The same as dm_report_object, but display the result on output only if  | 
3088  |  |  * 'do_output' arg is set. Also, save the result of selection in 'selected'  | 
3089  |  |  * arg if it's not NULL (either 1 if the object passes, otherwise 0).  | 
3090  |  |  */  | 
3091  |  | int dm_report_object_is_selected(struct dm_report *rh, void *object, int do_output, int *selected);  | 
3092  |  |  | 
3093  |  | /*  | 
3094  |  |  * Compact report output so that if field value is empty for all rows in  | 
3095  |  |  * the report, drop the field from output completely (including headers).  | 
3096  |  |  * Compact output is applicable only if report is buffered, otherwise  | 
3097  |  |  * this function has no effect.  | 
3098  |  |  */  | 
3099  |  | int dm_report_compact_fields(struct dm_report *rh);  | 
3100  |  |  | 
3101  |  | /*  | 
3102  |  |  * The same as dm_report_compact_fields, but for selected fields only.  | 
3103  |  |  * The "fields" arg is comma separated list of field names (the same format  | 
3104  |  |  * as used for "output_fields" arg in dm_report_init fn).  | 
3105  |  |  */  | 
3106  |  | int dm_report_compact_given_fields(struct dm_report *rh, const char *fields);  | 
3107  |  |  | 
3108  |  | /*  | 
3109  |  |  * Returns 1 if there is no data waiting to be output.  | 
3110  |  |  */  | 
3111  |  | int dm_report_is_empty(struct dm_report *rh);  | 
3112  |  |  | 
3113  |  | /*  | 
3114  |  |  * Destroy report content without doing output.  | 
3115  |  |  */  | 
3116  |  | void dm_report_destroy_rows(struct dm_report *rh);  | 
3117  |  |  | 
3118  |  | int dm_report_output(struct dm_report *rh);  | 
3119  |  |  | 
3120  |  | /*  | 
3121  |  |  * Output the report headings for a columns-based report, even if they  | 
3122  |  |  * have already been shown. Useful for repeating reports that wish to  | 
3123  |  |  * issue a periodic reminder of the column headings.  | 
3124  |  |  */  | 
3125  |  | int dm_report_column_headings(struct dm_report *rh);  | 
3126  |  |  | 
3127  |  | void dm_report_free(struct dm_report *rh);  | 
3128  |  |  | 
3129  |  | /*  | 
3130  |  |  * Prefix added to each field name with DM_REPORT_OUTPUT_FIELD_NAME_PREFIX  | 
3131  |  |  */  | 
3132  |  | int dm_report_set_output_field_name_prefix(struct dm_report *rh,  | 
3133  |  |              const char *output_field_name_prefix);  | 
3134  |  |  | 
3135  |  | int dm_report_set_selection(struct dm_report *rh, const char *selection);  | 
3136  |  |  | 
3137  |  | /*  | 
3138  |  |  * Report functions are provided for simple data types.  | 
3139  |  |  * They take care of allocating copies of the data.  | 
3140  |  |  */  | 
3141  |  | int dm_report_field_string(struct dm_report *rh, struct dm_report_field *field,  | 
3142  |  |          const char *const *data);  | 
3143  |  | int dm_report_field_string_list(struct dm_report *rh, struct dm_report_field *field,  | 
3144  |  |         const struct dm_list *data, const char *delimiter);  | 
3145  |  | int dm_report_field_string_list_unsorted(struct dm_report *rh, struct dm_report_field *field,  | 
3146  |  |            const struct dm_list *data, const char *delimiter);  | 
3147  |  | int dm_report_field_int32(struct dm_report *rh, struct dm_report_field *field,  | 
3148  |  |         const int32_t *data);  | 
3149  |  | int dm_report_field_uint32(struct dm_report *rh, struct dm_report_field *field,  | 
3150  |  |          const uint32_t *data);  | 
3151  |  | int dm_report_field_int(struct dm_report *rh, struct dm_report_field *field,  | 
3152  |  |       const int *data);  | 
3153  |  | int dm_report_field_uint64(struct dm_report *rh, struct dm_report_field *field,  | 
3154  |  |          const uint64_t *data);  | 
3155  |  | int dm_report_field_percent(struct dm_report *rh, struct dm_report_field *field,  | 
3156  |  |           const dm_percent_t *data);  | 
3157  |  |  | 
3158  |  | /*  | 
3159  |  |  * For custom fields, allocate the data in 'mem' and use  | 
3160  |  |  * dm_report_field_set_value().  | 
3161  |  |  * 'sortvalue' may be NULL if it matches 'value'  | 
3162  |  |  */  | 
3163  |  | void dm_report_field_set_value(struct dm_report_field *field, const void *value,  | 
3164  |  |              const void *sortvalue);  | 
3165  |  |  | 
3166  |  | /*  | 
3167  |  |  * Report group support.  | 
3168  |  |  */  | 
3169  |  | struct dm_report_group;  | 
3170  |  |  | 
3171  |  | typedef enum dm_report_group_type_e { | 
3172  |  |   DM_REPORT_GROUP_SINGLE,  | 
3173  |  |   DM_REPORT_GROUP_BASIC,  | 
3174  |  |   DM_REPORT_GROUP_JSON,  | 
3175  |  |   DM_REPORT_GROUP_JSON_STD  | 
3176  |  | } dm_report_group_type_t;  | 
3177  |  |  | 
3178  |  | struct dm_report_group *dm_report_group_create(dm_report_group_type_t type, void *data);  | 
3179  |  | int dm_report_group_push(struct dm_report_group *group, struct dm_report *report, void *data);  | 
3180  |  | int dm_report_group_pop(struct dm_report_group *group);  | 
3181  |  | int dm_report_group_output_and_pop_all(struct dm_report_group *group);  | 
3182  |  | int dm_report_group_destroy(struct dm_report_group *group);  | 
3183  |  |  | 
3184  |  | /*  | 
3185  |  |  * Stats counter access methods  | 
3186  |  |  *  | 
3187  |  |  * Each method returns the corresponding stats counter value from the  | 
3188  |  |  * supplied dm_stats handle for the specified region_id and area_id.  | 
3189  |  |  * If either region_id or area_id uses one of the special values  | 
3190  |  |  * DM_STATS_REGION_CURRENT or DM_STATS_AREA_CURRENT then the region  | 
3191  |  |  * or area is selected according to the current state of the dm_stats  | 
3192  |  |  * handle's embedded cursor.  | 
3193  |  |  *  | 
3194  |  |  * Two methods are provided to access counter values: a named function  | 
3195  |  |  * for each available counter field and a single function that accepts  | 
3196  |  |  * an enum value specifying the required field. New code is encouraged  | 
3197  |  |  * to use the enum based interface as calls to the named functions are  | 
3198  |  |  * implemented using the enum method internally.  | 
3199  |  |  *  | 
3200  |  |  * See the kernel documentation for complete descriptions of each  | 
3201  |  |  * counter field:  | 
3202  |  |  *  | 
3203  |  |  * Documentation/device-mapper/statistics.txt  | 
3204  |  |  * Documentation/iostats.txt  | 
3205  |  |  *  | 
3206  |  |  * reads: the number of reads completed  | 
3207  |  |  * reads_merged: the number of reads merged  | 
3208  |  |  * read_sectors: the number of sectors read  | 
3209  |  |  * read_nsecs: the number of nanoseconds spent reading  | 
3210  |  |  * writes: the number of writes completed  | 
3211  |  |  * writes_merged: the number of writes merged  | 
3212  |  |  * write_sectors: the number of sectors written  | 
3213  |  |  * write_nsecs: the number of nanoseconds spent writing  | 
3214  |  |  * io_in_progress: the number of I/Os currently in progress  | 
3215  |  |  * io_nsecs: the number of nanoseconds spent doing I/Os  | 
3216  |  |  * weighted_io_nsecs: the weighted number of nanoseconds spent doing I/Os  | 
3217  |  |  * total_read_nsecs: the total time spent reading in nanoseconds  | 
3218  |  |  * total_write_nsecs: the total time spent writing in nanoseconds  | 
3219  |  |  */  | 
3220  |  |  | 
3221  |  | #define DM_STATS_REGION_CURRENT UINT64_MAX  | 
3222  |  | #define DM_STATS_AREA_CURRENT UINT64_MAX  | 
3223  |  |  | 
3224  |  | typedef enum dm_stats_counter_e { | 
3225  |  |   DM_STATS_READS_COUNT,  | 
3226  |  |   DM_STATS_READS_MERGED_COUNT,  | 
3227  |  |   DM_STATS_READ_SECTORS_COUNT,  | 
3228  |  |   DM_STATS_READ_NSECS,  | 
3229  |  |   DM_STATS_WRITES_COUNT,  | 
3230  |  |   DM_STATS_WRITES_MERGED_COUNT,  | 
3231  |  |   DM_STATS_WRITE_SECTORS_COUNT,  | 
3232  |  |   DM_STATS_WRITE_NSECS,  | 
3233  |  |   DM_STATS_IO_IN_PROGRESS_COUNT,  | 
3234  |  |   DM_STATS_IO_NSECS,  | 
3235  |  |   DM_STATS_WEIGHTED_IO_NSECS,  | 
3236  |  |   DM_STATS_TOTAL_READ_NSECS,  | 
3237  |  |   DM_STATS_TOTAL_WRITE_NSECS,  | 
3238  |  |   DM_STATS_NR_COUNTERS  | 
3239  |  | } dm_stats_counter_t;  | 
3240  |  |  | 
3241  |  | uint64_t dm_stats_get_counter(const struct dm_stats *dms,  | 
3242  |  |             dm_stats_counter_t counter,  | 
3243  |  |             uint64_t region_id, uint64_t area_id);  | 
3244  |  |  | 
3245  |  | uint64_t dm_stats_get_reads(const struct dm_stats *dms,  | 
3246  |  |           uint64_t region_id, uint64_t area_id);  | 
3247  |  |  | 
3248  |  | uint64_t dm_stats_get_reads_merged(const struct dm_stats *dms,  | 
3249  |  |            uint64_t region_id, uint64_t area_id);  | 
3250  |  |  | 
3251  |  | uint64_t dm_stats_get_read_sectors(const struct dm_stats *dms,  | 
3252  |  |            uint64_t region_id, uint64_t area_id);  | 
3253  |  |  | 
3254  |  | uint64_t dm_stats_get_read_nsecs(const struct dm_stats *dms,  | 
3255  |  |          uint64_t region_id, uint64_t area_id);  | 
3256  |  |  | 
3257  |  | uint64_t dm_stats_get_writes(const struct dm_stats *dms,  | 
3258  |  |            uint64_t region_id, uint64_t area_id);  | 
3259  |  |  | 
3260  |  | uint64_t dm_stats_get_writes_merged(const struct dm_stats *dms,  | 
3261  |  |             uint64_t region_id, uint64_t area_id);  | 
3262  |  |  | 
3263  |  | uint64_t dm_stats_get_write_sectors(const struct dm_stats *dms,  | 
3264  |  |             uint64_t region_id, uint64_t area_id);  | 
3265  |  |  | 
3266  |  | uint64_t dm_stats_get_write_nsecs(const struct dm_stats *dms,  | 
3267  |  |           uint64_t region_id, uint64_t area_id);  | 
3268  |  |  | 
3269  |  | uint64_t dm_stats_get_io_in_progress(const struct dm_stats *dms,  | 
3270  |  |              uint64_t region_id, uint64_t area_id);  | 
3271  |  |  | 
3272  |  | uint64_t dm_stats_get_io_nsecs(const struct dm_stats *dms,  | 
3273  |  |              uint64_t region_id, uint64_t area_id);  | 
3274  |  |  | 
3275  |  | uint64_t dm_stats_get_weighted_io_nsecs(const struct dm_stats *dms,  | 
3276  |  |           uint64_t region_id, uint64_t area_id);  | 
3277  |  |  | 
3278  |  | uint64_t dm_stats_get_total_read_nsecs(const struct dm_stats *dms,  | 
3279  |  |                uint64_t region_id, uint64_t area_id);  | 
3280  |  |  | 
3281  |  | uint64_t dm_stats_get_total_write_nsecs(const struct dm_stats *dms,  | 
3282  |  |           uint64_t region_id, uint64_t area_id);  | 
3283  |  |  | 
3284  |  | /*  | 
3285  |  |  * Derived statistics access methods  | 
3286  |  |  *  | 
3287  |  |  * Each method returns the corresponding value calculated from the  | 
3288  |  |  * counters stored in the supplied dm_stats handle for the specified  | 
3289  |  |  * region_id and area_id. If either region_id or area_id uses one of the  | 
3290  |  |  * special values DM_STATS_REGION_CURRENT or DM_STATS_AREA_CURRENT then  | 
3291  |  |  * the region or area is selected according to the current state of the  | 
3292  |  |  * dm_stats handle's embedded cursor.  | 
3293  |  |  *  | 
3294  |  |  * The set of metrics is based on the fields provided by the Linux  | 
3295  |  |  * iostats program.  | 
3296  |  |  *  | 
3297  |  |  * rd_merges_per_sec: the number of reads merged per second  | 
3298  |  |  * wr_merges_per_sec: the number of writes merged per second  | 
3299  |  |  * reads_per_sec: the number of reads completed per second  | 
3300  |  |  * writes_per_sec: the number of writes completed per second  | 
3301  |  |  * read_sectors_per_sec: the number of sectors read per second  | 
3302  |  |  * write_sectors_per_sec: the number of sectors written per second  | 
3303  |  |  * average_request_size: the average size of requests submitted  | 
3304  |  |  * service_time: the average service time (in ns) for requests issued  | 
3305  |  |  * average_queue_size: the average queue length  | 
3306  |  |  * average_wait_time: the average time for requests to be served (in ns)  | 
3307  |  |  * average_rd_wait_time: the average read wait time  | 
3308  |  |  * average_wr_wait_time: the average write wait time  | 
3309  |  |  */  | 
3310  |  |  | 
3311  |  | typedef enum dm_stats_metric_e { | 
3312  |  |   DM_STATS_RD_MERGES_PER_SEC,  | 
3313  |  |   DM_STATS_WR_MERGES_PER_SEC,  | 
3314  |  |   DM_STATS_READS_PER_SEC,  | 
3315  |  |   DM_STATS_WRITES_PER_SEC,  | 
3316  |  |   DM_STATS_READ_SECTORS_PER_SEC,  | 
3317  |  |   DM_STATS_WRITE_SECTORS_PER_SEC,  | 
3318  |  |   DM_STATS_AVERAGE_REQUEST_SIZE,  | 
3319  |  |   DM_STATS_AVERAGE_QUEUE_SIZE,  | 
3320  |  |   DM_STATS_AVERAGE_WAIT_TIME,  | 
3321  |  |   DM_STATS_AVERAGE_RD_WAIT_TIME,  | 
3322  |  |   DM_STATS_AVERAGE_WR_WAIT_TIME,  | 
3323  |  |   DM_STATS_SERVICE_TIME,  | 
3324  |  |   DM_STATS_THROUGHPUT,  | 
3325  |  |   DM_STATS_UTILIZATION,  | 
3326  |  |   DM_STATS_NR_METRICS  | 
3327  |  | } dm_stats_metric_t;  | 
3328  |  |  | 
3329  |  | int dm_stats_get_metric(const struct dm_stats *dms, int metric,  | 
3330  |  |       uint64_t region_id, uint64_t area_id, double *value);  | 
3331  |  |  | 
3332  |  | int dm_stats_get_rd_merges_per_sec(const struct dm_stats *dms, double *rrqm,  | 
3333  |  |            uint64_t region_id, uint64_t area_id);  | 
3334  |  |  | 
3335  |  | int dm_stats_get_wr_merges_per_sec(const struct dm_stats *dms, double *wrqm,  | 
3336  |  |            uint64_t region_id, uint64_t area_id);  | 
3337  |  |  | 
3338  |  | int dm_stats_get_reads_per_sec(const struct dm_stats *dms, double *rd_s,  | 
3339  |  |              uint64_t region_id, uint64_t area_id);  | 
3340  |  |  | 
3341  |  | int dm_stats_get_writes_per_sec(const struct dm_stats *dms, double *wr_s,  | 
3342  |  |         uint64_t region_id, uint64_t area_id);  | 
3343  |  |  | 
3344  |  | int dm_stats_get_read_sectors_per_sec(const struct dm_stats *dms,  | 
3345  |  |               double *rsec_s, uint64_t region_id,  | 
3346  |  |               uint64_t area_id);  | 
3347  |  |  | 
3348  |  | int dm_stats_get_write_sectors_per_sec(const struct dm_stats *dms,  | 
3349  |  |                double *wsec_s, uint64_t region_id,  | 
3350  |  |                uint64_t area_id);  | 
3351  |  |  | 
3352  |  | int dm_stats_get_average_request_size(const struct dm_stats *dms,  | 
3353  |  |               double *arqsz, uint64_t region_id,  | 
3354  |  |               uint64_t area_id);  | 
3355  |  |  | 
3356  |  | int dm_stats_get_service_time(const struct dm_stats *dms, double *svctm,  | 
3357  |  |             uint64_t region_id, uint64_t area_id);  | 
3358  |  |  | 
3359  |  | int dm_stats_get_average_queue_size(const struct dm_stats *dms, double *qusz,  | 
3360  |  |             uint64_t region_id, uint64_t area_id);  | 
3361  |  |  | 
3362  |  | int dm_stats_get_average_wait_time(const struct dm_stats *dms, double *await,  | 
3363  |  |            uint64_t region_id, uint64_t area_id);  | 
3364  |  |  | 
3365  |  | int dm_stats_get_average_rd_wait_time(const struct dm_stats *dms,  | 
3366  |  |               double *await, uint64_t region_id,  | 
3367  |  |               uint64_t area_id);  | 
3368  |  |  | 
3369  |  | int dm_stats_get_average_wr_wait_time(const struct dm_stats *dms,  | 
3370  |  |               double *await, uint64_t region_id,  | 
3371  |  |               uint64_t area_id);  | 
3372  |  |  | 
3373  |  | int dm_stats_get_throughput(const struct dm_stats *dms, double *tput,  | 
3374  |  |           uint64_t region_id, uint64_t area_id);  | 
3375  |  |  | 
3376  |  | int dm_stats_get_utilization(const struct dm_stats *dms, dm_percent_t *util,  | 
3377  |  |            uint64_t region_id, uint64_t area_id);  | 
3378  |  |  | 
3379  |  | /*  | 
3380  |  |  * Statistics histogram access methods.  | 
3381  |  |  *  | 
3382  |  |  * Methods to access latency histograms for regions that have them  | 
3383  |  |  * enabled. Each histogram contains a configurable number of bins  | 
3384  |  |  * spanning a user defined latency interval.  | 
3385  |  |  *  | 
3386  |  |  * The bin count, upper and lower bin bounds, and bin values are  | 
3387  |  |  * made available via the following area methods.  | 
3388  |  |  *  | 
3389  |  |  * Methods to obtain a simple string representation of the histogram  | 
3390  |  |  * and its bounds are also provided.  | 
3391  |  |  */  | 
3392  |  |  | 
3393  |  | /*  | 
3394  |  |  * Retrieve a pointer to the histogram associated with the specified  | 
3395  |  |  * area. If the area does not have a histogram configured this function  | 
3396  |  |  * returns NULL.  | 
3397  |  |  *  | 
3398  |  |  * The pointer does not need to be freed explicitly by the caller: it  | 
3399  |  |  * will become invalid following a subsequent dm_stats_list(),  | 
3400  |  |  * dm_stats_populate() or dm_stats_destroy() of the corresponding  | 
3401  |  |  * dm_stats handle.  | 
3402  |  |  *  | 
3403  |  |  * If region_id or area_id is one of the special values  | 
3404  |  |  * DM_STATS_REGION_CURRENT or DM_STATS_AREA_CURRENT the current cursor  | 
3405  |  |  * value is used to select the region or area.  | 
3406  |  |  */  | 
3407  |  | struct dm_histogram *dm_stats_get_histogram(const struct dm_stats *dms,  | 
3408  |  |               uint64_t region_id,  | 
3409  |  |               uint64_t area_id);  | 
3410  |  |  | 
3411  |  | /*  | 
3412  |  |  * Return the number of bins in the specified histogram handle.  | 
3413  |  |  */  | 
3414  |  | int dm_histogram_get_nr_bins(const struct dm_histogram *dmh);  | 
3415  |  |  | 
3416  |  | /*  | 
3417  |  |  * Get the lower bound of the specified bin of the histogram for the  | 
3418  |  |  * area specified by region_id and area_id. The value is returned in  | 
3419  |  |  * nanoseconds.  | 
3420  |  |  */  | 
3421  |  | uint64_t dm_histogram_get_bin_lower(const struct dm_histogram *dmh, int bin);  | 
3422  |  |  | 
3423  |  | /*  | 
3424  |  |  * Get the upper bound of the specified bin of the histogram for the  | 
3425  |  |  * area specified by region_id and area_id. The value is returned in  | 
3426  |  |  * nanoseconds.  | 
3427  |  |  */  | 
3428  |  | uint64_t dm_histogram_get_bin_upper(const struct dm_histogram *dmh, int bin);  | 
3429  |  |  | 
3430  |  | /*  | 
3431  |  |  * Get the width of the specified bin of the histogram for the area  | 
3432  |  |  * specified by region_id and area_id. The width is equal to the bin  | 
3433  |  |  * upper bound minus the lower bound and yields the range of latency  | 
3434  |  |  * values covered by this bin. The value is returned in nanoseconds.  | 
3435  |  |  */  | 
3436  |  | uint64_t dm_histogram_get_bin_width(const struct dm_histogram *dmh, int bin);  | 
3437  |  |  | 
3438  |  | /*  | 
3439  |  |  * Get the value of the specified bin of the histogram for the area  | 
3440  |  |  * specified by region_id and area_id.  | 
3441  |  |  */  | 
3442  |  | uint64_t dm_histogram_get_bin_count(const struct dm_histogram *dmh, int bin);  | 
3443  |  |  | 
3444  |  | /*  | 
3445  |  |  * Get the percentage (relative frequency) of the specified bin of the  | 
3446  |  |  * histogram for the area specified by region_id and area_id.  | 
3447  |  |  */  | 
3448  |  | dm_percent_t dm_histogram_get_bin_percent(const struct dm_histogram *dmh,  | 
3449  |  |             int bin);  | 
3450  |  |  | 
3451  |  | /*  | 
3452  |  |  * Return the total observations (sum of bin counts) for the histogram  | 
3453  |  |  * of the area specified by region_id and area_id.  | 
3454  |  |  */  | 
3455  |  | uint64_t dm_histogram_get_sum(const struct dm_histogram *dmh);  | 
3456  |  |  | 
3457  |  | /*  | 
3458  |  |  * Histogram formatting flags.  | 
3459  |  |  */  | 
3460  |  | #define DM_HISTOGRAM_SUFFIX  0x1  | 
3461  |  | #define DM_HISTOGRAM_VALUES  0x2  | 
3462  |  | #define DM_HISTOGRAM_PERCENT 0X4  | 
3463  |  | #define DM_HISTOGRAM_BOUNDS_LOWER 0x10  | 
3464  |  | #define DM_HISTOGRAM_BOUNDS_UPPER 0x20  | 
3465  |  | #define DM_HISTOGRAM_BOUNDS_RANGE 0x30  | 
3466  |  |  | 
3467  |  | /*  | 
3468  |  |  * Return a string representation of the supplied histogram's values and  | 
3469  |  |  * bin boundaries.  | 
3470  |  |  *  | 
3471  |  |  * The bin argument selects the bin to format. If this argument is less  | 
3472  |  |  * than zero all bins will be included in the resulting string.  | 
3473  |  |  *  | 
3474  |  |  * width specifies a minimum width for the field in characters; if it is  | 
3475  |  |  * zero the width will be determined automatically based on the options  | 
3476  |  |  * selected for formatting. A value less than zero disables field width  | 
3477  |  |  * control: bin boundaries and values will be output with a minimum  | 
3478  |  |  * amount of whitespace.  | 
3479  |  |  *  | 
3480  |  |  * flags is a collection of flag arguments that control the string format:  | 
3481  |  |  *  | 
3482  |  |  * DM_HISTOGRAM_VALUES  - Include bin values in the string.  | 
3483  |  |  * DM_HISTOGRAM_SUFFIX  - Include time unit suffixes when printing bounds.  | 
3484  |  |  * DM_HISTOGRAM_PERCENT - Format bin values as a percentage.  | 
3485  |  |  *  | 
3486  |  |  * DM_HISTOGRAM_BOUNDS_LOWER - Include the lower bound of each bin.  | 
3487  |  |  * DM_HISTOGRAM_BOUNDS_UPPER - Include the upper bound of each bin.  | 
3488  |  |  * DM_HISTOGRAM_BOUNDS_RANGE - Show the span of each bin as "lo-up".  | 
3489  |  |  *  | 
3490  |  |  * The returned pointer does not need to be freed explicitly by the  | 
3491  |  |  * caller: it will become invalid following a subsequent  | 
3492  |  |  * dm_stats_list(), dm_stats_populate() or dm_stats_destroy() of the  | 
3493  |  |  * corresponding dm_stats handle.  | 
3494  |  |  */  | 
3495  |  | const char *dm_histogram_to_string(const struct dm_histogram *dmh, int bin,  | 
3496  |  |            int width, int flags);  | 
3497  |  |  | 
3498  |  | /*************************  | 
3499  |  |  * config file parse/print  | 
3500  |  |  *************************/  | 
3501  |  | typedef enum dm_config_value_type_e { | 
3502  |  |   DM_CFG_INT,  | 
3503  |  |   DM_CFG_FLOAT,  | 
3504  |  |   DM_CFG_STRING,  | 
3505  |  |   DM_CFG_EMPTY_ARRAY  | 
3506  |  | } dm_config_value_type_t;  | 
3507  |  |  | 
3508  |  | struct dm_config_value { | 
3509  |  |   dm_config_value_type_t type;  | 
3510  |  |  | 
3511  |  |   union dm_config_value_u { | 
3512  |  |     int64_t i;  | 
3513  |  |     float f;  | 
3514  |  |     double d;         /* Unused. */  | 
3515  |  |     const char *str;  | 
3516  |  |   } v;  | 
3517  |  |  | 
3518  |  |   struct dm_config_value *next; /* For arrays */  | 
3519  |  |   uint32_t format_flags;  | 
3520  |  | };  | 
3521  |  |  | 
3522  |  | struct dm_config_node { | 
3523  |  |   const char *key;  | 
3524  |  |   struct dm_config_node *parent, *sib, *child;  | 
3525  |  |   struct dm_config_value *v;  | 
3526  |  |   int id;  | 
3527  |  | };  | 
3528  |  |  | 
3529  |  | struct dm_config_tree { | 
3530  |  |   struct dm_config_node *root;  | 
3531  |  |   struct dm_config_tree *cascade;  | 
3532  |  |   struct dm_pool *mem;  | 
3533  |  |   void *custom;  | 
3534  |  | };  | 
3535  |  |  | 
3536  |  | struct dm_config_tree *dm_config_create(void);  | 
3537  |  | struct dm_config_tree *dm_config_from_string(const char *config_settings);  | 
3538  |  | int dm_config_parse(struct dm_config_tree *cft, const char *start, const char *end);  | 
3539  |  | int dm_config_parse_without_dup_node_check(struct dm_config_tree *cft, const char *start, const char *end);  | 
3540  |  | int dm_config_parse_only_section(struct dm_config_tree *cft, const char *start, const char *end, const char *section);  | 
3541  |  |  | 
3542  |  | void *dm_config_get_custom(struct dm_config_tree *cft);  | 
3543  |  | void dm_config_set_custom(struct dm_config_tree *cft, void *custom);  | 
3544  |  |  | 
3545  |  | /*  | 
3546  |  |  * When searching, first_cft is checked before second_cft.  | 
3547  |  |  */  | 
3548  |  | struct dm_config_tree *dm_config_insert_cascaded_tree(struct dm_config_tree *first_cft, struct dm_config_tree *second_cft);  | 
3549  |  |  | 
3550  |  | /*  | 
3551  |  |  * If there's a cascaded dm_config_tree, remove the top layer  | 
3552  |  |  * and return the layer below.  Otherwise return NULL.  | 
3553  |  |  */  | 
3554  |  | struct dm_config_tree *dm_config_remove_cascaded_tree(struct dm_config_tree *cft);  | 
3555  |  |  | 
3556  |  | /*  | 
3557  |  |  * Create a new, uncascaded config tree equivalent to the input cascade.  | 
3558  |  |  */  | 
3559  |  | struct dm_config_tree *dm_config_flatten(struct dm_config_tree *cft);  | 
3560  |  |  | 
3561  |  | void dm_config_destroy(struct dm_config_tree *cft);  | 
3562  |  |  | 
3563  |  | /* Simple output line by line. */  | 
3564  |  | typedef int (*dm_putline_fn)(const char *line, void *baton);  | 
3565  |  | /* More advanced output with config node reference. */  | 
3566  |  | typedef int (*dm_config_node_out_fn)(const struct dm_config_node *cn, const char *line, void *baton);  | 
3567  |  |  | 
3568  |  | /*  | 
3569  |  |  * Specification for advanced config node output.  | 
3570  |  |  */  | 
3571  |  | struct dm_config_node_out_spec { | 
3572  |  |   dm_config_node_out_fn prefix_fn; /* called before processing config node lines */  | 
3573  |  |   dm_config_node_out_fn line_fn; /* called for each config node line */  | 
3574  |  |   dm_config_node_out_fn suffix_fn; /* called after processing config node lines */  | 
3575  |  | };  | 
3576  |  |  | 
3577  |  | /* Write the node and any subsequent siblings it has. */  | 
3578  |  | int dm_config_write_node(const struct dm_config_node *cn, dm_putline_fn putline, void *baton);  | 
3579  |  | int dm_config_write_node_out(const struct dm_config_node *cn, const struct dm_config_node_out_spec *out_spec, void *baton);  | 
3580  |  |  | 
3581  |  | /* Write given node only without subsequent siblings. */  | 
3582  |  | int dm_config_write_one_node(const struct dm_config_node *cn, dm_putline_fn putline, void *baton);  | 
3583  |  | int dm_config_write_one_node_out(const struct dm_config_node *cn, const struct dm_config_node_out_spec *out_spec, void *baton);  | 
3584  |  |  | 
3585  |  | struct dm_config_node *dm_config_find_node(const struct dm_config_node *cn, const char *path);  | 
3586  |  | int dm_config_has_node(const struct dm_config_node *cn, const char *path);  | 
3587  |  | int dm_config_remove_node(struct dm_config_node *parent, struct dm_config_node *rem_node);  | 
3588  |  | const char *dm_config_find_str(const struct dm_config_node *cn, const char *path, const char *fail);  | 
3589  |  | const char *dm_config_find_str_allow_empty(const struct dm_config_node *cn, const char *path, const char *fail);  | 
3590  |  | int dm_config_find_int(const struct dm_config_node *cn, const char *path, int fail);  | 
3591  |  | int64_t dm_config_find_int64(const struct dm_config_node *cn, const char *path, int64_t fail);  | 
3592  |  | float dm_config_find_float(const struct dm_config_node *cn, const char *path, float fail);  | 
3593  |  |  | 
3594  |  | const struct dm_config_node *dm_config_tree_find_node(const struct dm_config_tree *cft, const char *path);  | 
3595  |  | const char *dm_config_tree_find_str(const struct dm_config_tree *cft, const char *path, const char *fail);  | 
3596  |  | const char *dm_config_tree_find_str_allow_empty(const struct dm_config_tree *cft, const char *path, const char *fail);  | 
3597  |  | int dm_config_tree_find_int(const struct dm_config_tree *cft, const char *path, int fail);  | 
3598  |  | int64_t dm_config_tree_find_int64(const struct dm_config_tree *cft, const char *path, int64_t fail);  | 
3599  |  | float dm_config_tree_find_float(const struct dm_config_tree *cft, const char *path, float fail);  | 
3600  |  | int dm_config_tree_find_bool(const struct dm_config_tree *cft, const char *path, int fail);  | 
3601  |  |  | 
3602  |  | /*  | 
3603  |  |  * Understands (0, ~0), (y, n), (yes, no), (on,  | 
3604  |  |  * off), (true, false).  | 
3605  |  |  */  | 
3606  |  | int dm_config_find_bool(const struct dm_config_node *cn, const char *path, int fail);  | 
3607  |  | int dm_config_value_is_bool(const struct dm_config_value *v);  | 
3608  |  |  | 
3609  |  | int dm_config_get_uint32(const struct dm_config_node *cn, const char *path, uint32_t *result);  | 
3610  |  | int dm_config_get_uint64(const struct dm_config_node *cn, const char *path, uint64_t *result);  | 
3611  |  | int dm_config_get_str(const struct dm_config_node *cn, const char *path, const char **result);  | 
3612  |  | int dm_config_get_list(const struct dm_config_node *cn, const char *path, const struct dm_config_value **result);  | 
3613  |  | int dm_config_get_section(const struct dm_config_node *cn, const char *path, const struct dm_config_node **result);  | 
3614  |  |  | 
3615  |  | unsigned dm_config_maybe_section(const char *str, unsigned len);  | 
3616  |  |  | 
3617  |  | const char *dm_config_parent_name(const struct dm_config_node *n);  | 
3618  |  |  | 
3619  |  | struct dm_config_node *dm_config_clone_node_with_mem(struct dm_pool *mem, const struct dm_config_node *cn, int siblings);  | 
3620  |  | struct dm_config_node *dm_config_create_node(struct dm_config_tree *cft, const char *key);  | 
3621  |  | struct dm_config_value *dm_config_create_value(struct dm_config_tree *cft);  | 
3622  |  | struct dm_config_node *dm_config_clone_node(struct dm_config_tree *cft, const struct dm_config_node *cn, int siblings);  | 
3623  |  |  | 
3624  |  | /*  | 
3625  |  |  * Common formatting flags applicable to all config node types (lower 16 bits).  | 
3626  |  |  */  | 
3627  |  | #define DM_CONFIG_VALUE_FMT_COMMON_ARRAY             0x00000001 /* value is array */  | 
3628  |  | #define DM_CONFIG_VALUE_FMT_COMMON_EXTRA_SPACES      0x00000002 /* add spaces in "key = value" pairs in contrast to "key=value" for better readability */  | 
3629  |  |  | 
3630  |  | /*  | 
3631  |  |  * Type-related config node formatting flags (higher 16 bits).  | 
3632  |  |  */  | 
3633  |  | /* int-related formatting flags */  | 
3634  |  | #define DM_CONFIG_VALUE_FMT_INT_OCTAL                0x00010000 /* print number in octal form */  | 
3635  |  |  | 
3636  |  | /* string-related formatting flags */  | 
3637  |  | #define DM_CONFIG_VALUE_FMT_STRING_NO_QUOTES         0x00010000 /* do not print quotes around string value */  | 
3638  |  |  | 
3639  |  | void dm_config_value_set_format_flags(struct dm_config_value *cv, uint32_t format_flags);  | 
3640  |  | uint32_t dm_config_value_get_format_flags(struct dm_config_value *cv);  | 
3641  |  |  | 
3642  |  | struct dm_pool *dm_config_memory(struct dm_config_tree *cft);  | 
3643  |  |  | 
3644  |  | /* Udev device directory. */  | 
3645  |  | #define DM_UDEV_DEV_DIR "/dev/"  | 
3646  |  |  | 
3647  |  | /* Cookie prefixes.  | 
3648  |  |  *  | 
3649  |  |  * The cookie value consists of a prefix (16 bits) and a base (16 bits).  | 
3650  |  |  * We can use the prefix to store the flags. These flags are sent to  | 
3651  |  |  * kernel within given dm task. When returned back to userspace in  | 
3652  |  |  * DM_COOKIE udev environment variable, we can control several aspects  | 
3653  |  |  * of udev rules we use by decoding the cookie prefix. When doing the  | 
3654  |  |  * notification, we replace the cookie prefix with DM_COOKIE_MAGIC,  | 
3655  |  |  * so we notify the right semaphore.  | 
3656  |  |  *  | 
3657  |  |  * It is still possible to use cookies for passing the flags to udev  | 
3658  |  |  * rules even when udev_sync is disabled. The base part of the cookie  | 
3659  |  |  * will be zero (there's no notification semaphore) and prefix will be  | 
3660  |  |  * set then. However, having udev_sync enabled is highly recommended.  | 
3661  |  |  */  | 
3662  | 0  | #define DM_COOKIE_MAGIC 0x0D4D  | 
3663  | 0  | #define DM_UDEV_FLAGS_MASK 0xFFFF0000  | 
3664  | 0  | #define DM_UDEV_FLAGS_SHIFT 16  | 
3665  |  |  | 
3666  |  | /*  | 
3667  |  |  * DM_UDEV_DISABLE_DM_RULES_FLAG is set in case we need to disable  | 
3668  |  |  * basic device-mapper udev rules that create symlinks in /dev/<DM_DIR>  | 
3669  |  |  * directory. However, we can't reliably prevent creating default  | 
3670  |  |  * nodes by udev (commonly /dev/dm-X, where X is a number).  | 
3671  |  |  */  | 
3672  | 0  | #define DM_UDEV_DISABLE_DM_RULES_FLAG 0x0001  | 
3673  |  | /*  | 
3674  |  |  * DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG is set in case we need to disable  | 
3675  |  |  * subsystem udev rules, but still we need the general DM udev rules to  | 
3676  |  |  * be applied (to create the nodes and symlinks under /dev and /dev/disk).  | 
3677  |  |  */  | 
3678  | 0  | #define DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG 0x0002  | 
3679  |  | /*  | 
3680  |  |  * DM_UDEV_DISABLE_DISK_RULES_FLAG is set in case we need to disable  | 
3681  |  |  * general DM rules that set symlinks in /dev/disk directory.  | 
3682  |  |  */  | 
3683  |  | #define DM_UDEV_DISABLE_DISK_RULES_FLAG 0x0004  | 
3684  |  | /*  | 
3685  |  |  * DM_UDEV_DISABLE_OTHER_RULES_FLAG is set in case we need to disable  | 
3686  |  |  * all the other rules that are not general device-mapper nor subsystem  | 
3687  |  |  * related (the rules belong to other software or packages). All foreign  | 
3688  |  |  * rules should check this flag directly and they should ignore further  | 
3689  |  |  * rule processing for such event.  | 
3690  |  |  */  | 
3691  |  | #define DM_UDEV_DISABLE_OTHER_RULES_FLAG 0x0008  | 
3692  |  | /*  | 
3693  |  |  * DM_UDEV_LOW_PRIORITY_FLAG is set in case we need to instruct the  | 
3694  |  |  * udev rules to give low priority to the device that is currently  | 
3695  |  |  * processed. For example, this provides a way to select which symlinks  | 
3696  |  |  * could be overwritten by high priority ones if their names are equal.  | 
3697  |  |  * Common situation is a name based on FS UUID while using origin and  | 
3698  |  |  * snapshot devices.  | 
3699  |  |  */  | 
3700  |  | #define DM_UDEV_LOW_PRIORITY_FLAG 0x0010  | 
3701  |  | /*  | 
3702  |  |  * DM_UDEV_DISABLE_LIBRARY_FALLBACK is set in case we need to disable  | 
3703  |  |  * libdevmapper's node management. We will rely on udev completely  | 
3704  |  |  * and there will be no fallback action provided by libdevmapper if  | 
3705  |  |  * udev does something improperly. Using the library fallback code has  | 
3706  |  |  * a consequence that you need to take into account: any device node  | 
3707  |  |  * or symlink created without udev is not recorded in udev database  | 
3708  |  |  * which other applications may read to get complete list of devices.  | 
3709  |  |  * For this reason, use of DM_UDEV_DISABLE_LIBRARY_FALLBACK is  | 
3710  |  |  * recommended on systems where udev is used. Keep library fallback  | 
3711  |  |  * enabled just for exceptional cases where you need to debug udev-related  | 
3712  |  |  * problems. If you hit such problems, please contact us through upstream  | 
3713  |  |  * LVM2 development mailing list (see also README file). This flag is  | 
3714  |  |  * currently not set by default in libdevmapper so you need to set it  | 
3715  |  |  * explicitly if you're sure that udev is behaving correctly on your  | 
3716  |  |  * setups.  | 
3717  |  |  */  | 
3718  | 0  | #define DM_UDEV_DISABLE_LIBRARY_FALLBACK 0x0020  | 
3719  |  | /*  | 
3720  |  |  * DM_UDEV_PRIMARY_SOURCE_FLAG is automatically appended by  | 
3721  |  |  * libdevmapper for all ioctls generating udev uevents. Once used in  | 
3722  |  |  * udev rules, we know if this is a real "primary sourced" event or not.  | 
3723  |  |  * We need to distinguish real events originated in libdevmapper from  | 
3724  |  |  * any spurious events to gather all missing information (e.g. events  | 
3725  |  |  * generated as a result of "udevadm trigger" command or as a result  | 
3726  |  |  * of the "watch" udev rule).  | 
3727  |  |  */  | 
3728  | 0  | #define DM_UDEV_PRIMARY_SOURCE_FLAG 0x0040  | 
3729  |  |  | 
3730  |  | /*  | 
3731  |  |  * Udev flags reserved for use by any device-mapper subsystem.  | 
3732  |  |  */  | 
3733  |  | #define DM_SUBSYSTEM_UDEV_FLAG0 0x0100  | 
3734  |  | #define DM_SUBSYSTEM_UDEV_FLAG1 0x0200  | 
3735  |  | #define DM_SUBSYSTEM_UDEV_FLAG2 0x0400  | 
3736  |  | #define DM_SUBSYSTEM_UDEV_FLAG3 0x0800  | 
3737  |  | #define DM_SUBSYSTEM_UDEV_FLAG4 0x1000  | 
3738  |  | #define DM_SUBSYSTEM_UDEV_FLAG5 0x2000  | 
3739  |  | #define DM_SUBSYSTEM_UDEV_FLAG6 0x4000  | 
3740  |  | #define DM_SUBSYSTEM_UDEV_FLAG7 0x8000  | 
3741  |  |  | 
3742  |  | int dm_cookie_supported(void);  | 
3743  |  |  | 
3744  |  | /*  | 
3745  |  |  * Udev synchronization functions.  | 
3746  |  |  */  | 
3747  |  | void dm_udev_set_sync_support(int sync_with_udev);  | 
3748  |  | int dm_udev_get_sync_support(void);  | 
3749  |  | void dm_udev_set_checking(int checking);  | 
3750  |  | int dm_udev_get_checking(void);  | 
3751  |  |  | 
3752  |  | /*  | 
3753  |  |  * Default value to get new auto generated cookie created  | 
3754  |  |  */  | 
3755  |  | #define DM_COOKIE_AUTO_CREATE 0  | 
3756  |  | int dm_udev_create_cookie(uint32_t *cookie);  | 
3757  |  | int dm_udev_complete(uint32_t cookie);  | 
3758  |  | int dm_udev_wait(uint32_t cookie);  | 
3759  |  |  | 
3760  |  | /*  | 
3761  |  |  * dm_dev_wait_immediate   | 
3762  |  |  * If *ready is 1 on return, the wait is complete.  | 
3763  |  |  * If *ready is 0 on return, the wait is incomplete and either  | 
3764  |  |  * this function or dm_udev_wait() must be called again.  | 
3765  |  |  * Returns 0 on error, when neither function should be called again.  | 
3766  |  |  */  | 
3767  |  | int dm_udev_wait_immediate(uint32_t cookie, int *ready);  | 
3768  |  |  | 
3769  | 0  | #define DM_DEV_DIR_UMASK 0022  | 
3770  | 0  | #define DM_CONTROL_NODE_UMASK 0177  | 
3771  |  |  | 
3772  |  | #ifdef __cplusplus  | 
3773  |  | }  | 
3774  |  | #endif  | 
3775  |  | #endif        /* LIB_DEVICE_MAPPER_H */  |