Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright (C) 2003,2004 Andi Kleen, SuSE Labs. |
2 | | |
3 | | libnuma is free software; you can redistribute it and/or |
4 | | modify it under the terms of the GNU Lesser General Public |
5 | | License as published by the Free Software Foundation; version |
6 | | 2.1. |
7 | | |
8 | | libnuma is distributed in the hope that it will be useful, |
9 | | but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
11 | | Lesser General Public License for more details. |
12 | | |
13 | | You should find a copy of v2.1 of the GNU Lesser General Public License |
14 | | somewhere on your Linux system; if not, write to the Free Software |
15 | | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ |
16 | | |
17 | | #ifndef _NUMA_H |
18 | | #define _NUMA_H 1 |
19 | | |
20 | | /* allow an application to test for the current programming interface: */ |
21 | | #define LIBNUMA_API_VERSION 2 |
22 | | |
23 | | /* Simple NUMA policy library */ |
24 | | |
25 | | #include <stddef.h> |
26 | | #include <string.h> |
27 | | #include <sys/types.h> |
28 | | #include <stdlib.h> |
29 | | |
30 | | #if defined(__x86_64__) || defined(__i386__) |
31 | 0 | #define NUMA_NUM_NODES 128 |
32 | | #else |
33 | | #define NUMA_NUM_NODES 2048 |
34 | | #endif |
35 | | |
36 | | #ifdef __cplusplus |
37 | | extern "C" { |
38 | | #endif |
39 | | |
40 | | typedef struct { |
41 | | unsigned long n[NUMA_NUM_NODES/(sizeof(unsigned long)*8)]; |
42 | | } nodemask_t; |
43 | | |
44 | | struct bitmask { |
45 | | unsigned long size; /* number of bits in the map */ |
46 | | unsigned long *maskp; |
47 | | }; |
48 | | |
49 | | /* operations on struct bitmask */ |
50 | | int numa_bitmask_isbitset(const struct bitmask *, unsigned int); |
51 | | struct bitmask *numa_bitmask_setall(struct bitmask *); |
52 | | struct bitmask *numa_bitmask_clearall(struct bitmask *); |
53 | | struct bitmask *numa_bitmask_setbit(struct bitmask *, unsigned int); |
54 | | struct bitmask *numa_bitmask_clearbit(struct bitmask *, unsigned int); |
55 | | unsigned int numa_bitmask_nbytes(struct bitmask *); |
56 | | unsigned int numa_bitmask_weight(const struct bitmask *); |
57 | | struct bitmask *numa_bitmask_alloc(unsigned int); |
58 | | void numa_bitmask_free(struct bitmask *); |
59 | | int numa_bitmask_equal(const struct bitmask *, const struct bitmask *); |
60 | | void copy_nodemask_to_bitmask(nodemask_t *, struct bitmask *); |
61 | | void copy_bitmask_to_nodemask(struct bitmask *, nodemask_t *); |
62 | | void copy_bitmask_to_bitmask(struct bitmask *, struct bitmask *); |
63 | | |
64 | | /* compatibility for codes that used them: */ |
65 | | |
66 | | static inline void nodemask_zero(nodemask_t *mask) |
67 | 0 | { |
68 | 0 | struct bitmask tmp; |
69 | 0 |
|
70 | 0 | tmp.maskp = (unsigned long *)mask; |
71 | 0 | tmp.size = sizeof(nodemask_t) * 8; |
72 | 0 | numa_bitmask_clearall(&tmp); |
73 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:nodemask_zero Unexecuted instantiation: libnuma.c:nodemask_zero Unexecuted instantiation: syscall.c:nodemask_zero Unexecuted instantiation: affinity.c:nodemask_zero Unexecuted instantiation: sysfs.c:nodemask_zero |
74 | | |
75 | | static inline void nodemask_zero_compat(nodemask_t *mask) |
76 | 0 | { |
77 | 0 | struct bitmask tmp; |
78 | 0 |
|
79 | 0 | tmp.maskp = (unsigned long *)mask; |
80 | 0 | tmp.size = sizeof(nodemask_t) * 8; |
81 | 0 | numa_bitmask_clearall(&tmp); |
82 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:nodemask_zero_compat Unexecuted instantiation: libnuma.c:nodemask_zero_compat Unexecuted instantiation: syscall.c:nodemask_zero_compat Unexecuted instantiation: affinity.c:nodemask_zero_compat Unexecuted instantiation: sysfs.c:nodemask_zero_compat |
83 | | |
84 | | static inline void nodemask_set_compat(nodemask_t *mask, int node) |
85 | 2 | { |
86 | 2 | mask->n[node / (8*sizeof(unsigned long))] |= |
87 | 2 | (1UL<<(node%(8*sizeof(unsigned long)))); |
88 | 2 | } Unexecuted instantiation: fuzz_parse_str.c:nodemask_set_compat libnuma.c:nodemask_set_compat Line | Count | Source | 85 | 2 | { | 86 | 2 | mask->n[node / (8*sizeof(unsigned long))] |= | 87 | 2 | (1UL<<(node%(8*sizeof(unsigned long)))); | 88 | 2 | } |
Unexecuted instantiation: syscall.c:nodemask_set_compat Unexecuted instantiation: affinity.c:nodemask_set_compat Unexecuted instantiation: sysfs.c:nodemask_set_compat |
89 | | |
90 | | static inline void nodemask_clr_compat(nodemask_t *mask, int node) |
91 | 0 | { |
92 | 0 | mask->n[node / (8*sizeof(unsigned long))] &= |
93 | 0 | ~(1UL<<(node%(8*sizeof(unsigned long)))); |
94 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:nodemask_clr_compat Unexecuted instantiation: libnuma.c:nodemask_clr_compat Unexecuted instantiation: syscall.c:nodemask_clr_compat Unexecuted instantiation: affinity.c:nodemask_clr_compat Unexecuted instantiation: sysfs.c:nodemask_clr_compat |
95 | | |
96 | | static inline int nodemask_isset_compat(const nodemask_t *mask, int node) |
97 | 0 | { |
98 | 0 | if ((unsigned)node >= NUMA_NUM_NODES) |
99 | 0 | return 0; |
100 | 0 | if (mask->n[node / (8*sizeof(unsigned long))] & |
101 | 0 | (1UL<<(node%(8*sizeof(unsigned long))))) |
102 | 0 | return 1; |
103 | 0 | return 0; |
104 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:nodemask_isset_compat Unexecuted instantiation: libnuma.c:nodemask_isset_compat Unexecuted instantiation: syscall.c:nodemask_isset_compat Unexecuted instantiation: affinity.c:nodemask_isset_compat Unexecuted instantiation: sysfs.c:nodemask_isset_compat |
105 | | |
106 | | static inline int nodemask_equal(const nodemask_t *a, const nodemask_t *b) |
107 | 0 | { |
108 | 0 | struct bitmask tmp_a, tmp_b; |
109 | 0 |
|
110 | 0 | tmp_a.maskp = (unsigned long *)a; |
111 | 0 | tmp_a.size = sizeof(nodemask_t) * 8; |
112 | 0 |
|
113 | 0 | tmp_b.maskp = (unsigned long *)b; |
114 | 0 | tmp_b.size = sizeof(nodemask_t) * 8; |
115 | 0 |
|
116 | 0 | return numa_bitmask_equal(&tmp_a, &tmp_b); |
117 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:nodemask_equal Unexecuted instantiation: libnuma.c:nodemask_equal Unexecuted instantiation: syscall.c:nodemask_equal Unexecuted instantiation: affinity.c:nodemask_equal Unexecuted instantiation: sysfs.c:nodemask_equal |
118 | | |
119 | | static inline int nodemask_equal_compat(const nodemask_t *a, const nodemask_t *b) |
120 | 0 | { |
121 | 0 | struct bitmask tmp_a, tmp_b; |
122 | 0 |
|
123 | 0 | tmp_a.maskp = (unsigned long *)a; |
124 | 0 | tmp_a.size = sizeof(nodemask_t) * 8; |
125 | 0 |
|
126 | 0 | tmp_b.maskp = (unsigned long *)b; |
127 | 0 | tmp_b.size = sizeof(nodemask_t) * 8; |
128 | 0 |
|
129 | 0 | return numa_bitmask_equal(&tmp_a, &tmp_b); |
130 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:nodemask_equal_compat Unexecuted instantiation: libnuma.c:nodemask_equal_compat Unexecuted instantiation: syscall.c:nodemask_equal_compat Unexecuted instantiation: affinity.c:nodemask_equal_compat Unexecuted instantiation: sysfs.c:nodemask_equal_compat |
131 | | |
/* NUMA support available. If this returns a negative value all other functions
   in this library are undefined. */
134 | | int numa_available(void); |
135 | | |
136 | | /* Basic NUMA state */ |
137 | | |
138 | | /* Get max available node */ |
139 | | int numa_max_node(void); |
140 | | int numa_max_possible_node(void); |
141 | | /* Return preferred node */ |
142 | | int numa_preferred(void); |
143 | | /* If the preferred node is unavailable, return an error; |
144 | | otherwise, return the preferred node */ |
145 | | int numa_preferred_err(void); |
146 | | |
147 | | /* Return node size and free memory */ |
148 | | long long numa_node_size64(int node, long long *freep); |
149 | | long numa_node_size(int node, long *freep); |
150 | | |
151 | | int numa_pagesize(void); |
152 | | |
153 | | /* Set with all nodes from which the calling process may allocate memory. |
154 | | Only valid after numa_available. */ |
155 | | extern struct bitmask *numa_all_nodes_ptr; |
156 | | |
157 | | /* Set with all nodes the kernel has exposed to userspace */ |
158 | | extern struct bitmask *numa_nodes_ptr; |
159 | | |
160 | | /* For source compatibility */ |
161 | | extern nodemask_t numa_all_nodes; |
162 | | |
163 | | /* Set with all cpus. */ |
164 | | extern struct bitmask *numa_all_cpus_ptr; |
165 | | |
166 | | /* Set with no nodes */ |
167 | | extern struct bitmask *numa_no_nodes_ptr; |
168 | | |
169 | | /* Source compatibility */ |
170 | | extern nodemask_t numa_no_nodes; |
171 | | |
172 | | /* Only run and allocate memory from a specific set of nodes. */ |
173 | | void numa_bind(struct bitmask *nodes); |
174 | | |
175 | | /* Set the NUMA node interleaving mask. 0 to turn off interleaving */ |
176 | | void numa_set_interleave_mask(struct bitmask *nodemask); |
177 | | |
178 | | /* Set the NUMA node weighted interleaving mask. 0 to turn off */ |
179 | | void numa_set_weighted_interleave_mask(struct bitmask *nodemask); |
180 | | |
181 | | /* Return the current interleaving mask */ |
182 | | struct bitmask *numa_get_interleave_mask(void); |
183 | | |
184 | | /* Return the current weighted interleaving mask */ |
185 | | struct bitmask *numa_get_weighted_interleave_mask(void); |
186 | | |
187 | | /* allocate a bitmask big enough for all nodes */ |
188 | | struct bitmask *numa_allocate_nodemask(void); |
189 | | |
/* Release a bitmask previously obtained from numa_allocate_nodemask(). */
static inline void numa_free_nodemask(struct bitmask *b)
{
        numa_bitmask_free(b);
}
194 | | |
195 | | /* Some node to preferably allocate memory from for task. */ |
196 | | void numa_set_preferred(int node); |
197 | | |
198 | | /* Returns whether or not the platform supports MPOL_PREFERRED_MANY */ |
199 | | int numa_has_preferred_many(void); |
200 | | |
201 | | /* Set of nodes to preferably allocate memory from for task. */ |
202 | | void numa_set_preferred_many(struct bitmask *bitmask); |
203 | | |
204 | | /* Return preferred nodes */ |
205 | | struct bitmask *numa_preferred_many(void); |
206 | | |
207 | | /* Set local memory allocation policy for task */ |
208 | | void numa_set_localalloc(void); |
209 | | |
210 | | /* Only allocate memory from the nodes set in mask. 0 to turn off */ |
211 | | void numa_set_membind(struct bitmask *nodemask); |
212 | | |
213 | | /* Only allocate memory from the nodes set in mask. Optimize page |
214 | | placement with Linux kernel NUMA balancing if possible. 0 to turn off */ |
215 | | void numa_set_membind_balancing(struct bitmask *bmp); |
216 | | |
217 | | /* Return current membind */ |
218 | | struct bitmask *numa_get_membind(void); |
219 | | |
220 | | /* Return allowed memories [nodes] */ |
221 | | struct bitmask *numa_get_mems_allowed(void); |
222 | | |
223 | | int numa_get_interleave_node(void); |
224 | | |
225 | | /* NUMA memory allocation. These functions always round to page size |
226 | | and are relatively slow. */ |
227 | | |
228 | | /* Alloc memory page interleaved on nodes in mask */ |
229 | | void *numa_alloc_interleaved_subset(size_t size, struct bitmask *nodemask); |
230 | | /* Alloc memory page interleaved on all nodes. */ |
231 | | void *numa_alloc_interleaved(size_t size); |
232 | | /* Alloc memory page interleaved on nodes in mask using weights */ |
233 | | void *numa_alloc_weighted_interleaved_subset(size_t size, struct bitmask *nodemask); |
234 | | /* Alloc memory page interleaved on all nodes using weights */ |
235 | | void *numa_alloc_weighted_interleaved(size_t size); |
236 | | |
237 | | /* Alloc memory located on node */ |
238 | | void *numa_alloc_onnode(size_t size, int node); |
239 | | /* Alloc memory on local node */ |
240 | | void *numa_alloc_local(size_t size); |
241 | | /* Allocation with current policy */ |
242 | | void *numa_alloc(size_t size); |
243 | | /* Change the size of a memory area preserving the memory policy */ |
244 | | void *numa_realloc(void *old_addr, size_t old_size, size_t new_size); |
245 | | /* Free memory allocated by the functions above */ |
246 | | void numa_free(void *mem, size_t size); |
247 | | |
248 | | /* Low level functions, primarily for shared memory. All memory |
249 | | processed by these must not be touched yet */ |
250 | | |
251 | | /* Interleave a memory area. */ |
252 | | void numa_interleave_memory(void *mem, size_t size, struct bitmask *mask); |
253 | | /* Interleave a memory area using weights. */ |
254 | | void numa_weighted_interleave_memory(void *mem, size_t size, struct bitmask *mask); |
255 | | |
256 | | /* Allocate a memory area on a specific node. */ |
257 | | void numa_tonode_memory(void *start, size_t size, int node); |
258 | | |
259 | | /* Allocate memory on a mask of nodes. */ |
260 | | void numa_tonodemask_memory(void *mem, size_t size, struct bitmask *mask); |
261 | | |
262 | | /* Allocate a memory area on the current node. */ |
263 | | void numa_setlocal_memory(void *start, size_t size); |
264 | | |
265 | | /* Allocate memory area with current memory policy */ |
266 | | void numa_police_memory(void *start, size_t size); |
267 | | |
268 | | /* Run current task only on nodes in mask */ |
269 | | int numa_run_on_node_mask(struct bitmask *mask); |
270 | | /* Run current task on nodes in mask without any cpuset awareness */ |
271 | | int numa_run_on_node_mask_all(struct bitmask *mask); |
272 | | /* Run current task only on node */ |
273 | | int numa_run_on_node(int node); |
274 | | /* Return current mask of nodes the task can run on */ |
275 | | struct bitmask * numa_get_run_node_mask(void); |
276 | | |
277 | | /* When strict fail allocation when memory cannot be allocated in target node(s). */ |
278 | | void numa_set_bind_policy(int strict); |
279 | | |
280 | | /* Fail when existing memory has incompatible policy */ |
281 | | void numa_set_strict(int flag); |
282 | | |
283 | | /* maximum nodes (size of kernel nodemask_t) */ |
284 | | int numa_num_possible_nodes(void); |
285 | | |
286 | | /* maximum cpus (size of kernel cpumask_t) */ |
287 | | int numa_num_possible_cpus(void); |
288 | | |
289 | | /* nodes in the system */ |
290 | | int numa_num_configured_nodes(void); |
291 | | |
292 | | /* maximum cpus */ |
293 | | int numa_num_configured_cpus(void); |
294 | | |
295 | | /* maximum cpus allowed to current task */ |
296 | | int numa_num_task_cpus(void); |
297 | | int numa_num_thread_cpus(void); /* backward compatibility */ |
298 | | |
299 | | /* maximum nodes allowed to current task */ |
300 | | int numa_num_task_nodes(void); |
301 | | int numa_num_thread_nodes(void); /* backward compatibility */ |
302 | | |
303 | | /* allocate a bitmask the size of the kernel cpumask_t */ |
304 | | struct bitmask *numa_allocate_cpumask(void); |
305 | | |
/* Release a bitmask previously obtained from numa_allocate_cpumask(). */
static inline void numa_free_cpumask(struct bitmask *b)
{
        numa_bitmask_free(b);
}
310 | | |
311 | | /* Convert node to CPU mask. -1/errno on failure, otherwise 0. */ |
312 | | int numa_node_to_cpus(int, struct bitmask *); |
313 | | |
314 | | void numa_node_to_cpu_update(void); |
315 | | |
316 | | /* report the node of the specified cpu. -1/errno on invalid cpu. */ |
317 | | int numa_node_of_cpu(int cpu); |
318 | | |
319 | | /* Report distance of node1 from node2. 0 on error.*/ |
320 | | int numa_distance(int node1, int node2); |
321 | | |
322 | | /* Error handling. */ |
/* This is an internal function in libnuma that can be overwritten by a user
   program. Default is to print an error to stderr and exit if numa_exit_on_error
   is true. */
326 | | void numa_error(char *where); |
327 | | |
328 | | /* When true exit the program when a NUMA system call (except numa_available) |
329 | | fails */ |
330 | | extern int numa_exit_on_error; |
331 | | |
332 | | /* When true exit when libnuma would print a warning. */ |
333 | | extern int numa_exit_on_warn; |
334 | | |
335 | | /* When true make numa_alloc functions fail when policy could not be set. |
336 | | Default false. */ |
337 | | extern int numa_fail_alloc_on_error; |
338 | | |
339 | | /* Warning function. Can also be overwritten. Default is to print on stderr |
340 | | once. */ |
341 | | void numa_warn(int num, char *fmt, ...); |
342 | | |
343 | | /* When true exit the program on a numa_warn() call */ |
344 | | extern int numa_exit_on_warn; |
345 | | |
346 | | int numa_migrate_pages(int pid, struct bitmask *from, struct bitmask *to); |
347 | | |
348 | | int numa_move_pages(int pid, unsigned long count, void **pages, |
349 | | const int *nodes, int *status, int flags); |
350 | | |
351 | | int numa_sched_getaffinity(pid_t, struct bitmask *); |
352 | | int numa_sched_setaffinity(pid_t, struct bitmask *); |
353 | | |
354 | | /* Convert an ascii list of nodes to a bitmask */ |
355 | | struct bitmask *numa_parse_nodestring(const char *); |
356 | | |
357 | | /* Convert an ascii list of nodes to a bitmask without current nodeset |
358 | | * dependency */ |
359 | | struct bitmask *numa_parse_nodestring_all(const char *); |
360 | | |
361 | | /* Convert an ascii list of cpu to a bitmask */ |
362 | | struct bitmask *numa_parse_cpustring(const char *); |
363 | | |
364 | | /* Convert an ascii list of cpu to a bitmask without current taskset |
365 | | * dependency */ |
366 | | struct bitmask *numa_parse_cpustring_all(const char *); |
367 | | |
368 | | /* Returns whether or not the system supports setting home_node for mbind |
369 | | * and preferred_many. |
370 | | */ |
371 | | int numa_has_home_node(void); |
372 | | |
373 | | /* set the home node for a VMA policy present in the task's address range */ |
374 | | int numa_set_mempolicy_home_node(void *start, unsigned long len, |
375 | | int home_node, int flags); |
376 | | |
377 | | /* |
378 | | * The following functions are for source code compatibility |
379 | | * with releases prior to version 2. |
380 | | * Such codes should be compiled with NUMA_VERSION1_COMPATIBILITY defined. |
381 | | */ |
382 | | |
383 | | static inline void numa_set_interleave_mask_compat(nodemask_t *nodemask) |
384 | 0 | { |
385 | 0 | struct bitmask tmp; |
386 | 0 |
|
387 | 0 | tmp.maskp = (unsigned long *)nodemask; |
388 | 0 | tmp.size = sizeof(nodemask_t) * 8; |
389 | 0 | numa_set_interleave_mask(&tmp); |
390 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:numa_set_interleave_mask_compat Unexecuted instantiation: libnuma.c:numa_set_interleave_mask_compat Unexecuted instantiation: syscall.c:numa_set_interleave_mask_compat Unexecuted instantiation: affinity.c:numa_set_interleave_mask_compat Unexecuted instantiation: sysfs.c:numa_set_interleave_mask_compat |
391 | | |
392 | | static inline nodemask_t numa_get_interleave_mask_compat(void) |
393 | 0 | { |
394 | 0 | struct bitmask *tp; |
395 | 0 | nodemask_t mask; |
396 | 0 |
|
397 | 0 | tp = numa_get_interleave_mask(); |
398 | 0 | copy_bitmask_to_nodemask(tp, &mask); |
399 | 0 | numa_bitmask_free(tp); |
400 | 0 | return mask; |
401 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:numa_get_interleave_mask_compat Unexecuted instantiation: libnuma.c:numa_get_interleave_mask_compat Unexecuted instantiation: syscall.c:numa_get_interleave_mask_compat Unexecuted instantiation: affinity.c:numa_get_interleave_mask_compat Unexecuted instantiation: sysfs.c:numa_get_interleave_mask_compat |
402 | | |
403 | | static inline void numa_bind_compat(nodemask_t *mask) |
404 | 0 | { |
405 | 0 | struct bitmask *tp; |
406 | 0 |
|
407 | 0 | tp = numa_allocate_nodemask(); |
408 | 0 | copy_nodemask_to_bitmask(mask, tp); |
409 | 0 | numa_bind(tp); |
410 | 0 | numa_bitmask_free(tp); |
411 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:numa_bind_compat Unexecuted instantiation: libnuma.c:numa_bind_compat Unexecuted instantiation: syscall.c:numa_bind_compat Unexecuted instantiation: affinity.c:numa_bind_compat Unexecuted instantiation: sysfs.c:numa_bind_compat |
412 | | |
413 | | static inline void numa_set_membind_compat(nodemask_t *mask) |
414 | 0 | { |
415 | 0 | struct bitmask tmp; |
416 | 0 |
|
417 | 0 | tmp.maskp = (unsigned long *)mask; |
418 | 0 | tmp.size = sizeof(nodemask_t) * 8; |
419 | 0 | numa_set_membind(&tmp); |
420 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:numa_set_membind_compat Unexecuted instantiation: libnuma.c:numa_set_membind_compat Unexecuted instantiation: syscall.c:numa_set_membind_compat Unexecuted instantiation: affinity.c:numa_set_membind_compat Unexecuted instantiation: sysfs.c:numa_set_membind_compat |
421 | | |
422 | | static inline nodemask_t numa_get_membind_compat(void) |
423 | 0 | { |
424 | 0 | struct bitmask *tp; |
425 | 0 | nodemask_t mask; |
426 | 0 |
|
427 | 0 | tp = numa_get_membind(); |
428 | 0 | copy_bitmask_to_nodemask(tp, &mask); |
429 | 0 | numa_bitmask_free(tp); |
430 | 0 | return mask; |
431 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:numa_get_membind_compat Unexecuted instantiation: libnuma.c:numa_get_membind_compat Unexecuted instantiation: syscall.c:numa_get_membind_compat Unexecuted instantiation: affinity.c:numa_get_membind_compat Unexecuted instantiation: sysfs.c:numa_get_membind_compat |
432 | | |
433 | | static inline void *numa_alloc_interleaved_subset_compat(size_t size, |
434 | | const nodemask_t *mask) |
435 | 0 | { |
436 | 0 | struct bitmask tmp; |
437 | 0 |
|
438 | 0 | tmp.maskp = (unsigned long *)mask; |
439 | 0 | tmp.size = sizeof(nodemask_t) * 8; |
440 | 0 | return numa_alloc_interleaved_subset(size, &tmp); |
441 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:numa_alloc_interleaved_subset_compat Unexecuted instantiation: libnuma.c:numa_alloc_interleaved_subset_compat Unexecuted instantiation: syscall.c:numa_alloc_interleaved_subset_compat Unexecuted instantiation: affinity.c:numa_alloc_interleaved_subset_compat Unexecuted instantiation: sysfs.c:numa_alloc_interleaved_subset_compat |
442 | | |
443 | | static inline int numa_run_on_node_mask_compat(const nodemask_t *mask) |
444 | 0 | { |
445 | 0 | struct bitmask tmp; |
446 | 0 |
|
447 | 0 | tmp.maskp = (unsigned long *)mask; |
448 | 0 | tmp.size = sizeof(nodemask_t) * 8; |
449 | 0 | return numa_run_on_node_mask(&tmp); |
450 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:numa_run_on_node_mask_compat Unexecuted instantiation: libnuma.c:numa_run_on_node_mask_compat Unexecuted instantiation: syscall.c:numa_run_on_node_mask_compat Unexecuted instantiation: affinity.c:numa_run_on_node_mask_compat Unexecuted instantiation: sysfs.c:numa_run_on_node_mask_compat |
451 | | |
452 | | static inline nodemask_t numa_get_run_node_mask_compat(void) |
453 | 0 | { |
454 | 0 | struct bitmask *tp; |
455 | 0 | nodemask_t mask; |
456 | 0 |
|
457 | 0 | tp = numa_get_run_node_mask(); |
458 | 0 | copy_bitmask_to_nodemask(tp, &mask); |
459 | 0 | numa_bitmask_free(tp); |
460 | 0 | return mask; |
461 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:numa_get_run_node_mask_compat Unexecuted instantiation: libnuma.c:numa_get_run_node_mask_compat Unexecuted instantiation: syscall.c:numa_get_run_node_mask_compat Unexecuted instantiation: affinity.c:numa_get_run_node_mask_compat Unexecuted instantiation: sysfs.c:numa_get_run_node_mask_compat |
462 | | |
463 | | static inline void numa_interleave_memory_compat(void *mem, size_t size, |
464 | | const nodemask_t *mask) |
465 | 0 | { |
466 | 0 | struct bitmask tmp; |
467 | 0 |
|
468 | 0 | tmp.maskp = (unsigned long *)mask; |
469 | 0 | tmp.size = sizeof(nodemask_t) * 8; |
470 | 0 | numa_interleave_memory(mem, size, &tmp); |
471 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:numa_interleave_memory_compat Unexecuted instantiation: libnuma.c:numa_interleave_memory_compat Unexecuted instantiation: syscall.c:numa_interleave_memory_compat Unexecuted instantiation: affinity.c:numa_interleave_memory_compat Unexecuted instantiation: sysfs.c:numa_interleave_memory_compat |
472 | | |
473 | | static inline void numa_tonodemask_memory_compat(void *mem, size_t size, |
474 | | const nodemask_t *mask) |
475 | 0 | { |
476 | 0 | struct bitmask tmp; |
477 | 0 |
|
478 | 0 | tmp.maskp = (unsigned long *)mask; |
479 | 0 | tmp.size = sizeof(nodemask_t) * 8; |
480 | 0 | numa_tonodemask_memory(mem, size, &tmp); |
481 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:numa_tonodemask_memory_compat Unexecuted instantiation: libnuma.c:numa_tonodemask_memory_compat Unexecuted instantiation: syscall.c:numa_tonodemask_memory_compat Unexecuted instantiation: affinity.c:numa_tonodemask_memory_compat Unexecuted instantiation: sysfs.c:numa_tonodemask_memory_compat |
482 | | |
483 | | static inline int numa_sched_getaffinity_compat(pid_t pid, unsigned len, |
484 | | unsigned long *mask) |
485 | 0 | { |
486 | 0 | struct bitmask tmp; |
487 | 0 |
|
488 | 0 | tmp.maskp = (unsigned long *)mask; |
489 | 0 | tmp.size = len * 8; |
490 | 0 | return numa_sched_getaffinity(pid, &tmp); |
491 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:numa_sched_getaffinity_compat Unexecuted instantiation: libnuma.c:numa_sched_getaffinity_compat Unexecuted instantiation: syscall.c:numa_sched_getaffinity_compat Unexecuted instantiation: affinity.c:numa_sched_getaffinity_compat Unexecuted instantiation: sysfs.c:numa_sched_getaffinity_compat |
492 | | |
493 | | static inline int numa_sched_setaffinity_compat(pid_t pid, unsigned len, |
494 | | unsigned long *mask) |
495 | 0 | { |
496 | 0 | struct bitmask tmp; |
497 | 0 |
|
498 | 0 | tmp.maskp = (unsigned long *)mask; |
499 | 0 | tmp.size = len * 8; |
500 | 0 | return numa_sched_setaffinity(pid, &tmp); |
501 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:numa_sched_setaffinity_compat Unexecuted instantiation: libnuma.c:numa_sched_setaffinity_compat Unexecuted instantiation: syscall.c:numa_sched_setaffinity_compat Unexecuted instantiation: affinity.c:numa_sched_setaffinity_compat Unexecuted instantiation: sysfs.c:numa_sched_setaffinity_compat |
502 | | |
503 | | static inline int numa_node_to_cpus_compat(int node, unsigned long *buffer, |
504 | | int buffer_len) |
505 | 0 | { |
506 | 0 | struct bitmask tmp; |
507 | 0 |
|
508 | 0 | tmp.maskp = (unsigned long *)buffer; |
509 | 0 | tmp.size = buffer_len * 8; |
510 | 0 | return numa_node_to_cpus(node, &tmp); |
511 | 0 | } Unexecuted instantiation: fuzz_parse_str.c:numa_node_to_cpus_compat Unexecuted instantiation: libnuma.c:numa_node_to_cpus_compat Unexecuted instantiation: syscall.c:numa_node_to_cpus_compat Unexecuted instantiation: affinity.c:numa_node_to_cpus_compat Unexecuted instantiation: sysfs.c:numa_node_to_cpus_compat |
512 | | |
513 | | /* end of version 1 compatibility functions */ |
514 | | |
515 | | /* |
516 | | * To compile an application that uses libnuma version 1: |
517 | | * add -DNUMA_VERSION1_COMPATIBILITY to your Makefile's CFLAGS |
518 | | */ |
519 | | #ifdef NUMA_VERSION1_COMPATIBILITY |
520 | | #include <numacompat1.h> |
521 | | #endif |
522 | | |
523 | | #ifdef __cplusplus |
524 | | } |
525 | | #endif |
526 | | |
527 | | #endif |