/src/hwloc/include/private/private.h
Line | Count | Source |
1 | | /* |
2 | | * Copyright © 2009 CNRS |
3 | | * Copyright © 2009-2025 Inria. All rights reserved. |
4 | | * Copyright © 2009-2012, 2020 Université Bordeaux |
5 | | * Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved. |
6 | | * |
7 | | * See COPYING in top-level directory. |
8 | | */ |
9 | | |
10 | | /* Internal types and helpers. */ |
11 | | |
12 | | |
13 | | #ifdef HWLOC_INSIDE_PLUGIN |
14 | | /* |
15 | | * these declarations are internal only; they are not available to plugins |
16 | | * (many functions below are internal static symbols). |
17 | | */ |
18 | | #error This file should not be used in plugins |
19 | | #endif |
20 | | |
21 | | |
22 | | #ifndef HWLOC_PRIVATE_H |
23 | | #define HWLOC_PRIVATE_H |
24 | | |
25 | | #include "private/autogen/config.h" |
26 | | #include "hwloc.h" |
27 | | #include "hwloc/bitmap.h" |
28 | | #include "private/components.h" |
29 | | #include "private/misc.h" |
30 | | |
31 | | #include <sys/types.h> |
32 | | #ifdef HAVE_UNISTD_H |
33 | | #include <unistd.h> |
34 | | #endif |
35 | | #ifdef HAVE_STDINT_H |
36 | | #include <stdint.h> |
37 | | #endif |
38 | | #ifdef HAVE_SYS_UTSNAME_H |
39 | | #include <sys/utsname.h> |
40 | | #endif |
41 | | #include <string.h> |
42 | | |
43 | | #define HWLOC_TOPOLOGY_ABI 0x30000 /* version of the layout of struct topology */ |
44 | | |
45 | | struct hwloc_internal_location_s { |
46 | | enum hwloc_location_type_e type; |
47 | | union { |
48 | | struct { |
49 | | hwloc_obj_t obj; /* cached between refreshes */ |
50 | | uint64_t gp_index; |
51 | | hwloc_obj_type_t type; |
52 | | } object; /* if type == HWLOC_LOCATION_TYPE_OBJECT */ |
53 | | hwloc_cpuset_t cpuset; /* if type == HWLOC_LOCATION_TYPE_CPUSET */ |
54 | | } location; |
55 | | }; |
56 | | |
57 | | /***************************************************** |
58 | | * WARNING: |
59 | | * changes below in this structure (and its children) |
60 | | * should cause a bump of HWLOC_TOPOLOGY_ABI. |
61 | | *****************************************************/ |
62 | | |
63 | | enum hwloc_topology_state_e { |
64 | | HWLOC_TOPOLOGY_STATE_IS_THISSYSTEM = (1UL<<0), |
65 | | HWLOC_TOPOLOGY_STATE_IS_LOADED = (1UL<<1), |
66 | | HWLOC_TOPOLOGY_STATE_IS_LOADING = (1UL<<2), |
67 | | HWLOC_TOPOLOGY_STATE_IS_INIT = (1UL<<3) |
68 | | }; |
69 | | |
70 | | struct hwloc_topology { |
71 | | unsigned topology_abi; |
72 | | |
73 | | unsigned nb_levels; /* Number of horizontal levels */ |
74 | | unsigned nb_levels_allocated; /* Number of levels allocated and zeroed in level_nbobjects and levels below */ |
75 | | unsigned *level_nbobjects; /* Number of objects on each horizontal level */ |
76 | | struct hwloc_obj ***levels; /* Direct access to levels, levels[l = 0 .. nb_levels-1][0 .. level_nbobjects[l]-1] */ |
77 | | unsigned long flags; |
78 | | int type_depth[HWLOC_OBJ_TYPE_MAX]; |
79 | | enum hwloc_type_filter_e type_filter[HWLOC_OBJ_TYPE_MAX]; |
80 | | unsigned long state; /* OR'ed enum hwloc_topology_state_e */ |
81 | | unsigned long modified; /* >0 if objects were added/removed recently, which means a reconnect is needed. |
82 | | * Kept separate from "state" in case we want to store bits, numbers, etc. |
83 | | */ |
84 | | hwloc_pid_t pid; /* Process ID the topology is viewed from, 0 for self */ |
85 | | void *userdata; |
86 | | uint64_t next_gp_index; |
87 | | |
88 | | void *adopted_shmem_addr; |
89 | | size_t adopted_shmem_length; |
90 | | |
91 | | #define HWLOC_NR_SLEVELS 6 |
92 | | #define HWLOC_SLEVEL_NUMANODE 0 |
93 | | #define HWLOC_SLEVEL_BRIDGE 1 |
94 | | #define HWLOC_SLEVEL_PCIDEV 2 |
95 | | #define HWLOC_SLEVEL_OSDEV 3 |
96 | | #define HWLOC_SLEVEL_MISC 4 |
97 | | #define HWLOC_SLEVEL_MEMCACHE 5 |
98 | | /* order must match the negative depths; this is asserted in setup_defaults() */ |
99 | | #define HWLOC_SLEVEL_FROM_DEPTH(x) (HWLOC_TYPE_DEPTH_NUMANODE-(x)) |
100 | | #define HWLOC_SLEVEL_TO_DEPTH(x) (HWLOC_TYPE_DEPTH_NUMANODE-(x)) |
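/* Worked example of the mapping above (illustration only; it assumes the
 * HWLOC_TYPE_DEPTH_* values from hwloc.h, e.g. HWLOC_TYPE_DEPTH_NUMANODE == -3
 * and HWLOC_TYPE_DEPTH_BRIDGE == -4):
 *   HWLOC_SLEVEL_FROM_DEPTH(HWLOC_TYPE_DEPTH_BRIDGE) = (-3) - (-4) = 1  = HWLOC_SLEVEL_BRIDGE
 *   HWLOC_SLEVEL_TO_DEPTH(HWLOC_SLEVEL_OSDEV)        = (-3) - 3    = -6 = HWLOC_TYPE_DEPTH_OS_DEVICE
 */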
101 | | struct hwloc_special_level_s { |
102 | | unsigned nbobjs; |
103 | | struct hwloc_obj **objs; |
104 | | struct hwloc_obj *first, *last; /* Temporarily used while listing objects before building the objs array */ |
105 | | } slevels[HWLOC_NR_SLEVELS]; |
106 | | |
107 | | hwloc_bitmap_t allowed_cpuset; |
108 | | hwloc_bitmap_t allowed_nodeset; |
109 | | |
110 | | struct hwloc_binding_hooks { |
111 | | /* These are actually rather OS hooks since some of them are not about binding */ |
112 | | int (*set_thisproc_cpubind)(hwloc_topology_t topology, hwloc_const_cpuset_t set, int flags); |
113 | | int (*get_thisproc_cpubind)(hwloc_topology_t topology, hwloc_cpuset_t set, int flags); |
114 | | int (*set_thisthread_cpubind)(hwloc_topology_t topology, hwloc_const_cpuset_t set, int flags); |
115 | | int (*get_thisthread_cpubind)(hwloc_topology_t topology, hwloc_cpuset_t set, int flags); |
116 | | int (*set_proc_cpubind)(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_cpuset_t set, int flags); |
117 | | int (*get_proc_cpubind)(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_cpuset_t set, int flags); |
118 | | #ifdef hwloc_thread_t |
119 | | int (*set_thread_cpubind)(hwloc_topology_t topology, hwloc_thread_t tid, hwloc_const_cpuset_t set, int flags); |
120 | | int (*get_thread_cpubind)(hwloc_topology_t topology, hwloc_thread_t tid, hwloc_cpuset_t set, int flags); |
121 | | #endif |
122 | | |
123 | | int (*get_thisproc_last_cpu_location)(hwloc_topology_t topology, hwloc_cpuset_t set, int flags); |
124 | | int (*get_thisthread_last_cpu_location)(hwloc_topology_t topology, hwloc_cpuset_t set, int flags); |
125 | | int (*get_proc_last_cpu_location)(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_cpuset_t set, int flags); |
126 | | |
127 | | int (*set_thisproc_membind)(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags); |
128 | | int (*get_thisproc_membind)(hwloc_topology_t topology, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags); |
129 | | int (*set_thisthread_membind)(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags); |
130 | | int (*get_thisthread_membind)(hwloc_topology_t topology, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags); |
131 | | int (*set_proc_membind)(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags); |
132 | | int (*get_proc_membind)(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags); |
133 | | int (*set_area_membind)(hwloc_topology_t topology, const void *addr, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags); |
134 | | int (*get_area_membind)(hwloc_topology_t topology, const void *addr, size_t len, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags); |
135 | | int (*get_area_memlocation)(hwloc_topology_t topology, const void *addr, size_t len, hwloc_nodeset_t nodeset, int flags); |
136 | | /* This has to return the same kind of pointer as alloc_membind, so that free_membind can be used on it */ |
137 | | void *(*alloc)(hwloc_topology_t topology, size_t len); |
138 | | /* alloc_membind has to always succeed if !(flags & HWLOC_MEMBIND_STRICT). |
139 | | * see hwloc_alloc_or_fail which is convenient for that. */ |
140 | | void *(*alloc_membind)(hwloc_topology_t topology, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags); |
141 | | int (*free_membind)(hwloc_topology_t topology, void *addr, size_t len); |
142 | | |
143 | | int (*get_allowed_resources)(hwloc_topology_t topology); |
144 | | } binding_hooks; |
145 | | |
146 | | struct hwloc_topology_support support; |
147 | | |
148 | | struct hwloc_infos_s infos; |
149 | | |
150 | | void (*userdata_export_cb)(void *reserved, struct hwloc_topology *topology, struct hwloc_obj *obj); |
151 | | void (*userdata_import_cb)(struct hwloc_topology *topology, struct hwloc_obj *obj, const char *name, const void *buffer, size_t length); |
152 | | int userdata_not_decoded; |
153 | | |
154 | | struct hwloc_internal_distances_s { |
155 | | char *name; /* FIXME: needs an API to set it from user */ |
156 | | |
157 | | unsigned id; /* to match the container id field of public distances structure |
158 | | * not exported to XML, regenerated during _add() |
159 | | */ |
160 | | |
161 | | /* if all objects have the same type, different_types is NULL and unique_type is valid. |
162 | | * otherwise unique_type is HWLOC_OBJ_TYPE_NONE and different_types contains individual objects types. |
163 | | */ |
164 | | hwloc_obj_type_t unique_type; |
165 | | hwloc_obj_type_t *different_types; |
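/* Worked example (illustration only): a matrix covering one PU and one OS device
 * would have unique_type == HWLOC_OBJ_TYPE_NONE and
 * different_types = { HWLOC_OBJ_PU, HWLOC_OBJ_OS_DEVICE } (one entry per object).
 */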
166 | | |
167 | | /* add union hwloc_obj_attr_u if we ever support groups */ |
168 | | unsigned nbobjs; |
169 | | uint64_t *indexes; /* array of OS or GP indexes before we can convert them into objs. |
170 | | * OS indexes for distances covering only PUs or only NUMAnodes. |
171 | | */ |
172 | | #define HWLOC_DIST_TYPE_USE_OS_INDEX(_type) ((_type) == HWLOC_OBJ_PU || (_type) == HWLOC_OBJ_NUMANODE) |
173 | | uint64_t *values; /* distance matrices, ordered according to the above indexes/objs array. |
174 | | * the distance from i to j is stored in slot i*nbobjs+j. |
175 | | */ |
176 | | unsigned long kind; |
177 | | |
178 | | #define HWLOC_INTERNAL_DIST_FLAG_OBJS_VALID (1U<<0) /* if the objs array below is valid */ |
179 | | #define HWLOC_INTERNAL_DIST_FLAG_NOT_COMMITTED (1U<<1) /* if the distances structure isn't in the list yet */ |
180 | | unsigned iflags; |
181 | | |
182 | | /* objects are currently stored in physical_index order */ |
183 | | hwloc_obj_t *objs; /* array of objects */ |
184 | | |
185 | | struct hwloc_internal_distances_s *prev, *next; |
186 | | } *first_dist, *last_dist; |
187 | | unsigned next_dist_id; |
188 | | |
189 | | /* memory attributes */ |
190 | | unsigned nr_memattrs; |
191 | | struct hwloc_internal_memattr_s { |
192 | | /* memattr info */ |
193 | | char *name; /* TODO unit is implicit, in the documentation of standard attributes, or in the name? */ |
194 | | unsigned long flags; |
195 | | #define HWLOC_IMATTR_FLAG_STATIC_NAME (1U<<0) /* no need to free name */ |
196 | | #define HWLOC_IMATTR_FLAG_CACHE_VALID (1U<<1) /* target and initiator are valid */ |
197 | | #define HWLOC_IMATTR_FLAG_CONVENIENCE (1U<<2) /* convenience attribute reporting values from non-memattr attributes (R/O and no actual targets stored) */ |
198 | | unsigned iflags; |
199 | | |
200 | | /* array of values */ |
201 | | unsigned nr_targets; |
202 | | struct hwloc_internal_memattr_target_s { |
203 | | /* target object */ |
204 | | hwloc_obj_t obj; /* cached between refreshes */ |
205 | | hwloc_obj_type_t type; |
206 | | unsigned os_index; /* only used temporarily during discovery when there's no obj/gp_index yet */ |
207 | | hwloc_uint64_t gp_index; |
208 | | |
209 | | /* value if there is no initiator for this attr */ |
210 | | hwloc_uint64_t noinitiator_value; |
211 | | /* initiators otherwise */ |
212 | | unsigned nr_initiators; |
213 | | struct hwloc_internal_memattr_initiator_s { |
214 | | struct hwloc_internal_location_s initiator; |
215 | | hwloc_uint64_t value; |
216 | | } *initiators; |
217 | | } *targets; |
218 | | } *memattrs; |
219 | | |
220 | | /* hybridcpus */ |
221 | | unsigned nr_cpukinds; |
222 | | unsigned nr_cpukinds_allocated; |
223 | | struct hwloc_internal_cpukind_s { |
224 | | hwloc_cpuset_t cpuset; |
225 | | #define HWLOC_CPUKIND_EFFICIENCY_UNKNOWN -1 |
226 | | int efficiency; |
227 | | int forced_efficiency; /* returned by the hardware or OS if any */ |
228 | | hwloc_uint64_t ranking_value; /* internal value for ranking */ |
229 | | struct hwloc_infos_s infos; |
230 | | } *cpukinds; |
231 | | |
232 | | int grouping; |
233 | | int grouping_verbose; |
234 | | unsigned grouping_nbaccuracies; |
235 | | float grouping_accuracies[5]; |
236 | | unsigned grouping_next_subkind; |
237 | | |
238 | | /* list of enabled backends. */ |
239 | | struct hwloc_backend * backends; |
240 | | struct hwloc_backend * get_pci_busid_cpuset_backend; /* first backend that provides get_pci_busid_cpuset() callback */ |
241 | | unsigned backend_phases; |
242 | | unsigned backend_excluded_phases; |
243 | | |
244 | | /* memory allocator for topology objects */ |
245 | | struct hwloc_tma * tma; |
246 | | |
247 | | /***************************************************** |
248 | | * WARNING: |
249 | | * changes above in this structure (and its children) |
250 | | * should cause a bump of HWLOC_TOPOLOGY_ABI. |
251 | | *****************************************************/ |
252 | | |
253 | | /* |
254 | | * temporary variables during discovery |
255 | | */ |
256 | | |
257 | | /* set to 1 at the beginning of load() if the filter of any cpu cache type (L1 to L3i) is not NONE; |
258 | | * may be checked by backends before querying caches |
259 | | * (when they don't know the level of caches they are querying). |
260 | | */ |
261 | | int want_some_cpu_caches; |
262 | | |
263 | | /* machine-wide memory. |
264 | | * temporarily stored there by OSes that only provide this without NUMA information, |
265 | | * and actually used later by the core. |
266 | | */ |
267 | | struct hwloc_numanode_attr_s machine_memory; |
268 | | |
269 | | /* pci stuff */ |
270 | | int pci_has_forced_locality; |
271 | | unsigned pci_forced_locality_nr; |
272 | | struct hwloc_pci_forced_locality_s { |
273 | | unsigned domain; |
274 | | unsigned bus_first, bus_last; |
275 | | hwloc_bitmap_t cpuset; |
276 | | } * pci_forced_locality; |
277 | | hwloc_uint64_t pci_locality_quirks; |
278 | | |
279 | | /* component blacklisting */ |
280 | | unsigned nr_blacklisted_components; |
281 | | struct hwloc_topology_forced_component_s { |
282 | | struct hwloc_disc_component *component; |
283 | | unsigned phases; |
284 | | } *blacklisted_components; |
285 | | |
286 | | /* FIXME: keep until topo destroy and reuse for finding specific buses */ |
287 | | struct hwloc_pci_locality_s { |
288 | | unsigned domain; |
289 | | unsigned bus_min; |
290 | | unsigned bus_max; |
291 | | hwloc_bitmap_t cpuset; |
292 | | hwloc_obj_t parent; |
293 | | struct hwloc_pci_locality_s *prev, *next; |
294 | | } *first_pci_locality, *last_pci_locality; |
295 | | }; |
296 | | |
297 | | extern void hwloc_alloc_root_sets(hwloc_obj_t root); |
298 | | extern void hwloc_setup_pu_level(struct hwloc_topology *topology, unsigned nb_pus); |
299 | | extern int hwloc_get_sysctlbyname(const char *name, int64_t *n); |
300 | | extern int hwloc_get_sysctl(int name[], unsigned namelen, int64_t *n); |
301 | | |
302 | | /* returns the number of CPUs from the OS (only valid if thissystem) */ |
303 | | #define HWLOC_FALLBACK_NBPROCESSORS_INCLUDE_OFFLINE 1 /* by default we try to get only the online CPUs */ |
304 | | extern int hwloc_fallback_nbprocessors(unsigned flags); |
305 | | /* returns the memory size from the OS (only valid if thissystem) */ |
306 | | extern int64_t hwloc_fallback_memsize(void); |
307 | | |
308 | | extern int hwloc__object_cpusets_compare_first(hwloc_obj_t obj1, hwloc_obj_t obj2); |
309 | | extern void hwloc__reorder_children(hwloc_obj_t parent); |
310 | | |
311 | | extern void hwloc_topology_setup_defaults(struct hwloc_topology *topology); |
312 | | extern void hwloc_topology_clear(struct hwloc_topology *topology); |
313 | | |
314 | | #define _HWLOC_RECONNECT_FLAG_KEEPSTRUCTURE (1UL<<0) |
315 | | extern int hwloc__reconnect(struct hwloc_topology *topology, unsigned long flags); |
316 | | |
317 | | /* insert memory object as memory child of normal parent */ |
318 | | extern struct hwloc_obj * hwloc__attach_memory_object(struct hwloc_topology *topology, hwloc_obj_t parent, |
319 | | hwloc_obj_t obj, const char *reason); |
320 | | |
321 | | extern hwloc_obj_t hwloc_get_obj_by_type_and_gp_index(hwloc_topology_t topology, hwloc_obj_type_t type, uint64_t gp_index); |
322 | | |
323 | | extern void hwloc_pci_discovery_init(struct hwloc_topology *topology); |
324 | | extern void hwloc_pci_discovery_prepare(struct hwloc_topology *topology); |
325 | | extern void hwloc_pci_discovery_exit(struct hwloc_topology *topology); |
326 | | |
327 | | /* Look for an object matching the complete cpuset exactly, or insert one. |
328 | | * Return NULL on failure. |
329 | | * Return a good fallback (an object above) on failure to insert. |
330 | | */ |
331 | | extern hwloc_obj_t hwloc_find_insert_io_parent_by_complete_cpuset(struct hwloc_topology *topology, hwloc_cpuset_t cpuset); |
332 | | |
333 | | extern int hwloc__add_info(struct hwloc_infos_s *infos, const char *name, const char *value); |
334 | | extern int hwloc__replace_infos(struct hwloc_infos_s *infos, const char *name, const char *value); |
335 | | extern int hwloc__remove_infos(struct hwloc_infos_s *infos, const char *name, const char *value); |
336 | | extern int hwloc__move_infos(struct hwloc_infos_s *dst_infos, struct hwloc_infos_s *src_infos); |
337 | | extern int hwloc__tma_dup_infos(struct hwloc_tma *tma, struct hwloc_infos_s *dst_infos, struct hwloc_infos_s *src_infos); |
338 | | extern void hwloc__free_infos(struct hwloc_infos_s *infos); |
339 | | |
340 | | /* set native OS binding hooks */ |
341 | | extern void hwloc_set_native_binding_hooks(struct hwloc_binding_hooks *hooks, struct hwloc_topology_support *support); |
342 | | /* set either native OS binding hooks (if thissystem), or dummy ones */ |
343 | | extern void hwloc_set_binding_hooks(struct hwloc_topology *topology); |
344 | | |
345 | | #if defined(HWLOC_LINUX_SYS) |
346 | | extern void hwloc_set_linuxfs_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); |
347 | | #endif /* HWLOC_LINUX_SYS */ |
348 | | |
349 | | #ifdef HWLOC_SOLARIS_SYS |
350 | | extern void hwloc_set_solaris_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); |
351 | | #endif /* HWLOC_SOLARIS_SYS */ |
352 | | |
353 | | #ifdef HWLOC_AIX_SYS |
354 | | extern void hwloc_set_aix_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); |
355 | | #endif /* HWLOC_AIX_SYS */ |
356 | | |
357 | | #ifdef HWLOC_WIN_SYS |
358 | | extern void hwloc_set_windows_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); |
359 | | #endif /* HWLOC_WIN_SYS */ |
360 | | |
361 | | #ifdef HWLOC_DARWIN_SYS |
362 | | extern void hwloc_set_darwin_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); |
363 | | #endif /* HWLOC_DARWIN_SYS */ |
364 | | |
365 | | #ifdef HWLOC_FREEBSD_SYS |
366 | | extern void hwloc_set_freebsd_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); |
367 | | #endif /* HWLOC_FREEBSD_SYS */ |
368 | | |
369 | | #ifdef HWLOC_NETBSD_SYS |
370 | | extern void hwloc_set_netbsd_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); |
371 | | #endif /* HWLOC_NETBSD_SYS */ |
372 | | |
373 | | #ifdef HWLOC_HPUX_SYS |
374 | | extern void hwloc_set_hpux_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); |
375 | | #endif /* HWLOC_HPUX_SYS */ |
376 | | |
377 | | extern int hwloc_look_hardwired_fujitsu_k(struct hwloc_topology *topology); |
378 | | extern int hwloc_look_hardwired_fujitsu_fx10(struct hwloc_topology *topology); |
379 | | extern int hwloc_look_hardwired_fujitsu_fx100(struct hwloc_topology *topology); |
380 | | |
381 | | /* Insert uname-specific names/values in the object infos array. |
382 | | * If cached_uname isn't NULL, it is used as a struct utsname instead of calling uname() again. |
383 | | * Any field that starts with \0 is ignored. |
384 | | */ |
385 | | extern void hwloc_add_uname_info(struct hwloc_topology *topology, void *cached_uname); |
386 | | |
387 | | /* Free obj and its attributes assuming it's not linked to a parent and doesn't have any child */ |
388 | | extern void hwloc_free_unlinked_object(hwloc_obj_t obj); |
389 | | |
390 | | /* Free obj and its children, assuming it's not linked to a parent */ |
391 | | extern void hwloc_free_object_and_children(hwloc_obj_t obj); |
392 | | |
393 | | /* Free obj, its next siblings, and their children, assuming they're not linked to a parent */ |
394 | | extern void hwloc_free_object_siblings_and_children(hwloc_obj_t obj); |
395 | | |
396 | | /* This can be used for the alloc field to get allocated data that can be freed by free() */ |
397 | | void *hwloc_alloc_heap(hwloc_topology_t topology, size_t len); |
398 | | |
399 | | /* This can be used for the alloc field to get allocated data that can be freed by munmap() */ |
400 | | void *hwloc_alloc_mmap(hwloc_topology_t topology, size_t len); |
401 | | |
402 | | /* This can be used for the free_membind field to free data using free() */ |
403 | | int hwloc_free_heap(hwloc_topology_t topology, void *addr, size_t len); |
404 | | |
405 | | /* This can be used for the free_membind field to free data using munmap() */ |
406 | | int hwloc_free_mmap(hwloc_topology_t topology, void *addr, size_t len); |
407 | | |
408 | | /* Allocates unbound memory or fails, depending on whether STRICT is requested |
409 | | * or not */ |
410 | | static __hwloc_inline void * |
411 | | hwloc_alloc_or_fail(hwloc_topology_t topology, size_t len, int flags) |
412 | 0 | { |
413 | 0 | if (flags & HWLOC_MEMBIND_STRICT) |
414 | 0 | return NULL; |
415 | 0 | return hwloc_alloc(topology, len); |
416 | 0 | } |
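/* Illustrative sketch (an assumption, not an actual hwloc backend): how a backend's
 * alloc_membind hook can satisfy the "must succeed unless STRICT" rule documented in
 * struct hwloc_binding_hooks, by falling back to hwloc_alloc_or_fail() when the
 * OS-specific bound allocation is unavailable. The name hwloc_example_alloc_membind
 * is hypothetical.
 */
static __hwloc_inline void *
hwloc_example_alloc_membind(hwloc_topology_t topology, size_t len,
                            hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy,
                            int flags)
{
  (void) nodeset; (void) policy; /* a real hook would attempt the OS-specific bound allocation here */
  /* binding failed or is unsupported: return unbound memory, or NULL if the caller required STRICT */
  return hwloc_alloc_or_fail(topology, len, flags);
}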
417 | | |
418 | | extern void hwloc_internal_distances_init(hwloc_topology_t topology); |
419 | | extern void hwloc_internal_distances_prepare(hwloc_topology_t topology); |
420 | | extern void hwloc_internal_distances_destroy(hwloc_topology_t topology); |
421 | | extern int hwloc_internal_distances_dup(hwloc_topology_t new, hwloc_topology_t old); |
422 | | extern void hwloc_internal_distances_refresh(hwloc_topology_t topology); |
423 | | extern void hwloc_internal_distances_invalidate_cached_objs(hwloc_topology_t topology); |
424 | | |
425 | | /* these distances_add() functions are higher-level than those in hwloc/plugins.h |
426 | | * but they may change in the future, hence they are not exported to plugins. |
427 | | */ |
428 | | extern int hwloc_internal_distances_add_by_index(hwloc_topology_t topology, const char *name, hwloc_obj_type_t unique_type, hwloc_obj_type_t *different_types, unsigned nbobjs, uint64_t *indexes, uint64_t *values, unsigned long kind, unsigned long flags); |
429 | | extern int hwloc_internal_distances_add(hwloc_topology_t topology, const char *name, unsigned nbobjs, hwloc_obj_t *objs, uint64_t *values, unsigned long kind, unsigned long flags); |
430 | | |
431 | | extern void hwloc_internal_memattrs_init(hwloc_topology_t topology); |
432 | | extern void hwloc_internal_memattrs_prepare(hwloc_topology_t topology); |
433 | | extern void hwloc_internal_memattrs_destroy(hwloc_topology_t topology); |
434 | | extern void hwloc_internal_memattrs_need_refresh(hwloc_topology_t topology); |
435 | | extern void hwloc_internal_memattrs_refresh(hwloc_topology_t topology); |
436 | | extern int hwloc_internal_memattrs_dup(hwloc_topology_t new, hwloc_topology_t old); |
437 | | extern int hwloc_internal_memattr_set_value(hwloc_topology_t topology, hwloc_memattr_id_t id, hwloc_obj_type_t target_type, hwloc_uint64_t target_gp_index, unsigned target_os_index, struct hwloc_internal_location_s *initiator, hwloc_uint64_t value); |
438 | | extern int hwloc_internal_memattrs_guess_memory_tiers(hwloc_topology_t topology, int force_subtype); |
439 | | |
440 | | extern void hwloc_internal_cpukinds_init(hwloc_topology_t topology); |
441 | | extern int hwloc_internal_cpukinds_rank(hwloc_topology_t topology); |
442 | | extern void hwloc_internal_cpukinds_destroy(hwloc_topology_t topology); |
443 | | extern int hwloc_internal_cpukinds_dup(hwloc_topology_t new, hwloc_topology_t old); |
444 | | #define HWLOC_CPUKINDS_REGISTER_FLAG_OVERWRITE_FORCED_EFFICIENCY (1<<0) |
445 | | extern int hwloc_internal_cpukinds_register(hwloc_topology_t topology, hwloc_cpuset_t cpuset, int forced_efficiency, const struct hwloc_infos_s *infos, unsigned long flags); |
446 | | extern void hwloc_internal_cpukinds_restrict(hwloc_topology_t topology); |
447 | | |
448 | | /* encode src buffer into target buffer. |
449 | | * targsize must be at least 4*((srclength+2)/3)+1. |
450 | | * target will be 0-terminated. |
451 | | */ |
452 | | extern int hwloc_encode_to_base64(const char *src, size_t srclength, char *target, size_t targsize); |
453 | | /* decode src buffer into target buffer. |
454 | | * src is 0-terminated. |
455 | | * targsize must be at least srclength*3/4+1 (srclength not including \0) |
456 | | * but only srclength*3/4 characters will be meaningful |
457 | | * (the next one may be partially written during decoding, but it should be ignored). |
458 | | */ |
459 | | extern int hwloc_decode_from_base64(char const *src, char *target, size_t targsize); |
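/* Illustrative sketch (an assumption, not hwloc API): sizing the target buffer for
 * hwloc_encode_to_base64() with the formula documented above. The helper name
 * hwloc_example_encode_base64 is hypothetical, and treating a negative return value
 * as failure is an assumption.
 */
static __hwloc_inline char *
hwloc_example_encode_base64(const char *src, size_t srclength)
{
  size_t targsize = 4*((srclength+2)/3)+1; /* minimal target size from the comment above */
  char *target = malloc(targsize);
  if (target && hwloc_encode_to_base64(src, srclength, target, targsize) < 0) {
    free(target);
    return NULL;
  }
  return target; /* 0-terminated on success */
}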
460 | | |
461 | | /* On some systems, snprintf returns the size of written data, not the actually |
462 | | * required size. Sometimes it returns -1 on truncation too. |
463 | | * And sometimes it doesn't like NULL output buffers. |
464 | | * http://www.gnu.org/software/gnulib/manual/html_node/snprintf.html |
465 | | * |
466 | | * hwloc_snprintf behaves properly, but it's a bit overkill on the vast majority |
467 | | * of platforms, so don't enable it unless really needed. |
468 | | */ |
469 | | #ifdef HWLOC_HAVE_CORRECT_SNPRINTF |
470 | | #define hwloc_snprintf snprintf |
471 | | #else |
472 | | extern int hwloc_snprintf(char *str, size_t size, const char *format, ...) __hwloc_attribute_format(printf, 3, 4); |
473 | | #endif |
474 | | |
475 | | /* uses HWLOC_OBJ_SNPRINTF_FLAG_ flags */ |
476 | | static __hwloc_inline int hwloc_memory_size_snprintf(char *buffer, size_t bufsize, unsigned long long size, unsigned long flags) |
477 | 0 | { |
478 | 0 | /* no units */ |
479 | 0 | if (flags & HWLOC_OBJ_SNPRINTF_FLAG_NO_UNITS) { |
480 | 0 | return snprintf(buffer, bufsize, "%llu", size); |
481 | 0 | } |
482 | 0 |
483 | 0 | /* old deprecated format (KiB value with KB units) */ |
484 | 0 | if (flags & HWLOC_OBJ_SNPRINTF_FLAG_OLD_VERBOSE) { |
485 | 0 | return snprintf(buffer, bufsize, "%llu%s", ((size>>9)+1)>>1, "KB"); |
486 | 0 | } |
487 | 0 |
488 | 0 | /* units 1000 */ |
489 | 0 | if (flags & HWLOC_OBJ_SNPRINTF_FLAG_UNITS_1000) { |
490 | 0 | if (size < 10000000ULL) { |
491 | 0 | return snprintf(buffer, bufsize, "%llu%s", ((size/500)+1)/2, "KB"); |
492 | 0 | } else if (size < 10000000000ULL) { |
493 | 0 | return snprintf(buffer, bufsize, "%llu%s", ((size/500000)+1)/2, "MB"); |
494 | 0 | } else if (size < 10000000000000ULL) { |
495 | 0 | return snprintf(buffer, bufsize, "%llu%s", ((size/500000000)+1)/2, "GB"); |
496 | 0 | } else { |
497 | 0 | return snprintf(buffer, bufsize, "%llu%s", ((size/500000000000ULL)+1)/2, "TB"); |
498 | 0 | } |
499 | 0 | } |
500 | 0 |
501 | 0 | /* units 1024 */ |
502 | 0 | if (size < (10ULL<<20)) { |
503 | 0 | return snprintf(buffer, bufsize, "%llu%s", ((size>>9)+1)>>1, "KiB"); |
504 | 0 | } else if (size < (10ULL<<30)) { |
505 | 0 | return snprintf(buffer, bufsize, "%llu%s", ((size>>19)+1)>>1, "MiB"); |
506 | 0 | } else if (size < (10ULL<<40)) { |
507 | 0 | return snprintf(buffer, bufsize, "%llu%s", ((size>>29)+1)>>1, "GiB"); |
508 | 0 | } else { |
509 | 0 | return snprintf(buffer, bufsize, "%llu%s", ((size>>39)+1)>>1, "TiB"); |
510 | 0 | } |
511 | 0 | } |
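/* Worked example of the default 1024-based branch above (an illustration, not extra API):
 * size = 2048000 bytes is below 10*2^20, so the KiB case applies:
 *   ((2048000 >> 9) + 1) >> 1 = (4000 + 1) >> 1 = 2000, printed as "2000KiB",
 * i.e. the value is rounded to the nearest KiB.
 */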
512 | | |
513 | | /* Return the name of the currently running program, if supported. |
514 | | * If not NULL, must be freed by the caller. |
515 | | */ |
516 | | extern char * hwloc_progname(struct hwloc_topology *topology); |
517 | | |
518 | | /* obj->attr->group.kind internal values. |
519 | | * the core will keep the smallest ones when merging two groups, |
520 | | * that's why user-given kinds are first. |
521 | | */ |
522 | | /* first, user-given groups, should remain as long as possible */ |
523 | | #define HWLOC_GROUP_KIND_USER 0 /* user-given, user may use subkind too */ |
524 | | #define HWLOC_GROUP_KIND_SYNTHETIC 10 /* subkind is group depth within synthetic description */ |
525 | | /* then, hardware-specific groups */ |
526 | | #define HWLOC_GROUP_KIND_INTEL_KNL_SUBNUMA_CLUSTER 100 /* no subkind */ |
527 | | #define HWLOC_GROUP_KIND_INTEL_EXTTOPOENUM_UNKNOWN 101 /* subkind is unknown level */ |
528 | | #define HWLOC_GROUP_KIND_INTEL_MODULE 102 /* no subkind */ |
529 | | #define HWLOC_GROUP_KIND_INTEL_TILE 103 /* no subkind */ |
530 | | #define HWLOC_GROUP_KIND_INTEL_DIE 104 /* no subkind */ |
531 | | #define HWLOC_GROUP_KIND_S390_BOOK 110 /* subkind 0 is book, subkind 1 is drawer (group of books) */ |
532 | | #define HWLOC_GROUP_KIND_AMD_COMPUTE_UNIT 120 /* no subkind */ |
533 | | #define HWLOC_GROUP_KIND_AMD_COMPLEX 121 /* no subkind */ |
534 | | /* then, OS-specific groups */ |
535 | | #define HWLOC_GROUP_KIND_SOLARIS_PG_HW_PERF 200 /* subkind is group width */ |
536 | | #define HWLOC_GROUP_KIND_AIX_SDL_UNKNOWN 210 /* subkind is SDL level */ |
537 | | #define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220 /* no subkind */ |
538 | | #define HWLOC_GROUP_KIND_WINDOWS_RELATIONSHIP_UNKNOWN 221 /* no subkind */ |
539 | | #define HWLOC_GROUP_KIND_LINUX_CLUSTER 222 /* no subkind */ |
540 | | /* distance groups */ |
541 | | #define HWLOC_GROUP_KIND_DISTANCE 900 /* subkind is round of adding these groups during distance based grouping */ |
542 | | /* finally, hwloc-specific groups required to insert something else, should disappear as soon as possible */ |
543 | | #define HWLOC_GROUP_KIND_IO 1000 /* no subkind */ |
544 | | #define HWLOC_GROUP_KIND_MEMORY 1001 /* no subkind */ |
545 | | |
546 | | /* memory allocator for topology objects */ |
547 | | struct hwloc_tma { |
548 | | void * (*malloc)(struct hwloc_tma *, size_t); |
549 | | void *data; |
550 | | int dontfree; /* when set, free() or realloc() cannot be used, and tma->malloc() cannot fail */ |
551 | | }; |
552 | | |
553 | | static __hwloc_inline void * |
554 | | hwloc_tma_malloc(struct hwloc_tma *tma, |
555 | | size_t size) |
556 | 0 | { |
557 | 0 | if (tma) { |
558 | 0 | return tma->malloc(tma, size); |
559 | 0 | } else { |
560 | 0 | return malloc(size); |
561 | 0 | } |
562 | 0 | } |
563 | | |
564 | | static __hwloc_inline void * |
565 | | hwloc_tma_calloc(struct hwloc_tma *tma, |
566 | | size_t size) |
567 | 0 | { |
568 | 0 | char *ptr = hwloc_tma_malloc(tma, size); |
569 | 0 | if (ptr) |
570 | 0 | memset(ptr, 0, size); |
571 | 0 | return ptr; |
572 | 0 | } |
573 | | |
574 | | static __hwloc_inline char * |
575 | | hwloc_tma_strdup(struct hwloc_tma *tma, |
576 | | const char *src) |
577 | 0 | { |
578 | 0 | size_t len = strlen(src); |
579 | 0 | char *ptr = hwloc_tma_malloc(tma, len+1); |
580 | 0 | if (ptr) |
581 | 0 | memcpy(ptr, src, len+1); |
582 | 0 | return ptr; |
583 | 0 | } |
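/* Illustrative sketch (an assumption, not part of hwloc): a minimal bump-allocator tma
 * whose malloc callback carves chunks out of one preallocated buffer stored in data.
 * Such a tma typically sets dontfree=1, so the caller must size the buffer large enough
 * that the callback never runs out. All hwloc_example_* names are hypothetical.
 */
struct hwloc_example_bump_tma_data {
  char *buffer;
  size_t used, size;
};

static __hwloc_inline void *
hwloc_example_bump_tma_malloc(struct hwloc_tma *tma, size_t len)
{
  struct hwloc_example_bump_tma_data *d = tma->data;
  char *ptr;
  len = (len + 7) & ~(size_t)7; /* keep the next chunk 8-byte aligned */
  if (d->used + len > d->size)
    return NULL; /* must not happen when dontfree is set, see struct hwloc_tma above */
  ptr = d->buffer + d->used;
  d->used += len;
  return ptr;
}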
584 | | |
585 | | /* bitmap allocator to be used inside hwloc */ |
586 | | extern hwloc_bitmap_t hwloc_bitmap_tma_dup(struct hwloc_tma *tma, hwloc_const_bitmap_t old); |
587 | | |
588 | | extern int hwloc__topology_dup(hwloc_topology_t *newp, hwloc_topology_t old, struct hwloc_tma *tma); |
589 | | extern void hwloc__topology_disadopt(hwloc_topology_t topology); |
590 | | |
591 | | #endif /* HWLOC_PRIVATE_H */ |