Line | Count | Source |
1 | | // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) |
2 | | /* Copyright (c) 2018 Facebook */ |
3 | | |
4 | | #include <byteswap.h> |
5 | | #include <endian.h> |
6 | | #include <stdio.h> |
7 | | #include <stdlib.h> |
8 | | #include <string.h> |
9 | | #include <fcntl.h> |
10 | | #include <unistd.h> |
11 | | #include <errno.h> |
12 | | #include <sys/utsname.h> |
13 | | #include <sys/param.h> |
14 | | #include <sys/stat.h> |
15 | | #include <sys/mman.h> |
16 | | #include <linux/kernel.h> |
17 | | #include <linux/err.h> |
18 | | #include <linux/btf.h> |
19 | | #include <gelf.h> |
20 | | #include "btf.h" |
21 | | #include "bpf.h" |
22 | | #include "libbpf.h" |
23 | | #include "libbpf_internal.h" |
24 | | #include "hashmap.h" |
25 | | #include "strset.h" |
26 | | #include "str_error.h" |
27 | | |
28 | 42.6k | #define BTF_MAX_NR_TYPES 0x7fffffffU |
29 | 7.20k | #define BTF_MAX_STR_OFFSET 0x7fffffffU |
30 | | |
31 | | static struct btf_type btf_void; |
32 | | |
33 | | struct btf { |
34 | | /* raw BTF data in native endianness */ |
35 | | void *raw_data; |
36 | | /* raw BTF data in non-native endianness */ |
37 | | void *raw_data_swapped; |
38 | | __u32 raw_size; |
39 | | /* whether target endianness differs from the native one */ |
40 | | bool swapped_endian; |
41 | | |
42 | | /* |
43 | | * When BTF is loaded from an ELF or raw memory it is stored |
44 | | * in a contiguous memory block. The hdr, types_data, and strs_data |
45 | | * point inside that memory region to their respective parts of BTF |
46 | | * representation: |
47 | | * |
48 | | * +--------------------------------+ |
49 | | * | Header | Types | Strings | |
50 | | * +--------------------------------+ |
51 | | * ^ ^ ^ |
52 | | * | | | |
53 | | * hdr | | |
54 | | * types_data-+ | |
55 | | * strs_data------------+ |
56 | | * |
57 | | * If BTF data is later modified, e.g., due to types added or |
58 | | * removed, BTF deduplication performed, etc., this contiguous |
59 | | * representation is broken up into three independently allocated |
60 | | * memory regions to be able to modify them independently. |
61 | | * raw_data is nulled out at that point, but can be later allocated |
62 | | * and cached again if user calls btf__raw_data(), at which point |
63 | | * raw_data will contain a contiguous copy of header, types, and |
64 | | * strings: |
65 | | * |
66 | | * +----------+ +---------+ +-----------+ |
67 | | * | Header | | Types | | Strings | |
68 | | * +----------+ +---------+ +-----------+ |
69 | | * ^ ^ ^ |
70 | | * | | | |
71 | | * hdr | | |
72 | | * types_data----+ | |
73 | | * strset__data(strs_set)-----+ |
74 | | * |
75 | | * +----------+---------+-----------+ |
76 | | * | Header | Types | Strings | |
77 | | * raw_data----->+----------+---------+-----------+ |
78 | | */ |
79 | | struct btf_header *hdr; |
80 | | |
81 | | void *types_data; |
82 | | size_t types_data_cap; /* used size stored in hdr->type_len */ |
83 | | |
84 | | /* type ID to `struct btf_type *` lookup index |
85 | | * type_offs[0] corresponds to the first non-VOID type: |
86 | | * - for base BTF it's type [1]; |
87 | | * - for split BTF it's the first non-base BTF type. |
88 | | */ |
89 | | __u32 *type_offs; |
90 | | size_t type_offs_cap; |
91 | | /* number of types in this BTF instance: |
92 | | * - doesn't include special [0] void type; |
93 | | * - for split BTF counts number of types added on top of base BTF. |
94 | | */ |
95 | | __u32 nr_types; |
96 | | /* if not NULL, points to the base BTF on top of which the current |
97 | | * split BTF is based |
98 | | */ |
99 | | struct btf *base_btf; |
100 | | /* BTF type ID of the first type in this BTF instance: |
101 | | * - for base BTF it's equal to 1; |
102 | | * - for split BTF it's equal to the biggest type ID of base BTF plus 1. |
103 | | */ |
104 | | int start_id; |
105 | | /* logical string offset of this BTF instance: |
106 | | * - for base BTF it's equal to 0; |
107 | | * - for split BTF it's equal to the total size of base BTF's string section. |
108 | | */ |
109 | | int start_str_off; |
110 | | |
111 | | /* only one of strs_data or strs_set can be non-NULL, depending on |
112 | | * whether BTF is in a modifiable state (strs_set is used) or not |
113 | | * (strs_data points inside raw_data) |
114 | | */ |
115 | | void *strs_data; |
116 | | /* a set of unique strings */ |
117 | | struct strset *strs_set; |
118 | | /* whether strings are already deduplicated */ |
119 | | bool strs_deduped; |
120 | | |
121 | | /* whether base_btf should be freed in btf_free for this instance */ |
122 | | bool owns_base; |
123 | | |
124 | | /* whether raw_data is a (read-only) mmap */ |
125 | | bool raw_data_is_mmap; |
126 | | |
127 | | /* BTF object FD, if loaded into kernel */ |
128 | | int fd; |
129 | | |
130 | | /* Pointer size (in bytes) for a target architecture of this BTF */ |
131 | | int ptr_sz; |
132 | | }; |
133 | | |
134 | | static inline __u64 ptr_to_u64(const void *ptr) |
135 | 0 | { |
136 | 0 | return (__u64) (unsigned long) ptr; |
137 | 0 | } |
138 | | |
139 | | /* Ensure given dynamically allocated memory region pointed to by *data* with |
140 | | * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough |
141 | | * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements |
142 | | * are already used. At most *max_cnt* elements can ever be allocated. |
143 | | * If necessary, memory is reallocated and all existing data is copied over; |
144 | | * the new pointer to the memory region is stored at *data*, and the new |
145 | | * capacity (in number of elements) is stored in *cap_cnt*. |
146 | | * On success, a pointer to the beginning of the unused memory is returned. |
147 | | * On error, NULL is returned. |
148 | | */ |
149 | | void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz, |
150 | | size_t cur_cnt, size_t max_cnt, size_t add_cnt) |
151 | 44.2k | { |
152 | 44.2k | size_t new_cnt; |
153 | 44.2k | void *new_data; |
154 | | |
155 | 44.2k | if (cur_cnt + add_cnt <= *cap_cnt) |
156 | 38.2k | return *data + cur_cnt * elem_sz; |
157 | | |
158 | | /* requested more than the set limit */ |
159 | 5.97k | if (cur_cnt + add_cnt > max_cnt) |
160 | 0 | return NULL; |
161 | | |
162 | 5.97k | new_cnt = *cap_cnt; |
163 | 5.97k | new_cnt += new_cnt / 4; /* expand by 25% */ |
164 | 5.97k | if (new_cnt < 16) /* but at least 16 elements */ |
165 | 4.34k | new_cnt = 16; |
166 | 5.97k | if (new_cnt > max_cnt) /* but not exceeding a set limit */ |
167 | 0 | new_cnt = max_cnt; |
168 | 5.97k | if (new_cnt < cur_cnt + add_cnt) /* also ensure we have enough memory */ |
169 | 14 | new_cnt = cur_cnt + add_cnt; |
170 | | |
171 | 5.97k | new_data = libbpf_reallocarray(*data, new_cnt, elem_sz); |
172 | 5.97k | if (!new_data) |
173 | 0 | return NULL; |
174 | | |
175 | | /* zero out newly allocated portion of memory */ |
176 | 5.97k | memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz); |
177 | | |
178 | 5.97k | *data = new_data; |
179 | 5.97k | *cap_cnt = new_cnt; |
180 | 5.97k | return new_data + cur_cnt * elem_sz; |
181 | 5.97k | } |
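/* Editorial usage sketch (not part of libbpf): libbpf_add_mem() is an
 * internal helper declared in libbpf_internal.h, so this hypothetical
 * append_u32() is illustrative only. It appends one __u32 to a
 * dynamically grown array, relying on the 25%-growth policy above.
 */
static int append_u32(__u32 **arr, size_t *cap, size_t *cnt, __u32 val)
{
        __u32 *slot;

        /* ask for room for one more element; capacity grows as needed */
        slot = libbpf_add_mem((void **)arr, cap, sizeof(__u32),
                              *cnt, SIZE_MAX, 1);
        if (!slot)
                return -ENOMEM;
        *slot = val;
        *cnt += 1;
        return 0;
}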
182 | | |
183 | | /* Ensure given dynamically allocated memory region has enough allocated space |
184 | | * to accommodate *need_cnt* elements of size *elem_sz* bytes each |
185 | | * to accommodate *need_cnt* elements of size *elem_sz* bytes each. |
186 | | int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt) |
187 | 2.33k | { |
188 | 2.33k | void *p; |
189 | | |
190 | 2.33k | if (need_cnt <= *cap_cnt) |
191 | 1.20k | return 0; |
192 | | |
193 | 1.12k | p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt); |
194 | 1.12k | if (!p) |
195 | 0 | return -ENOMEM; |
196 | | |
197 | 1.12k | return 0; |
198 | 1.12k | } |
199 | | |
200 | | static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt) |
201 | 42.3k | { |
202 | 42.3k | return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32), |
203 | 42.3k | btf->nr_types, BTF_MAX_NR_TYPES, add_cnt); |
204 | 42.3k | } |
205 | | |
206 | | static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off) |
207 | 42.3k | { |
208 | 42.3k | __u32 *p; |
209 | | |
210 | 42.3k | p = btf_add_type_offs_mem(btf, 1); |
211 | 42.3k | if (!p) |
212 | 0 | return -ENOMEM; |
213 | | |
214 | 42.3k | *p = type_off; |
215 | 42.3k | return 0; |
216 | 42.3k | } |
217 | | |
218 | | static void btf_bswap_hdr(struct btf_header *h) |
219 | 362 | { |
220 | 362 | h->magic = bswap_16(h->magic); |
221 | 362 | h->hdr_len = bswap_32(h->hdr_len); |
222 | 362 | h->type_off = bswap_32(h->type_off); |
223 | 362 | h->type_len = bswap_32(h->type_len); |
224 | 362 | h->str_off = bswap_32(h->str_off); |
225 | 362 | h->str_len = bswap_32(h->str_len); |
226 | 362 | } |
227 | | |
228 | | static int btf_parse_hdr(struct btf *btf) |
229 | 3.60k | { |
230 | 3.60k | struct btf_header *hdr = btf->hdr; |
231 | 3.60k | __u32 meta_left; |
232 | | |
233 | 3.60k | if (btf->raw_size < sizeof(struct btf_header)) { |
234 | 14 | pr_debug("BTF header not found\n"); |
235 | 14 | return -EINVAL; |
236 | 14 | } |
237 | | |
238 | 3.58k | if (hdr->magic == bswap_16(BTF_MAGIC)) { |
239 | 391 | btf->swapped_endian = true; |
240 | 391 | if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) { |
241 | 29 | pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n", |
242 | 29 | bswap_32(hdr->hdr_len)); |
243 | 29 | return -ENOTSUP; |
244 | 29 | } |
245 | 362 | btf_bswap_hdr(hdr); |
246 | 3.19k | } else if (hdr->magic != BTF_MAGIC) { |
247 | 37 | pr_debug("Invalid BTF magic: %x\n", hdr->magic); |
248 | 37 | return -EINVAL; |
249 | 37 | } |
250 | | |
251 | 3.52k | if (btf->raw_size < hdr->hdr_len) { |
252 | 49 | pr_debug("BTF header len %u larger than data size %u\n", |
253 | 49 | hdr->hdr_len, btf->raw_size); |
254 | 49 | return -EINVAL; |
255 | 49 | } |
256 | | |
257 | 3.47k | meta_left = btf->raw_size - hdr->hdr_len; |
258 | 3.47k | if (meta_left < (long long)hdr->str_off + hdr->str_len) { |
259 | 57 | pr_debug("Invalid BTF total size: %u\n", btf->raw_size); |
260 | 57 | return -EINVAL; |
261 | 57 | } |
262 | | |
263 | 3.41k | if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) { |
264 | 12 | pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n", |
265 | 12 | hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len); |
266 | 12 | return -EINVAL; |
267 | 12 | } |
268 | | |
269 | 3.40k | if (hdr->type_off % 4) { |
270 | 2 | pr_debug("BTF type section is not aligned to 4 bytes\n"); |
271 | 2 | return -EINVAL; |
272 | 2 | } |
273 | | |
274 | 3.40k | return 0; |
275 | 3.40k | } |
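/* Editorial sketch (not part of libbpf; assumes this file's includes): the
 * smallest raw BTF blob that satisfies btf_parse_hdr() above and
 * btf_parse_str_sec() below - a native-endian header, an empty type
 * section, and a one-byte string section holding the mandatory empty
 * string at offset 0.
 */
struct min_btf {
        struct btf_header hdr;
        char strs[1];
};

static const struct min_btf empty_btf = {
        .hdr = {
                .magic = BTF_MAGIC,             /* 0xeB9F */
                .version = BTF_VERSION,
                .hdr_len = sizeof(struct btf_header),
                .type_off = 0,
                .type_len = 0,
                .str_off = 0,
                .str_len = 1,                   /* just the empty string */
        },
        .strs = { '\0' },
};
/* btf__new(&empty_btf, sizeof(struct btf_header) + 1) yields an empty BTF */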
276 | | |
277 | | static int btf_parse_str_sec(struct btf *btf) |
278 | 3.40k | { |
279 | 3.40k | const struct btf_header *hdr = btf->hdr; |
280 | 3.40k | const char *start = btf->strs_data; |
281 | 3.40k | const char *end = start + btf->hdr->str_len; |
282 | | |
283 | 3.40k | if (btf->base_btf && hdr->str_len == 0) |
284 | 0 | return 0; |
285 | 3.40k | if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) { |
286 | 11 | pr_debug("Invalid BTF string section\n"); |
287 | 11 | return -EINVAL; |
288 | 11 | } |
289 | 3.39k | if (!btf->base_btf && start[0]) { |
290 | 4 | pr_debug("Malformed BTF string section, did you forget to provide base BTF?\n"); |
291 | 4 | return -EINVAL; |
292 | 4 | } |
293 | 3.38k | return 0; |
294 | 3.39k | } |
295 | | |
296 | | static int btf_type_size(const struct btf_type *t) |
297 | 42.0k | { |
298 | 42.0k | const int base_size = sizeof(struct btf_type); |
299 | 42.0k | __u16 vlen = btf_vlen(t); |
300 | | |
301 | 42.0k | switch (btf_kind(t)) { |
302 | 938 | case BTF_KIND_FWD: |
303 | 2.35k | case BTF_KIND_CONST: |
304 | 3.17k | case BTF_KIND_VOLATILE: |
305 | 4.55k | case BTF_KIND_RESTRICT: |
306 | 6.23k | case BTF_KIND_PTR: |
307 | 7.05k | case BTF_KIND_TYPEDEF: |
308 | 8.94k | case BTF_KIND_FUNC: |
309 | 10.6k | case BTF_KIND_FLOAT: |
310 | 11.3k | case BTF_KIND_TYPE_TAG: |
311 | 11.3k | return base_size; |
312 | 6.90k | case BTF_KIND_INT: |
313 | 6.90k | return base_size + sizeof(__u32); |
314 | 1.09k | case BTF_KIND_ENUM: |
315 | 1.09k | return base_size + vlen * sizeof(struct btf_enum); |
316 | 1.73k | case BTF_KIND_ENUM64: |
317 | 1.73k | return base_size + vlen * sizeof(struct btf_enum64); |
318 | 3.63k | case BTF_KIND_ARRAY: |
319 | 3.63k | return base_size + sizeof(struct btf_array); |
320 | 1.48k | case BTF_KIND_STRUCT: |
321 | 2.11k | case BTF_KIND_UNION: |
322 | 2.11k | return base_size + vlen * sizeof(struct btf_member); |
323 | 2.57k | case BTF_KIND_FUNC_PROTO: |
324 | 2.57k | return base_size + vlen * sizeof(struct btf_param); |
325 | 5.23k | case BTF_KIND_VAR: |
326 | 5.23k | return base_size + sizeof(struct btf_var); |
327 | 6.36k | case BTF_KIND_DATASEC: |
328 | 6.36k | return base_size + vlen * sizeof(struct btf_var_secinfo); |
329 | 956 | case BTF_KIND_DECL_TAG: |
330 | 956 | return base_size + sizeof(struct btf_decl_tag); |
331 | 76 | default: |
332 | 76 | pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); |
333 | 76 | return -EINVAL; |
334 | 42.0k | } |
335 | 42.0k | } |
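/* Editorial worked example of the sizing rule above, based on the UAPI
 * record layouts in <linux/btf.h>: every type starts with a 12-byte
 * struct btf_type, followed by kind-specific data scaled by vlen.
 */
_Static_assert(sizeof(struct btf_type) == 12, "base type record");
_Static_assert(sizeof(struct btf_enum) == 8, "per-value enum record");
/* e.g., a BTF_KIND_ENUM with vlen == 2 occupies 12 + 2 * 8 = 28 bytes */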
336 | | |
337 | | static void btf_bswap_type_base(struct btf_type *t) |
338 | 10.1k | { |
339 | 10.1k | t->name_off = bswap_32(t->name_off); |
340 | 10.1k | t->info = bswap_32(t->info); |
341 | 10.1k | t->type = bswap_32(t->type); |
342 | 10.1k | } |
343 | | |
344 | | static int btf_bswap_type_rest(struct btf_type *t) |
345 | 10.0k | { |
346 | 10.0k | struct btf_var_secinfo *v; |
347 | 10.0k | struct btf_enum64 *e64; |
348 | 10.0k | struct btf_member *m; |
349 | 10.0k | struct btf_array *a; |
350 | 10.0k | struct btf_param *p; |
351 | 10.0k | struct btf_enum *e; |
352 | 10.0k | __u16 vlen = btf_vlen(t); |
353 | 10.0k | int i; |
354 | | |
355 | 10.0k | switch (btf_kind(t)) { |
356 | 381 | case BTF_KIND_FWD: |
357 | 874 | case BTF_KIND_CONST: |
358 | 1.45k | case BTF_KIND_VOLATILE: |
359 | 2.27k | case BTF_KIND_RESTRICT: |
360 | 2.70k | case BTF_KIND_PTR: |
361 | 3.10k | case BTF_KIND_TYPEDEF: |
362 | 3.66k | case BTF_KIND_FUNC: |
363 | 4.56k | case BTF_KIND_FLOAT: |
364 | 5.02k | case BTF_KIND_TYPE_TAG: |
365 | 5.02k | return 0; |
366 | 494 | case BTF_KIND_INT: |
367 | 494 | *(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1)); |
368 | 494 | return 0; |
369 | 501 | case BTF_KIND_ENUM: |
370 | 2.56k | for (i = 0, e = btf_enum(t); i < vlen; i++, e++) { |
371 | 2.06k | e->name_off = bswap_32(e->name_off); |
372 | 2.06k | e->val = bswap_32(e->val); |
373 | 2.06k | } |
374 | 501 | return 0; |
375 | 567 | case BTF_KIND_ENUM64: |
376 | 1.96k | for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) { |
377 | 1.39k | e64->name_off = bswap_32(e64->name_off); |
378 | 1.39k | e64->val_lo32 = bswap_32(e64->val_lo32); |
379 | 1.39k | e64->val_hi32 = bswap_32(e64->val_hi32); |
380 | 1.39k | } |
381 | 567 | return 0; |
382 | 509 | case BTF_KIND_ARRAY: |
383 | 509 | a = btf_array(t); |
384 | 509 | a->type = bswap_32(a->type); |
385 | 509 | a->index_type = bswap_32(a->index_type); |
386 | 509 | a->nelems = bswap_32(a->nelems); |
387 | 509 | return 0; |
388 | 413 | case BTF_KIND_STRUCT: |
389 | 754 | case BTF_KIND_UNION: |
390 | 2.01k | for (i = 0, m = btf_members(t); i < vlen; i++, m++) { |
391 | 1.25k | m->name_off = bswap_32(m->name_off); |
392 | 1.25k | m->type = bswap_32(m->type); |
393 | 1.25k | m->offset = bswap_32(m->offset); |
394 | 1.25k | } |
395 | 754 | return 0; |
396 | 497 | case BTF_KIND_FUNC_PROTO: |
397 | 2.48k | for (i = 0, p = btf_params(t); i < vlen; i++, p++) { |
398 | 1.98k | p->name_off = bswap_32(p->name_off); |
399 | 1.98k | p->type = bswap_32(p->type); |
400 | 1.98k | } |
401 | 497 | return 0; |
402 | 776 | case BTF_KIND_VAR: |
403 | 776 | btf_var(t)->linkage = bswap_32(btf_var(t)->linkage); |
404 | 776 | return 0; |
405 | 449 | case BTF_KIND_DATASEC: |
406 | 2.02k | for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) { |
407 | 1.57k | v->type = bswap_32(v->type); |
408 | 1.57k | v->offset = bswap_32(v->offset); |
409 | 1.57k | v->size = bswap_32(v->size); |
410 | 1.57k | } |
411 | 449 | return 0; |
412 | 521 | case BTF_KIND_DECL_TAG: |
413 | 521 | btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx); |
414 | 521 | return 0; |
415 | 0 | default: |
416 | 0 | pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); |
417 | 0 | return -EINVAL; |
418 | 10.0k | } |
419 | 10.0k | } |
420 | | |
421 | | static int btf_parse_type_sec(struct btf *btf) |
422 | 3.38k | { |
423 | 3.38k | struct btf_header *hdr = btf->hdr; |
424 | 3.38k | void *next_type = btf->types_data; |
425 | 3.38k | void *end_type = next_type + hdr->type_len; |
426 | 3.38k | int err, type_size; |
427 | | |
428 | 45.2k | while (next_type + sizeof(struct btf_type) <= end_type) { |
429 | 42.0k | if (btf->swapped_endian) |
430 | 10.1k | btf_bswap_type_base(next_type); |
431 | | |
432 | 42.0k | type_size = btf_type_size(next_type); |
433 | 42.0k | if (type_size < 0) |
434 | 76 | return type_size; |
435 | 41.9k | if (next_type + type_size > end_type) { |
436 | 26 | pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types); |
437 | 26 | return -EINVAL; |
438 | 26 | } |
439 | | |
440 | 41.9k | if (btf->swapped_endian && btf_bswap_type_rest(next_type)) |
441 | 0 | return -EINVAL; |
442 | | |
443 | 41.9k | err = btf_add_type_idx_entry(btf, next_type - btf->types_data); |
444 | 41.9k | if (err) |
445 | 0 | return err; |
446 | | |
447 | 41.9k | next_type += type_size; |
448 | 41.9k | btf->nr_types++; |
449 | 41.9k | } |
450 | | |
451 | 3.28k | if (next_type != end_type) { |
452 | 50 | pr_warn("BTF types data is malformed\n"); |
453 | 50 | return -EINVAL; |
454 | 50 | } |
455 | | |
456 | 3.23k | return 0; |
457 | 3.28k | } |
458 | | |
459 | | static int btf_validate_str(const struct btf *btf, __u32 str_off, const char *what, __u32 type_id) |
460 | 39.9k | { |
461 | 39.9k | const char *s; |
462 | | |
463 | 39.9k | s = btf__str_by_offset(btf, str_off); |
464 | 39.9k | if (!s) { |
465 | 135 | pr_warn("btf: type [%u]: invalid %s (string offset %u)\n", type_id, what, str_off); |
466 | 135 | return -EINVAL; |
467 | 135 | } |
468 | | |
469 | 39.8k | return 0; |
470 | 39.9k | } |
471 | | |
472 | | static int btf_validate_id(const struct btf *btf, __u32 id, __u32 ctx_id) |
473 | 29.6k | { |
474 | 29.6k | const struct btf_type *t; |
475 | | |
476 | 29.6k | t = btf__type_by_id(btf, id); |
477 | 29.6k | if (!t) { |
478 | 285 | pr_warn("btf: type [%u]: invalid referenced type ID %u\n", ctx_id, id); |
479 | 285 | return -EINVAL; |
480 | 285 | } |
481 | | |
482 | 29.3k | return 0; |
483 | 29.6k | } |
484 | | |
485 | | static int btf_validate_type(const struct btf *btf, const struct btf_type *t, __u32 id) |
486 | 31.8k | { |
487 | 31.8k | __u32 kind = btf_kind(t); |
488 | 31.8k | int err, i, n; |
489 | | |
490 | 31.8k | err = btf_validate_str(btf, t->name_off, "type name", id); |
491 | 31.8k | if (err) |
492 | 96 | return err; |
493 | | |
494 | 31.7k | switch (kind) { |
495 | 0 | case BTF_KIND_UNKN: |
496 | 6.27k | case BTF_KIND_INT: |
497 | 6.87k | case BTF_KIND_FWD: |
498 | 7.73k | case BTF_KIND_FLOAT: |
499 | 7.73k | break; |
500 | 1.27k | case BTF_KIND_PTR: |
501 | 1.68k | case BTF_KIND_TYPEDEF: |
502 | 1.96k | case BTF_KIND_VOLATILE: |
503 | 2.95k | case BTF_KIND_CONST: |
504 | 3.53k | case BTF_KIND_RESTRICT: |
505 | 7.88k | case BTF_KIND_VAR: |
506 | 8.31k | case BTF_KIND_DECL_TAG: |
507 | 8.50k | case BTF_KIND_TYPE_TAG: |
508 | 8.50k | err = btf_validate_id(btf, t->type, id); |
509 | 8.50k | if (err) |
510 | 56 | return err; |
511 | 8.45k | break; |
512 | 8.45k | case BTF_KIND_ARRAY: { |
513 | 3.00k | const struct btf_array *a = btf_array(t); |
514 | | |
515 | 3.00k | err = btf_validate_id(btf, a->type, id); |
516 | 3.00k | err = err ?: btf_validate_id(btf, a->index_type, id); |
517 | 3.00k | if (err) |
518 | 68 | return err; |
519 | 2.90k | break; |
520 | 2.97k | } |
521 | 2.90k | case BTF_KIND_STRUCT: |
522 | 1.43k | case BTF_KIND_UNION: { |
523 | 1.43k | const struct btf_member *m = btf_members(t); |
524 | | |
525 | 1.43k | n = btf_vlen(t); |
526 | 3.96k | for (i = 0; i < n; i++, m++) { |
527 | 2.59k | err = btf_validate_str(btf, m->name_off, "field name", id); |
528 | 2.59k | err = err ?: btf_validate_id(btf, m->type, id); |
529 | 2.59k | if (err) |
530 | 56 | return err; |
531 | 2.58k | } |
532 | 1.36k | break; |
533 | 1.43k | } |
534 | 1.36k | case BTF_KIND_ENUM: { |
535 | 719 | const struct btf_enum *m = btf_enum(t); |
536 | | |
537 | 719 | n = btf_vlen(t); |
538 | 1.44k | for (i = 0; i < n; i++, m++) { |
539 | 742 | err = btf_validate_str(btf, m->name_off, "enum name", id); |
540 | 742 | if (err) |
541 | 15 | return err; |
542 | 742 | } |
543 | 704 | break; |
544 | 719 | } |
545 | 1.15k | case BTF_KIND_ENUM64: { |
546 | 1.15k | const struct btf_enum64 *m = btf_enum64(t); |
547 | | |
548 | 1.15k | n = btf_vlen(t); |
549 | 4.04k | for (i = 0; i < n; i++, m++) { |
550 | 2.89k | err = btf_validate_str(btf, m->name_off, "enum name", id); |
551 | 2.89k | if (err) |
552 | 8 | return err; |
553 | 2.89k | } |
554 | 1.14k | break; |
555 | 1.15k | } |
556 | 1.26k | case BTF_KIND_FUNC: { |
557 | 1.26k | const struct btf_type *ft; |
558 | | |
559 | 1.26k | err = btf_validate_id(btf, t->type, id); |
560 | 1.26k | if (err) |
561 | 41 | return err; |
562 | 1.22k | ft = btf__type_by_id(btf, t->type); |
563 | 1.22k | if (btf_kind(ft) != BTF_KIND_FUNC_PROTO) { |
564 | 9 | pr_warn("btf: type [%u]: referenced type [%u] is not FUNC_PROTO\n", id, t->type); |
565 | 9 | return -EINVAL; |
566 | 9 | } |
567 | 1.21k | break; |
568 | 1.22k | } |
569 | 2.10k | case BTF_KIND_FUNC_PROTO: { |
570 | 2.10k | const struct btf_param *m = btf_params(t); |
571 | | |
572 | 2.10k | n = btf_vlen(t); |
573 | 3.92k | for (i = 0; i < n; i++, m++) { |
574 | 1.86k | err = btf_validate_str(btf, m->name_off, "param name", id); |
575 | 1.86k | err = err ?: btf_validate_id(btf, m->type, id); |
576 | 1.86k | if (err) |
577 | 38 | return err; |
578 | 1.86k | } |
579 | 2.05k | break; |
580 | 2.10k | } |
581 | 5.88k | case BTF_KIND_DATASEC: { |
582 | 5.88k | const struct btf_var_secinfo *m = btf_var_secinfos(t); |
583 | | |
584 | 5.88k | n = btf_vlen(t); |
585 | 15.2k | for (i = 0; i < n; i++, m++) { |
586 | 9.45k | err = btf_validate_id(btf, m->type, id); |
587 | 9.45k | if (err) |
588 | 42 | return err; |
589 | 9.45k | } |
590 | 5.83k | break; |
591 | 5.88k | } |
592 | 5.83k | default: |
593 | 0 | pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind); |
594 | 0 | return -EINVAL; |
595 | 31.7k | } |
596 | 31.4k | return 0; |
597 | 31.7k | } |
598 | | |
599 | | /* Validate basic sanity of BTF. It's intentionally less thorough than |
600 | | * the kernel's validation and checks only those properties of BTF that |
601 | | * libbpf relies on to be correct (e.g., valid type IDs, valid string offsets, etc.) |
602 | | */ |
603 | | static int btf_sanity_check(const struct btf *btf) |
604 | 3.23k | { |
605 | 3.23k | const struct btf_type *t; |
606 | 3.23k | __u32 i, n = btf__type_cnt(btf); |
607 | 3.23k | int err; |
608 | | |
609 | 34.6k | for (i = btf->start_id; i < n; i++) { |
610 | 31.8k | t = btf_type_by_id(btf, i); |
611 | 31.8k | err = btf_validate_type(btf, t, i); |
612 | 31.8k | if (err) |
613 | 429 | return err; |
614 | 31.8k | } |
615 | 2.80k | return 0; |
616 | 3.23k | } |
617 | | |
618 | | __u32 btf__type_cnt(const struct btf *btf) |
619 | 15.7k | { |
620 | 15.7k | return btf->start_id + btf->nr_types; |
621 | 15.7k | } |
622 | | |
623 | | const struct btf *btf__base_btf(const struct btf *btf) |
624 | 0 | { |
625 | 0 | return btf->base_btf; |
626 | 0 | } |
627 | | |
628 | | /* internal helper returning non-const pointer to a type */ |
629 | | struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id) |
630 | 211k | { |
631 | 211k | if (type_id == 0) |
632 | 7.19k | return &btf_void; |
633 | 203k | if (type_id < btf->start_id) |
634 | 0 | return btf_type_by_id(btf->base_btf, type_id); |
635 | 203k | return btf->types_data + btf->type_offs[type_id - btf->start_id]; |
636 | 203k | } |
637 | | |
638 | | const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) |
639 | 159k | { |
640 | 159k | if (type_id >= btf->start_id + btf->nr_types) |
641 | 285 | return errno = EINVAL, NULL; |
642 | 159k | return btf_type_by_id((struct btf *)btf, type_id); |
643 | 159k | } |
644 | | |
645 | | static int determine_ptr_size(const struct btf *btf) |
646 | 0 | { |
647 | 0 | static const char * const long_aliases[] = { |
648 | 0 | "long", |
649 | 0 | "long int", |
650 | 0 | "int long", |
651 | 0 | "unsigned long", |
652 | 0 | "long unsigned", |
653 | 0 | "unsigned long int", |
654 | 0 | "unsigned int long", |
655 | 0 | "long unsigned int", |
656 | 0 | "long int unsigned", |
657 | 0 | "int unsigned long", |
658 | 0 | "int long unsigned", |
659 | 0 | }; |
660 | 0 | const struct btf_type *t; |
661 | 0 | const char *name; |
662 | 0 | int i, j, n; |
663 | |
664 | 0 | if (btf->base_btf && btf->base_btf->ptr_sz > 0) |
665 | 0 | return btf->base_btf->ptr_sz; |
666 | | |
667 | 0 | n = btf__type_cnt(btf); |
668 | 0 | for (i = 1; i < n; i++) { |
669 | 0 | t = btf__type_by_id(btf, i); |
670 | 0 | if (!btf_is_int(t)) |
671 | 0 | continue; |
672 | | |
673 | 0 | if (t->size != 4 && t->size != 8) |
674 | 0 | continue; |
675 | | |
676 | 0 | name = btf__name_by_offset(btf, t->name_off); |
677 | 0 | if (!name) |
678 | 0 | continue; |
679 | | |
680 | 0 | for (j = 0; j < ARRAY_SIZE(long_aliases); j++) { |
681 | 0 | if (strcmp(name, long_aliases[j]) == 0) |
682 | 0 | return t->size; |
683 | 0 | } |
684 | 0 | } |
685 | | |
686 | 0 | return -1; |
687 | 0 | } |
688 | | |
689 | | static size_t btf_ptr_sz(const struct btf *btf) |
690 | 898 | { |
691 | 898 | if (!btf->ptr_sz) |
692 | 0 | ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf); |
693 | 898 | return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz; |
694 | 898 | } |
695 | | |
696 | | /* Return the pointer size this BTF instance assumes. The size is |
697 | | * heuristically determined by looking for a 'long' or 'unsigned long' |
698 | | * integer type and recording its size in bytes. If the BTF type info |
699 | | * doesn't have any such type, this function returns 0. In that case, the |
700 | | * native architecture's pointer size is assumed, so the result will be |
701 | | * either 4 or 8, depending on the architecture libbpf was compiled for. |
702 | | * It's possible to override the guessed value via btf__set_pointer_size(). |
703 | | */ |
704 | | size_t btf__pointer_size(const struct btf *btf) |
705 | 0 | { |
706 | 0 | if (!btf->ptr_sz) |
707 | 0 | ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf); |
708 | |
709 | 0 | if (btf->ptr_sz < 0) |
710 | | /* not enough BTF type info to guess */ |
711 | 0 | return 0; |
712 | | |
713 | 0 | return btf->ptr_sz; |
714 | 0 | } |
715 | | |
716 | | /* Override or set pointer size in bytes. Only values of 4 and 8 are |
717 | | * supported. |
718 | | */ |
719 | | int btf__set_pointer_size(struct btf *btf, size_t ptr_sz) |
720 | 2.80k | { |
721 | 2.80k | if (ptr_sz != 4 && ptr_sz != 8) |
722 | 0 | return libbpf_err(-EINVAL); |
723 | 2.80k | btf->ptr_sz = ptr_sz; |
724 | 2.80k | return 0; |
725 | 2.80k | } |
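/* Editorial usage sketch (not part of libbpf): pin down the pointer size
 * for BTF describing a 32-bit target before doing size/alignment queries,
 * since the 'long'-based heuristic above may find nothing to go on.
 */
static void fixup_ptr_sz_for_ilp32(struct btf *btf)
{
        if (btf__pointer_size(btf) == 0)        /* heuristic found no 'long' */
                btf__set_pointer_size(btf, 4);  /* assume an ILP32 target */
}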
726 | | |
727 | | static bool is_host_big_endian(void) |
728 | 0 | { |
729 | 0 | #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ |
730 | 0 | return false; |
731 | | #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ |
732 | | return true; |
733 | | #else |
734 | | # error "Unrecognized __BYTE_ORDER__" |
735 | | #endif |
736 | 0 | } |
737 | | |
738 | | enum btf_endianness btf__endianness(const struct btf *btf) |
739 | 0 | { |
740 | 0 | if (is_host_big_endian()) |
741 | 0 | return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN; |
742 | 0 | else |
743 | 0 | return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN; |
744 | 0 | } |
745 | | |
746 | | int btf__set_endianness(struct btf *btf, enum btf_endianness endian) |
747 | 0 | { |
748 | 0 | if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN) |
749 | 0 | return libbpf_err(-EINVAL); |
750 | | |
751 | 0 | btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN); |
752 | 0 | if (!btf->swapped_endian) { |
753 | 0 | free(btf->raw_data_swapped); |
754 | 0 | btf->raw_data_swapped = NULL; |
755 | 0 | } |
756 | 0 | return 0; |
757 | 0 | } |
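/* Editorial usage sketch (not part of libbpf): request big-endian output
 * encoding; a later btf__raw_data() call then returns byte-swapped data
 * when the host is little-endian.
 */
static int force_big_endian(struct btf *btf)
{
        if (btf__endianness(btf) == BTF_BIG_ENDIAN)
                return 0;
        return btf__set_endianness(btf, BTF_BIG_ENDIAN);
}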
758 | | |
759 | | static bool btf_type_is_void(const struct btf_type *t) |
760 | 3.87k | { |
761 | 3.87k | return t == &btf_void || btf_is_fwd(t); |
762 | 3.87k | } |
763 | | |
764 | | static bool btf_type_is_void_or_null(const struct btf_type *t) |
765 | 3.87k | { |
766 | 3.87k | return !t || btf_type_is_void(t); |
767 | 3.87k | } |
768 | | |
769 | 7.83k | #define MAX_RESOLVE_DEPTH 32 |
770 | | |
771 | | __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) |
772 | 1.38k | { |
773 | 1.38k | const struct btf_array *array; |
774 | 1.38k | const struct btf_type *t; |
775 | 1.38k | __u32 nelems = 1; |
776 | 1.38k | __s64 size = -1; |
777 | 1.38k | int i; |
778 | | |
779 | 1.38k | t = btf__type_by_id(btf, type_id); |
780 | 3.40k | for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) { |
781 | 3.37k | switch (btf_kind(t)) { |
782 | 757 | case BTF_KIND_INT: |
783 | 833 | case BTF_KIND_STRUCT: |
784 | 857 | case BTF_KIND_UNION: |
785 | 1.07k | case BTF_KIND_ENUM: |
786 | 1.17k | case BTF_KIND_ENUM64: |
787 | 1.23k | case BTF_KIND_DATASEC: |
788 | 1.28k | case BTF_KIND_FLOAT: |
789 | 1.28k | size = t->size; |
790 | 1.28k | goto done; |
791 | 43 | case BTF_KIND_PTR: |
792 | 43 | size = btf_ptr_sz(btf); |
793 | 43 | goto done; |
794 | 202 | case BTF_KIND_TYPEDEF: |
795 | 320 | case BTF_KIND_VOLATILE: |
796 | 527 | case BTF_KIND_CONST: |
797 | 765 | case BTF_KIND_RESTRICT: |
798 | 990 | case BTF_KIND_VAR: |
799 | 1.08k | case BTF_KIND_DECL_TAG: |
800 | 1.24k | case BTF_KIND_TYPE_TAG: |
801 | 1.24k | type_id = t->type; |
802 | 1.24k | break; |
803 | 800 | case BTF_KIND_ARRAY: |
804 | 800 | array = btf_array(t); |
805 | 800 | if (nelems && array->nelems > UINT32_MAX / nelems) |
806 | 24 | return libbpf_err(-E2BIG); |
807 | 776 | nelems *= array->nelems; |
808 | 776 | type_id = array->type; |
809 | 776 | break; |
810 | 3 | default: |
811 | 3 | return libbpf_err(-EINVAL); |
812 | 3.37k | } |
813 | | |
814 | 2.01k | t = btf__type_by_id(btf, type_id); |
815 | 2.01k | } |
816 | | |
817 | 1.35k | done: |
818 | 1.35k | if (size < 0) |
819 | 29 | return libbpf_err(-EINVAL); |
820 | 1.33k | if (nelems && size > UINT32_MAX / nelems) |
821 | 12 | return libbpf_err(-E2BIG); |
822 | | |
823 | 1.31k | return nelems * size; |
824 | 1.33k | } |
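/* Editorial usage sketch (not part of libbpf): resolve the byte size of a
 * named struct by chaining two public APIs defined in this file.
 */
static __s64 sizeof_struct_by_name(const struct btf *btf, const char *name)
{
        __s32 id = btf__find_by_name_kind(btf, name, BTF_KIND_STRUCT);

        if (id < 0)
                return id;              /* -ENOENT if no such struct */
        return btf__resolve_size(btf, id);
}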
825 | | |
826 | | int btf__align_of(const struct btf *btf, __u32 id) |
827 | 1.60k | { |
828 | 1.60k | const struct btf_type *t = btf__type_by_id(btf, id); |
829 | 1.60k | __u16 kind = btf_kind(t); |
830 | | |
831 | 1.60k | switch (kind) { |
832 | 649 | case BTF_KIND_INT: |
833 | 720 | case BTF_KIND_ENUM: |
834 | 825 | case BTF_KIND_ENUM64: |
835 | 842 | case BTF_KIND_FLOAT: |
836 | 842 | return min(btf_ptr_sz(btf), (size_t)t->size); |
837 | 13 | case BTF_KIND_PTR: |
838 | 13 | return btf_ptr_sz(btf); |
839 | 94 | case BTF_KIND_TYPEDEF: |
840 | 154 | case BTF_KIND_VOLATILE: |
841 | 292 | case BTF_KIND_CONST: |
842 | 446 | case BTF_KIND_RESTRICT: |
843 | 527 | case BTF_KIND_TYPE_TAG: |
844 | 527 | return btf__align_of(btf, t->type); |
845 | 161 | case BTF_KIND_ARRAY: |
846 | 161 | return btf__align_of(btf, btf_array(t)->type); |
847 | 28 | case BTF_KIND_STRUCT: |
848 | 52 | case BTF_KIND_UNION: { |
849 | 52 | const struct btf_member *m = btf_members(t); |
850 | 52 | __u16 vlen = btf_vlen(t); |
851 | 52 | int i, max_align = 1, align; |
852 | | |
853 | 115 | for (i = 0; i < vlen; i++, m++) { |
854 | 79 | align = btf__align_of(btf, m->type); |
855 | 79 | if (align <= 0) |
856 | 5 | return libbpf_err(align); |
857 | 74 | max_align = max(max_align, align); |
858 | | |
859 | | /* if field offset isn't aligned according to field |
860 | | * type's alignment, then struct must be packed |
861 | | */ |
862 | 74 | if (btf_member_bitfield_size(t, i) == 0 && |
863 | 74 | (m->offset % (8 * align)) != 0) |
864 | 11 | return 1; |
865 | 74 | } |
866 | | |
867 | | /* if struct/union size isn't a multiple of its alignment, |
868 | | * then struct must be packed |
869 | | */ |
870 | 36 | if ((t->size % max_align) != 0) |
871 | 10 | return 1; |
872 | | |
873 | 26 | return max_align; |
874 | 36 | } |
875 | 8 | default: |
876 | 8 | pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t)); |
877 | 8 | return errno = EINVAL, 0; |
878 | 1.60k | } |
879 | 1.60k | } |
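/* Editorial worked example for the alignment logic above: for
 * struct { char c; int x; } the largest member alignment is 4 and the
 * 8-byte struct size is a multiple of 4, so btf__align_of() returns 4;
 * had 'x' been placed at bit offset 8 instead of 32, the member-offset
 * check would report the struct as packed and return 1.
 */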
880 | | |
881 | | int btf__resolve_type(const struct btf *btf, __u32 type_id) |
882 | 69 | { |
883 | 69 | const struct btf_type *t; |
884 | 69 | int depth = 0; |
885 | | |
886 | 69 | t = btf__type_by_id(btf, type_id); |
887 | 446 | while (depth < MAX_RESOLVE_DEPTH && |
888 | 446 | !btf_type_is_void_or_null(t) && |
889 | 446 | (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) { |
890 | 377 | type_id = t->type; |
891 | 377 | t = btf__type_by_id(btf, type_id); |
892 | 377 | depth++; |
893 | 377 | } |
894 | | |
895 | 69 | if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t)) |
896 | 10 | return libbpf_err(-EINVAL); |
897 | | |
898 | 59 | return type_id; |
899 | 69 | } |
900 | | |
901 | | __s32 btf__find_by_name(const struct btf *btf, const char *type_name) |
902 | 482 | { |
903 | 482 | __u32 i, nr_types = btf__type_cnt(btf); |
904 | | |
905 | 482 | if (!strcmp(type_name, "void")) |
906 | 0 | return 0; |
907 | | |
908 | 6.69k | for (i = 1; i < nr_types; i++) { |
909 | 6.38k | const struct btf_type *t = btf__type_by_id(btf, i); |
910 | 6.38k | const char *name = btf__name_by_offset(btf, t->name_off); |
911 | | |
912 | 6.38k | if (name && !strcmp(type_name, name)) |
913 | 178 | return i; |
914 | 6.38k | } |
915 | | |
916 | 304 | return libbpf_err(-ENOENT); |
917 | 482 | } |
918 | | |
919 | | static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id, |
920 | | const char *type_name, __u32 kind) |
921 | 2.84k | { |
922 | 2.84k | __u32 i, nr_types = btf__type_cnt(btf); |
923 | | |
924 | 2.84k | if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void")) |
925 | 0 | return 0; |
926 | | |
927 | 33.6k | for (i = start_id; i < nr_types; i++) { |
928 | 31.4k | const struct btf_type *t = btf__type_by_id(btf, i); |
929 | 31.4k | const char *name; |
930 | | |
931 | 31.4k | if (btf_kind(t) != kind) |
932 | 25.6k | continue; |
933 | 5.83k | name = btf__name_by_offset(btf, t->name_off); |
934 | 5.83k | if (name && !strcmp(type_name, name)) |
935 | 611 | return i; |
936 | 5.83k | } |
937 | | |
938 | 2.23k | return libbpf_err(-ENOENT); |
939 | 2.84k | } |
940 | | |
941 | | __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name, |
942 | | __u32 kind) |
943 | 0 | { |
944 | 0 | return btf_find_by_name_kind(btf, btf->start_id, type_name, kind); |
945 | 0 | } |
946 | | |
947 | | __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, |
948 | | __u32 kind) |
949 | 2.84k | { |
950 | 2.84k | return btf_find_by_name_kind(btf, 1, type_name, kind); |
951 | 2.84k | } |
952 | | |
953 | | static bool btf_is_modifiable(const struct btf *btf) |
954 | 4.39k | { |
955 | 4.39k | return (void *)btf->hdr != btf->raw_data; |
956 | 4.39k | } |
957 | | |
958 | | static void btf_free_raw_data(struct btf *btf) |
959 | 3.99k | { |
960 | 3.99k | if (btf->raw_data_is_mmap) { |
961 | 0 | munmap(btf->raw_data, btf->raw_size); |
962 | 0 | btf->raw_data_is_mmap = false; |
963 | 3.99k | } else { |
964 | 3.99k | free(btf->raw_data); |
965 | 3.99k | } |
966 | 3.99k | btf->raw_data = NULL; |
967 | 3.99k | } |
968 | | |
969 | | void btf__free(struct btf *btf) |
970 | 31.7k | { |
971 | 31.7k | if (IS_ERR_OR_NULL(btf)) |
972 | 28.1k | return; |
973 | | |
974 | 3.60k | if (btf->fd >= 0) |
975 | 0 | close(btf->fd); |
976 | | |
977 | 3.60k | if (btf_is_modifiable(btf)) { |
978 | | /* if BTF was modified after loading, it will have a split |
979 | | * in-memory representation for header, types, and strings |
980 | | * sections, so we need to free all of them individually. It |
981 | | * might still have a cached contiguous raw data present, |
982 | | * which will be unconditionally freed below. |
983 | | */ |
984 | 396 | free(btf->hdr); |
985 | 396 | free(btf->types_data); |
986 | 396 | strset__free(btf->strs_set); |
987 | 396 | } |
988 | 3.60k | btf_free_raw_data(btf); |
989 | 3.60k | free(btf->raw_data_swapped); |
990 | 3.60k | free(btf->type_offs); |
991 | 3.60k | if (btf->owns_base) |
992 | 0 | btf__free(btf->base_btf); |
993 | 3.60k | free(btf); |
994 | 3.60k | } |
995 | | |
996 | | static struct btf *btf_new_empty(struct btf *base_btf) |
997 | 0 | { |
998 | 0 | struct btf *btf; |
999 | |
1000 | 0 | btf = calloc(1, sizeof(*btf)); |
1001 | 0 | if (!btf) |
1002 | 0 | return ERR_PTR(-ENOMEM); |
1003 | | |
1004 | 0 | btf->nr_types = 0; |
1005 | 0 | btf->start_id = 1; |
1006 | 0 | btf->start_str_off = 0; |
1007 | 0 | btf->fd = -1; |
1008 | 0 | btf->ptr_sz = sizeof(void *); |
1009 | 0 | btf->swapped_endian = false; |
1010 | |
1011 | 0 | if (base_btf) { |
1012 | 0 | btf->base_btf = base_btf; |
1013 | 0 | btf->start_id = btf__type_cnt(base_btf); |
1014 | 0 | btf->start_str_off = base_btf->hdr->str_len + base_btf->start_str_off; |
1015 | 0 | btf->swapped_endian = base_btf->swapped_endian; |
1016 | 0 | } |
1017 | | |
1018 | | /* +1 for empty string at offset 0 */ |
1019 | 0 | btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1); |
1020 | 0 | btf->raw_data = calloc(1, btf->raw_size); |
1021 | 0 | if (!btf->raw_data) { |
1022 | 0 | free(btf); |
1023 | 0 | return ERR_PTR(-ENOMEM); |
1024 | 0 | } |
1025 | | |
1026 | 0 | btf->hdr = btf->raw_data; |
1027 | 0 | btf->hdr->hdr_len = sizeof(struct btf_header); |
1028 | 0 | btf->hdr->magic = BTF_MAGIC; |
1029 | 0 | btf->hdr->version = BTF_VERSION; |
1030 | |
1031 | 0 | btf->types_data = btf->raw_data + btf->hdr->hdr_len; |
1032 | 0 | btf->strs_data = btf->raw_data + btf->hdr->hdr_len; |
1033 | 0 | btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */ |
1034 | |
1035 | 0 | return btf; |
1036 | 0 | } |
1037 | | |
1038 | | struct btf *btf__new_empty(void) |
1039 | 0 | { |
1040 | 0 | return libbpf_ptr(btf_new_empty(NULL)); |
1041 | 0 | } |
1042 | | |
1043 | | struct btf *btf__new_empty_split(struct btf *base_btf) |
1044 | 0 | { |
1045 | 0 | return libbpf_ptr(btf_new_empty(base_btf)); |
1046 | 0 | } |
1047 | | |
1048 | | static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf, bool is_mmap) |
1049 | 3.60k | { |
1050 | 3.60k | struct btf *btf; |
1051 | 3.60k | int err; |
1052 | | |
1053 | 3.60k | btf = calloc(1, sizeof(struct btf)); |
1054 | 3.60k | if (!btf) |
1055 | 0 | return ERR_PTR(-ENOMEM); |
1056 | | |
1057 | 3.60k | btf->nr_types = 0; |
1058 | 3.60k | btf->start_id = 1; |
1059 | 3.60k | btf->start_str_off = 0; |
1060 | 3.60k | btf->fd = -1; |
1061 | | |
1062 | 3.60k | if (base_btf) { |
1063 | 0 | btf->base_btf = base_btf; |
1064 | 0 | btf->start_id = btf__type_cnt(base_btf); |
1065 | 0 | btf->start_str_off = base_btf->hdr->str_len; |
1066 | 0 | } |
1067 | | |
1068 | 3.60k | if (is_mmap) { |
1069 | 0 | btf->raw_data = (void *)data; |
1070 | 0 | btf->raw_data_is_mmap = true; |
1071 | 3.60k | } else { |
1072 | 3.60k | btf->raw_data = malloc(size); |
1073 | 3.60k | if (!btf->raw_data) { |
1074 | 0 | err = -ENOMEM; |
1075 | 0 | goto done; |
1076 | 0 | } |
1077 | 3.60k | memcpy(btf->raw_data, data, size); |
1078 | 3.60k | } |
1079 | | |
1080 | 3.60k | btf->raw_size = size; |
1081 | | |
1082 | 3.60k | btf->hdr = btf->raw_data; |
1083 | 3.60k | err = btf_parse_hdr(btf); |
1084 | 3.60k | if (err) |
1085 | 200 | goto done; |
1086 | | |
1087 | 3.40k | btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off; |
1088 | 3.40k | btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off; |
1089 | | |
1090 | 3.40k | err = btf_parse_str_sec(btf); |
1091 | 3.40k | err = err ?: btf_parse_type_sec(btf); |
1092 | 3.40k | err = err ?: btf_sanity_check(btf); |
1093 | 3.38k | if (err) |
1094 | 596 | goto done; |
1095 | | |
1096 | 3.60k | done: |
1097 | 3.60k | if (err) { |
1098 | 796 | btf__free(btf); |
1099 | 796 | return ERR_PTR(err); |
1100 | 796 | } |
1101 | | |
1102 | 2.80k | return btf; |
1103 | 3.60k | } |
1104 | | |
1105 | | struct btf *btf__new(const void *data, __u32 size) |
1106 | 3.60k | { |
1107 | 3.60k | return libbpf_ptr(btf_new(data, size, NULL, false)); |
1108 | 3.60k | } |
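/* Editorial usage sketch (not part of libbpf): parse an in-memory raw BTF
 * blob and print every type's name. btf__new() sets errno and returns
 * NULL on failure.
 */
static int print_type_names(const void *data, __u32 size)
{
        struct btf *btf = btf__new(data, size);
        __u32 id, n;

        if (!btf)
                return -errno;
        n = btf__type_cnt(btf);
        for (id = 1; id < n; id++) {
                const struct btf_type *t = btf__type_by_id(btf, id);

                printf("[%u] %s\n", id, btf__name_by_offset(btf, t->name_off));
        }
        btf__free(btf);
        return 0;
}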
1109 | | |
1110 | | struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf) |
1111 | 0 | { |
1112 | 0 | return libbpf_ptr(btf_new(data, size, base_btf, false)); |
1113 | 0 | } |
1114 | | |
1115 | | struct btf_elf_secs { |
1116 | | Elf_Data *btf_data; |
1117 | | Elf_Data *btf_ext_data; |
1118 | | Elf_Data *btf_base_data; |
1119 | | }; |
1120 | | |
1121 | | static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs *secs) |
1122 | 0 | { |
1123 | 0 | Elf_Scn *scn = NULL; |
1124 | 0 | Elf_Data *data; |
1125 | 0 | GElf_Ehdr ehdr; |
1126 | 0 | size_t shstrndx; |
1127 | 0 | int idx = 0; |
1128 | |
1129 | 0 | if (!gelf_getehdr(elf, &ehdr)) { |
1130 | 0 | pr_warn("failed to get EHDR from %s\n", path); |
1131 | 0 | goto err; |
1132 | 0 | } |
1133 | | |
1134 | 0 | if (elf_getshdrstrndx(elf, &shstrndx)) { |
1135 | 0 | pr_warn("failed to get section names section index for %s\n", |
1136 | 0 | path); |
1137 | 0 | goto err; |
1138 | 0 | } |
1139 | | |
1140 | 0 | if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) { |
1141 | 0 | pr_warn("failed to get e_shstrndx from %s\n", path); |
1142 | 0 | goto err; |
1143 | 0 | } |
1144 | | |
1145 | 0 | while ((scn = elf_nextscn(elf, scn)) != NULL) { |
1146 | 0 | Elf_Data **field; |
1147 | 0 | GElf_Shdr sh; |
1148 | 0 | char *name; |
1149 | |
1150 | 0 | idx++; |
1151 | 0 | if (gelf_getshdr(scn, &sh) != &sh) { |
1152 | 0 | pr_warn("failed to get section(%d) header from %s\n", |
1153 | 0 | idx, path); |
1154 | 0 | goto err; |
1155 | 0 | } |
1156 | 0 | name = elf_strptr(elf, shstrndx, sh.sh_name); |
1157 | 0 | if (!name) { |
1158 | 0 | pr_warn("failed to get section(%d) name from %s\n", |
1159 | 0 | idx, path); |
1160 | 0 | goto err; |
1161 | 0 | } |
1162 | | |
1163 | 0 | if (strcmp(name, BTF_ELF_SEC) == 0) |
1164 | 0 | field = &secs->btf_data; |
1165 | 0 | else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) |
1166 | 0 | field = &secs->btf_ext_data; |
1167 | 0 | else if (strcmp(name, BTF_BASE_ELF_SEC) == 0) |
1168 | 0 | field = &secs->btf_base_data; |
1169 | 0 | else |
1170 | 0 | continue; |
1171 | | |
1172 | 0 | if (sh.sh_type != SHT_PROGBITS) { |
1173 | 0 | pr_warn("unexpected section type (%d) of section(%d, %s) from %s\n", |
1174 | 0 | sh.sh_type, idx, name, path); |
1175 | 0 | goto err; |
1176 | 0 | } |
1177 | | |
1178 | 0 | data = elf_getdata(scn, 0); |
1179 | 0 | if (!data) { |
1180 | 0 | pr_warn("failed to get section(%d, %s) data from %s\n", |
1181 | 0 | idx, name, path); |
1182 | 0 | goto err; |
1183 | 0 | } |
1184 | 0 | *field = data; |
1185 | 0 | } |
1186 | | |
1187 | 0 | return 0; |
1188 | | |
1189 | 0 | err: |
1190 | 0 | return -LIBBPF_ERRNO__FORMAT; |
1191 | 0 | } |
1192 | | |
1193 | | static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, |
1194 | | struct btf_ext **btf_ext) |
1195 | 0 | { |
1196 | 0 | struct btf_elf_secs secs = {}; |
1197 | 0 | struct btf *dist_base_btf = NULL; |
1198 | 0 | struct btf *btf = NULL; |
1199 | 0 | int err = 0, fd = -1; |
1200 | 0 | Elf *elf = NULL; |
1201 | |
1202 | 0 | if (elf_version(EV_CURRENT) == EV_NONE) { |
1203 | 0 | pr_warn("failed to init libelf for %s\n", path); |
1204 | 0 | return ERR_PTR(-LIBBPF_ERRNO__LIBELF); |
1205 | 0 | } |
1206 | | |
1207 | 0 | fd = open(path, O_RDONLY | O_CLOEXEC); |
1208 | 0 | if (fd < 0) { |
1209 | 0 | err = -errno; |
1210 | 0 | pr_warn("failed to open %s: %s\n", path, errstr(err)); |
1211 | 0 | return ERR_PTR(err); |
1212 | 0 | } |
1213 | | |
1214 | 0 | elf = elf_begin(fd, ELF_C_READ, NULL); |
1215 | 0 | if (!elf) { |
1216 | 0 | err = -LIBBPF_ERRNO__FORMAT; |
1217 | 0 | pr_warn("failed to open %s as ELF file\n", path); |
1218 | 0 | goto done; |
1219 | 0 | } |
1220 | | |
1221 | 0 | err = btf_find_elf_sections(elf, path, &secs); |
1222 | 0 | if (err) |
1223 | 0 | goto done; |
1224 | | |
1225 | 0 | if (!secs.btf_data) { |
1226 | 0 | pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path); |
1227 | 0 | err = -ENODATA; |
1228 | 0 | goto done; |
1229 | 0 | } |
1230 | | |
1231 | 0 | if (secs.btf_base_data) { |
1232 | 0 | dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size, |
1233 | 0 | NULL, false); |
1234 | 0 | if (IS_ERR(dist_base_btf)) { |
1235 | 0 | err = PTR_ERR(dist_base_btf); |
1236 | 0 | dist_base_btf = NULL; |
1237 | 0 | goto done; |
1238 | 0 | } |
1239 | 0 | } |
1240 | | |
1241 | 0 | btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size, |
1242 | 0 | dist_base_btf ?: base_btf, false); |
1243 | 0 | if (IS_ERR(btf)) { |
1244 | 0 | err = PTR_ERR(btf); |
1245 | 0 | goto done; |
1246 | 0 | } |
1247 | 0 | if (dist_base_btf && base_btf) { |
1248 | 0 | err = btf__relocate(btf, base_btf); |
1249 | 0 | if (err) |
1250 | 0 | goto done; |
1251 | 0 | btf__free(dist_base_btf); |
1252 | 0 | dist_base_btf = NULL; |
1253 | 0 | } |
1254 | | |
1255 | 0 | if (dist_base_btf) |
1256 | 0 | btf->owns_base = true; |
1257 | |
1258 | 0 | switch (gelf_getclass(elf)) { |
1259 | 0 | case ELFCLASS32: |
1260 | 0 | btf__set_pointer_size(btf, 4); |
1261 | 0 | break; |
1262 | 0 | case ELFCLASS64: |
1263 | 0 | btf__set_pointer_size(btf, 8); |
1264 | 0 | break; |
1265 | 0 | default: |
1266 | 0 | pr_warn("failed to get ELF class (bitness) for %s\n", path); |
1267 | 0 | break; |
1268 | 0 | } |
1269 | | |
1270 | 0 | if (btf_ext && secs.btf_ext_data) { |
1271 | 0 | *btf_ext = btf_ext__new(secs.btf_ext_data->d_buf, secs.btf_ext_data->d_size); |
1272 | 0 | if (IS_ERR(*btf_ext)) { |
1273 | 0 | err = PTR_ERR(*btf_ext); |
1274 | 0 | goto done; |
1275 | 0 | } |
1276 | 0 | } else if (btf_ext) { |
1277 | 0 | *btf_ext = NULL; |
1278 | 0 | } |
1279 | 0 | done: |
1280 | 0 | if (elf) |
1281 | 0 | elf_end(elf); |
1282 | 0 | close(fd); |
1283 | |
1284 | 0 | if (!err) |
1285 | 0 | return btf; |
1286 | | |
1287 | 0 | if (btf_ext) |
1288 | 0 | btf_ext__free(*btf_ext); |
1289 | 0 | btf__free(dist_base_btf); |
1290 | 0 | btf__free(btf); |
1291 | |
1292 | 0 | return ERR_PTR(err); |
1293 | 0 | } |
1294 | | |
1295 | | struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) |
1296 | 0 | { |
1297 | 0 | return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext)); |
1298 | 0 | } |
1299 | | |
1300 | | struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf) |
1301 | 0 | { |
1302 | 0 | return libbpf_ptr(btf_parse_elf(path, base_btf, NULL)); |
1303 | 0 | } |
1304 | | |
1305 | | static struct btf *btf_parse_raw(const char *path, struct btf *base_btf) |
1306 | 0 | { |
1307 | 0 | struct btf *btf = NULL; |
1308 | 0 | void *data = NULL; |
1309 | 0 | FILE *f = NULL; |
1310 | 0 | __u16 magic; |
1311 | 0 | int err = 0; |
1312 | 0 | long sz; |
1313 | |
1314 | 0 | f = fopen(path, "rbe"); |
1315 | 0 | if (!f) { |
1316 | 0 | err = -errno; |
1317 | 0 | goto err_out; |
1318 | 0 | } |
1319 | | |
1320 | | /* check BTF magic */ |
1321 | 0 | if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) { |
1322 | 0 | err = -EIO; |
1323 | 0 | goto err_out; |
1324 | 0 | } |
1325 | 0 | if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) { |
1326 | | /* definitely not a raw BTF */ |
1327 | 0 | err = -EPROTO; |
1328 | 0 | goto err_out; |
1329 | 0 | } |
1330 | | |
1331 | | /* get file size */ |
1332 | 0 | if (fseek(f, 0, SEEK_END)) { |
1333 | 0 | err = -errno; |
1334 | 0 | goto err_out; |
1335 | 0 | } |
1336 | 0 | sz = ftell(f); |
1337 | 0 | if (sz < 0) { |
1338 | 0 | err = -errno; |
1339 | 0 | goto err_out; |
1340 | 0 | } |
1341 | | /* rewind to the start */ |
1342 | 0 | if (fseek(f, 0, SEEK_SET)) { |
1343 | 0 | err = -errno; |
1344 | 0 | goto err_out; |
1345 | 0 | } |
1346 | | |
1347 | | /* pre-alloc memory and read all of BTF data */ |
1348 | 0 | data = malloc(sz); |
1349 | 0 | if (!data) { |
1350 | 0 | err = -ENOMEM; |
1351 | 0 | goto err_out; |
1352 | 0 | } |
1353 | 0 | if (fread(data, 1, sz, f) < sz) { |
1354 | 0 | err = -EIO; |
1355 | 0 | goto err_out; |
1356 | 0 | } |
1357 | | |
1358 | | /* finally parse BTF data */ |
1359 | 0 | btf = btf_new(data, sz, base_btf, false); |
1360 | |
1361 | 0 | err_out: |
1362 | 0 | free(data); |
1363 | 0 | if (f) |
1364 | 0 | fclose(f); |
1365 | 0 | return err ? ERR_PTR(err) : btf; |
1366 | 0 | } |
1367 | | |
1368 | | struct btf *btf__parse_raw(const char *path) |
1369 | 0 | { |
1370 | 0 | return libbpf_ptr(btf_parse_raw(path, NULL)); |
1371 | 0 | } |
1372 | | |
1373 | | struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf) |
1374 | 0 | { |
1375 | 0 | return libbpf_ptr(btf_parse_raw(path, base_btf)); |
1376 | 0 | } |
1377 | | |
1378 | | static struct btf *btf_parse_raw_mmap(const char *path, struct btf *base_btf) |
1379 | 0 | { |
1380 | 0 | struct stat st; |
1381 | 0 | void *data; |
1382 | 0 | struct btf *btf; |
1383 | 0 | int fd, err; |
1384 | |
1385 | 0 | fd = open(path, O_RDONLY); |
1386 | 0 | if (fd < 0) |
1387 | 0 | return ERR_PTR(-errno); |
1388 | | |
1389 | 0 | if (fstat(fd, &st) < 0) { |
1390 | 0 | err = -errno; |
1391 | 0 | close(fd); |
1392 | 0 | return ERR_PTR(err); |
1393 | 0 | } |
1394 | | |
1395 | 0 | data = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); |
1396 | 0 | err = -errno; |
1397 | 0 | close(fd); |
1398 | |
1399 | 0 | if (data == MAP_FAILED) |
1400 | 0 | return ERR_PTR(err); |
1401 | | |
1402 | 0 | btf = btf_new(data, st.st_size, base_btf, true); |
1403 | 0 | if (IS_ERR(btf)) |
1404 | 0 | munmap(data, st.st_size); |
1405 | |
1406 | 0 | return btf; |
1407 | 0 | } |
1408 | | |
1409 | | static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext) |
1410 | 0 | { |
1411 | 0 | struct btf *btf; |
1412 | 0 | int err; |
1413 | |
1414 | 0 | if (btf_ext) |
1415 | 0 | *btf_ext = NULL; |
1416 | |
1417 | 0 | btf = btf_parse_raw(path, base_btf); |
1418 | 0 | err = libbpf_get_error(btf); |
1419 | 0 | if (!err) |
1420 | 0 | return btf; |
1421 | 0 | if (err != -EPROTO) |
1422 | 0 | return ERR_PTR(err); |
1423 | 0 | return btf_parse_elf(path, base_btf, btf_ext); |
1424 | 0 | } |
1425 | | |
1426 | | struct btf *btf__parse(const char *path, struct btf_ext **btf_ext) |
1427 | 0 | { |
1428 | 0 | return libbpf_ptr(btf_parse(path, NULL, btf_ext)); |
1429 | 0 | } |
1430 | | |
1431 | | struct btf *btf__parse_split(const char *path, struct btf *base_btf) |
1432 | 0 | { |
1433 | 0 | return libbpf_ptr(btf_parse(path, base_btf, NULL)); |
1434 | 0 | } |
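/* Editorial usage sketch (not part of libbpf): load kernel base BTF, then
 * a module's split BTF on top of it. The nf_tables path is illustrative;
 * the caller must keep vmlinux_btf alive as long as mod_btf is in use and
 * eventually free both.
 */
static struct btf *load_module_btf(void)
{
        struct btf *vmlinux_btf, *mod_btf;

        vmlinux_btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
        if (!vmlinux_btf)
                return NULL;
        mod_btf = btf__parse_split("/sys/kernel/btf/nf_tables", vmlinux_btf);
        if (!mod_btf)
                btf__free(vmlinux_btf);
        return mod_btf;
}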
1435 | | |
1436 | | static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian); |
1437 | | |
1438 | | int btf_load_into_kernel(struct btf *btf, |
1439 | | char *log_buf, size_t log_sz, __u32 log_level, |
1440 | | int token_fd) |
1441 | 0 | { |
1442 | 0 | LIBBPF_OPTS(bpf_btf_load_opts, opts); |
1443 | 0 | __u32 buf_sz = 0, raw_size; |
1444 | 0 | char *buf = NULL, *tmp; |
1445 | 0 | void *raw_data; |
1446 | 0 | int err = 0; |
1447 | |
1448 | 0 | if (btf->fd >= 0) |
1449 | 0 | return libbpf_err(-EEXIST); |
1450 | 0 | if (log_sz && !log_buf) |
1451 | 0 | return libbpf_err(-EINVAL); |
1452 | | |
1453 | | /* cache native raw data representation */ |
1454 | 0 | raw_data = btf_get_raw_data(btf, &raw_size, false); |
1455 | 0 | if (!raw_data) { |
1456 | 0 | err = -ENOMEM; |
1457 | 0 | goto done; |
1458 | 0 | } |
1459 | 0 | btf->raw_size = raw_size; |
1460 | 0 | btf->raw_data = raw_data; |
1461 | |
1462 | 0 | retry_load: |
1463 | | /* if log_level is 0, we won't initially provide log_buf/log_size to |
1464 | | * the kernel. Only if BTF loading fails do we bump log_level to 1 and |
1465 | | * retry, using either the auto-allocated or the custom log_buf. This |
1466 | | * way a non-NULL custom log_buf provides a fallback buffer just in |
1467 | | * case, while we hope for a successful load with no need for log_buf. |
1468 | | */ |
1469 | 0 | if (log_level) { |
1470 | | /* if caller didn't provide custom log_buf, we'll keep |
1471 | | * allocating our own progressively bigger buffers for BTF |
1472 | | * verification log |
1473 | | */ |
1474 | 0 | if (!log_buf) { |
1475 | 0 | buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2); |
1476 | 0 | tmp = realloc(buf, buf_sz); |
1477 | 0 | if (!tmp) { |
1478 | 0 | err = -ENOMEM; |
1479 | 0 | goto done; |
1480 | 0 | } |
1481 | 0 | buf = tmp; |
1482 | 0 | buf[0] = '\0'; |
1483 | 0 | } |
1484 | | |
1485 | 0 | opts.log_buf = log_buf ? log_buf : buf; |
1486 | 0 | opts.log_size = log_buf ? log_sz : buf_sz; |
1487 | 0 | opts.log_level = log_level; |
1488 | 0 | } |
1489 | | |
1490 | 0 | opts.token_fd = token_fd; |
1491 | 0 | if (token_fd) |
1492 | 0 | opts.btf_flags |= BPF_F_TOKEN_FD; |
1493 | |
1494 | 0 | btf->fd = bpf_btf_load(raw_data, raw_size, &opts); |
1495 | 0 | if (btf->fd < 0) { |
1496 | | /* time to turn on verbose mode and try again */ |
1497 | 0 | if (log_level == 0) { |
1498 | 0 | log_level = 1; |
1499 | 0 | goto retry_load; |
1500 | 0 | } |
1501 | | /* only retry if caller didn't provide custom log_buf, but |
1502 | | * make sure we can never overflow buf_sz |
1503 | | */ |
1504 | 0 | if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2) |
1505 | 0 | goto retry_load; |
1506 | | |
1507 | 0 | err = -errno; |
1508 | 0 | pr_warn("BTF loading error: %s\n", errstr(err)); |
1509 | | /* don't print out contents of custom log_buf */ |
1510 | 0 | if (!log_buf && buf[0]) |
1511 | 0 | pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf); |
1512 | 0 | } |
1513 | | |
1514 | 0 | done: |
1515 | 0 | free(buf); |
1516 | 0 | return libbpf_err(err); |
1517 | 0 | } |
1518 | | |
1519 | | int btf__load_into_kernel(struct btf *btf) |
1520 | 0 | { |
1521 | 0 | return btf_load_into_kernel(btf, NULL, 0, 0, 0); |
1522 | 0 | } |
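/* Editorial usage sketch (not part of libbpf): load BTF into the kernel
 * and hand back the resulting BTF object FD; a negative errno-style code
 * is propagated on failure.
 */
static int load_btf_get_fd(struct btf *btf)
{
        int err = btf__load_into_kernel(btf);

        if (err)
                return err;
        return btf__fd(btf);
}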
1523 | | |
1524 | | int btf__fd(const struct btf *btf) |
1525 | 0 | { |
1526 | 0 | return btf->fd; |
1527 | 0 | } |
1528 | | |
1529 | | void btf__set_fd(struct btf *btf, int fd) |
1530 | 0 | { |
1531 | 0 | btf->fd = fd; |
1532 | 0 | } |
1533 | | |
1534 | | static const void *btf_strs_data(const struct btf *btf) |
1535 | 74.7k | { |
1536 | 74.7k | return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set); |
1537 | 74.7k | } |
1538 | | |
1539 | | static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian) |
1540 | 0 | { |
1541 | 0 | struct btf_header *hdr = btf->hdr; |
1542 | 0 | struct btf_type *t; |
1543 | 0 | void *data, *p; |
1544 | 0 | __u32 data_sz; |
1545 | 0 | int i; |
1546 | |
1547 | 0 | data = swap_endian ? btf->raw_data_swapped : btf->raw_data; |
1548 | 0 | if (data) { |
1549 | 0 | *size = btf->raw_size; |
1550 | 0 | return data; |
1551 | 0 | } |
1552 | | |
1553 | 0 | data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len; |
1554 | 0 | data = calloc(1, data_sz); |
1555 | 0 | if (!data) |
1556 | 0 | return NULL; |
1557 | 0 | p = data; |
1558 | |
1559 | 0 | memcpy(p, hdr, hdr->hdr_len); |
1560 | 0 | if (swap_endian) |
1561 | 0 | btf_bswap_hdr(p); |
1562 | 0 | p += hdr->hdr_len; |
1563 | |
1564 | 0 | memcpy(p, btf->types_data, hdr->type_len); |
1565 | 0 | if (swap_endian) { |
1566 | 0 | for (i = 0; i < btf->nr_types; i++) { |
1567 | 0 | t = p + btf->type_offs[i]; |
1568 | | /* btf_bswap_type_rest() relies on native t->info, so |
1569 | | * we swap base type info after we swapped all the |
1570 | | * additional information |
1571 | | */ |
1572 | 0 | if (btf_bswap_type_rest(t)) |
1573 | 0 | goto err_out; |
1574 | 0 | btf_bswap_type_base(t); |
1575 | 0 | } |
1576 | 0 | } |
1577 | 0 | p += hdr->type_len; |
1578 | |
1579 | 0 | memcpy(p, btf_strs_data(btf), hdr->str_len); |
1580 | 0 | p += hdr->str_len; |
1581 | |
1582 | 0 | *size = data_sz; |
1583 | 0 | return data; |
1584 | 0 | err_out: |
1585 | 0 | free(data); |
1586 | 0 | return NULL; |
1587 | 0 | } |
1588 | | |
1589 | | const void *btf__raw_data(const struct btf *btf_ro, __u32 *size) |
1590 | 0 | { |
1591 | 0 | struct btf *btf = (struct btf *)btf_ro; |
1592 | 0 | __u32 data_sz; |
1593 | 0 | void *data; |
1594 | |
1595 | 0 | data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian); |
1596 | 0 | if (!data) |
1597 | 0 | return errno = ENOMEM, NULL; |
1598 | | |
1599 | 0 | btf->raw_size = data_sz; |
1600 | 0 | if (btf->swapped_endian) |
1601 | 0 | btf->raw_data_swapped = data; |
1602 | 0 | else |
1603 | 0 | btf->raw_data = data; |
1604 | 0 | *size = data_sz; |
1605 | 0 | return data; |
1606 | 0 | } |
1607 | | |
1608 | | __attribute__((alias("btf__raw_data"))) |
1609 | | const void *btf__get_raw_data(const struct btf *btf, __u32 *size); |
1610 | | |
1611 | | const char *btf__str_by_offset(const struct btf *btf, __u32 offset) |
1612 | 74.9k | { |
1613 | 74.9k | if (offset < btf->start_str_off) |
1614 | 0 | return btf__str_by_offset(btf->base_btf, offset); |
1615 | 74.9k | else if (offset - btf->start_str_off < btf->hdr->str_len) |
1616 | 74.7k | return btf_strs_data(btf) + (offset - btf->start_str_off); |
1617 | 217 | else |
1618 | 217 | return errno = EINVAL, NULL; |
1619 | 74.9k | } |
1620 | | |
1621 | | const char *btf__name_by_offset(const struct btf *btf, __u32 offset) |
1622 | 34.9k | { |
1623 | 34.9k | return btf__str_by_offset(btf, offset); |
1624 | 34.9k | } |
1625 | | |
1626 | | struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf) |
1627 | 0 | { |
1628 | 0 | struct bpf_btf_info btf_info; |
1629 | 0 | __u32 len = sizeof(btf_info); |
1630 | 0 | __u32 last_size; |
1631 | 0 | struct btf *btf; |
1632 | 0 | void *ptr; |
1633 | 0 | int err; |
1634 | | |
1635 | | /* we won't know btf_size until we call bpf_btf_get_info_by_fd(), so
1636 | | * let's start with a sane default - 4KiB here - and resize it only if |
1637 | | * bpf_btf_get_info_by_fd() needs a bigger buffer. |
1638 | | */ |
1639 | 0 | last_size = 4096; |
1640 | 0 | ptr = malloc(last_size); |
1641 | 0 | if (!ptr) |
1642 | 0 | return ERR_PTR(-ENOMEM); |
1643 | | |
1644 | 0 | memset(&btf_info, 0, sizeof(btf_info)); |
1645 | 0 | btf_info.btf = ptr_to_u64(ptr); |
1646 | 0 | btf_info.btf_size = last_size; |
1647 | 0 | err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len); |
1648 | |
1649 | 0 | if (!err && btf_info.btf_size > last_size) { |
1650 | 0 | void *temp_ptr; |
1651 | |
1652 | 0 | last_size = btf_info.btf_size; |
1653 | 0 | temp_ptr = realloc(ptr, last_size); |
1654 | 0 | if (!temp_ptr) { |
1655 | 0 | btf = ERR_PTR(-ENOMEM); |
1656 | 0 | goto exit_free; |
1657 | 0 | } |
1658 | 0 | ptr = temp_ptr; |
1659 | |
1660 | 0 | len = sizeof(btf_info); |
1661 | 0 | memset(&btf_info, 0, sizeof(btf_info)); |
1662 | 0 | btf_info.btf = ptr_to_u64(ptr); |
1663 | 0 | btf_info.btf_size = last_size; |
1664 | |
1665 | 0 | err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len); |
1666 | 0 | } |
1667 | | |
1668 | 0 | if (err || btf_info.btf_size > last_size) { |
1669 | 0 | btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG); |
1670 | 0 | goto exit_free; |
1671 | 0 | } |
1672 | | |
1673 | 0 | btf = btf_new(ptr, btf_info.btf_size, base_btf, false); |
1674 | |
1675 | 0 | exit_free: |
1676 | 0 | free(ptr); |
1677 | 0 | return btf; |
1678 | 0 | } |
1679 | | |
1680 | | struct btf *btf_load_from_kernel(__u32 id, struct btf *base_btf, int token_fd) |
1681 | 0 | { |
1682 | 0 | struct btf *btf; |
1683 | 0 | int btf_fd; |
1684 | 0 | LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts); |
1685 | |
1686 | 0 | if (token_fd) { |
1687 | 0 | opts.open_flags |= BPF_F_TOKEN_FD; |
1688 | 0 | opts.token_fd = token_fd; |
1689 | 0 | } |
1690 | |
1691 | 0 | btf_fd = bpf_btf_get_fd_by_id_opts(id, &opts); |
1692 | 0 | if (btf_fd < 0) |
1693 | 0 | return libbpf_err_ptr(-errno); |
1694 | | |
1695 | 0 | btf = btf_get_from_fd(btf_fd, base_btf); |
1696 | 0 | close(btf_fd); |
1697 | |
1698 | 0 | return libbpf_ptr(btf); |
1699 | 0 | } |
1700 | | |
1701 | | struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf) |
1702 | 0 | { |
1703 | 0 | return btf_load_from_kernel(id, base_btf, 0); |
1704 | 0 | } |
1705 | | |
1706 | | struct btf *btf__load_from_kernel_by_id(__u32 id) |
1707 | 0 | { |
1708 | 0 | return btf__load_from_kernel_by_id_split(id, NULL); |
1709 | 0 | } |
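 | |
 | | /* Usage sketch (illustrative only): iterate all BTF objects currently
 | |  * loaded in the kernel, assuming sufficient privileges; inspect() is
 | |  * hypothetical:
 | |  *
 | |  *	__u32 id = 0;
 | |  *
 | |  *	while (bpf_btf_get_next_id(id, &id) == 0) {
 | |  *		struct btf *btf = btf__load_from_kernel_by_id(id);
 | |  *
 | |  *		if (!btf)
 | |  *			continue;
 | |  *		inspect(btf);
 | |  *		btf__free(btf);
 | |  *	}
 | |  */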
1710 | | |
1711 | | static void btf_invalidate_raw_data(struct btf *btf) |
1712 | 792 | { |
1713 | 792 | if (btf->raw_data) |
1714 | 396 | btf_free_raw_data(btf); |
1715 | 792 | if (btf->raw_data_swapped) { |
1716 | 0 | free(btf->raw_data_swapped); |
1717 | 0 | btf->raw_data_swapped = NULL; |
1718 | 0 | } |
1719 | 792 | } |
1720 | | |
1721 | | /* Ensure BTF is ready to be modified (by splitting it into three memory
1722 | | * regions for header, types, and strings). Also invalidate cached
1723 | | * raw_data, if any. |
1724 | | */ |
1725 | | static int btf_ensure_modifiable(struct btf *btf) |
1726 | 792 | { |
1727 | 792 | void *hdr, *types; |
1728 | 792 | struct strset *set = NULL; |
1729 | 792 | int err = -ENOMEM; |
1730 | | |
1731 | 792 | if (btf_is_modifiable(btf)) { |
1732 | | /* any BTF modification invalidates raw_data */ |
1733 | 396 | btf_invalidate_raw_data(btf); |
1734 | 396 | return 0; |
1735 | 396 | } |
1736 | | |
1737 | | /* split raw data into three memory regions */ |
1738 | 396 | hdr = malloc(btf->hdr->hdr_len); |
1739 | 396 | types = malloc(btf->hdr->type_len); |
1740 | 396 | if (!hdr || !types) |
1741 | 0 | goto err_out; |
1742 | | |
1743 | 396 | memcpy(hdr, btf->hdr, btf->hdr->hdr_len); |
1744 | 396 | memcpy(types, btf->types_data, btf->hdr->type_len); |
1745 | | |
1746 | | /* build lookup index for all strings */ |
1747 | 396 | set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len); |
1748 | 396 | if (IS_ERR(set)) { |
1749 | 0 | err = PTR_ERR(set); |
1750 | 0 | goto err_out; |
1751 | 0 | } |
1752 | | |
1753 | | /* only when everything was successful, update internal state */ |
1754 | 396 | btf->hdr = hdr; |
1755 | 396 | btf->types_data = types; |
1756 | 396 | btf->types_data_cap = btf->hdr->type_len; |
1757 | 396 | btf->strs_data = NULL; |
1758 | 396 | btf->strs_set = set; |
1759 | | /* if BTF was created from scratch, all strings are guaranteed to be |
1760 | | * unique and deduplicated |
1761 | | */ |
1762 | 396 | if (btf->hdr->str_len == 0) |
1763 | 0 | btf->strs_deduped = true; |
1764 | 396 | if (!btf->base_btf && btf->hdr->str_len == 1) |
1765 | 0 | btf->strs_deduped = true; |
1766 | | |
1767 | | /* invalidate raw_data representation */ |
1768 | 396 | btf_invalidate_raw_data(btf); |
1769 | | |
1770 | 396 | return 0; |
1771 | | |
1772 | 0 | err_out: |
1773 | 0 | strset__free(set); |
1774 | 0 | free(hdr); |
1775 | 0 | free(types); |
1776 | 0 | return err; |
1777 | 396 | } |
1778 | | |
1779 | | /* Find an offset in BTF string section that corresponds to a given string *s*. |
1780 | | * Returns: |
1781 | | * - >0 offset into string section, if string is found; |
1782 | | * - -ENOENT, if string is not in the string section; |
1783 | | * - <0, on any other error. |
1784 | | */ |
1785 | | int btf__find_str(struct btf *btf, const char *s) |
1786 | 0 | { |
1787 | 0 | int off; |
1788 | |
1789 | 0 | if (btf->base_btf) { |
1790 | 0 | off = btf__find_str(btf->base_btf, s); |
1791 | 0 | if (off != -ENOENT) |
1792 | 0 | return off; |
1793 | 0 | } |
1794 | | |
1795 | | /* BTF needs to be in a modifiable state to build string lookup index */ |
1796 | 0 | if (btf_ensure_modifiable(btf)) |
1797 | 0 | return libbpf_err(-ENOMEM); |
1798 | | |
1799 | 0 | off = strset__find_str(btf->strs_set, s); |
1800 | 0 | if (off < 0) |
1801 | 0 | return libbpf_err(off); |
1802 | | |
1803 | 0 | return btf->start_str_off + off; |
1804 | 0 | } |
1805 | | |
1806 | | /* Add a string *s* to the BTF string section.
1807 | | * Returns: |
1808 | | * - > 0 offset into string section, on success; |
1809 | | * - < 0, on error. |
1810 | | */ |
1811 | | int btf__add_str(struct btf *btf, const char *s) |
1812 | 396 | { |
1813 | 396 | int off; |
1814 | | |
1815 | 396 | if (btf->base_btf) { |
1816 | 0 | off = btf__find_str(btf->base_btf, s); |
1817 | 0 | if (off != -ENOENT) |
1818 | 0 | return off; |
1819 | 0 | } |
1820 | | |
1821 | 396 | if (btf_ensure_modifiable(btf)) |
1822 | 0 | return libbpf_err(-ENOMEM); |
1823 | | |
1824 | 396 | off = strset__add_str(btf->strs_set, s); |
1825 | 396 | if (off < 0) |
1826 | 0 | return libbpf_err(off); |
1827 | | |
1828 | 396 | btf->hdr->str_len = strset__data_size(btf->strs_set); |
1829 | | |
1830 | 396 | return btf->start_str_off + off; |
1831 | 396 | } |
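 | |
 | | /* Usage sketch (illustrative only): an added string can be read back
 | |  * through btf__str_by_offset() using the returned offset:
 | |  *
 | |  *	int off = btf__add_str(btf, "my_string");
 | |  *
 | |  *	if (off > 0)
 | |  *		assert(strcmp(btf__str_by_offset(btf, off), "my_string") == 0);
 | |  */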
1832 | | |
1833 | | static void *btf_add_type_mem(struct btf *btf, size_t add_sz) |
1834 | 396 | { |
1835 | 396 | return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1, |
1836 | 396 | btf->hdr->type_len, UINT_MAX, add_sz); |
1837 | 396 | } |
1838 | | |
1839 | | static void btf_type_inc_vlen(struct btf_type *t) |
1840 | 0 | { |
1841 | 0 | t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t)); |
1842 | 0 | } |
1843 | | |
1844 | | static int btf_commit_type(struct btf *btf, int data_sz) |
1845 | 396 | { |
1846 | 396 | int err; |
1847 | | |
1848 | 396 | err = btf_add_type_idx_entry(btf, btf->hdr->type_len); |
1849 | 396 | if (err) |
1850 | 0 | return libbpf_err(err); |
1851 | | |
1852 | 396 | btf->hdr->type_len += data_sz; |
1853 | 396 | btf->hdr->str_off += data_sz; |
1854 | 396 | btf->nr_types++; |
1855 | 396 | return btf->start_id + btf->nr_types - 1; |
1856 | 396 | } |
1857 | | |
1858 | | struct btf_pipe { |
1859 | | const struct btf *src; |
1860 | | struct btf *dst; |
1861 | | struct hashmap *str_off_map; /* map string offsets from src to dst */ |
1862 | | }; |
1863 | | |
1864 | | static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off) |
1865 | 0 | { |
1866 | 0 | long mapped_off; |
1867 | 0 | int off, err; |
1868 | |
1869 | 0 | if (!*str_off) /* nothing to do for empty strings */ |
1870 | 0 | return 0; |
1871 | | |
1872 | 0 | if (p->str_off_map && |
1873 | 0 | hashmap__find(p->str_off_map, *str_off, &mapped_off)) { |
1874 | 0 | *str_off = mapped_off; |
1875 | 0 | return 0; |
1876 | 0 | } |
1877 | | |
1878 | 0 | off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off)); |
1879 | 0 | if (off < 0) |
1880 | 0 | return off; |
1881 | | |
1882 | | /* Remember the string mapping from src to dst; it avoids
1883 | | * repeating expensive string comparisons on subsequent lookups.
1884 | | */ |
1885 | 0 | if (p->str_off_map) { |
1886 | 0 | err = hashmap__append(p->str_off_map, *str_off, off); |
1887 | 0 | if (err) |
1888 | 0 | return err; |
1889 | 0 | } |
1890 | | |
1891 | 0 | *str_off = off; |
1892 | 0 | return 0; |
1893 | 0 | } |
1894 | | |
1895 | | static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type) |
1896 | 0 | { |
1897 | 0 | struct btf_field_iter it; |
1898 | 0 | struct btf_type *t; |
1899 | 0 | __u32 *str_off; |
1900 | 0 | int sz, err; |
1901 | |
1902 | 0 | sz = btf_type_size(src_type); |
1903 | 0 | if (sz < 0) |
1904 | 0 | return libbpf_err(sz); |
1905 | | |
1906 | | /* deconstruct BTF, if necessary, and invalidate raw_data */ |
1907 | 0 | if (btf_ensure_modifiable(p->dst)) |
1908 | 0 | return libbpf_err(-ENOMEM); |
1909 | | |
1910 | 0 | t = btf_add_type_mem(p->dst, sz); |
1911 | 0 | if (!t) |
1912 | 0 | return libbpf_err(-ENOMEM); |
1913 | | |
1914 | 0 | memcpy(t, src_type, sz); |
1915 | |
1916 | 0 | err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); |
1917 | 0 | if (err) |
1918 | 0 | return libbpf_err(err); |
1919 | | |
1920 | 0 | while ((str_off = btf_field_iter_next(&it))) { |
1921 | 0 | err = btf_rewrite_str(p, str_off); |
1922 | 0 | if (err) |
1923 | 0 | return libbpf_err(err); |
1924 | 0 | } |
1925 | | |
1926 | 0 | return btf_commit_type(p->dst, sz); |
1927 | 0 | } |
1928 | | |
1929 | | int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type) |
1930 | 0 | { |
1931 | 0 | struct btf_pipe p = { .src = src_btf, .dst = btf }; |
1932 | |
1933 | 0 | return btf_add_type(&p, src_type); |
1934 | 0 | } |
1935 | | |
1936 | | static size_t btf_dedup_identity_hash_fn(long key, void *ctx); |
1937 | | static bool btf_dedup_equal_fn(long k1, long k2, void *ctx); |
1938 | | |
1939 | | int btf__add_btf(struct btf *btf, const struct btf *src_btf) |
1940 | 0 | { |
1941 | 0 | struct btf_pipe p = { .src = src_btf, .dst = btf }; |
1942 | 0 | int data_sz, sz, cnt, i, err, old_strs_len; |
1943 | 0 | __u32 *off; |
1944 | 0 | void *t; |
1945 | | |
1946 | | /* appending split BTF isn't supported yet */ |
1947 | 0 | if (src_btf->base_btf) |
1948 | 0 | return libbpf_err(-ENOTSUP); |
1949 | | |
1950 | | /* deconstruct BTF, if necessary, and invalidate raw_data */ |
1951 | 0 | if (btf_ensure_modifiable(btf)) |
1952 | 0 | return libbpf_err(-ENOMEM); |
1953 | | |
1954 | | /* remember original strings section size if we have to roll back |
1955 | | * partial strings section changes |
1956 | | */ |
1957 | 0 | old_strs_len = btf->hdr->str_len; |
1958 | |
1959 | 0 | data_sz = src_btf->hdr->type_len; |
1960 | 0 | cnt = btf__type_cnt(src_btf) - 1; |
1961 | | |
1962 | | /* pre-allocate enough memory for new types */ |
1963 | 0 | t = btf_add_type_mem(btf, data_sz); |
1964 | 0 | if (!t) |
1965 | 0 | return libbpf_err(-ENOMEM); |
1966 | | |
1967 | | /* pre-allocate enough memory for type offset index for new types */ |
1968 | 0 | off = btf_add_type_offs_mem(btf, cnt); |
1969 | 0 | if (!off) |
1970 | 0 | return libbpf_err(-ENOMEM); |
1971 | | |
1972 | | /* Map string offsets from src_btf to the corresponding offsets in btf to improve performance */
1973 | 0 | p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL); |
1974 | 0 | if (IS_ERR(p.str_off_map)) |
1975 | 0 | return libbpf_err(-ENOMEM); |
1976 | | |
1977 | | /* bulk copy types data for all types from src_btf */ |
1978 | 0 | memcpy(t, src_btf->types_data, data_sz); |
1979 | |
1980 | 0 | for (i = 0; i < cnt; i++) { |
1981 | 0 | struct btf_field_iter it; |
1982 | 0 | __u32 *type_id, *str_off; |
1983 | |
1984 | 0 | sz = btf_type_size(t); |
1985 | 0 | if (sz < 0) { |
1986 | | /* unlikely, has to be corrupted src_btf */ |
1987 | 0 | err = sz; |
1988 | 0 | goto err_out; |
1989 | 0 | } |
1990 | | |
1991 | | /* fill out type ID to type offset mapping for lookups by type ID */ |
1992 | 0 | *off = t - btf->types_data; |
1993 | | |
1994 | | /* add, dedup, and remap strings referenced by this BTF type */ |
1995 | 0 | err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); |
1996 | 0 | if (err) |
1997 | 0 | goto err_out; |
1998 | 0 | while ((str_off = btf_field_iter_next(&it))) { |
1999 | 0 | err = btf_rewrite_str(&p, str_off); |
2000 | 0 | if (err) |
2001 | 0 | goto err_out; |
2002 | 0 | } |
2003 | | |
2004 | | /* remap all type IDs referenced from this BTF type */ |
2005 | 0 | err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); |
2006 | 0 | if (err) |
2007 | 0 | goto err_out; |
2008 | | |
2009 | 0 | while ((type_id = btf_field_iter_next(&it))) { |
2010 | 0 | if (!*type_id) /* nothing to do for VOID references */ |
2011 | 0 | continue; |
2012 | | |
2013 | | /* we haven't updated btf's type count yet, so |
2014 | | * btf->start_id + btf->nr_types - 1 is the type ID offset we should |
2015 | | * add to all newly added BTF types |
2016 | | */ |
2017 | 0 | *type_id += btf->start_id + btf->nr_types - 1; |
2018 | 0 | } |
2019 | | |
2020 | | /* go to next type data and type offset index entry */ |
2021 | 0 | t += sz; |
2022 | 0 | off++; |
2023 | 0 | } |
2024 | | |
2025 | | /* Up until now any of the copied type data was effectively invisible, |
2026 | | * so if we exited early before this point due to error, BTF would be |
2027 | | * effectively unmodified. There would be extra internal memory |
2028 | | * pre-allocated, but it would not be available for querying. But now |
2029 | | * that we've copied and rewritten all the data successfully, we can |
2030 | | * update type count and various internal offsets and sizes to |
2031 | | * "commit" the changes and made them visible to the outside world. |
2032 | | */ |
2033 | 0 | btf->hdr->type_len += data_sz; |
2034 | 0 | btf->hdr->str_off += data_sz; |
2035 | 0 | btf->nr_types += cnt; |
2036 | |
2037 | 0 | hashmap__free(p.str_off_map); |
2038 | | |
2039 | | /* return type ID of the first added BTF type */ |
2040 | 0 | return btf->start_id + btf->nr_types - cnt; |
2041 | 0 | err_out: |
2042 | | /* zero out preallocated memory as if it was just allocated with |
2043 | | * libbpf_add_mem() |
2044 | | */ |
2045 | 0 | memset(btf->types_data + btf->hdr->type_len, 0, data_sz); |
2046 | 0 | memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len); |
2047 | | |
2048 | | /* and now restore the original strings section size; types data size
2049 | | * wasn't modified, so it doesn't need restoring; see the big comment above
2050 | | */ |
2051 | 0 | btf->hdr->str_len = old_strs_len; |
2052 | |
2053 | 0 | hashmap__free(p.str_off_map); |
2054 | |
2055 | 0 | return libbpf_err(err); |
2056 | 0 | } |
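 | |
 | | /* Usage sketch (illustrative only): merge all of src_btf's types into
 | |  * btf and derive where they landed; since appended types keep their
 | |  * relative order, src type [i] becomes type [first_id + i - 1]:
 | |  *
 | |  *	int first_id = btf__add_btf(btf, src_btf);
 | |  *
 | |  *	if (first_id < 0)
 | |  *		return first_id;
 | |  */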
2057 | | |
2058 | | /* |
2059 | | * Append new BTF_KIND_INT type with: |
2060 | | * - *name* - non-empty, non-NULL type name; |
2061 | | * - *byte_sz* - power-of-2 (1, 2, 4, ...) size of the type, in bytes;
2062 | | * - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL. |
2063 | | * Returns: |
2064 | | * - >0, type ID of newly added BTF type; |
2065 | | * - <0, on error. |
2066 | | */ |
2067 | | int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding) |
2068 | 0 | { |
2069 | 0 | struct btf_type *t; |
2070 | 0 | int sz, name_off; |
2071 | | |
2072 | | /* non-empty name */ |
2073 | 0 | if (!name || !name[0]) |
2074 | 0 | return libbpf_err(-EINVAL); |
2075 | | /* byte_sz must be power of 2 */ |
2076 | 0 | if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16) |
2077 | 0 | return libbpf_err(-EINVAL); |
2078 | 0 | if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL)) |
2079 | 0 | return libbpf_err(-EINVAL); |
2080 | | |
2081 | | /* deconstruct BTF, if necessary, and invalidate raw_data */ |
2082 | 0 | if (btf_ensure_modifiable(btf)) |
2083 | 0 | return libbpf_err(-ENOMEM); |
2084 | | |
2085 | 0 | sz = sizeof(struct btf_type) + sizeof(int); |
2086 | 0 | t = btf_add_type_mem(btf, sz); |
2087 | 0 | if (!t) |
2088 | 0 | return libbpf_err(-ENOMEM); |
2089 | | |
2090 | | /* if something goes wrong later, we might end up with an extra string, |
2091 | | * but that shouldn't be a problem, because the BTF can't be fully
2092 | | * constructed anyway and will most probably just be discarded
2093 | | */ |
2094 | 0 | name_off = btf__add_str(btf, name); |
2095 | 0 | if (name_off < 0) |
2096 | 0 | return name_off; |
2097 | | |
2098 | 0 | t->name_off = name_off; |
2099 | 0 | t->info = btf_type_info(BTF_KIND_INT, 0, 0); |
2100 | 0 | t->size = byte_sz; |
2101 | | /* set INT info, we don't allow setting legacy bit offset/size */ |
2102 | 0 | *(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8); |
2103 | |
2104 | 0 | return btf_commit_type(btf, sz); |
2105 | 0 | } |
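 | |
 | | /* Usage sketch (illustrative only): append a signed 4-byte "int":
 | |  *
 | |  *	int int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
 | |  *
 | |  *	if (int_id < 0)
 | |  *		return int_id;
 | |  */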
2106 | | |
2107 | | /* |
2108 | | * Append new BTF_KIND_FLOAT type with: |
2109 | | * - *name* - non-empty, non-NULL type name; |
2110 | | * - *byte_sz* - size of the type, in bytes;
2111 | | * Returns: |
2112 | | * - >0, type ID of newly added BTF type; |
2113 | | * - <0, on error. |
2114 | | */ |
2115 | | int btf__add_float(struct btf *btf, const char *name, size_t byte_sz) |
2116 | 0 | { |
2117 | 0 | struct btf_type *t; |
2118 | 0 | int sz, name_off; |
2119 | | |
2120 | | /* non-empty name */ |
2121 | 0 | if (!name || !name[0]) |
2122 | 0 | return libbpf_err(-EINVAL); |
2123 | | |
2124 | | /* byte_sz must be one of the explicitly allowed values */ |
2125 | 0 | if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 && |
2126 | 0 | byte_sz != 16) |
2127 | 0 | return libbpf_err(-EINVAL); |
2128 | | |
2129 | 0 | if (btf_ensure_modifiable(btf)) |
2130 | 0 | return libbpf_err(-ENOMEM); |
2131 | | |
2132 | 0 | sz = sizeof(struct btf_type); |
2133 | 0 | t = btf_add_type_mem(btf, sz); |
2134 | 0 | if (!t) |
2135 | 0 | return libbpf_err(-ENOMEM); |
2136 | | |
2137 | 0 | name_off = btf__add_str(btf, name); |
2138 | 0 | if (name_off < 0) |
2139 | 0 | return name_off; |
2140 | | |
2141 | 0 | t->name_off = name_off; |
2142 | 0 | t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0); |
2143 | 0 | t->size = byte_sz; |
2144 | |
2145 | 0 | return btf_commit_type(btf, sz); |
2146 | 0 | } |
2147 | | |
2148 | | /* it's completely legal to append BTF types with type IDs pointing forward to |
2149 | | * types that haven't been appended yet, so we only make sure that the ID
2150 | | * looks sane; we can't guarantee that it will always be valid
2151 | | */ |
2152 | | static int validate_type_id(int id) |
2153 | 396 | { |
2154 | 396 | if (id < 0 || id > BTF_MAX_NR_TYPES) |
2155 | 0 | return -EINVAL; |
2156 | 396 | return 0; |
2157 | 396 | } |
2158 | | |
2159 | | /* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */ |
2160 | | static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id, int kflag) |
2161 | 0 | { |
2162 | 0 | struct btf_type *t; |
2163 | 0 | int sz, name_off = 0; |
2164 | |
2165 | 0 | if (validate_type_id(ref_type_id)) |
2166 | 0 | return libbpf_err(-EINVAL); |
2167 | | |
2168 | 0 | if (btf_ensure_modifiable(btf)) |
2169 | 0 | return libbpf_err(-ENOMEM); |
2170 | | |
2171 | 0 | sz = sizeof(struct btf_type); |
2172 | 0 | t = btf_add_type_mem(btf, sz); |
2173 | 0 | if (!t) |
2174 | 0 | return libbpf_err(-ENOMEM); |
2175 | | |
2176 | 0 | if (name && name[0]) { |
2177 | 0 | name_off = btf__add_str(btf, name); |
2178 | 0 | if (name_off < 0) |
2179 | 0 | return name_off; |
2180 | 0 | } |
2181 | | |
2182 | 0 | t->name_off = name_off; |
2183 | 0 | t->info = btf_type_info(kind, 0, kflag); |
2184 | 0 | t->type = ref_type_id; |
2185 | |
2186 | 0 | return btf_commit_type(btf, sz); |
2187 | 0 | } |
2188 | | |
2189 | | /* |
2190 | | * Append new BTF_KIND_PTR type with: |
2191 | | * - *ref_type_id* - referenced type ID, it might not exist yet; |
2192 | | * Returns: |
2193 | | * - >0, type ID of newly added BTF type; |
2194 | | * - <0, on error. |
2195 | | */ |
2196 | | int btf__add_ptr(struct btf *btf, int ref_type_id) |
2197 | 0 | { |
2198 | 0 | return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id, 0); |
2199 | 0 | } |
2200 | | |
2201 | | /* |
2202 | | * Append new BTF_KIND_ARRAY type with: |
2203 | | * - *index_type_id* - type ID of the type describing array index; |
2204 | | * - *elem_type_id* - type ID of the type describing array element; |
2205 | | * - *nr_elems* - the size of the array; |
2206 | | * Returns: |
2207 | | * - >0, type ID of newly added BTF type; |
2208 | | * - <0, on error. |
2209 | | */ |
2210 | | int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems) |
2211 | 0 | { |
2212 | 0 | struct btf_type *t; |
2213 | 0 | struct btf_array *a; |
2214 | 0 | int sz; |
2215 | |
2216 | 0 | if (validate_type_id(index_type_id) || validate_type_id(elem_type_id)) |
2217 | 0 | return libbpf_err(-EINVAL); |
2218 | | |
2219 | 0 | if (btf_ensure_modifiable(btf)) |
2220 | 0 | return libbpf_err(-ENOMEM); |
2221 | | |
2222 | 0 | sz = sizeof(struct btf_type) + sizeof(struct btf_array); |
2223 | 0 | t = btf_add_type_mem(btf, sz); |
2224 | 0 | if (!t) |
2225 | 0 | return libbpf_err(-ENOMEM); |
2226 | | |
2227 | 0 | t->name_off = 0; |
2228 | 0 | t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0); |
2229 | 0 | t->size = 0; |
2230 | |
2231 | 0 | a = btf_array(t); |
2232 | 0 | a->type = elem_type_id; |
2233 | 0 | a->index_type = index_type_id; |
2234 | 0 | a->nelems = nr_elems; |
2235 | |
2236 | 0 | return btf_commit_type(btf, sz); |
2237 | 0 | } |
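 | |
 | | /* Usage sketch (illustrative only): describe `int arr[16]`, assuming
 | |  * *int_id* is the ID of a previously added "int" type (used here both
 | |  * as index and element type):
 | |  *
 | |  *	int arr_id = btf__add_array(btf, int_id, int_id, 16);
 | |  */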
2238 | | |
2239 | | /* generic STRUCT/UNION append function */ |
2240 | | static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz) |
2241 | 0 | { |
2242 | 0 | struct btf_type *t; |
2243 | 0 | int sz, name_off = 0; |
2244 | |
2245 | 0 | if (btf_ensure_modifiable(btf)) |
2246 | 0 | return libbpf_err(-ENOMEM); |
2247 | | |
2248 | 0 | sz = sizeof(struct btf_type); |
2249 | 0 | t = btf_add_type_mem(btf, sz); |
2250 | 0 | if (!t) |
2251 | 0 | return libbpf_err(-ENOMEM); |
2252 | | |
2253 | 0 | if (name && name[0]) { |
2254 | 0 | name_off = btf__add_str(btf, name); |
2255 | 0 | if (name_off < 0) |
2256 | 0 | return name_off; |
2257 | 0 | } |
2258 | | |
2259 | | /* start out with vlen=0 and no kflag; this will be adjusted when |
2260 | | * adding each member |
2261 | | */ |
2262 | 0 | t->name_off = name_off; |
2263 | 0 | t->info = btf_type_info(kind, 0, 0); |
2264 | 0 | t->size = bytes_sz; |
2265 | |
2266 | 0 | return btf_commit_type(btf, sz); |
2267 | 0 | } |
2268 | | |
2269 | | /* |
2270 | | * Append new BTF_KIND_STRUCT type with: |
2271 | | * - *name* - name of the struct, can be NULL or empty for anonymous structs; |
2272 | | * - *byte_sz* - size of the struct, in bytes; |
2273 | | * |
2274 | | * Struct initially has no fields in it. Fields can be added by |
2275 | | * btf__add_field() right after btf__add_struct() succeeds. |
2276 | | * |
2277 | | * Returns: |
2278 | | * - >0, type ID of newly added BTF type; |
2279 | | * - <0, on error. |
2280 | | */ |
2281 | | int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz) |
2282 | 0 | { |
2283 | 0 | return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz); |
2284 | 0 | } |
2285 | | |
2286 | | /* |
2287 | | * Append new BTF_KIND_UNION type with: |
2288 | | * - *name* - name of the union, can be NULL or empty for anonymous union; |
2289 | | * - *byte_sz* - size of the union, in bytes; |
2290 | | * |
2291 | | * Union initially has no fields in it. Fields can be added by |
2292 | | * btf__add_field() right after btf__add_union() succeeds. All fields |
2293 | | * should have *bit_offset* of 0. |
2294 | | * |
2295 | | * Returns: |
2296 | | * - >0, type ID of newly added BTF type; |
2297 | | * - <0, on error. |
2298 | | */ |
2299 | | int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz) |
2300 | 0 | { |
2301 | 0 | return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz); |
2302 | 0 | } |
2303 | | |
2304 | | static struct btf_type *btf_last_type(struct btf *btf) |
2305 | 0 | { |
2306 | 0 | return btf_type_by_id(btf, btf__type_cnt(btf) - 1); |
2307 | 0 | } |
2308 | | |
2309 | | /* |
2310 | | * Append new field for the current STRUCT/UNION type with: |
2311 | | * - *name* - name of the field, can be NULL or empty for anonymous field; |
2312 | | * - *type_id* - type ID for the type describing field type; |
2313 | | * - *bit_offset* - bit offset of the start of the field within struct/union; |
2314 | | * - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields; |
2315 | | * Returns: |
2316 | | * - 0, on success; |
2317 | | * - <0, on error. |
2318 | | */ |
2319 | | int btf__add_field(struct btf *btf, const char *name, int type_id, |
2320 | | __u32 bit_offset, __u32 bit_size) |
2321 | 0 | { |
2322 | 0 | struct btf_type *t; |
2323 | 0 | struct btf_member *m; |
2324 | 0 | bool is_bitfield; |
2325 | 0 | int sz, name_off = 0; |
2326 | | |
2327 | | /* last type should be union/struct */ |
2328 | 0 | if (btf->nr_types == 0) |
2329 | 0 | return libbpf_err(-EINVAL); |
2330 | 0 | t = btf_last_type(btf); |
2331 | 0 | if (!btf_is_composite(t)) |
2332 | 0 | return libbpf_err(-EINVAL); |
2333 | | |
2334 | 0 | if (validate_type_id(type_id)) |
2335 | 0 | return libbpf_err(-EINVAL); |
2336 | | /* best-effort bit field offset/size enforcement */ |
2337 | 0 | is_bitfield = bit_size || (bit_offset % 8 != 0); |
2338 | 0 | if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff)) |
2339 | 0 | return libbpf_err(-EINVAL); |
2340 | | |
2341 | | /* only offset 0 is allowed for unions */ |
2342 | 0 | if (btf_is_union(t) && bit_offset) |
2343 | 0 | return libbpf_err(-EINVAL); |
2344 | | |
2345 | | /* decompose and invalidate raw data */ |
2346 | 0 | if (btf_ensure_modifiable(btf)) |
2347 | 0 | return libbpf_err(-ENOMEM); |
2348 | | |
2349 | 0 | sz = sizeof(struct btf_member); |
2350 | 0 | m = btf_add_type_mem(btf, sz); |
2351 | 0 | if (!m) |
2352 | 0 | return libbpf_err(-ENOMEM); |
2353 | | |
2354 | 0 | if (name && name[0]) { |
2355 | 0 | name_off = btf__add_str(btf, name); |
2356 | 0 | if (name_off < 0) |
2357 | 0 | return name_off; |
2358 | 0 | } |
2359 | | |
2360 | 0 | m->name_off = name_off; |
2361 | 0 | m->type = type_id; |
2362 | 0 | m->offset = bit_offset | (bit_size << 24); |
2363 | | |
2364 | | /* btf_add_type_mem can invalidate t pointer */ |
2365 | 0 | t = btf_last_type(btf); |
2366 | | /* update parent type's vlen and kflag */ |
2367 | 0 | t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t)); |
2368 | |
2369 | 0 | btf->hdr->type_len += sz; |
2370 | 0 | btf->hdr->str_off += sz; |
2371 | 0 | return 0; |
2372 | 0 | } |
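 | |
 | | /* Usage sketch (illustrative only): build `struct pair { int x; int y; }`,
 | |  * assuming *int_id* names a previously added 4-byte "int". Field
 | |  * offsets are in bits, so the second member starts at bit 32:
 | |  *
 | |  *	int st_id = btf__add_struct(btf, "pair", 8);
 | |  *
 | |  *	btf__add_field(btf, "x", int_id, 0, 0);
 | |  *	btf__add_field(btf, "y", int_id, 32, 0);
 | |  */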
2373 | | |
2374 | | static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz, |
2375 | | bool is_signed, __u8 kind) |
2376 | 0 | { |
2377 | 0 | struct btf_type *t; |
2378 | 0 | int sz, name_off = 0; |
2379 | | |
2380 | | /* byte_sz must be power of 2 */ |
2381 | 0 | if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8) |
2382 | 0 | return libbpf_err(-EINVAL); |
2383 | | |
2384 | 0 | if (btf_ensure_modifiable(btf)) |
2385 | 0 | return libbpf_err(-ENOMEM); |
2386 | | |
2387 | 0 | sz = sizeof(struct btf_type); |
2388 | 0 | t = btf_add_type_mem(btf, sz); |
2389 | 0 | if (!t) |
2390 | 0 | return libbpf_err(-ENOMEM); |
2391 | | |
2392 | 0 | if (name && name[0]) { |
2393 | 0 | name_off = btf__add_str(btf, name); |
2394 | 0 | if (name_off < 0) |
2395 | 0 | return name_off; |
2396 | 0 | } |
2397 | | |
2398 | | /* start out with vlen=0; it will be adjusted when adding enum values */ |
2399 | 0 | t->name_off = name_off; |
2400 | 0 | t->info = btf_type_info(kind, 0, is_signed); |
2401 | 0 | t->size = byte_sz; |
2402 | |
|
2403 | 0 | return btf_commit_type(btf, sz); |
2404 | 0 | } |
2405 | | |
2406 | | /* |
2407 | | * Append new BTF_KIND_ENUM type with: |
2408 | | * - *name* - name of the enum, can be NULL or empty for anonymous enums; |
2409 | | * - *byte_sz* - size of the enum, in bytes. |
2410 | | * |
2411 | | * Enum initially has no enum values in it (and corresponds to enum forward |
2412 | | * declaration). Enumerator values can be added by btf__add_enum_value() |
2413 | | * immediately after btf__add_enum() succeeds. |
2414 | | * |
2415 | | * Returns: |
2416 | | * - >0, type ID of newly added BTF type; |
2417 | | * - <0, on error. |
2418 | | */ |
2419 | | int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz) |
2420 | 0 | { |
2421 | | /* |
2422 | | * set the signedness to unsigned; it will change to signed
2423 | | * if any later enumerator value is negative.
2424 | | */ |
2425 | 0 | return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM); |
2426 | 0 | } |
2427 | | |
2428 | | /* |
2429 | | * Append new enum value for the current ENUM type with: |
2430 | | * - *name* - name of the enumerator value, can't be NULL or empty; |
2431 | | * - *value* - integer value corresponding to enum value *name*; |
2432 | | * Returns: |
2433 | | * - 0, on success; |
2434 | | * - <0, on error. |
2435 | | */ |
2436 | | int btf__add_enum_value(struct btf *btf, const char *name, __s64 value) |
2437 | 0 | { |
2438 | 0 | struct btf_type *t; |
2439 | 0 | struct btf_enum *v; |
2440 | 0 | int sz, name_off; |
2441 | | |
2442 | | /* last type should be BTF_KIND_ENUM */ |
2443 | 0 | if (btf->nr_types == 0) |
2444 | 0 | return libbpf_err(-EINVAL); |
2445 | 0 | t = btf_last_type(btf); |
2446 | 0 | if (!btf_is_enum(t)) |
2447 | 0 | return libbpf_err(-EINVAL); |
2448 | | |
2449 | | /* non-empty name */ |
2450 | 0 | if (!name || !name[0]) |
2451 | 0 | return libbpf_err(-EINVAL); |
2452 | 0 | if (value < INT_MIN || value > UINT_MAX) |
2453 | 0 | return libbpf_err(-E2BIG); |
2454 | | |
2455 | | /* decompose and invalidate raw data */ |
2456 | 0 | if (btf_ensure_modifiable(btf)) |
2457 | 0 | return libbpf_err(-ENOMEM); |
2458 | | |
2459 | 0 | sz = sizeof(struct btf_enum); |
2460 | 0 | v = btf_add_type_mem(btf, sz); |
2461 | 0 | if (!v) |
2462 | 0 | return libbpf_err(-ENOMEM); |
2463 | | |
2464 | 0 | name_off = btf__add_str(btf, name); |
2465 | 0 | if (name_off < 0) |
2466 | 0 | return name_off; |
2467 | | |
2468 | 0 | v->name_off = name_off; |
2469 | 0 | v->val = value; |
2470 | | |
2471 | | /* update parent type's vlen */ |
2472 | 0 | t = btf_last_type(btf); |
2473 | 0 | btf_type_inc_vlen(t); |
2474 | | |
2475 | | /* if negative value, set signedness to signed */ |
2476 | 0 | if (value < 0) |
2477 | 0 | t->info = btf_type_info(btf_kind(t), btf_vlen(t), true); |
2478 | |
2479 | 0 | btf->hdr->type_len += sz; |
2480 | 0 | btf->hdr->str_off += sz; |
2481 | 0 | return 0; |
2482 | 0 | } |
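 | |
 | | /* Usage sketch (illustrative only): model `enum state { OFF, ON };`;
 | |  * values must be added right after the enum itself:
 | |  *
 | |  *	int e_id = btf__add_enum(btf, "state", 4);
 | |  *
 | |  *	btf__add_enum_value(btf, "OFF", 0);
 | |  *	btf__add_enum_value(btf, "ON", 1);
 | |  */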
2483 | | |
2484 | | /* |
2485 | | * Append new BTF_KIND_ENUM64 type with: |
2486 | | * - *name* - name of the enum, can be NULL or empty for anonymous enums; |
2487 | | * - *byte_sz* - size of the enum, in bytes. |
2488 | | * - *is_signed* - whether the enum values are signed or not; |
2489 | | * |
2490 | | * Enum initially has no enum values in it (and corresponds to enum forward |
2491 | | * declaration). Enumerator values can be added by btf__add_enum64_value() |
2492 | | * immediately after btf__add_enum64() succeeds. |
2493 | | * |
2494 | | * Returns: |
2495 | | * - >0, type ID of newly added BTF type; |
2496 | | * - <0, on error. |
2497 | | */ |
2498 | | int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz, |
2499 | | bool is_signed) |
2500 | 0 | { |
2501 | 0 | return btf_add_enum_common(btf, name, byte_sz, is_signed, |
2502 | 0 | BTF_KIND_ENUM64); |
2503 | 0 | } |
2504 | | |
2505 | | /* |
2506 | | * Append new enum value for the current ENUM64 type with: |
2507 | | * - *name* - name of the enumerator value, can't be NULL or empty; |
2508 | | * - *value* - integer value corresponding to enum value *name*; |
2509 | | * Returns: |
2510 | | * - 0, on success; |
2511 | | * - <0, on error. |
2512 | | */ |
2513 | | int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value) |
2514 | 0 | { |
2515 | 0 | struct btf_enum64 *v; |
2516 | 0 | struct btf_type *t; |
2517 | 0 | int sz, name_off; |
2518 | | |
2519 | | /* last type should be BTF_KIND_ENUM64 */ |
2520 | 0 | if (btf->nr_types == 0) |
2521 | 0 | return libbpf_err(-EINVAL); |
2522 | 0 | t = btf_last_type(btf); |
2523 | 0 | if (!btf_is_enum64(t)) |
2524 | 0 | return libbpf_err(-EINVAL); |
2525 | | |
2526 | | /* non-empty name */ |
2527 | 0 | if (!name || !name[0]) |
2528 | 0 | return libbpf_err(-EINVAL); |
2529 | | |
2530 | | /* decompose and invalidate raw data */ |
2531 | 0 | if (btf_ensure_modifiable(btf)) |
2532 | 0 | return libbpf_err(-ENOMEM); |
2533 | | |
2534 | 0 | sz = sizeof(struct btf_enum64); |
2535 | 0 | v = btf_add_type_mem(btf, sz); |
2536 | 0 | if (!v) |
2537 | 0 | return libbpf_err(-ENOMEM); |
2538 | | |
2539 | 0 | name_off = btf__add_str(btf, name); |
2540 | 0 | if (name_off < 0) |
2541 | 0 | return name_off; |
2542 | | |
2543 | 0 | v->name_off = name_off; |
2544 | 0 | v->val_lo32 = (__u32)value; |
2545 | 0 | v->val_hi32 = value >> 32; |
2546 | | |
2547 | | /* update parent type's vlen */ |
2548 | 0 | t = btf_last_type(btf); |
2549 | 0 | btf_type_inc_vlen(t); |
2550 | |
2551 | 0 | btf->hdr->type_len += sz; |
2552 | 0 | btf->hdr->str_off += sz; |
2553 | 0 | return 0; |
2554 | 0 | } |
2555 | | |
2556 | | /* |
2557 | | * Append new BTF_KIND_FWD type with: |
2558 | | * - *name*, non-empty/non-NULL name; |
2559 | | * - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT, |
2560 | | * BTF_FWD_UNION, or BTF_FWD_ENUM; |
2561 | | * Returns: |
2562 | | * - >0, type ID of newly added BTF type; |
2563 | | * - <0, on error. |
2564 | | */ |
2565 | | int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind) |
2566 | 0 | { |
2567 | 0 | if (!name || !name[0]) |
2568 | 0 | return libbpf_err(-EINVAL); |
2569 | | |
2570 | 0 | switch (fwd_kind) { |
2571 | 0 | case BTF_FWD_STRUCT: |
2572 | 0 | case BTF_FWD_UNION: { |
2573 | 0 | struct btf_type *t; |
2574 | 0 | int id; |
2575 | |
2576 | 0 | id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0, 0); |
2577 | 0 | if (id <= 0) |
2578 | 0 | return id; |
2579 | 0 | t = btf_type_by_id(btf, id); |
2580 | 0 | t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION); |
2581 | 0 | return id; |
2582 | 0 | } |
2583 | 0 | case BTF_FWD_ENUM: |
2584 | | /* an enum forward declaration in BTF is currently just an enum with
2585 | | * no enum values; we also assume a standard 4-byte size for it
2586 | | */ |
2587 | 0 | return btf__add_enum(btf, name, sizeof(int)); |
2588 | 0 | default: |
2589 | 0 | return libbpf_err(-EINVAL); |
2590 | 0 | } |
2591 | 0 | } |
2592 | | |
2593 | | /* |
2594 | | * Append new BTF_KIND_TYPEDEF type with:
2595 | | * - *name*, non-empty/non-NULL name; |
2596 | | * - *ref_type_id* - referenced type ID, it might not exist yet; |
2597 | | * Returns: |
2598 | | * - >0, type ID of newly added BTF type; |
2599 | | * - <0, on error. |
2600 | | */ |
2601 | | int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id) |
2602 | 0 | { |
2603 | 0 | if (!name || !name[0]) |
2604 | 0 | return libbpf_err(-EINVAL); |
2605 | | |
2606 | 0 | return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id, 0); |
2607 | 0 | } |
2608 | | |
2609 | | /* |
2610 | | * Append new BTF_KIND_VOLATILE type with: |
2611 | | * - *ref_type_id* - referenced type ID, it might not exist yet; |
2612 | | * Returns: |
2613 | | * - >0, type ID of newly added BTF type; |
2614 | | * - <0, on error. |
2615 | | */ |
2616 | | int btf__add_volatile(struct btf *btf, int ref_type_id) |
2617 | 0 | { |
2618 | 0 | return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id, 0); |
2619 | 0 | } |
2620 | | |
2621 | | /* |
2622 | | * Append new BTF_KIND_CONST type with: |
2623 | | * - *ref_type_id* - referenced type ID, it might not exist yet; |
2624 | | * Returns: |
2625 | | * - >0, type ID of newly added BTF type; |
2626 | | * - <0, on error. |
2627 | | */ |
2628 | | int btf__add_const(struct btf *btf, int ref_type_id) |
2629 | 0 | { |
2630 | 0 | return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id, 0); |
2631 | 0 | } |
2632 | | |
2633 | | /* |
2634 | | * Append new BTF_KIND_RESTRICT type with: |
2635 | | * - *ref_type_id* - referenced type ID, it might not exist yet; |
2636 | | * Returns: |
2637 | | * - >0, type ID of newly added BTF type; |
2638 | | * - <0, on error. |
2639 | | */ |
2640 | | int btf__add_restrict(struct btf *btf, int ref_type_id) |
2641 | 0 | { |
2642 | 0 | return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id, 0); |
2643 | 0 | } |
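 | |
 | | /* Usage sketch (illustrative only): chain modifiers to model
 | |  * `typedef const char *str_t`, assuming *char_id* names a previously
 | |  * added "char" type:
 | |  *
 | |  *	int const_id = btf__add_const(btf, char_id);
 | |  *	int ptr_id = btf__add_ptr(btf, const_id);
 | |  *	int tdef_id = btf__add_typedef(btf, "str_t", ptr_id);
 | |  */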
2644 | | |
2645 | | /* |
2646 | | * Append new BTF_KIND_TYPE_TAG type with: |
2647 | | * - *value*, non-empty/non-NULL tag value; |
2648 | | * - *ref_type_id* - referenced type ID, it might not exist yet; |
2649 | | * Returns: |
2650 | | * - >0, type ID of newly added BTF type; |
2651 | | * - <0, on error. |
2652 | | */ |
2653 | | int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id) |
2654 | 0 | { |
2655 | 0 | if (!value || !value[0]) |
2656 | 0 | return libbpf_err(-EINVAL); |
2657 | | |
2658 | 0 | return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id, 0); |
2659 | 0 | } |
2660 | | |
2661 | | /* |
2662 | | * Append new BTF_KIND_TYPE_TAG type with: |
2663 | | * - *value*, non-empty/non-NULL tag value; |
2664 | | * - *ref_type_id* - referenced type ID, it might not exist yet; |
2665 | | * Sets info->kflag to 1, indicating this tag is an __attribute__.
2666 | | * Returns: |
2667 | | * - >0, type ID of newly added BTF type; |
2668 | | * - <0, on error. |
2669 | | */ |
2670 | | int btf__add_type_attr(struct btf *btf, const char *value, int ref_type_id) |
2671 | 0 | { |
2672 | 0 | if (!value || !value[0]) |
2673 | 0 | return libbpf_err(-EINVAL); |
2674 | | |
2675 | 0 | return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id, 1); |
2676 | 0 | } |
2677 | | |
2678 | | /* |
2679 | | * Append new BTF_KIND_FUNC type with: |
2680 | | * - *name*, non-empty/non-NULL name; |
2681 | | * - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet; |
2682 | | * Returns: |
2683 | | * - >0, type ID of newly added BTF type; |
2684 | | * - <0, on error. |
2685 | | */ |
2686 | | int btf__add_func(struct btf *btf, const char *name, |
2687 | | enum btf_func_linkage linkage, int proto_type_id) |
2688 | 0 | { |
2689 | 0 | int id; |
2690 | |
2691 | 0 | if (!name || !name[0]) |
2692 | 0 | return libbpf_err(-EINVAL); |
2693 | 0 | if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL && |
2694 | 0 | linkage != BTF_FUNC_EXTERN) |
2695 | 0 | return libbpf_err(-EINVAL); |
2696 | | |
2697 | 0 | id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id, 0); |
2698 | 0 | if (id > 0) { |
2699 | 0 | struct btf_type *t = btf_type_by_id(btf, id); |
2700 | |
2701 | 0 | t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0); |
2702 | 0 | } |
2703 | 0 | return libbpf_err(id); |
2704 | 0 | } |
2705 | | |
2706 | | /* |
2707 | | * Append new BTF_KIND_FUNC_PROTO with: |
2708 | | * - *ret_type_id* - type ID of the function's return type.
2709 | | * |
2710 | | * Function prototype initially has no arguments, but they can be added by |
2711 | | * btf__add_func_param() one by one, immediately after |
2712 | | * btf__add_func_proto() succeeded. |
2713 | | * |
2714 | | * Returns: |
2715 | | * - >0, type ID of newly added BTF type; |
2716 | | * - <0, on error. |
2717 | | */ |
2718 | | int btf__add_func_proto(struct btf *btf, int ret_type_id) |
2719 | 0 | { |
2720 | 0 | struct btf_type *t; |
2721 | 0 | int sz; |
2722 | |
2723 | 0 | if (validate_type_id(ret_type_id)) |
2724 | 0 | return libbpf_err(-EINVAL); |
2725 | | |
2726 | 0 | if (btf_ensure_modifiable(btf)) |
2727 | 0 | return libbpf_err(-ENOMEM); |
2728 | | |
2729 | 0 | sz = sizeof(struct btf_type); |
2730 | 0 | t = btf_add_type_mem(btf, sz); |
2731 | 0 | if (!t) |
2732 | 0 | return libbpf_err(-ENOMEM); |
2733 | | |
2734 | | /* start out with vlen=0; this will be adjusted when adding function
2735 | | * parameters, if necessary
2736 | | */ |
2737 | 0 | t->name_off = 0; |
2738 | 0 | t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0); |
2739 | 0 | t->type = ret_type_id; |
2740 | |
2741 | 0 | return btf_commit_type(btf, sz); |
2742 | 0 | } |
2743 | | |
2744 | | /* |
2745 | | * Append new function parameter for current FUNC_PROTO type with: |
2746 | | * - *name* - parameter name, can be NULL or empty; |
2747 | | * - *type_id* - type ID describing the type of the parameter. |
2748 | | * Returns: |
2749 | | * - 0, on success; |
2750 | | * - <0, on error. |
2751 | | */ |
2752 | | int btf__add_func_param(struct btf *btf, const char *name, int type_id) |
2753 | 0 | { |
2754 | 0 | struct btf_type *t; |
2755 | 0 | struct btf_param *p; |
2756 | 0 | int sz, name_off = 0; |
2757 | |
2758 | 0 | if (validate_type_id(type_id)) |
2759 | 0 | return libbpf_err(-EINVAL); |
2760 | | |
2761 | | /* last type should be BTF_KIND_FUNC_PROTO */ |
2762 | 0 | if (btf->nr_types == 0) |
2763 | 0 | return libbpf_err(-EINVAL); |
2764 | 0 | t = btf_last_type(btf); |
2765 | 0 | if (!btf_is_func_proto(t)) |
2766 | 0 | return libbpf_err(-EINVAL); |
2767 | | |
2768 | | /* decompose and invalidate raw data */ |
2769 | 0 | if (btf_ensure_modifiable(btf)) |
2770 | 0 | return libbpf_err(-ENOMEM); |
2771 | | |
2772 | 0 | sz = sizeof(struct btf_param); |
2773 | 0 | p = btf_add_type_mem(btf, sz); |
2774 | 0 | if (!p) |
2775 | 0 | return libbpf_err(-ENOMEM); |
2776 | | |
2777 | 0 | if (name && name[0]) { |
2778 | 0 | name_off = btf__add_str(btf, name); |
2779 | 0 | if (name_off < 0) |
2780 | 0 | return name_off; |
2781 | 0 | } |
2782 | | |
2783 | 0 | p->name_off = name_off; |
2784 | 0 | p->type = type_id; |
2785 | | |
2786 | | /* update parent type's vlen */ |
2787 | 0 | t = btf_last_type(btf); |
2788 | 0 | btf_type_inc_vlen(t); |
2789 | |
2790 | 0 | btf->hdr->type_len += sz; |
2791 | 0 | btf->hdr->str_off += sz; |
2792 | 0 | return 0; |
2793 | 0 | } |
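 | |
 | | /* Usage sketch (illustrative only): declare `int sum(int a, int b)`,
 | |  * assuming *int_id* names a previously added "int". Parameters must be
 | |  * added immediately after btf__add_func_proto(), before appending any
 | |  * other type:
 | |  *
 | |  *	int proto_id = btf__add_func_proto(btf, int_id);
 | |  *
 | |  *	btf__add_func_param(btf, "a", int_id);
 | |  *	btf__add_func_param(btf, "b", int_id);
 | |  *	btf__add_func(btf, "sum", BTF_FUNC_GLOBAL, proto_id);
 | |  */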
2794 | | |
2795 | | /* |
2796 | | * Append new BTF_KIND_VAR type with: |
2797 | | * - *name* - non-empty/non-NULL name; |
2798 | | * - *linkage* - variable linkage, one of BTF_VAR_STATIC, |
2799 | | * BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN; |
2800 | | * - *type_id* - type ID of the type describing the type of the variable. |
2801 | | * Returns: |
2802 | | * - >0, type ID of newly added BTF type; |
2803 | | * - <0, on error. |
2804 | | */ |
2805 | | int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id) |
2806 | 396 | { |
2807 | 396 | struct btf_type *t; |
2808 | 396 | struct btf_var *v; |
2809 | 396 | int sz, name_off; |
2810 | | |
2811 | | /* non-empty name */ |
2812 | 396 | if (!name || !name[0]) |
2813 | 0 | return libbpf_err(-EINVAL); |
2814 | 396 | if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED && |
2815 | 396 | linkage != BTF_VAR_GLOBAL_EXTERN) |
2816 | 0 | return libbpf_err(-EINVAL); |
2817 | 396 | if (validate_type_id(type_id)) |
2818 | 0 | return libbpf_err(-EINVAL); |
2819 | | |
2820 | | /* deconstruct BTF, if necessary, and invalidate raw_data */ |
2821 | 396 | if (btf_ensure_modifiable(btf)) |
2822 | 0 | return libbpf_err(-ENOMEM); |
2823 | | |
2824 | 396 | sz = sizeof(struct btf_type) + sizeof(struct btf_var); |
2825 | 396 | t = btf_add_type_mem(btf, sz); |
2826 | 396 | if (!t) |
2827 | 0 | return libbpf_err(-ENOMEM); |
2828 | | |
2829 | 396 | name_off = btf__add_str(btf, name); |
2830 | 396 | if (name_off < 0) |
2831 | 0 | return name_off; |
2832 | | |
2833 | 396 | t->name_off = name_off; |
2834 | 396 | t->info = btf_type_info(BTF_KIND_VAR, 0, 0); |
2835 | 396 | t->type = type_id; |
2836 | | |
2837 | 396 | v = btf_var(t); |
2838 | 396 | v->linkage = linkage; |
2839 | | |
2840 | 396 | return btf_commit_type(btf, sz); |
2841 | 396 | } |
2842 | | |
2843 | | /* |
2844 | | * Append new BTF_KIND_DATASEC type with: |
2845 | | * - *name* - non-empty/non-NULL name; |
2846 | | * - *byte_sz* - data section size, in bytes. |
2847 | | * |
2848 | | * Data section is initially empty. Variable info can be added with
2849 | | * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds. |
2850 | | * |
2851 | | * Returns: |
2852 | | * - >0, type ID of newly added BTF type; |
2853 | | * - <0, on error. |
2854 | | */ |
2855 | | int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz) |
2856 | 0 | { |
2857 | 0 | struct btf_type *t; |
2858 | 0 | int sz, name_off; |
2859 | | |
2860 | | /* non-empty name */ |
2861 | 0 | if (!name || !name[0]) |
2862 | 0 | return libbpf_err(-EINVAL); |
2863 | | |
2864 | 0 | if (btf_ensure_modifiable(btf)) |
2865 | 0 | return libbpf_err(-ENOMEM); |
2866 | | |
2867 | 0 | sz = sizeof(struct btf_type); |
2868 | 0 | t = btf_add_type_mem(btf, sz); |
2869 | 0 | if (!t) |
2870 | 0 | return libbpf_err(-ENOMEM); |
2871 | | |
2872 | 0 | name_off = btf__add_str(btf, name); |
2873 | 0 | if (name_off < 0) |
2874 | 0 | return name_off; |
2875 | | |
2876 | | /* start with vlen=0, which will be updated as var_secinfos are added */
2877 | 0 | t->name_off = name_off; |
2878 | 0 | t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0); |
2879 | 0 | t->size = byte_sz; |
2880 | |
2881 | 0 | return btf_commit_type(btf, sz); |
2882 | 0 | } |
2883 | | |
2884 | | /* |
2885 | | * Append new data section variable information entry for current DATASEC type: |
2886 | | * - *var_type_id* - type ID, describing type of the variable; |
2887 | | * - *offset* - variable offset within data section, in bytes; |
2888 | | * - *byte_sz* - variable size, in bytes. |
2889 | | * |
2890 | | * Returns: |
2891 | | * - 0, on success; |
2892 | | * - <0, on error. |
2893 | | */ |
2894 | | int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz) |
2895 | 0 | { |
2896 | 0 | struct btf_type *t; |
2897 | 0 | struct btf_var_secinfo *v; |
2898 | 0 | int sz; |
2899 | | |
2900 | | /* last type should be BTF_KIND_DATASEC */ |
2901 | 0 | if (btf->nr_types == 0) |
2902 | 0 | return libbpf_err(-EINVAL); |
2903 | 0 | t = btf_last_type(btf); |
2904 | 0 | if (!btf_is_datasec(t)) |
2905 | 0 | return libbpf_err(-EINVAL); |
2906 | | |
2907 | 0 | if (validate_type_id(var_type_id)) |
2908 | 0 | return libbpf_err(-EINVAL); |
2909 | | |
2910 | | /* decompose and invalidate raw data */ |
2911 | 0 | if (btf_ensure_modifiable(btf)) |
2912 | 0 | return libbpf_err(-ENOMEM); |
2913 | | |
2914 | 0 | sz = sizeof(struct btf_var_secinfo); |
2915 | 0 | v = btf_add_type_mem(btf, sz); |
2916 | 0 | if (!v) |
2917 | 0 | return libbpf_err(-ENOMEM); |
2918 | | |
2919 | 0 | v->type = var_type_id; |
2920 | 0 | v->offset = offset; |
2921 | 0 | v->size = byte_sz; |
2922 | | |
2923 | | /* update parent type's vlen */ |
2924 | 0 | t = btf_last_type(btf); |
2925 | 0 | btf_type_inc_vlen(t); |
2926 | |
2927 | 0 | btf->hdr->type_len += sz; |
2928 | 0 | btf->hdr->str_off += sz; |
2929 | 0 | return 0; |
2930 | 0 | } |
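 | |
 | | /* Usage sketch (illustrative only): describe a 4-byte global variable
 | |  * at offset 0 of a ".data" section, assuming *int_id* names a
 | |  * previously added "int":
 | |  *
 | |  *	int var_id = btf__add_var(btf, "cnt", BTF_VAR_GLOBAL_ALLOCATED, int_id);
 | |  *	int sec_id = btf__add_datasec(btf, ".data", 4);
 | |  *
 | |  *	btf__add_datasec_var_info(btf, var_id, 0, 4);
 | |  */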
2931 | | |
2932 | | static int btf_add_decl_tag(struct btf *btf, const char *value, int ref_type_id, |
2933 | | int component_idx, int kflag) |
2934 | 0 | { |
2935 | 0 | struct btf_type *t; |
2936 | 0 | int sz, value_off; |
2937 | |
2938 | 0 | if (!value || !value[0] || component_idx < -1) |
2939 | 0 | return libbpf_err(-EINVAL); |
2940 | | |
2941 | 0 | if (validate_type_id(ref_type_id)) |
2942 | 0 | return libbpf_err(-EINVAL); |
2943 | | |
2944 | 0 | if (btf_ensure_modifiable(btf)) |
2945 | 0 | return libbpf_err(-ENOMEM); |
2946 | | |
2947 | 0 | sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag); |
2948 | 0 | t = btf_add_type_mem(btf, sz); |
2949 | 0 | if (!t) |
2950 | 0 | return libbpf_err(-ENOMEM); |
2951 | | |
2952 | 0 | value_off = btf__add_str(btf, value); |
2953 | 0 | if (value_off < 0) |
2954 | 0 | return value_off; |
2955 | | |
2956 | 0 | t->name_off = value_off; |
2957 | 0 | t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, kflag); |
2958 | 0 | t->type = ref_type_id; |
2959 | 0 | btf_decl_tag(t)->component_idx = component_idx; |
2960 | |
2961 | 0 | return btf_commit_type(btf, sz); |
2962 | 0 | } |
2963 | | |
2964 | | /* |
2965 | | * Append new BTF_KIND_DECL_TAG type with: |
2966 | | * - *value* - non-empty/non-NULL string; |
2967 | | * - *ref_type_id* - referenced type ID, it might not exist yet; |
2968 | | * - *component_idx* - -1 for tagging reference type, otherwise struct/union |
2969 | | * member or function argument index; |
2970 | | * Returns: |
2971 | | * - >0, type ID of newly added BTF type; |
2972 | | * - <0, on error. |
2973 | | */ |
2974 | | int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id, |
2975 | | int component_idx) |
2976 | 0 | { |
2977 | 0 | return btf_add_decl_tag(btf, value, ref_type_id, component_idx, 0); |
2978 | 0 | } |
2979 | | |
2980 | | /* |
2981 | | * Append new BTF_KIND_DECL_TAG type with: |
2982 | | * - *value* - non-empty/non-NULL string; |
2983 | | * - *ref_type_id* - referenced type ID, it might not exist yet; |
2984 | | * - *component_idx* - -1 for tagging reference type, otherwise struct/union |
2985 | | * member or function argument index; |
2986 | | * Sets info->kflag to 1, indicating this tag is an __attribute__.
2987 | | * Returns: |
2988 | | * - >0, type ID of newly added BTF type; |
2989 | | * - <0, on error. |
2990 | | */ |
2991 | | int btf__add_decl_attr(struct btf *btf, const char *value, int ref_type_id, |
2992 | | int component_idx) |
2993 | 0 | { |
2994 | 0 | return btf_add_decl_tag(btf, value, ref_type_id, component_idx, 1); |
2995 | 0 | } |
2996 | | |
2997 | | struct btf_ext_sec_info_param { |
2998 | | __u32 off; |
2999 | | __u32 len; |
3000 | | __u32 min_rec_size; |
3001 | | struct btf_ext_info *ext_info; |
3002 | | const char *desc; |
3003 | | }; |
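 | |
 | | /* Rough on-disk layout of one info subsection parsed below (a sketch
 | |  * for orientation, not an authoritative definition):
 | |  *
 | |  *	__u32 record_size;		// size of one record, in bytes
 | |  *	struct btf_ext_info_sec {	// repeated until subsection end
 | |  *		__u32 sec_name_off;
 | |  *		__u32 num_info;
 | |  *		__u8  data[];		// num_info records of record_size
 | |  *	};
 | |  */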
3004 | | |
3005 | | /* |
3006 | | * Parse a single info subsection of the BTF.ext info data: |
3007 | | * - validate subsection structure and elements |
3008 | | * - save info subsection start and sizing details in struct btf_ext |
3009 | | * - endian-independent operation, for calling before byte-swapping |
3010 | | */ |
3011 | | static int btf_ext_parse_sec_info(struct btf_ext *btf_ext, |
3012 | | struct btf_ext_sec_info_param *ext_sec, |
3013 | | bool is_native) |
3014 | 716 | { |
3015 | 716 | const struct btf_ext_info_sec *sinfo; |
3016 | 716 | struct btf_ext_info *ext_info; |
3017 | 716 | __u32 info_left, record_size; |
3018 | 716 | size_t sec_cnt = 0; |
3019 | 716 | void *info; |
3020 | | |
3021 | 716 | if (ext_sec->len == 0) |
3022 | 291 | return 0; |
3023 | | |
3024 | 425 | if (ext_sec->off & 0x03) { |
3025 | 22 | pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n", |
3026 | 22 | ext_sec->desc); |
3027 | 22 | return -EINVAL; |
3028 | 22 | } |
3029 | | |
3030 | | /* The start of the info sec (including the __u32 record_size). */ |
3031 | 403 | info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off; |
3032 | 403 | info_left = ext_sec->len; |
3033 | | |
3034 | 403 | if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) { |
3035 | 47 | pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n", |
3036 | 47 | ext_sec->desc, ext_sec->off, ext_sec->len); |
3037 | 47 | return -EINVAL; |
3038 | 47 | } |
3039 | | |
3040 | | /* At least a record size */ |
3041 | 356 | if (info_left < sizeof(__u32)) { |
3042 | 1 | pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc); |
3043 | 1 | return -EINVAL; |
3044 | 1 | } |
3045 | | |
3046 | | /* The record size must be at least the known minimum and, when
3047 | | * handling non-native endianness data, exactly the expected size,
3048 | | * so that records can be byte-swapped safely.
3049 | | */ |
3050 | 355 | record_size = is_native ? *(__u32 *)info : bswap_32(*(__u32 *)info); |
3051 | 355 | if (record_size < ext_sec->min_rec_size || |
3052 | 355 | (!is_native && record_size != ext_sec->min_rec_size) || |
3053 | 355 | record_size & 0x03) { |
3054 | 44 | pr_debug("%s section in .BTF.ext has invalid record size %u\n", |
3055 | 44 | ext_sec->desc, record_size); |
3056 | 44 | return -EINVAL; |
3057 | 44 | } |
3058 | | |
3059 | 311 | sinfo = info + sizeof(__u32); |
3060 | 311 | info_left -= sizeof(__u32); |
3061 | | |
3062 | | /* If no records, return failure now so .BTF.ext won't be used. */ |
3063 | 311 | if (!info_left) { |
3064 | 1 | pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc); |
3065 | 1 | return -EINVAL; |
3066 | 1 | } |
3067 | | |
3068 | 617 | while (info_left) { |
3069 | 425 | unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec); |
3070 | 425 | __u64 total_record_size; |
3071 | 425 | __u32 num_records; |
3072 | | |
3073 | 425 | if (info_left < sec_hdrlen) { |
3074 | 3 | pr_debug("%s section header is not found in .BTF.ext\n", |
3075 | 3 | ext_sec->desc); |
3076 | 3 | return -EINVAL; |
3077 | 3 | } |
3078 | | |
3079 | 422 | num_records = is_native ? sinfo->num_info : bswap_32(sinfo->num_info); |
3080 | 422 | if (num_records == 0) { |
3081 | 6 | pr_debug("%s section has incorrect num_records in .BTF.ext\n", |
3082 | 6 | ext_sec->desc); |
3083 | 6 | return -EINVAL; |
3084 | 6 | } |
3085 | | |
3086 | 416 | total_record_size = sec_hdrlen + (__u64)num_records * record_size; |
3087 | 416 | if (info_left < total_record_size) { |
3088 | 109 | pr_debug("%s section has incorrect num_records in .BTF.ext\n", |
3089 | 109 | ext_sec->desc); |
3090 | 109 | return -EINVAL; |
3091 | 109 | } |
3092 | | |
3093 | 307 | info_left -= total_record_size; |
3094 | 307 | sinfo = (void *)sinfo + total_record_size; |
3095 | 307 | sec_cnt++; |
3096 | 307 | } |
3097 | | |
3098 | 192 | ext_info = ext_sec->ext_info; |
3099 | 192 | ext_info->len = ext_sec->len - sizeof(__u32); |
3100 | 192 | ext_info->rec_size = record_size; |
3101 | 192 | ext_info->info = info + sizeof(__u32); |
3102 | 192 | ext_info->sec_cnt = sec_cnt; |
3103 | | |
3104 | 192 | return 0; |
3105 | 310 | } |
3106 | | |
3107 | | /* Parse all info secs in the BTF.ext info data */ |
3108 | | static int btf_ext_parse_info(struct btf_ext *btf_ext, bool is_native) |
3109 | 357 | { |
3110 | 357 | struct btf_ext_sec_info_param func_info = { |
3111 | 357 | .off = btf_ext->hdr->func_info_off, |
3112 | 357 | .len = btf_ext->hdr->func_info_len, |
3113 | 357 | .min_rec_size = sizeof(struct bpf_func_info_min), |
3114 | 357 | .ext_info = &btf_ext->func_info, |
3115 | 357 | .desc = "func_info" |
3116 | 357 | }; |
3117 | 357 | struct btf_ext_sec_info_param line_info = { |
3118 | 357 | .off = btf_ext->hdr->line_info_off, |
3119 | 357 | .len = btf_ext->hdr->line_info_len, |
3120 | 357 | .min_rec_size = sizeof(struct bpf_line_info_min), |
3121 | 357 | .ext_info = &btf_ext->line_info, |
3122 | 357 | .desc = "line_info", |
3123 | 357 | }; |
3124 | 357 | struct btf_ext_sec_info_param core_relo = { |
3125 | 357 | .min_rec_size = sizeof(struct bpf_core_relo), |
3126 | 357 | .ext_info = &btf_ext->core_relo_info, |
3127 | 357 | .desc = "core_relo", |
3128 | 357 | }; |
3129 | 357 | int err; |
3130 | | |
3131 | 357 | err = btf_ext_parse_sec_info(btf_ext, &func_info, is_native); |
3132 | 357 | if (err) |
3133 | 136 | return err; |
3134 | | |
3135 | 221 | err = btf_ext_parse_sec_info(btf_ext, &line_info, is_native); |
3136 | 221 | if (err) |
3137 | 76 | return err; |
3138 | | |
3139 | 145 | if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) |
3140 | 7 | return 0; /* skip core relos parsing */ |
3141 | | |
3142 | 138 | core_relo.off = btf_ext->hdr->core_relo_off; |
3143 | 138 | core_relo.len = btf_ext->hdr->core_relo_len; |
3144 | 138 | err = btf_ext_parse_sec_info(btf_ext, &core_relo, is_native); |
3145 | 138 | if (err) |
3146 | 21 | return err; |
3147 | | |
3148 | 117 | return 0; |
3149 | 138 | } |
3150 | | |
3151 | | /* Swap byte-order of a BTF.ext header, whatever its current endianness */ |
3152 | | static void btf_ext_bswap_hdr(struct btf_ext_header *h) |
3153 | 50 | { |
3154 | 50 | bool is_native = h->magic == BTF_MAGIC; |
3155 | 50 | __u32 hdr_len; |
3156 | | |
3157 | 50 | hdr_len = is_native ? h->hdr_len : bswap_32(h->hdr_len); |
3158 | | |
3159 | 50 | h->magic = bswap_16(h->magic); |
3160 | 50 | h->hdr_len = bswap_32(h->hdr_len); |
3161 | 50 | h->func_info_off = bswap_32(h->func_info_off); |
3162 | 50 | h->func_info_len = bswap_32(h->func_info_len); |
3163 | 50 | h->line_info_off = bswap_32(h->line_info_off); |
3164 | 50 | h->line_info_len = bswap_32(h->line_info_len); |
3165 | | |
3166 | 50 | if (hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) |
3167 | 16 | return; |
3168 | | |
3169 | 34 | h->core_relo_off = bswap_32(h->core_relo_off); |
3170 | 34 | h->core_relo_len = bswap_32(h->core_relo_len); |
3171 | 34 | } |
3172 | | |
3173 | | /* Swap byte-order of generic info subsection */ |
3174 | | static void btf_ext_bswap_info_sec(void *info, __u32 len, bool is_native, |
3175 | | info_rec_bswap_fn bswap_fn) |
3176 | 18 | { |
3177 | 18 | struct btf_ext_info_sec *sec; |
3178 | 18 | __u32 info_left, rec_size, *rs; |
3179 | | |
3180 | 18 | if (len == 0) |
3181 | 14 | return; |
3182 | | |
3183 | 4 | rs = info; /* info record size */ |
3184 | 4 | rec_size = is_native ? *rs : bswap_32(*rs); |
3185 | 4 | *rs = bswap_32(*rs); |
3186 | | |
3187 | 4 | sec = info + sizeof(__u32); /* info sec #1 */ |
3188 | 4 | info_left = len - sizeof(__u32); |
3189 | 14 | while (info_left) { |
3190 | 10 | unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec); |
3191 | 10 | __u32 i, num_recs; |
3192 | 10 | void *p; |
3193 | | |
3194 | 10 | num_recs = is_native ? sec->num_info : bswap_32(sec->num_info); |
3195 | 10 | sec->sec_name_off = bswap_32(sec->sec_name_off); |
3196 | 10 | sec->num_info = bswap_32(sec->num_info); |
3197 | 10 | p = sec->data; /* info rec #1 */ |
3198 | 44 | for (i = 0; i < num_recs; i++, p += rec_size) |
3199 | 34 | bswap_fn(p); |
3200 | 10 | sec = p; |
3201 | 10 | info_left -= sec_hdrlen + (__u64)rec_size * num_recs; |
3202 | 10 | } |
3203 | 4 | } |
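| |
| | /* Editor's illustration: the info_rec_bswap_fn callbacks passed to |
| | * btf_ext_bswap_info_sec() above are plain field-by-field swaps. A minimal |
| | * sketch for func_info records (the real bpf_func_info_bswap() and friends |
| | * are defined elsewhere in libbpf): |
| | * |
| | * static void example_func_info_bswap(struct bpf_func_info_min *rec) |
| | * { |
| | * rec->insn_off = bswap_32(rec->insn_off); |
| | * rec->type_id = bswap_32(rec->type_id); |
| | * } |
| | */ |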
3204 | | |
3205 | | /* |
3206 | | * Swap byte-order of all info data in a BTF.ext section |
3207 | | * - requires BTF.ext hdr in native endianness |
3208 | | */ |
3209 | | static void btf_ext_bswap_info(struct btf_ext *btf_ext, void *data) |
3210 | 8 | { |
3211 | 8 | const bool is_native = btf_ext->swapped_endian; |
3212 | 8 | const struct btf_ext_header *h = data; |
3213 | 8 | void *info; |
3214 | | |
3215 | | /* Swap func_info subsection byte-order */ |
3216 | 8 | info = data + h->hdr_len + h->func_info_off; |
3217 | 8 | btf_ext_bswap_info_sec(info, h->func_info_len, is_native, |
3218 | 8 | (info_rec_bswap_fn)bpf_func_info_bswap); |
3219 | | |
3220 | | /* Swap line_info subsection byte-order */ |
3221 | 8 | info = data + h->hdr_len + h->line_info_off; |
3222 | 8 | btf_ext_bswap_info_sec(info, h->line_info_len, is_native, |
3223 | 8 | (info_rec_bswap_fn)bpf_line_info_bswap); |
3224 | | |
3225 | | /* Swap core_relo subsection byte-order (if present) */ |
3226 | 8 | if (h->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) |
3227 | 6 | return; |
3228 | | |
3229 | 2 | info = data + h->hdr_len + h->core_relo_off; |
3230 | 2 | btf_ext_bswap_info_sec(info, h->core_relo_len, is_native, |
3231 | 2 | (info_rec_bswap_fn)bpf_core_relo_bswap); |
3232 | 2 | } |
3233 | | |
3234 | | /* Parse hdr data and info sections: check and convert to native endianness */ |
3235 | | static int btf_ext_parse(struct btf_ext *btf_ext) |
3236 | 502 | { |
3237 | 502 | __u32 hdr_len, data_size = btf_ext->data_size; |
3238 | 502 | struct btf_ext_header *hdr = btf_ext->hdr; |
3239 | 502 | bool swapped_endian = false; |
3240 | 502 | int err; |
3241 | | |
3242 | 502 | if (data_size < offsetofend(struct btf_ext_header, hdr_len)) { |
3243 | 11 | pr_debug("BTF.ext header too short\n"); |
3244 | 11 | return -EINVAL; |
3245 | 11 | } |
3246 | | |
3247 | 491 | hdr_len = hdr->hdr_len; |
3248 | 491 | if (hdr->magic == bswap_16(BTF_MAGIC)) { |
3249 | 53 | swapped_endian = true; |
3250 | 53 | hdr_len = bswap_32(hdr_len); |
3251 | 438 | } else if (hdr->magic != BTF_MAGIC) { |
3252 | 56 | pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic); |
3253 | 56 | return -EINVAL; |
3254 | 56 | } |
3255 | | |
3256 | | /* Ensure known version of structs, current BTF_VERSION == 1 */ |
3257 | 435 | if (hdr->version != 1) { |
3258 | 13 | pr_debug("Unsupported BTF.ext version:%u\n", hdr->version); |
3259 | 13 | return -ENOTSUP; |
3260 | 13 | } |
3261 | | |
3262 | 422 | if (hdr->flags) { |
3263 | 7 | pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags); |
3264 | 7 | return -ENOTSUP; |
3265 | 7 | } |
3266 | | |
3267 | 415 | if (data_size < hdr_len) { |
3268 | 47 | pr_debug("BTF.ext header not found\n"); |
3269 | 47 | return -EINVAL; |
3270 | 368 | } else if (data_size == hdr_len) { |
3271 | 6 | pr_debug("BTF.ext has no data\n"); |
3272 | 6 | return -EINVAL; |
3273 | 6 | } |
3274 | | |
3275 | | /* Verify mandatory hdr info details present */ |
3276 | 362 | if (hdr_len < offsetofend(struct btf_ext_header, line_info_len)) { |
3277 | 5 | pr_warn("BTF.ext header missing func_info, line_info\n"); |
3278 | 5 | return -EINVAL; |
3279 | 5 | } |
3280 | | |
3281 | | /* Keep hdr native byte-order in memory for introspection */ |
3282 | 357 | if (swapped_endian) |
3283 | 50 | btf_ext_bswap_hdr(btf_ext->hdr); |
3284 | | |
3285 | | /* Validate info subsections and cache key metadata */ |
3286 | 357 | err = btf_ext_parse_info(btf_ext, !swapped_endian); |
3287 | 357 | if (err) |
3288 | 233 | return err; |
3289 | | |
3290 | | /* Keep infos native byte-order in memory for introspection */ |
3291 | 124 | if (swapped_endian) |
3292 | 8 | btf_ext_bswap_info(btf_ext, btf_ext->data); |
3293 | | |
3294 | | /* |
3295 | | * Set btf_ext->swapped_endian only after all header and info data has |
3296 | | * been swapped, helping bswap functions determine if their data are |
3297 | | * in native byte-order when called. |
3298 | | */ |
3299 | 124 | btf_ext->swapped_endian = swapped_endian; |
3300 | 124 | return 0; |
3301 | 357 | } |
3302 | | |
3303 | | void btf_ext__free(struct btf_ext *btf_ext) |
3304 | 10.6k | { |
3305 | 10.6k | if (IS_ERR_OR_NULL(btf_ext)) |
3306 | 10.1k | return; |
3307 | 502 | free(btf_ext->func_info.sec_idxs); |
3308 | 502 | free(btf_ext->line_info.sec_idxs); |
3309 | 502 | free(btf_ext->core_relo_info.sec_idxs); |
3310 | 502 | free(btf_ext->data); |
3311 | 502 | free(btf_ext->data_swapped); |
3312 | 502 | free(btf_ext); |
3313 | 502 | } |
3314 | | |
3315 | | struct btf_ext *btf_ext__new(const __u8 *data, __u32 size) |
3316 | 502 | { |
3317 | 502 | struct btf_ext *btf_ext; |
3318 | 502 | int err; |
3319 | | |
3320 | 502 | btf_ext = calloc(1, sizeof(struct btf_ext)); |
3321 | 502 | if (!btf_ext) |
3322 | 0 | return libbpf_err_ptr(-ENOMEM); |
3323 | | |
3324 | 502 | btf_ext->data_size = size; |
3325 | 502 | btf_ext->data = malloc(size); |
3326 | 502 | if (!btf_ext->data) { |
3327 | 0 | err = -ENOMEM; |
3328 | 0 | goto done; |
3329 | 0 | } |
3330 | 502 | memcpy(btf_ext->data, data, size); |
3331 | | |
3332 | 502 | err = btf_ext_parse(btf_ext); |
3333 | | |
3334 | 502 | done: |
3335 | 502 | if (err) { |
3336 | 378 | btf_ext__free(btf_ext); |
3337 | 378 | return libbpf_err_ptr(err); |
3338 | 378 | } |
3339 | | |
3340 | 124 | return btf_ext; |
3341 | 502 | } |
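| |
| | /* Editor's illustration: typical btf_ext__new() usage, assuming the raw |
| | * .BTF.ext section bytes were already read into hypothetical `data`/`size` |
| | * variables. A sketch, not part of btf.c: |
| | * |
| | * struct btf_ext *ext = btf_ext__new(data, size); |
| | * |
| | * if (libbpf_get_error(ext)) // parsing or validation failed |
| | * return -EINVAL; |
| | * ... use ext, e.g. alongside the corresponding struct btf ... |
| | * btf_ext__free(ext); |
| | */ |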
3342 | | |
3343 | | static void *btf_ext_raw_data(const struct btf_ext *btf_ext_ro, bool swap_endian) |
3344 | 0 | { |
3345 | 0 | struct btf_ext *btf_ext = (struct btf_ext *)btf_ext_ro; |
3346 | 0 | const __u32 data_sz = btf_ext->data_size; |
3347 | 0 | void *data; |
3348 | | |
3349 | | /* Return native data (always present) or swapped data if present */ |
3350 | 0 | if (!swap_endian) |
3351 | 0 | return btf_ext->data; |
3352 | 0 | else if (btf_ext->data_swapped) |
3353 | 0 | return btf_ext->data_swapped; |
3354 | | |
3355 | | /* Recreate missing swapped data, then cache and return */ |
3356 | 0 | data = calloc(1, data_sz); |
3357 | 0 | if (!data) |
3358 | 0 | return NULL; |
3359 | 0 | memcpy(data, btf_ext->data, data_sz); |
3360 | |
3361 | 0 | btf_ext_bswap_info(btf_ext, data); |
3362 | 0 | btf_ext_bswap_hdr(data); |
3363 | 0 | btf_ext->data_swapped = data; |
3364 | 0 | return data; |
3365 | 0 | } |
3366 | | |
3367 | | const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size) |
3368 | 0 | { |
3369 | 0 | void *data; |
3370 | |
3371 | 0 | data = btf_ext_raw_data(btf_ext, btf_ext->swapped_endian); |
3372 | 0 | if (!data) |
3373 | 0 | return errno = ENOMEM, NULL; |
3374 | | |
3375 | 0 | *size = btf_ext->data_size; |
3376 | 0 | return data; |
3377 | 0 | } |
3378 | | |
3379 | | __attribute__((alias("btf_ext__raw_data"))) |
3380 | | const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size); |
3381 | | |
3382 | | enum btf_endianness btf_ext__endianness(const struct btf_ext *btf_ext) |
3383 | 0 | { |
3384 | 0 | if (is_host_big_endian()) |
3385 | 0 | return btf_ext->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN; |
3386 | 0 | else |
3387 | 0 | return btf_ext->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN; |
3388 | 0 | } |
3389 | | |
3390 | | int btf_ext__set_endianness(struct btf_ext *btf_ext, enum btf_endianness endian) |
3391 | 0 | { |
3392 | 0 | if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN) |
3393 | 0 | return libbpf_err(-EINVAL); |
3394 | | |
3395 | 0 | btf_ext->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN); |
3396 | |
3397 | 0 | if (!btf_ext->swapped_endian) { |
3398 | 0 | free(btf_ext->data_swapped); |
3399 | 0 | btf_ext->data_swapped = NULL; |
3400 | 0 | } |
3401 | 0 | return 0; |
3402 | 0 | } |
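| |
| | /* Editor's illustration: emitting a big-endian .BTF.ext blob with the two |
| | * accessors above (a sketch; error handling abbreviated): |
| | * |
| | * __u32 sz; |
| | * const void *raw; |
| | * |
| | * if (btf_ext__set_endianness(ext, BTF_BIG_ENDIAN)) |
| | * return -EINVAL; |
| | * raw = btf_ext__raw_data(ext, &sz); // swapped copy built lazily, then cached |
| | */ |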
3403 | | |
3404 | | struct btf_dedup; |
3405 | | |
3406 | | static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts); |
3407 | | static void btf_dedup_free(struct btf_dedup *d); |
3408 | | static int btf_dedup_prep(struct btf_dedup *d); |
3409 | | static int btf_dedup_strings(struct btf_dedup *d); |
3410 | | static int btf_dedup_prim_types(struct btf_dedup *d); |
3411 | | static int btf_dedup_struct_types(struct btf_dedup *d); |
3412 | | static int btf_dedup_ref_types(struct btf_dedup *d); |
3413 | | static int btf_dedup_resolve_fwds(struct btf_dedup *d); |
3414 | | static int btf_dedup_compact_types(struct btf_dedup *d); |
3415 | | static int btf_dedup_remap_types(struct btf_dedup *d); |
3416 | | |
3417 | | /* |
3418 | | * Deduplicate BTF types and strings. |
3419 | | * |
3420 | | * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF |
3421 | | * section with all BTF type descriptors and string data. It overwrites that |
3422 | | * memory in-place with deduplicated types and strings without any loss of |
3423 | | * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section |
3424 | | * is provided, all the strings referenced from .BTF.ext section are honored |
3425 | | * and updated to point to the right offsets after deduplication. |
3426 | | * |
3427 | | * If function returns with error, type/string data might be garbled and should |
3428 | | * be discarded. |
3429 | | * |
3430 | | * A more verbose and detailed description of both the problem btf_dedup is |
3431 | | * solving and its solution can be found at: |
3432 | | * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html |
3433 | | * |
3434 | | * Problem description and justification |
3435 | | * ===================================== |
3436 | | * |
3437 | | * BTF type information is typically emitted either as a result of conversion |
3438 | | * from DWARF to BTF or directly by compiler. In both cases, each compilation |
3439 | | * unit contains information about a subset of all the types that are used |
3440 | | * in an application. These subsets are frequently overlapping and contain a lot |
3441 | | * of duplicated information when later concatenated together into a single |
3442 | | * binary. This algorithm ensures that each unique type is represented by a |
3443 | | * single BTF type descriptor, greatly reducing the resulting size of BTF data. |
3444 | | * |
3445 | | * Compilation unit isolation and subsequent duplication of data are not the only |
3446 | | * problems. The same type hierarchy (e.g., a struct and all the types that struct |
3447 | | * references) in different compilation units can be represented in BTF to |
3448 | | * various degrees of completeness (or, rather, incompleteness) due to |
3449 | | * struct/union forward declarations. |
3450 | | * |
3451 | | * Let's take a look at an example that we'll use to better understand the |
3452 | | * problem (and solution). Suppose we have two compilation units, each using the |
3453 | | * same `struct S`, but each having incomplete type information about the |
3454 | | * struct's fields: |
3455 | | * |
3456 | | * // CU #1: |
3457 | | * struct S; |
3458 | | * struct A { |
3459 | | * int a; |
3460 | | * struct A* self; |
3461 | | * struct S* parent; |
3462 | | * }; |
3463 | | * struct B; |
3464 | | * struct S { |
3465 | | * struct A* a_ptr; |
3466 | | * struct B* b_ptr; |
3467 | | * }; |
3468 | | * |
3469 | | * // CU #2: |
3470 | | * struct S; |
3471 | | * struct A; |
3472 | | * struct B { |
3473 | | * int b; |
3474 | | * struct B* self; |
3475 | | * struct S* parent; |
3476 | | * }; |
3477 | | * struct S { |
3478 | | * struct A* a_ptr; |
3479 | | * struct B* b_ptr; |
3480 | | * }; |
3481 | | * |
3482 | | * In case of CU #1, BTF data will know only that `struct B` exists (but no |
3483 | | * more), but will know the complete type information about `struct A`. While |
3484 | | * for CU #2, it will know full type information about `struct B`, but will |
3485 | | * only know about the forward declaration of `struct A` (in BTF terms, it will |
3486 | | * have a `BTF_KIND_FWD` type descriptor with name `A`). |
3487 | | * |
3488 | | * This compilation unit isolation means that it's possible that there is no |
3489 | | * single CU with complete type information describing structs `S`, `A`, and |
3490 | | * `B`. Also, we might get tons of duplicated and redundant type information. |
3491 | | * |
3492 | | * Additional complication we need to keep in mind comes from the fact that |
3493 | | * types, in general, can form graphs containing cycles, not just DAGs. |
3494 | | * |
3495 | | * While the algorithm does deduplication, it also merges and resolves type |
3496 | | * information (unless disabled through `struct btf_dedup_opts`), whenever possible. |
3497 | | * E.g., in the example above with two compilation units having partial type |
3498 | | * information for structs `A` and `B`, the output of algorithm will emit |
3499 | | * a single copy of each BTF type that describes structs `A`, `B`, and `S` |
3500 | | * (as well as type information for `int` and pointers), as if they were defined |
3501 | | * in a single compilation unit as: |
3502 | | * |
3503 | | * struct A { |
3504 | | * int a; |
3505 | | * struct A* self; |
3506 | | * struct S* parent; |
3507 | | * }; |
3508 | | * struct B { |
3509 | | * int b; |
3510 | | * struct B* self; |
3511 | | * struct S* parent; |
3512 | | * }; |
3513 | | * struct S { |
3514 | | * struct A* a_ptr; |
3515 | | * struct B* b_ptr; |
3516 | | * }; |
3517 | | * |
3518 | | * Algorithm summary |
3519 | | * ================= |
3520 | | * |
3521 | | * The algorithm completes its work in 7 separate passes: |
3522 | | * |
3523 | | * 1. Strings deduplication. |
3524 | | * 2. Primitive types deduplication (int, enum, fwd). |
3525 | | * 3. Struct/union types deduplication. |
3526 | | * 4. Resolve unambiguous forward declarations. |
3527 | | * 5. Reference types deduplication (pointers, typedefs, arrays, funcs, func |
3528 | | * protos, and const/volatile/restrict modifiers). |
3529 | | * 6. Types compaction. |
3530 | | * 7. Types remapping. |
3531 | | * |
3532 | | * The algorithm determines a canonical type descriptor, which is a single |
3533 | | * representative type for each truly unique type. This canonical type is the |
3534 | | * one that will go into final deduplicated BTF type information. For |
3535 | | * struct/unions, it is also the type that algorithm will merge additional type |
3536 | | * information into (while resolving FWDs), as it discovers it from data in |
3537 | | * other CUs. Each input BTF type eventually gets either mapped to itself, if |
3538 | | * that type is canonical, or to some other type, if that type is equivalent |
3539 | | * and was chosen as canonical representative. This mapping is stored in |
3540 | | * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that |
3541 | | * FWD type got resolved to. |
3542 | | * |
3543 | | * To facilitate fast discovery of canonical types, we also maintain a canonical |
3544 | | * index (`btf_dedup->dedup_table`), which maps a type descriptor's signature hash |
3545 | | * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types |
3546 | | * that match that signature. With a sufficiently good choice of type signature |
3547 | | * hashing function, we can limit the number of canonical types for each unique |
3548 | | * type signature to a very small number, allowing us to find the canonical type |
3549 | | * for any duplicated type very quickly. |
3550 | | * |
3551 | | * Struct/union deduplication is the most critical part, and the algorithm for |
3552 | | * deduplicating structs/unions is described in greater detail in the comments |
3553 | | * for the `btf_dedup_is_equiv` function. |
3554 | | */ |
3555 | | int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts) |
3556 | 0 | { |
3557 | 0 | struct btf_dedup *d; |
3558 | 0 | int err; |
3559 | |
3560 | 0 | if (!OPTS_VALID(opts, btf_dedup_opts)) |
3561 | 0 | return libbpf_err(-EINVAL); |
3562 | | |
3563 | 0 | d = btf_dedup_new(btf, opts); |
3564 | 0 | if (IS_ERR(d)) { |
3565 | 0 | pr_debug("btf_dedup_new failed: %ld\n", PTR_ERR(d)); |
3566 | 0 | return libbpf_err(-EINVAL); |
3567 | 0 | } |
3568 | | |
3569 | 0 | if (btf_ensure_modifiable(btf)) { |
3570 | 0 | err = -ENOMEM; |
3571 | 0 | goto done; |
3572 | 0 | } |
3573 | | |
3574 | 0 | err = btf_dedup_prep(d); |
3575 | 0 | if (err) { |
3576 | 0 | pr_debug("btf_dedup_prep failed: %s\n", errstr(err)); |
3577 | 0 | goto done; |
3578 | 0 | } |
3579 | 0 | err = btf_dedup_strings(d); |
3580 | 0 | if (err < 0) { |
3581 | 0 | pr_debug("btf_dedup_strings failed: %s\n", errstr(err)); |
3582 | 0 | goto done; |
3583 | 0 | } |
3584 | 0 | err = btf_dedup_prim_types(d); |
3585 | 0 | if (err < 0) { |
3586 | 0 | pr_debug("btf_dedup_prim_types failed: %s\n", errstr(err)); |
3587 | 0 | goto done; |
3588 | 0 | } |
3589 | 0 | err = btf_dedup_struct_types(d); |
3590 | 0 | if (err < 0) { |
3591 | 0 | pr_debug("btf_dedup_struct_types failed: %s\n", errstr(err)); |
3592 | 0 | goto done; |
3593 | 0 | } |
3594 | 0 | err = btf_dedup_resolve_fwds(d); |
3595 | 0 | if (err < 0) { |
3596 | 0 | pr_debug("btf_dedup_resolve_fwds failed: %s\n", errstr(err)); |
3597 | 0 | goto done; |
3598 | 0 | } |
3599 | 0 | err = btf_dedup_ref_types(d); |
3600 | 0 | if (err < 0) { |
3601 | 0 | pr_debug("btf_dedup_ref_types failed: %s\n", errstr(err)); |
3602 | 0 | goto done; |
3603 | 0 | } |
3604 | 0 | err = btf_dedup_compact_types(d); |
3605 | 0 | if (err < 0) { |
3606 | 0 | pr_debug("btf_dedup_compact_types failed: %s\n", errstr(err)); |
3607 | 0 | goto done; |
3608 | 0 | } |
3609 | 0 | err = btf_dedup_remap_types(d); |
3610 | 0 | if (err < 0) { |
3611 | 0 | pr_debug("btf_dedup_remap_types failed: %s\n", errstr(err)); |
3612 | 0 | goto done; |
3613 | 0 | } |
3614 | | |
3615 | 0 | done: |
3616 | 0 | btf_dedup_free(d); |
3617 | 0 | return libbpf_err(err); |
3618 | 0 | } |
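| |
| | /* Editor's illustration: a typical btf__dedup() call site, assuming `btf` |
| | * and an optional `btf_ext` were parsed earlier (a sketch, not part of |
| | * btf.c): |
| | * |
| | * LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext); |
| | * |
| | * if (btf__dedup(btf, &opts)) // on error, discard type/string data |
| | * return -errno; |
| | */ |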
3619 | | |
3620 | 0 | #define BTF_UNPROCESSED_ID ((__u32)-1) |
3621 | 0 | #define BTF_IN_PROGRESS_ID ((__u32)-2) |
3622 | | |
3623 | | struct btf_dedup { |
3624 | | /* .BTF section to be deduped in-place */ |
3625 | | struct btf *btf; |
3626 | | /* |
3627 | | * Optional .BTF.ext section. When provided, any strings referenced |
3628 | | * from it will be taken into account when deduping strings |
3629 | | */ |
3630 | | struct btf_ext *btf_ext; |
3631 | | /* |
3632 | | * This is a map from any type's signature hash to a list of possible |
3633 | | * canonical representative type candidates. Hash collisions are |
3634 | | * ignored, so even types of various kinds can share the same list of |
3635 | | * candidates, which is fine because we rely on subsequent |
3636 | | * btf_xxx_equal() checks to authoritatively verify type equality. |
3637 | | */ |
3638 | | struct hashmap *dedup_table; |
3639 | | /* Canonical types map */ |
3640 | | __u32 *map; |
3641 | | /* Hypothetical mapping, used during type graph equivalence checks */ |
3642 | | __u32 *hypot_map; |
3643 | | __u32 *hypot_list; |
3644 | | size_t hypot_cnt; |
3645 | | size_t hypot_cap; |
3646 | | /* Whether hypothetical mapping, if successful, would need to adjust |
3647 | | * already canonicalized types (due to a new forward declaration to |
3648 | | * concrete type resolution). In such case, during split BTF dedup |
3649 | | * candidate type would still be considered as different, because base |
3650 | | * BTF is considered to be immutable. |
3651 | | */ |
3652 | | bool hypot_adjust_canon; |
3653 | | /* Various options modifying the behavior of the algorithm */ |
3654 | | struct btf_dedup_opts opts; |
3655 | | /* temporary strings deduplication state */ |
3656 | | struct strset *strs_set; |
3657 | | }; |
3658 | | |
3659 | | static unsigned long hash_combine(unsigned long h, unsigned long value) |
3660 | 0 | { |
3661 | 0 | return h * 31 + value; |
3662 | 0 | } |
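| |
| | /* Editor's note: this is the classic "h * 31 + x" hash chain, so, e.g., |
| | * hash_combine(hash_combine(0, a), b) == a * 31 + b. Collisions are |
| | * tolerated by design (see the dedup_table comment above). |
| | */ |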
3663 | | |
3664 | | #define for_each_dedup_cand(d, node, hash) \ |
3665 | 0 | hashmap__for_each_key_entry(d->dedup_table, node, hash) |
3666 | | |
3667 | | static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id) |
3668 | 0 | { |
3669 | 0 | return hashmap__append(d->dedup_table, hash, type_id); |
3670 | 0 | } |
3671 | | |
3672 | | static int btf_dedup_hypot_map_add(struct btf_dedup *d, |
3673 | | __u32 from_id, __u32 to_id) |
3674 | 0 | { |
3675 | 0 | if (d->hypot_cnt == d->hypot_cap) { |
3676 | 0 | __u32 *new_list; |
3677 | |
3678 | 0 | d->hypot_cap += max((size_t)16, d->hypot_cap / 2); |
3679 | 0 | new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32)); |
3680 | 0 | if (!new_list) |
3681 | 0 | return -ENOMEM; |
3682 | 0 | d->hypot_list = new_list; |
3683 | 0 | } |
3684 | 0 | d->hypot_list[d->hypot_cnt++] = from_id; |
3685 | 0 | d->hypot_map[from_id] = to_id; |
3686 | 0 | return 0; |
3687 | 0 | } |
3688 | | |
3689 | | static void btf_dedup_clear_hypot_map(struct btf_dedup *d) |
3690 | 0 | { |
3691 | 0 | int i; |
3692 | |
3693 | 0 | for (i = 0; i < d->hypot_cnt; i++) |
3694 | 0 | d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID; |
3695 | 0 | d->hypot_cnt = 0; |
3696 | 0 | d->hypot_adjust_canon = false; |
3697 | 0 | } |
3698 | | |
3699 | | static void btf_dedup_free(struct btf_dedup *d) |
3700 | 0 | { |
3701 | 0 | hashmap__free(d->dedup_table); |
3702 | 0 | d->dedup_table = NULL; |
3703 | |
3704 | 0 | free(d->map); |
3705 | 0 | d->map = NULL; |
3706 | |
3707 | 0 | free(d->hypot_map); |
3708 | 0 | d->hypot_map = NULL; |
3709 | |
3710 | 0 | free(d->hypot_list); |
3711 | 0 | d->hypot_list = NULL; |
3712 | |
3713 | 0 | free(d); |
3714 | 0 | } |
3715 | | |
3716 | | static size_t btf_dedup_identity_hash_fn(long key, void *ctx) |
3717 | 0 | { |
3718 | 0 | return key; |
3719 | 0 | } |
3720 | | |
3721 | | static size_t btf_dedup_collision_hash_fn(long key, void *ctx) |
3722 | 0 | { |
3723 | 0 | return 0; |
3724 | 0 | } |
3725 | | |
3726 | | static bool btf_dedup_equal_fn(long k1, long k2, void *ctx) |
3727 | 0 | { |
3728 | 0 | return k1 == k2; |
3729 | 0 | } |
3730 | | |
3731 | | static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts) |
3732 | 0 | { |
3733 | 0 | struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup)); |
3734 | 0 | hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn; |
3735 | 0 | int i, err = 0, type_cnt; |
3736 | |
3737 | 0 | if (!d) |
3738 | 0 | return ERR_PTR(-ENOMEM); |
3739 | | |
3740 | 0 | if (OPTS_GET(opts, force_collisions, false)) |
3741 | 0 | hash_fn = btf_dedup_collision_hash_fn; |
3742 | |
3743 | 0 | d->btf = btf; |
3744 | 0 | d->btf_ext = OPTS_GET(opts, btf_ext, NULL); |
3745 | |
3746 | 0 | d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL); |
3747 | 0 | if (IS_ERR(d->dedup_table)) { |
3748 | 0 | err = PTR_ERR(d->dedup_table); |
3749 | 0 | d->dedup_table = NULL; |
3750 | 0 | goto done; |
3751 | 0 | } |
3752 | | |
3753 | 0 | type_cnt = btf__type_cnt(btf); |
3754 | 0 | d->map = malloc(sizeof(__u32) * type_cnt); |
3755 | 0 | if (!d->map) { |
3756 | 0 | err = -ENOMEM; |
3757 | 0 | goto done; |
3758 | 0 | } |
3759 | | /* special BTF "void" type is made canonical immediately */ |
3760 | 0 | d->map[0] = 0; |
3761 | 0 | for (i = 1; i < type_cnt; i++) { |
3762 | 0 | struct btf_type *t = btf_type_by_id(d->btf, i); |
3763 | | |
3764 | | /* VAR and DATASEC are never deduped and are self-canonical */ |
3765 | 0 | if (btf_is_var(t) || btf_is_datasec(t)) |
3766 | 0 | d->map[i] = i; |
3767 | 0 | else |
3768 | 0 | d->map[i] = BTF_UNPROCESSED_ID; |
3769 | 0 | } |
3770 | |
3771 | 0 | d->hypot_map = malloc(sizeof(__u32) * type_cnt); |
3772 | 0 | if (!d->hypot_map) { |
3773 | 0 | err = -ENOMEM; |
3774 | 0 | goto done; |
3775 | 0 | } |
3776 | 0 | for (i = 0; i < type_cnt; i++) |
3777 | 0 | d->hypot_map[i] = BTF_UNPROCESSED_ID; |
3778 | |
3779 | 0 | done: |
3780 | 0 | if (err) { |
3781 | 0 | btf_dedup_free(d); |
3782 | 0 | return ERR_PTR(err); |
3783 | 0 | } |
3784 | | |
3785 | 0 | return d; |
3786 | 0 | } |
3787 | | |
3788 | | /* |
3789 | | * Iterate over all possible places in .BTF and .BTF.ext that can reference |
3790 | | * a string and pass a pointer to it to the provided callback `fn`. |
3791 | | */ |
3792 | | static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx) |
3793 | 0 | { |
3794 | 0 | int i, r; |
3795 | |
3796 | 0 | for (i = 0; i < d->btf->nr_types; i++) { |
3797 | 0 | struct btf_field_iter it; |
3798 | 0 | struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i); |
3799 | 0 | __u32 *str_off; |
3800 | |
3801 | 0 | r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); |
3802 | 0 | if (r) |
3803 | 0 | return r; |
3804 | | |
3805 | 0 | while ((str_off = btf_field_iter_next(&it))) { |
3806 | 0 | r = fn(str_off, ctx); |
3807 | 0 | if (r) |
3808 | 0 | return r; |
3809 | 0 | } |
3810 | 0 | } |
3811 | | |
3812 | 0 | if (!d->btf_ext) |
3813 | 0 | return 0; |
3814 | | |
3815 | 0 | r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx); |
3816 | 0 | if (r) |
3817 | 0 | return r; |
3818 | | |
3819 | 0 | return 0; |
3820 | 0 | } |
3821 | | |
3822 | | static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx) |
3823 | 0 | { |
3824 | 0 | struct btf_dedup *d = ctx; |
3825 | 0 | __u32 str_off = *str_off_ptr; |
3826 | 0 | const char *s; |
3827 | 0 | int off, err; |
3828 | | |
3829 | | /* don't touch empty string or string in main BTF */ |
3830 | 0 | if (str_off == 0 || str_off < d->btf->start_str_off) |
3831 | 0 | return 0; |
3832 | | |
3833 | 0 | s = btf__str_by_offset(d->btf, str_off); |
3834 | 0 | if (d->btf->base_btf) { |
3835 | 0 | err = btf__find_str(d->btf->base_btf, s); |
3836 | 0 | if (err >= 0) { |
3837 | 0 | *str_off_ptr = err; |
3838 | 0 | return 0; |
3839 | 0 | } |
3840 | 0 | if (err != -ENOENT) |
3841 | 0 | return err; |
3842 | 0 | } |
3843 | | |
3844 | 0 | off = strset__add_str(d->strs_set, s); |
3845 | 0 | if (off < 0) |
3846 | 0 | return off; |
3847 | | |
3848 | 0 | *str_off_ptr = d->btf->start_str_off + off; |
3849 | 0 | return 0; |
3850 | 0 | } |
3851 | | |
3852 | | /* |
3853 | | * Dedup strings and filter out those that are not referenced from either the |
3854 | | * .BTF or .BTF.ext (if provided) sections. |
3855 | | * |
3856 | | * This is done by building an index of all strings in BTF's string section, |
3857 | | * then iterating over all entities that can reference strings (e.g., type |
3858 | | * names, struct field names, .BTF.ext line info, etc) and marking corresponding |
3859 | | * strings as used. After that, all used strings are deduped and compacted into |
3860 | | * a sequential blob of memory and new offsets are calculated. Then all the |
3861 | | * string references are iterated again and rewritten using the new offsets. |
3862 | | */ |
3863 | | static int btf_dedup_strings(struct btf_dedup *d) |
3864 | 0 | { |
3865 | 0 | int err; |
3866 | |
3867 | 0 | if (d->btf->strs_deduped) |
3868 | 0 | return 0; |
3869 | | |
3870 | 0 | d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0); |
3871 | 0 | if (IS_ERR(d->strs_set)) { |
3872 | 0 | err = PTR_ERR(d->strs_set); |
3873 | 0 | goto err_out; |
3874 | 0 | } |
3875 | | |
3876 | 0 | if (!d->btf->base_btf) { |
3877 | | /* insert empty string; we won't be looking it up during strings |
3878 | | * dedup, but it's good to have it for generic BTF string lookups |
3879 | | */ |
3880 | 0 | err = strset__add_str(d->strs_set, ""); |
3881 | 0 | if (err < 0) |
3882 | 0 | goto err_out; |
3883 | 0 | } |
3884 | | |
3885 | | /* remap string offsets */ |
3886 | 0 | err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d); |
3887 | 0 | if (err) |
3888 | 0 | goto err_out; |
3889 | | |
3890 | | /* replace BTF string data and hash with deduped ones */ |
3891 | 0 | strset__free(d->btf->strs_set); |
3892 | 0 | d->btf->hdr->str_len = strset__data_size(d->strs_set); |
3893 | 0 | d->btf->strs_set = d->strs_set; |
3894 | 0 | d->strs_set = NULL; |
3895 | 0 | d->btf->strs_deduped = true; |
3896 | 0 | return 0; |
3897 | | |
3898 | 0 | err_out: |
3899 | 0 | strset__free(d->strs_set); |
3900 | 0 | d->strs_set = NULL; |
3901 | |
|
3902 | 0 | return err; |
3903 | 0 | } |
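| |
| | /* Editor's illustration: the strset API used above, in isolation (a |
| | * sketch; see strset.h for the real declarations): |
| | * |
| | * struct strset *set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0); |
| | * int off1, off2; |
| | * |
| | * if (IS_ERR(set)) |
| | * return PTR_ERR(set); |
| | * off1 = strset__add_str(set, "foo"); |
| | * off2 = strset__add_str(set, "foo"); // deduped: off1 == off2 |
| | * strset__free(set); |
| | */ |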
3904 | | |
3905 | | static long btf_hash_common(struct btf_type *t) |
3906 | 0 | { |
3907 | 0 | long h; |
3908 | |
3909 | 0 | h = hash_combine(0, t->name_off); |
3910 | 0 | h = hash_combine(h, t->info); |
3911 | 0 | h = hash_combine(h, t->size); |
3912 | 0 | return h; |
3913 | 0 | } |
3914 | | |
3915 | | static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2) |
3916 | 0 | { |
3917 | 0 | return t1->name_off == t2->name_off && |
3918 | 0 | t1->info == t2->info && |
3919 | 0 | t1->size == t2->size; |
3920 | 0 | } |
3921 | | |
3922 | | /* Calculate type signature hash of INT or TAG. */ |
3923 | | static long btf_hash_int_decl_tag(struct btf_type *t) |
3924 | 0 | { |
3925 | 0 | __u32 info = *(__u32 *)(t + 1); |
3926 | 0 | long h; |
3927 | |
3928 | 0 | h = btf_hash_common(t); |
3929 | 0 | h = hash_combine(h, info); |
3930 | 0 | return h; |
3931 | 0 | } |
3932 | | |
3933 | | /* Check structural equality of two INTs or TAGs. */ |
3934 | | static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2) |
3935 | 0 | { |
3936 | 0 | __u32 info1, info2; |
3937 | |
3938 | 0 | if (!btf_equal_common(t1, t2)) |
3939 | 0 | return false; |
3940 | 0 | info1 = *(__u32 *)(t1 + 1); |
3941 | 0 | info2 = *(__u32 *)(t2 + 1); |
3942 | 0 | return info1 == info2; |
3943 | 0 | } |
3944 | | |
3945 | | /* Calculate type signature hash of ENUM/ENUM64. */ |
3946 | | static long btf_hash_enum(struct btf_type *t) |
3947 | 0 | { |
3948 | 0 | long h; |
3949 | | |
3950 | | /* don't hash vlen, enum members and size to support enum fwd resolving */ |
3951 | 0 | h = hash_combine(0, t->name_off); |
3952 | 0 | return h; |
3953 | 0 | } |
3954 | | |
3955 | | static bool btf_equal_enum_members(struct btf_type *t1, struct btf_type *t2) |
3956 | 0 | { |
3957 | 0 | const struct btf_enum *m1, *m2; |
3958 | 0 | __u16 vlen; |
3959 | 0 | int i; |
3960 | |
3961 | 0 | vlen = btf_vlen(t1); |
3962 | 0 | m1 = btf_enum(t1); |
3963 | 0 | m2 = btf_enum(t2); |
3964 | 0 | for (i = 0; i < vlen; i++) { |
3965 | 0 | if (m1->name_off != m2->name_off || m1->val != m2->val) |
3966 | 0 | return false; |
3967 | 0 | m1++; |
3968 | 0 | m2++; |
3969 | 0 | } |
3970 | 0 | return true; |
3971 | 0 | } |
3972 | | |
3973 | | static bool btf_equal_enum64_members(struct btf_type *t1, struct btf_type *t2) |
3974 | 0 | { |
3975 | 0 | const struct btf_enum64 *m1, *m2; |
3976 | 0 | __u16 vlen; |
3977 | 0 | int i; |
3978 | |
3979 | 0 | vlen = btf_vlen(t1); |
3980 | 0 | m1 = btf_enum64(t1); |
3981 | 0 | m2 = btf_enum64(t2); |
3982 | 0 | for (i = 0; i < vlen; i++) { |
3983 | 0 | if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 || |
3984 | 0 | m1->val_hi32 != m2->val_hi32) |
3985 | 0 | return false; |
3986 | 0 | m1++; |
3987 | 0 | m2++; |
3988 | 0 | } |
3989 | 0 | return true; |
3990 | 0 | } |
3991 | | |
3992 | | /* Check structural equality of two ENUMs or ENUM64s. */ |
3993 | | static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2) |
3994 | 0 | { |
3995 | 0 | if (!btf_equal_common(t1, t2)) |
3996 | 0 | return false; |
3997 | | |
3998 | | /* t1 & t2 kinds are identical because of btf_equal_common */ |
3999 | 0 | if (btf_kind(t1) == BTF_KIND_ENUM) |
4000 | 0 | return btf_equal_enum_members(t1, t2); |
4001 | 0 | else |
4002 | 0 | return btf_equal_enum64_members(t1, t2); |
4003 | 0 | } |
4004 | | |
4005 | | static inline bool btf_is_enum_fwd(struct btf_type *t) |
4006 | 0 | { |
4007 | 0 | return btf_is_any_enum(t) && btf_vlen(t) == 0; |
4008 | 0 | } |
4009 | | |
4010 | | static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2) |
4011 | 0 | { |
4012 | 0 | if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2)) |
4013 | 0 | return btf_equal_enum(t1, t2); |
4014 | | /* At this point either t1 or t2 or both are forward declarations, thus: |
4015 | | * - skip comparing vlen because it is zero for forward declarations; |
4016 | | * - skip comparing size to allow enum forward declarations |
4017 | | * to be compatible with enum64 full declarations; |
4018 | | * - skip comparing kind for the same reason. |
4019 | | */ |
4020 | 0 | return t1->name_off == t2->name_off && |
4021 | 0 | btf_is_any_enum(t1) && btf_is_any_enum(t2); |
4022 | 0 | } |
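| |
| | /* Editor's note: e.g., a forward-declared "enum E;" (vlen == 0) in one CU |
| | * and a full "enum E { A = 1 };" in another hash identically (name only, |
| | * see btf_hash_enum() above), so both land in the same candidate list and |
| | * btf_compat_enum() lets btf_dedup_prim_type() below resolve the fwd to the |
| | * full definition. |
| | */ |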
4023 | | |
4024 | | /* |
4025 | | * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs, |
4026 | | * as referenced type IDs equivalence is established separately during type |
4027 | | * graph equivalence check algorithm. |
4028 | | */ |
4029 | | static long btf_hash_struct(struct btf_type *t) |
4030 | 0 | { |
4031 | 0 | const struct btf_member *member = btf_members(t); |
4032 | 0 | __u32 vlen = btf_vlen(t); |
4033 | 0 | long h = btf_hash_common(t); |
4034 | 0 | int i; |
4035 | |
4036 | 0 | for (i = 0; i < vlen; i++) { |
4037 | 0 | h = hash_combine(h, member->name_off); |
4038 | 0 | h = hash_combine(h, member->offset); |
4039 | | /* no hashing of referenced type ID, it can be unresolved yet */ |
4040 | 0 | member++; |
4041 | 0 | } |
4042 | 0 | return h; |
4043 | 0 | } |
4044 | | |
4045 | | /* |
4046 | | * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced |
4047 | | * type IDs. This check is performed during type graph equivalence check and |
4048 | | * referenced types equivalence is checked separately. |
4049 | | */ |
4050 | | static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2) |
4051 | 0 | { |
4052 | 0 | const struct btf_member *m1, *m2; |
4053 | 0 | __u16 vlen; |
4054 | 0 | int i; |
4055 | |
4056 | 0 | if (!btf_equal_common(t1, t2)) |
4057 | 0 | return false; |
4058 | | |
4059 | 0 | vlen = btf_vlen(t1); |
4060 | 0 | m1 = btf_members(t1); |
4061 | 0 | m2 = btf_members(t2); |
4062 | 0 | for (i = 0; i < vlen; i++) { |
4063 | 0 | if (m1->name_off != m2->name_off || m1->offset != m2->offset) |
4064 | 0 | return false; |
4065 | 0 | m1++; |
4066 | 0 | m2++; |
4067 | 0 | } |
4068 | 0 | return true; |
4069 | 0 | } |
4070 | | |
4071 | | /* |
4072 | | * Calculate type signature hash of ARRAY, including referenced type IDs, |
4073 | | * under assumption that they were already resolved to canonical type IDs and |
4074 | | * are not going to change. |
4075 | | */ |
4076 | | static long btf_hash_array(struct btf_type *t) |
4077 | 0 | { |
4078 | 0 | const struct btf_array *info = btf_array(t); |
4079 | 0 | long h = btf_hash_common(t); |
4080 | |
4081 | 0 | h = hash_combine(h, info->type); |
4082 | 0 | h = hash_combine(h, info->index_type); |
4083 | 0 | h = hash_combine(h, info->nelems); |
4084 | 0 | return h; |
4085 | 0 | } |
4086 | | |
4087 | | /* |
4088 | | * Check exact equality of two ARRAYs, taking into account referenced |
4089 | | * type IDs, under assumption that they were already resolved to canonical |
4090 | | * type IDs and are not going to change. |
4091 | | * This function is called during reference types deduplication to compare |
4092 | | * ARRAY to potential canonical representative. |
4093 | | */ |
4094 | | static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2) |
4095 | 0 | { |
4096 | 0 | const struct btf_array *info1, *info2; |
4097 | |
4098 | 0 | if (!btf_equal_common(t1, t2)) |
4099 | 0 | return false; |
4100 | | |
4101 | 0 | info1 = btf_array(t1); |
4102 | 0 | info2 = btf_array(t2); |
4103 | 0 | return info1->type == info2->type && |
4104 | 0 | info1->index_type == info2->index_type && |
4105 | 0 | info1->nelems == info2->nelems; |
4106 | 0 | } |
4107 | | |
4108 | | /* |
4109 | | * Check structural compatibility of two ARRAYs, ignoring referenced type |
4110 | | * IDs. This check is performed during type graph equivalence check and |
4111 | | * referenced types equivalence is checked separately. |
4112 | | */ |
4113 | | static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2) |
4114 | 0 | { |
4115 | 0 | if (!btf_equal_common(t1, t2)) |
4116 | 0 | return false; |
4117 | | |
4118 | 0 | return btf_array(t1)->nelems == btf_array(t2)->nelems; |
4119 | 0 | } |
4120 | | |
4121 | | /* |
4122 | | * Calculate type signature hash of FUNC_PROTO, including referenced type IDs, |
4123 | | * under assumption that they were already resolved to canonical type IDs and |
4124 | | * are not going to change. |
4125 | | */ |
4126 | | static long btf_hash_fnproto(struct btf_type *t) |
4127 | 0 | { |
4128 | 0 | const struct btf_param *member = btf_params(t); |
4129 | 0 | __u16 vlen = btf_vlen(t); |
4130 | 0 | long h = btf_hash_common(t); |
4131 | 0 | int i; |
4132 | |
4133 | 0 | for (i = 0; i < vlen; i++) { |
4134 | 0 | h = hash_combine(h, member->name_off); |
4135 | 0 | h = hash_combine(h, member->type); |
4136 | 0 | member++; |
4137 | 0 | } |
4138 | 0 | return h; |
4139 | 0 | } |
4140 | | |
4141 | | /* |
4142 | | * Check exact equality of two FUNC_PROTOs, taking into account referenced |
4143 | | * type IDs, under assumption that they were already resolved to canonical |
4144 | | * type IDs and are not going to change. |
4145 | | * This function is called during reference types deduplication to compare |
4146 | | * FUNC_PROTO to potential canonical representative. |
4147 | | */ |
4148 | | static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2) |
4149 | 0 | { |
4150 | 0 | const struct btf_param *m1, *m2; |
4151 | 0 | __u16 vlen; |
4152 | 0 | int i; |
4153 | |
4154 | 0 | if (!btf_equal_common(t1, t2)) |
4155 | 0 | return false; |
4156 | | |
4157 | 0 | vlen = btf_vlen(t1); |
4158 | 0 | m1 = btf_params(t1); |
4159 | 0 | m2 = btf_params(t2); |
4160 | 0 | for (i = 0; i < vlen; i++) { |
4161 | 0 | if (m1->name_off != m2->name_off || m1->type != m2->type) |
4162 | 0 | return false; |
4163 | 0 | m1++; |
4164 | 0 | m2++; |
4165 | 0 | } |
4166 | 0 | return true; |
4167 | 0 | } |
4168 | | |
4169 | | /* |
4170 | | * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type |
4171 | | * IDs. This check is performed during type graph equivalence check and |
4172 | | * referenced types equivalence is checked separately. |
4173 | | */ |
4174 | | static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2) |
4175 | 0 | { |
4176 | 0 | const struct btf_param *m1, *m2; |
4177 | 0 | __u16 vlen; |
4178 | 0 | int i; |
4179 | | |
4180 | | /* skip return type ID */ |
4181 | 0 | if (t1->name_off != t2->name_off || t1->info != t2->info) |
4182 | 0 | return false; |
4183 | | |
4184 | 0 | vlen = btf_vlen(t1); |
4185 | 0 | m1 = btf_params(t1); |
4186 | 0 | m2 = btf_params(t2); |
4187 | 0 | for (i = 0; i < vlen; i++) { |
4188 | 0 | if (m1->name_off != m2->name_off) |
4189 | 0 | return false; |
4190 | 0 | m1++; |
4191 | 0 | m2++; |
4192 | 0 | } |
4193 | 0 | return true; |
4194 | 0 | } |
4195 | | |
4196 | | /* Prepare split BTF for deduplication by calculating hashes of base BTF's |
4197 | | * types and initializing the rest of the state (canonical type mapping) for |
4198 | | * the fixed base BTF part. |
4199 | | */ |
4200 | | static int btf_dedup_prep(struct btf_dedup *d) |
4201 | 0 | { |
4202 | 0 | struct btf_type *t; |
4203 | 0 | int type_id; |
4204 | 0 | long h; |
4205 | |
4206 | 0 | if (!d->btf->base_btf) |
4207 | 0 | return 0; |
4208 | | |
4209 | 0 | for (type_id = 1; type_id < d->btf->start_id; type_id++) { |
4210 | 0 | t = btf_type_by_id(d->btf, type_id); |
4211 | | |
4212 | | /* all base BTF types are self-canonical by definition */ |
4213 | 0 | d->map[type_id] = type_id; |
4214 | |
4215 | 0 | switch (btf_kind(t)) { |
4216 | 0 | case BTF_KIND_VAR: |
4217 | 0 | case BTF_KIND_DATASEC: |
4218 | | /* VAR and DATASEC are never hash/deduplicated */ |
4219 | 0 | continue; |
4220 | 0 | case BTF_KIND_CONST: |
4221 | 0 | case BTF_KIND_VOLATILE: |
4222 | 0 | case BTF_KIND_RESTRICT: |
4223 | 0 | case BTF_KIND_PTR: |
4224 | 0 | case BTF_KIND_FWD: |
4225 | 0 | case BTF_KIND_TYPEDEF: |
4226 | 0 | case BTF_KIND_FUNC: |
4227 | 0 | case BTF_KIND_FLOAT: |
4228 | 0 | case BTF_KIND_TYPE_TAG: |
4229 | 0 | h = btf_hash_common(t); |
4230 | 0 | break; |
4231 | 0 | case BTF_KIND_INT: |
4232 | 0 | case BTF_KIND_DECL_TAG: |
4233 | 0 | h = btf_hash_int_decl_tag(t); |
4234 | 0 | break; |
4235 | 0 | case BTF_KIND_ENUM: |
4236 | 0 | case BTF_KIND_ENUM64: |
4237 | 0 | h = btf_hash_enum(t); |
4238 | 0 | break; |
4239 | 0 | case BTF_KIND_STRUCT: |
4240 | 0 | case BTF_KIND_UNION: |
4241 | 0 | h = btf_hash_struct(t); |
4242 | 0 | break; |
4243 | 0 | case BTF_KIND_ARRAY: |
4244 | 0 | h = btf_hash_array(t); |
4245 | 0 | break; |
4246 | 0 | case BTF_KIND_FUNC_PROTO: |
4247 | 0 | h = btf_hash_fnproto(t); |
4248 | 0 | break; |
4249 | 0 | default: |
4250 | 0 | pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id); |
4251 | 0 | return -EINVAL; |
4252 | 0 | } |
4253 | 0 | if (btf_dedup_table_add(d, h, type_id)) |
4254 | 0 | return -ENOMEM; |
4255 | 0 | } |
4256 | | |
4257 | 0 | return 0; |
4258 | 0 | } |
4259 | | |
4260 | | /* |
4261 | | * Deduplicate primitive types, that can't reference other types, by calculating |
4262 | | * their type signature hash and comparing them with any possible canonical |
4263 | | * candidate. If no canonical candidate matches, type itself is marked as |
4264 | | * canonical and is added into `btf_dedup->dedup_table` as another candidate. |
4265 | | */ |
4266 | | static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) |
4267 | 0 | { |
4268 | 0 | struct btf_type *t = btf_type_by_id(d->btf, type_id); |
4269 | 0 | struct hashmap_entry *hash_entry; |
4270 | 0 | struct btf_type *cand; |
4271 | | /* if we don't find equivalent type, then we are canonical */ |
4272 | 0 | __u32 new_id = type_id; |
4273 | 0 | __u32 cand_id; |
4274 | 0 | long h; |
4275 | |
4276 | 0 | switch (btf_kind(t)) { |
4277 | 0 | case BTF_KIND_CONST: |
4278 | 0 | case BTF_KIND_VOLATILE: |
4279 | 0 | case BTF_KIND_RESTRICT: |
4280 | 0 | case BTF_KIND_PTR: |
4281 | 0 | case BTF_KIND_TYPEDEF: |
4282 | 0 | case BTF_KIND_ARRAY: |
4283 | 0 | case BTF_KIND_STRUCT: |
4284 | 0 | case BTF_KIND_UNION: |
4285 | 0 | case BTF_KIND_FUNC: |
4286 | 0 | case BTF_KIND_FUNC_PROTO: |
4287 | 0 | case BTF_KIND_VAR: |
4288 | 0 | case BTF_KIND_DATASEC: |
4289 | 0 | case BTF_KIND_DECL_TAG: |
4290 | 0 | case BTF_KIND_TYPE_TAG: |
4291 | 0 | return 0; |
4292 | | |
4293 | 0 | case BTF_KIND_INT: |
4294 | 0 | h = btf_hash_int_decl_tag(t); |
4295 | 0 | for_each_dedup_cand(d, hash_entry, h) { |
4296 | 0 | cand_id = hash_entry->value; |
4297 | 0 | cand = btf_type_by_id(d->btf, cand_id); |
4298 | 0 | if (btf_equal_int_tag(t, cand)) { |
4299 | 0 | new_id = cand_id; |
4300 | 0 | break; |
4301 | 0 | } |
4302 | 0 | } |
4303 | 0 | break; |
4304 | | |
4305 | 0 | case BTF_KIND_ENUM: |
4306 | 0 | case BTF_KIND_ENUM64: |
4307 | 0 | h = btf_hash_enum(t); |
4308 | 0 | for_each_dedup_cand(d, hash_entry, h) { |
4309 | 0 | cand_id = hash_entry->value; |
4310 | 0 | cand = btf_type_by_id(d->btf, cand_id); |
4311 | 0 | if (btf_equal_enum(t, cand)) { |
4312 | 0 | new_id = cand_id; |
4313 | 0 | break; |
4314 | 0 | } |
4315 | 0 | if (btf_compat_enum(t, cand)) { |
4316 | 0 | if (btf_is_enum_fwd(t)) { |
4317 | | /* resolve fwd to full enum */ |
4318 | 0 | new_id = cand_id; |
4319 | 0 | break; |
4320 | 0 | } |
4321 | | /* resolve canonical enum fwd to full enum */ |
4322 | 0 | d->map[cand_id] = type_id; |
4323 | 0 | } |
4324 | 0 | } |
4325 | 0 | break; |
4326 | | |
4327 | 0 | case BTF_KIND_FWD: |
4328 | 0 | case BTF_KIND_FLOAT: |
4329 | 0 | h = btf_hash_common(t); |
4330 | 0 | for_each_dedup_cand(d, hash_entry, h) { |
4331 | 0 | cand_id = hash_entry->value; |
4332 | 0 | cand = btf_type_by_id(d->btf, cand_id); |
4333 | 0 | if (btf_equal_common(t, cand)) { |
4334 | 0 | new_id = cand_id; |
4335 | 0 | break; |
4336 | 0 | } |
4337 | 0 | } |
4338 | 0 | break; |
4339 | | |
4340 | 0 | default: |
4341 | 0 | return -EINVAL; |
4342 | 0 | } |
4343 | | |
4344 | 0 | d->map[type_id] = new_id; |
4345 | 0 | if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) |
4346 | 0 | return -ENOMEM; |
4347 | | |
4348 | 0 | return 0; |
4349 | 0 | } |
4350 | | |
4351 | | static int btf_dedup_prim_types(struct btf_dedup *d) |
4352 | 0 | { |
4353 | 0 | int i, err; |
4354 | |
4355 | 0 | for (i = 0; i < d->btf->nr_types; i++) { |
4356 | 0 | err = btf_dedup_prim_type(d, d->btf->start_id + i); |
4357 | 0 | if (err) |
4358 | 0 | return err; |
4359 | 0 | } |
4360 | 0 | return 0; |
4361 | 0 | } |
4362 | | |
4363 | | /* |
4364 | | * Check whether the type is already mapped into a canonical one (possibly itself). |
4365 | | */ |
4366 | | static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id) |
4367 | 0 | { |
4368 | 0 | return d->map[type_id] <= BTF_MAX_NR_TYPES; |
4369 | 0 | } |
4370 | | |
4371 | | /* |
4372 | | * Resolve type ID into its canonical type ID, if any; otherwise return original |
4373 | | * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow |
4374 | | * STRUCT/UNION link and resolve it into canonical type ID as well. |
4375 | | */ |
4376 | | static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id) |
4377 | 0 | { |
4378 | 0 | while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) |
4379 | 0 | type_id = d->map[type_id]; |
4380 | 0 | return type_id; |
4381 | 0 | } |
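| |
| | /* Editor's note: e.g., with map = {3 -> 3, 5 -> 3}, resolve_type_id(d, 5) |
| | * walks 5 -> 3 and stops at the self-mapped (canonical) id 3. |
| | */ |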
4382 | | |
4383 | | /* |
4384 | | * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original |
4385 | | * type ID. |
4386 | | */ |
4387 | | static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id) |
4388 | 0 | { |
4389 | 0 | __u32 orig_type_id = type_id; |
4390 | |
4391 | 0 | if (!btf_is_fwd(btf__type_by_id(d->btf, type_id))) |
4392 | 0 | return type_id; |
4393 | | |
4394 | 0 | while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) |
4395 | 0 | type_id = d->map[type_id]; |
4396 | |
4397 | 0 | if (!btf_is_fwd(btf__type_by_id(d->btf, type_id))) |
4398 | 0 | return type_id; |
4399 | | |
4400 | 0 | return orig_type_id; |
4401 | 0 | } |
4402 | | |
4403 | | |
4404 | | static inline __u16 btf_fwd_kind(struct btf_type *t) |
4405 | 0 | { |
4406 | 0 | return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT; |
4407 | 0 | } |
4408 | | |
4409 | | static bool btf_dedup_identical_types(struct btf_dedup *d, __u32 id1, __u32 id2, int depth) |
4410 | 0 | { |
4411 | 0 | struct btf_type *t1, *t2; |
4412 | 0 | int k1, k2; |
4413 | 0 | recur: |
4414 | 0 | if (depth <= 0) |
4415 | 0 | return false; |
4416 | | |
4417 | 0 | t1 = btf_type_by_id(d->btf, id1); |
4418 | 0 | t2 = btf_type_by_id(d->btf, id2); |
4419 | |
4420 | 0 | k1 = btf_kind(t1); |
4421 | 0 | k2 = btf_kind(t2); |
4422 | 0 | if (k1 != k2) |
4423 | 0 | return false; |
4424 | | |
4425 | 0 | switch (k1) { |
4426 | 0 | case BTF_KIND_UNKN: /* VOID */ |
4427 | 0 | return true; |
4428 | 0 | case BTF_KIND_INT: |
4429 | 0 | return btf_equal_int_tag(t1, t2); |
4430 | 0 | case BTF_KIND_ENUM: |
4431 | 0 | case BTF_KIND_ENUM64: |
4432 | 0 | return btf_compat_enum(t1, t2); |
4433 | 0 | case BTF_KIND_FWD: |
4434 | 0 | case BTF_KIND_FLOAT: |
4435 | 0 | return btf_equal_common(t1, t2); |
4436 | 0 | case BTF_KIND_CONST: |
4437 | 0 | case BTF_KIND_VOLATILE: |
4438 | 0 | case BTF_KIND_RESTRICT: |
4439 | 0 | case BTF_KIND_PTR: |
4440 | 0 | case BTF_KIND_TYPEDEF: |
4441 | 0 | case BTF_KIND_FUNC: |
4442 | 0 | case BTF_KIND_TYPE_TAG: |
4443 | 0 | if (t1->info != t2->info || t1->name_off != t2->name_off) |
4444 | 0 | return false; |
4445 | 0 | id1 = t1->type; |
4446 | 0 | id2 = t2->type; |
4447 | 0 | goto recur; |
4448 | 0 | case BTF_KIND_ARRAY: { |
4449 | 0 | struct btf_array *a1, *a2; |
4450 | |
4451 | 0 | if (!btf_compat_array(t1, t2)) |
4452 | 0 | return false; |
4453 | | |
4454 | 0 | a1 = btf_array(t1); |
4455 | 0 | a2 = btf_array(t2); |
4456 | |
4457 | 0 | if (a1->index_type != a2->index_type && |
4458 | 0 | !btf_dedup_identical_types(d, a1->index_type, a2->index_type, depth - 1)) |
4459 | 0 | return false; |
4460 | | |
4461 | 0 | if (a1->type != a2->type && |
4462 | 0 | !btf_dedup_identical_types(d, a1->type, a2->type, depth - 1)) |
4463 | 0 | return false; |
4464 | | |
4465 | 0 | return true; |
4466 | 0 | } |
4467 | 0 | case BTF_KIND_STRUCT: |
4468 | 0 | case BTF_KIND_UNION: { |
4469 | 0 | const struct btf_member *m1, *m2; |
4470 | 0 | int i, n; |
4471 | |
4472 | 0 | if (!btf_shallow_equal_struct(t1, t2)) |
4473 | 0 | return false; |
4474 | | |
4475 | 0 | m1 = btf_members(t1); |
4476 | 0 | m2 = btf_members(t2); |
4477 | 0 | for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) { |
4478 | 0 | if (m1->type == m2->type) |
4479 | 0 | continue; |
4480 | 0 | if (!btf_dedup_identical_types(d, m1->type, m2->type, depth - 1)) |
4481 | 0 | return false; |
4482 | 0 | } |
4483 | 0 | return true; |
4484 | 0 | } |
4485 | 0 | case BTF_KIND_FUNC_PROTO: { |
4486 | 0 | const struct btf_param *p1, *p2; |
4487 | 0 | int i, n; |
4488 | |
4489 | 0 | if (!btf_compat_fnproto(t1, t2)) |
4490 | 0 | return false; |
4491 | | |
4492 | 0 | if (t1->type != t2->type && |
4493 | 0 | !btf_dedup_identical_types(d, t1->type, t2->type, depth - 1)) |
4494 | 0 | return false; |
4495 | | |
4496 | 0 | p1 = btf_params(t1); |
4497 | 0 | p2 = btf_params(t2); |
4498 | 0 | for (i = 0, n = btf_vlen(t1); i < n; i++, p1++, p2++) { |
4499 | 0 | if (p1->type == p2->type) |
4500 | 0 | continue; |
4501 | 0 | if (!btf_dedup_identical_types(d, p1->type, p2->type, depth - 1)) |
4502 | 0 | return false; |
4503 | 0 | } |
4504 | 0 | return true; |
4505 | 0 | } |
4506 | 0 | default: |
4507 | 0 | return false; |
4508 | 0 | } |
4509 | 0 | } |
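| |
| | /* Editor's illustration: the depth-limited check above guards against |
| | * DWARF emitting distinct-but-identical types within one CU, e.g.: |
| | * |
| | * struct s { |
| | * int a[4]; // may reference ARRAY type [7] |
| | * int b[4]; // may reference an identical ARRAY type [9] |
| | * }; |
| | */ |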
4510 | | |
4511 | | |
4512 | | /* |
4513 | | * Check equivalence of BTF type graph formed by candidate struct/union (we'll |
4514 | | * call it "candidate graph" in this description for brevity) to a type graph |
4515 | | * formed by (potential) canonical struct/union ("canonical graph" for brevity |
4516 | | * here, though keep in mind that not all types in canonical graph are |
4517 | | * necessarily canonical representatives themselves, some of them might be |
4518 | | * duplicates or their uniqueness might not have been established yet). |
4519 | | * Returns: |
4520 | | * - >0, if type graphs are equivalent; |
4521 | | * - 0, if not equivalent; |
4522 | | * - <0, on error. |
4523 | | * |
4524 | | * Algorithm performs side-by-side DFS traversal of both type graphs and checks |
4525 | | * equivalence of BTF types at each step. If at any point BTF types in candidate |
4526 | | * and canonical graphs are not compatible structurally, whole graphs are |
4527 | | * incompatible. If types are structurally equivalent (i.e., all information |
4528 | | * except referenced type IDs is exactly the same), a mapping from `canon_id` to |
4529 | | * a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`). |
4530 | | * If a type references other types, then those referenced types are checked |
4531 | | * for equivalence recursively. |
4532 | | * |
4533 | | * During DFS traversal, if we find that for current `canon_id` type we |
4534 | | * already have some mapping in hypothetical map, we check for two possible |
4535 | | * situations: |
4536 | | * - `canon_id` is mapped to exactly the same type as `cand_id`. This will |
4537 | | * happen when type graphs have cycles. In this case we assume those two |
4538 | | * types are equivalent. |
4539 | | * - `canon_id` is mapped to different type. This is contradiction in our |
4540 | | * hypothetical mapping, because same graph in canonical graph corresponds |
4541 | | * to two different types in candidate graph, which for equivalent type |
4542 | | * graphs shouldn't happen. This condition terminates equivalence check |
4543 | | * with negative result. |
4544 | | * |
4545 | | * If the type graph traversal exhausts types to check and finds no contradiction, |
4546 | | * then the type graphs are equivalent. |
4547 | | * |
4548 | | * When checking types for equivalence, there is one special case: FWD types. |
4549 | | * If FWD type resolution is allowed and one of the types (either from canonical |
4550 | | * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind |
4551 | | * flag) and their names match, hypothetical mapping is updated to point from |
4552 | | * FWD to STRUCT/UNION. If the graphs are successfully determined to be equivalent, |
4553 | | * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently. |
4554 | | * |
4555 | | * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution, |
4556 | | * if there are two identically named (or anonymous) structs/unions that are |
4557 | | * compatible structurally, one of which has FWD field, while other is concrete |
4558 | | * STRUCT/UNION, but according to C sources they are different structs/unions |
4559 | | * that are referencing different types with the same name. This is extremely |
4560 | | * unlikely to happen, but the btf_dedup API allows disabling FWD resolution if |
4561 | | * this logic is causing problems. |
4562 | | * |
4563 | | * Doing FWD resolution means that both candidate and/or canonical graphs can |
4564 | | * consist of portions of the graph that come from multiple compilation units. |
4565 | | * This is due to the fact that types within single compilation unit are always |
4566 | | * deduplicated and FWDs are already resolved, if referenced struct/union |
4567 | | * definition is available. So, if we had unresolved FWD and found corresponding |
4568 | | * STRUCT/UNION, they will be from different compilation units. This |
4569 | | * consequently means that when we "link" FWD to corresponding STRUCT/UNION, |
4570 | | * type graph will likely have at least two different BTF types that describe |
4571 | | * same type (e.g., most probably there will be two different BTF types for the |
4572 | | * same 'int' primitive type) and could even have "overlapping" parts of type |
4573 | | * graph that describe same subset of types. |
4574 | | * |
4575 | | * This in turn means that our assumption that each type in canonical graph |
4576 | | * must correspond to exactly one type in candidate graph might not hold |
4577 | | * anymore and will make it harder to detect contradictions using hypothetical |
4578 | | * map. To handle this problem, we allow to follow FWD -> STRUCT/UNION |
4579 | | * resolution only in canonical graph. FWDs in candidate graphs are never |
4580 | | * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs |
4581 | | * that can occur: |
4582 | | * - Both types in canonical and candidate graphs are FWDs. If they are |
4583 | | * structurally equivalent, then they can either be both resolved to the |
4584 | | * same STRUCT/UNION or not resolved at all. In both cases they are |
4585 | | * equivalent and there is no need to resolve FWD on candidate side. |
4586 | | * - Both types in canonical and candidate graphs are concrete STRUCT/UNION, |
4587 | | * so nothing to resolve as well, algorithm will check equivalence anyway. |
4588 | | * - Type in canonical graph is FWD, while type in candidate graph is a
4589 | | * concrete STRUCT/UNION. In this case the candidate graph comes from a single
4590 | | * compilation unit, so there is exactly one BTF type for each unique C type.
4591 | | * After resolving FWD into STRUCT/UNION, there might be more than one BTF type
4592 | | * in the canonical graph mapping to a single BTF type in the candidate graph,
4593 | | * but because the hypothetical mapping maps from canonical to candidate types,
4594 | | * it's alright: we still maintain the property of a single `canon_id`
4595 | | * mapping to a single `cand_id` (two different `canon_id`s could be
4596 | | * mapped to the same `cand_id`, but that's not contradictory).
4597 | | * - Type in canonical graph is a concrete STRUCT/UNION, while type in
4598 | | * candidate graph is FWD. In this case we just check compatibility of the
4599 | | * STRUCT/UNION and the corresponding FWD, and if they are compatible, we
4600 | | * assume that whatever STRUCT/UNION the FWD resolves to must be equivalent to
4601 | | * the concrete STRUCT/UNION from the canonical graph. If the rest of the type
4602 | | * graphs turn out equivalent, we'll re-resolve the FWD to the concrete
4603 | | * STRUCT/UNION from the canonical graph.
4604 | | */ |
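/* As an illustrative sketch (hypothetical C types, not anything libbpf
 * defines), consider two CUs whose type graphs are equivalent modulo FWD:
 *
 *	// CU #1:                          // CU #2:
 *	struct foo;                        struct foo { int x; };
 *	struct bar { struct foo *f; };     struct bar { struct foo *f; };
 *
 * When 'bar' from CU #1 (canonical side) is checked against 'bar' from
 * CU #2 (candidate side), the FWD 'foo' is compatible with the concrete
 * STRUCT 'foo', so the hypothetical map tentatively links them; once the
 * whole graph checks out, that FWD -> STRUCT mapping is recorded for real.
 */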
4605 | | static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, |
4606 | | __u32 canon_id) |
4607 | 0 | { |
4608 | 0 | struct btf_type *cand_type; |
4609 | 0 | struct btf_type *canon_type; |
4610 | 0 | __u32 hypot_type_id; |
4611 | 0 | __u16 cand_kind; |
4612 | 0 | __u16 canon_kind; |
4613 | 0 | int i, eq; |
4614 | | |
4615 | | /* if both resolve to the same canonical, they must be equivalent */ |
4616 | 0 | if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id)) |
4617 | 0 | return 1; |
4618 | | |
4619 | 0 | canon_id = resolve_fwd_id(d, canon_id); |
4620 | |
4621 | 0 | hypot_type_id = d->hypot_map[canon_id]; |
4622 | 0 | if (hypot_type_id <= BTF_MAX_NR_TYPES) { |
4623 | 0 | if (hypot_type_id == cand_id) |
4624 | 0 | return 1; |
4625 | | /* In some cases the compiler will generate different DWARF types
4626 | | * for *identical* array type definitions and use them for
4627 | | * different fields within the *same* struct. This breaks the type
4628 | | * equivalence check, which assumes that the candidate
4629 | | * type sub-graph has consistent, deduped-by-compiler
4630 | | * types within a single CU. A similar situation can sometimes happen
4631 | | * with structs/unions, and even with pointers.
4632 | | * So we accommodate cases like this by doing a structural
4633 | | * comparison recursively, while avoiding getting stuck in endless
4634 | | * loops by limiting the depth up to which we check.
4635 | | */
4636 | 0 | if (btf_dedup_identical_types(d, hypot_type_id, cand_id, 16)) |
4637 | 0 | return 1; |
4638 | 0 | return 0; |
4639 | 0 | } |
4640 | | |
4641 | 0 | if (btf_dedup_hypot_map_add(d, canon_id, cand_id)) |
4642 | 0 | return -ENOMEM; |
4643 | | |
4644 | 0 | cand_type = btf_type_by_id(d->btf, cand_id); |
4645 | 0 | canon_type = btf_type_by_id(d->btf, canon_id); |
4646 | 0 | cand_kind = btf_kind(cand_type); |
4647 | 0 | canon_kind = btf_kind(canon_type); |
4648 | |
4649 | 0 | if (cand_type->name_off != canon_type->name_off) |
4650 | 0 | return 0; |
4651 | | |
4652 | | /* FWD <--> STRUCT/UNION equivalence check, if enabled */ |
4653 | 0 | if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD) |
4654 | 0 | && cand_kind != canon_kind) { |
4655 | 0 | __u16 real_kind; |
4656 | 0 | __u16 fwd_kind; |
4657 | |
4658 | 0 | if (cand_kind == BTF_KIND_FWD) { |
4659 | 0 | real_kind = canon_kind; |
4660 | 0 | fwd_kind = btf_fwd_kind(cand_type); |
4661 | 0 | } else { |
4662 | 0 | real_kind = cand_kind; |
4663 | 0 | fwd_kind = btf_fwd_kind(canon_type); |
4664 | | /* we'd need to resolve base FWD to STRUCT/UNION */ |
4665 | 0 | if (fwd_kind == real_kind && canon_id < d->btf->start_id) |
4666 | 0 | d->hypot_adjust_canon = true; |
4667 | 0 | } |
4668 | 0 | return fwd_kind == real_kind; |
4669 | 0 | } |
4670 | | |
4671 | 0 | if (cand_kind != canon_kind) |
4672 | 0 | return 0; |
4673 | | |
4674 | 0 | switch (cand_kind) { |
4675 | 0 | case BTF_KIND_INT: |
4676 | 0 | return btf_equal_int_tag(cand_type, canon_type); |
4677 | | |
4678 | 0 | case BTF_KIND_ENUM: |
4679 | 0 | case BTF_KIND_ENUM64: |
4680 | 0 | return btf_compat_enum(cand_type, canon_type); |
4681 | | |
4682 | 0 | case BTF_KIND_FWD: |
4683 | 0 | case BTF_KIND_FLOAT: |
4684 | 0 | return btf_equal_common(cand_type, canon_type); |
4685 | | |
4686 | 0 | case BTF_KIND_CONST: |
4687 | 0 | case BTF_KIND_VOLATILE: |
4688 | 0 | case BTF_KIND_RESTRICT: |
4689 | 0 | case BTF_KIND_PTR: |
4690 | 0 | case BTF_KIND_TYPEDEF: |
4691 | 0 | case BTF_KIND_FUNC: |
4692 | 0 | case BTF_KIND_TYPE_TAG: |
4693 | 0 | if (cand_type->info != canon_type->info) |
4694 | 0 | return 0; |
4695 | 0 | return btf_dedup_is_equiv(d, cand_type->type, canon_type->type); |
4696 | | |
4697 | 0 | case BTF_KIND_ARRAY: { |
4698 | 0 | const struct btf_array *cand_arr, *canon_arr; |
4699 | |
4700 | 0 | if (!btf_compat_array(cand_type, canon_type)) |
4701 | 0 | return 0; |
4702 | 0 | cand_arr = btf_array(cand_type); |
4703 | 0 | canon_arr = btf_array(canon_type); |
4704 | 0 | eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type); |
4705 | 0 | if (eq <= 0) |
4706 | 0 | return eq; |
4707 | 0 | return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type); |
4708 | 0 | } |
4709 | | |
4710 | 0 | case BTF_KIND_STRUCT: |
4711 | 0 | case BTF_KIND_UNION: { |
4712 | 0 | const struct btf_member *cand_m, *canon_m; |
4713 | 0 | __u16 vlen; |
4714 | |
4715 | 0 | if (!btf_shallow_equal_struct(cand_type, canon_type)) |
4716 | 0 | return 0; |
4717 | 0 | vlen = btf_vlen(cand_type); |
4718 | 0 | cand_m = btf_members(cand_type); |
4719 | 0 | canon_m = btf_members(canon_type); |
4720 | 0 | for (i = 0; i < vlen; i++) { |
4721 | 0 | eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type); |
4722 | 0 | if (eq <= 0) |
4723 | 0 | return eq; |
4724 | 0 | cand_m++; |
4725 | 0 | canon_m++; |
4726 | 0 | } |
4727 | | |
4728 | 0 | return 1; |
4729 | 0 | } |
4730 | | |
4731 | 0 | case BTF_KIND_FUNC_PROTO: { |
4732 | 0 | const struct btf_param *cand_p, *canon_p; |
4733 | 0 | __u16 vlen; |
4734 | |
4735 | 0 | if (!btf_compat_fnproto(cand_type, canon_type)) |
4736 | 0 | return 0; |
4737 | 0 | eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type); |
4738 | 0 | if (eq <= 0) |
4739 | 0 | return eq; |
4740 | 0 | vlen = btf_vlen(cand_type); |
4741 | 0 | cand_p = btf_params(cand_type); |
4742 | 0 | canon_p = btf_params(canon_type); |
4743 | 0 | for (i = 0; i < vlen; i++) { |
4744 | 0 | eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type); |
4745 | 0 | if (eq <= 0) |
4746 | 0 | return eq; |
4747 | 0 | cand_p++; |
4748 | 0 | canon_p++; |
4749 | 0 | } |
4750 | 0 | return 1; |
4751 | 0 | } |
4752 | | |
4753 | 0 | default: |
4754 | 0 | return -EINVAL; |
4755 | 0 | } |
4756 | 0 | return 0; |
4757 | 0 | } |
4758 | | |
4759 | | /* |
4760 | | * Use hypothetical mapping, produced by successful type graph equivalence |
4761 | | * check, to augment existing struct/union canonical mapping, where possible. |
4762 | | * |
4763 | | * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record |
4764 | | * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
4765 | | * it doesn't matter whether the FWD type was part of the canonical graph or the
4766 | | * candidate one, we record the mapping anyway. As opposed to the carefulness
4767 | | * required for struct/union correspondence mapping (described below), this is
4768 | | * safe for FWD resolution, because by the time a FWD type (a reference type) is
4769 | | * deduplicated, all structs/unions will already have been deduped anyway.
4770 | | * |
4771 | | * Recording STRUCT/UNION mapping is purely a performance optimization and is |
4772 | | * not required for correctness. It needs to be done carefully to ensure that |
4773 | | * a struct/union from the candidate's type graph is not mapped into a
4774 | | * corresponding struct/union from the canonical type graph that itself hasn't
4775 | | * been resolved into a canonical representative. The only guarantee we have is
4776 | | * that the canonical struct/union was determined as canonical and that won't
4777 | | * change. But any types referenced through that struct/union's fields might
4778 | | * not have been resolved yet, so in a case like that it's too early to
4779 | | * establish any kind of correspondence between structs/unions.
4780 | | * |
4781 | | * No canonical correspondence is derived for primitive types (they are
4782 | | * completely deduplicated already anyway) or reference types (they rely on the
4783 | | * stability of the struct/union canonical relationship for equivalence checks).
4784 | | */ |
4785 | | static void btf_dedup_merge_hypot_map(struct btf_dedup *d) |
4786 | 0 | { |
4787 | 0 | __u32 canon_type_id, targ_type_id; |
4788 | 0 | __u16 t_kind, c_kind; |
4789 | 0 | __u32 t_id, c_id; |
4790 | 0 | int i; |
4791 | |
4792 | 0 | for (i = 0; i < d->hypot_cnt; i++) { |
4793 | 0 | canon_type_id = d->hypot_list[i]; |
4794 | 0 | targ_type_id = d->hypot_map[canon_type_id]; |
4795 | 0 | t_id = resolve_type_id(d, targ_type_id); |
4796 | 0 | c_id = resolve_type_id(d, canon_type_id); |
4797 | 0 | t_kind = btf_kind(btf__type_by_id(d->btf, t_id)); |
4798 | 0 | c_kind = btf_kind(btf__type_by_id(d->btf, c_id)); |
4799 | | /* |
4800 | | * Resolve FWD into STRUCT/UNION. |
4801 | | * It's ok to resolve FWD into STRUCT/UNION that's not yet |
4802 | | * mapped to canonical representative (as opposed to |
4803 | | * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because |
4804 | | * eventually that struct is going to be mapped and all resolved |
4805 | | * FWDs will automatically resolve to correct canonical |
4806 | | * representative. This will happen before ref type deduping, |
4807 | | * which critically depends on the stability of these mappings. This
4808 | | * stability is not a requirement for STRUCT/UNION equivalence |
4809 | | * checks, though. |
4810 | | */ |
4811 | | |
4812 | | /* if it's the split BTF case, we still need to point base FWD |
4813 | | * to STRUCT/UNION in a split BTF, because FWDs from split BTF |
4814 | | * will be resolved against base FWD. If we don't point base |
4815 | | * canonical FWD to the resolved STRUCT/UNION, then all the |
4816 | | * FWDs in split BTF won't be correctly resolved to a proper |
4817 | | * STRUCT/UNION. |
4818 | | */ |
4819 | 0 | if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD) |
4820 | 0 | d->map[c_id] = t_id; |
4821 | | |
4822 | | /* if graph equivalence determined that we'd need to adjust |
4823 | | * base canonical types, then we need to only point base FWDs |
4824 | | * to STRUCTs/UNIONs and do no more modifications. For all |
4825 | | * other purposes the type graphs were not equivalent. |
4826 | | */ |
4827 | 0 | if (d->hypot_adjust_canon) |
4828 | 0 | continue; |
4829 | | |
4830 | 0 | if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD) |
4831 | 0 | d->map[t_id] = c_id; |
4832 | |
4833 | 0 | if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) && |
4834 | 0 | c_kind != BTF_KIND_FWD && |
4835 | 0 | is_type_mapped(d, c_id) && |
4836 | 0 | !is_type_mapped(d, t_id)) { |
4837 | | /* |
4838 | | * as a perf optimization, we can map struct/union |
4839 | | * that's part of type graph we just verified for |
4840 | | * equivalence. We can do that for struct/union that has |
4841 | | * canonical representative only, though. |
4842 | | */ |
4843 | 0 | d->map[t_id] = c_id; |
4844 | 0 | } |
4845 | 0 | } |
4846 | 0 | } |
4847 | | |
4848 | | /* |
4849 | | * Deduplicate struct/union types. |
4850 | | * |
4851 | | * For each struct/union type its type signature hash is calculated, taking
4852 | | * into account the type's name and size, as well as the number, order, and
4853 | | * names of its fields, but ignoring type IDs referenced from fields, because
4854 | | * those might not be deduped completely until after the reference type
4855 | | * deduplication phase. This type hash is used to iterate over all potential
4856 | | * canonical types sharing the same hash. For each canonical candidate we
4857 | | * check whether the type graphs they form (through types referenced from
4858 | | * fields and so on) are equivalent, using the algorithm implemented in
4859 | | * `btf_dedup_is_equiv`. If such an equivalence is found and BTF_KIND_FWD
4860 | | * resolution is allowed, then the hypothetical mapping (btf_dedup->hypot_map)
4861 | | * produced by the type graph equivalence algorithm is used to record the
4862 | | * FWD -> STRUCT/UNION mapping. It's also used to potentially map other
4863 | | * structs/unions to their canonical representatives, if such a relationship
4864 | | * hasn't yet been established. This speeds up the algorithm by eliminating some duplicate work.
4865 | | * |
4866 | | * If no matching canonical representative is found, the struct/union is marked
4867 | | * as its own canonical representative and is added into the
4868 | | * btf_dedup->dedup_table hash map for further lookups.
4869 | | */ |
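/* A hedged illustration (hypothetical types): if two CUs each compile
 *
 *	struct node { struct node *next; int val; };
 *
 * then BTF contains two STRUCT 'node' types with identical hashes (name,
 * size, field count/order/names; referenced IDs are ignored). Deduping the
 * second one finds the first via dedup_table, btf_dedup_is_equiv() confirms
 * graph equivalence (the self-referential 'next' is handled through the
 * hypothetical map), and d->map[] points the duplicate at the canonical one.
 */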
4870 | | static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id) |
4871 | 0 | { |
4872 | 0 | struct btf_type *cand_type, *t; |
4873 | 0 | struct hashmap_entry *hash_entry; |
4874 | | /* if we don't find equivalent type, then we are canonical */ |
4875 | 0 | __u32 new_id = type_id; |
4876 | 0 | __u16 kind; |
4877 | 0 | long h; |
4878 | | |
4879 | | /* already deduped or is in process of deduping (loop detected) */ |
4880 | 0 | if (d->map[type_id] <= BTF_MAX_NR_TYPES) |
4881 | 0 | return 0; |
4882 | | |
4883 | 0 | t = btf_type_by_id(d->btf, type_id); |
4884 | 0 | kind = btf_kind(t); |
4885 | |
4886 | 0 | if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION) |
4887 | 0 | return 0; |
4888 | | |
4889 | 0 | h = btf_hash_struct(t); |
4890 | 0 | for_each_dedup_cand(d, hash_entry, h) { |
4891 | 0 | __u32 cand_id = hash_entry->value; |
4892 | 0 | int eq; |
4893 | | |
4894 | | /* |
4895 | | * Even though btf_dedup_is_equiv() checks for |
4896 | | * btf_shallow_equal_struct() internally when checking two |
4897 | | * structs (unions) for equivalence, we need to guard here
4898 | | * against picking a matching FWD type as a dedup candidate.
4899 | | * This can happen due to a hash collision. In such a case, just
4900 | | * relying on btf_dedup_is_equiv() could lead to
4901 | | * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because |
4902 | | * FWD and compatible STRUCT/UNION are considered equivalent. |
4903 | | */ |
4904 | 0 | cand_type = btf_type_by_id(d->btf, cand_id); |
4905 | 0 | if (!btf_shallow_equal_struct(t, cand_type)) |
4906 | 0 | continue; |
4907 | | |
4908 | 0 | btf_dedup_clear_hypot_map(d); |
4909 | 0 | eq = btf_dedup_is_equiv(d, type_id, cand_id); |
4910 | 0 | if (eq < 0) |
4911 | 0 | return eq; |
4912 | 0 | if (!eq) |
4913 | 0 | continue; |
4914 | 0 | btf_dedup_merge_hypot_map(d); |
4915 | 0 | if (d->hypot_adjust_canon) /* not really equivalent */ |
4916 | 0 | continue; |
4917 | 0 | new_id = cand_id; |
4918 | 0 | break; |
4919 | 0 | } |
4920 | | |
4921 | 0 | d->map[type_id] = new_id; |
4922 | 0 | if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) |
4923 | 0 | return -ENOMEM; |
4924 | | |
4925 | 0 | return 0; |
4926 | 0 | } |
4927 | | |
4928 | | static int btf_dedup_struct_types(struct btf_dedup *d) |
4929 | 0 | { |
4930 | 0 | int i, err; |
4931 | |
4932 | 0 | for (i = 0; i < d->btf->nr_types; i++) { |
4933 | 0 | err = btf_dedup_struct_type(d, d->btf->start_id + i); |
4934 | 0 | if (err) |
4935 | 0 | return err; |
4936 | 0 | } |
4937 | 0 | return 0; |
4938 | 0 | } |
4939 | | |
4940 | | /* |
4941 | | * Deduplicate reference type. |
4942 | | * |
4943 | | * Once all primitive and struct/union types have been deduplicated, we can
4944 | | * easily deduplicate all other (reference) BTF types. This is done in two steps:
4945 | | *
4946 | | * 1. Resolve all referenced type IDs into their canonical type IDs. This
4947 | | * resolution can be done immediately for primitive and struct/union types
4948 | | * (because they were deduped in the previous two phases) or recursively for
4949 | | * reference types. Recursion will always terminate at either a primitive or a
4950 | | * struct/union type, at which point we can "unwind" the chain of reference
4951 | | * types one by one. There is no danger of encountering cycles because in the C
4952 | | * type system the only way to form a type cycle is through a struct/union, so
4953 | | * any chain of reference types, even those taking part in a type cycle, will
4954 | | * inevitably reach a struct/union at some point.
4955 | | * |
4956 | | * 2. Once all referenced type IDs are resolved into canonical ones, the BTF
4957 | | * type becomes "stable", in the sense that no further deduplication will cause
4958 | | * any changes to it. With that, it's now possible to calculate the type's
4959 | | * signature hash (this time taking referenced type IDs into account) and loop
4960 | | * over all potential canonical representatives. If no match is found, the
4961 | | * current type becomes its own canonical representative and is added into
4962 | | * btf_dedup->dedup_table as another possible canonical representative.
4963 | | */ |
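/* Sketch of both steps on a short chain (hypothetical types): for
 *
 *	const struct task *p;	// BTF: PTR -> CONST -> STRUCT 'task'
 *
 * deduping the PTR recurses into the CONST, which recurses into the STRUCT;
 * the STRUCT is already canonical from the previous phase, so the recursion
 * unwinds: the CONST rewrites its referenced ID, hashes itself, and finds or
 * becomes its canonical form, and then the PTR does the same one level up.
 */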
4964 | | static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) |
4965 | 0 | { |
4966 | 0 | struct hashmap_entry *hash_entry; |
4967 | 0 | __u32 new_id = type_id, cand_id; |
4968 | 0 | struct btf_type *t, *cand; |
4969 | | /* if we don't find equivalent type, then we are representative type */ |
4970 | 0 | int ref_type_id; |
4971 | 0 | long h; |
4972 | |
4973 | 0 | if (d->map[type_id] == BTF_IN_PROGRESS_ID) |
4974 | 0 | return -ELOOP; |
4975 | 0 | if (d->map[type_id] <= BTF_MAX_NR_TYPES) |
4976 | 0 | return resolve_type_id(d, type_id); |
4977 | | |
4978 | 0 | t = btf_type_by_id(d->btf, type_id); |
4979 | 0 | d->map[type_id] = BTF_IN_PROGRESS_ID; |
4980 | |
4981 | 0 | switch (btf_kind(t)) { |
4982 | 0 | case BTF_KIND_CONST: |
4983 | 0 | case BTF_KIND_VOLATILE: |
4984 | 0 | case BTF_KIND_RESTRICT: |
4985 | 0 | case BTF_KIND_PTR: |
4986 | 0 | case BTF_KIND_TYPEDEF: |
4987 | 0 | case BTF_KIND_FUNC: |
4988 | 0 | case BTF_KIND_TYPE_TAG: |
4989 | 0 | ref_type_id = btf_dedup_ref_type(d, t->type); |
4990 | 0 | if (ref_type_id < 0) |
4991 | 0 | return ref_type_id; |
4992 | 0 | t->type = ref_type_id; |
4993 | |
4994 | 0 | h = btf_hash_common(t); |
4995 | 0 | for_each_dedup_cand(d, hash_entry, h) { |
4996 | 0 | cand_id = hash_entry->value; |
4997 | 0 | cand = btf_type_by_id(d->btf, cand_id); |
4998 | 0 | if (btf_equal_common(t, cand)) { |
4999 | 0 | new_id = cand_id; |
5000 | 0 | break; |
5001 | 0 | } |
5002 | 0 | } |
5003 | 0 | break; |
5004 | | |
5005 | 0 | case BTF_KIND_DECL_TAG: |
5006 | 0 | ref_type_id = btf_dedup_ref_type(d, t->type); |
5007 | 0 | if (ref_type_id < 0) |
5008 | 0 | return ref_type_id; |
5009 | 0 | t->type = ref_type_id; |
5010 | |
5011 | 0 | h = btf_hash_int_decl_tag(t); |
5012 | 0 | for_each_dedup_cand(d, hash_entry, h) { |
5013 | 0 | cand_id = hash_entry->value; |
5014 | 0 | cand = btf_type_by_id(d->btf, cand_id); |
5015 | 0 | if (btf_equal_int_tag(t, cand)) { |
5016 | 0 | new_id = cand_id; |
5017 | 0 | break; |
5018 | 0 | } |
5019 | 0 | } |
5020 | 0 | break; |
5021 | | |
5022 | 0 | case BTF_KIND_ARRAY: { |
5023 | 0 | struct btf_array *info = btf_array(t); |
5024 | |
5025 | 0 | ref_type_id = btf_dedup_ref_type(d, info->type); |
5026 | 0 | if (ref_type_id < 0) |
5027 | 0 | return ref_type_id; |
5028 | 0 | info->type = ref_type_id; |
5029 | |
5030 | 0 | ref_type_id = btf_dedup_ref_type(d, info->index_type); |
5031 | 0 | if (ref_type_id < 0) |
5032 | 0 | return ref_type_id; |
5033 | 0 | info->index_type = ref_type_id; |
5034 | |
5035 | 0 | h = btf_hash_array(t); |
5036 | 0 | for_each_dedup_cand(d, hash_entry, h) { |
5037 | 0 | cand_id = hash_entry->value; |
5038 | 0 | cand = btf_type_by_id(d->btf, cand_id); |
5039 | 0 | if (btf_equal_array(t, cand)) { |
5040 | 0 | new_id = cand_id; |
5041 | 0 | break; |
5042 | 0 | } |
5043 | 0 | } |
5044 | 0 | break; |
5045 | 0 | } |
5046 | | |
5047 | 0 | case BTF_KIND_FUNC_PROTO: { |
5048 | 0 | struct btf_param *param; |
5049 | 0 | __u16 vlen; |
5050 | 0 | int i; |
5051 | |
5052 | 0 | ref_type_id = btf_dedup_ref_type(d, t->type); |
5053 | 0 | if (ref_type_id < 0) |
5054 | 0 | return ref_type_id; |
5055 | 0 | t->type = ref_type_id; |
5056 | |
5057 | 0 | vlen = btf_vlen(t); |
5058 | 0 | param = btf_params(t); |
5059 | 0 | for (i = 0; i < vlen; i++) { |
5060 | 0 | ref_type_id = btf_dedup_ref_type(d, param->type); |
5061 | 0 | if (ref_type_id < 0) |
5062 | 0 | return ref_type_id; |
5063 | 0 | param->type = ref_type_id; |
5064 | 0 | param++; |
5065 | 0 | } |
5066 | | |
5067 | 0 | h = btf_hash_fnproto(t); |
5068 | 0 | for_each_dedup_cand(d, hash_entry, h) { |
5069 | 0 | cand_id = hash_entry->value; |
5070 | 0 | cand = btf_type_by_id(d->btf, cand_id); |
5071 | 0 | if (btf_equal_fnproto(t, cand)) { |
5072 | 0 | new_id = cand_id; |
5073 | 0 | break; |
5074 | 0 | } |
5075 | 0 | } |
5076 | 0 | break; |
5077 | 0 | } |
5078 | | |
5079 | 0 | default: |
5080 | 0 | return -EINVAL; |
5081 | 0 | } |
5082 | | |
5083 | 0 | d->map[type_id] = new_id; |
5084 | 0 | if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) |
5085 | 0 | return -ENOMEM; |
5086 | | |
5087 | 0 | return new_id; |
5088 | 0 | } |
5089 | | |
5090 | | static int btf_dedup_ref_types(struct btf_dedup *d) |
5091 | 0 | { |
5092 | 0 | int i, err; |
5093 | |
5094 | 0 | for (i = 0; i < d->btf->nr_types; i++) { |
5095 | 0 | err = btf_dedup_ref_type(d, d->btf->start_id + i); |
5096 | 0 | if (err < 0) |
5097 | 0 | return err; |
5098 | 0 | } |
5099 | | /* we won't need d->dedup_table anymore */ |
5100 | 0 | hashmap__free(d->dedup_table); |
5101 | 0 | d->dedup_table = NULL; |
5102 | 0 | return 0; |
5103 | 0 | } |
5104 | | |
5105 | | /* |
5106 | | * Collect a map from type names to type IDs for all canonical structs
5107 | | * and unions. If the same name is shared by several canonical types,
5108 | | * the special value 0 is used to indicate this fact.
5109 | | */ |
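/* For example (hypothetical IDs): if the only canonical struct named 'foo'
 * is type [10], names_map ends up containing { 'foo' -> 10 }. If two
 * distinct canonical structs are both named 'bar', the -EEXIST path below
 * rewrites the entry to { 'bar' -> 0 }, so the FWD resolution pass that
 * consumes this map will skip 'bar' as ambiguous.
 */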
5110 | | static int btf_dedup_fill_unique_names_map(struct btf_dedup *d, struct hashmap *names_map) |
5111 | 0 | { |
5112 | 0 | __u32 nr_types = btf__type_cnt(d->btf); |
5113 | 0 | struct btf_type *t; |
5114 | 0 | __u32 type_id; |
5115 | 0 | __u16 kind; |
5116 | 0 | int err; |
5117 | | |
5118 | | /*
5119 | | * Iterate over both base and split BTF type ids in order to get all
5120 | | * available structs/unions into the map.
5121 | | */
5122 | 0 | for (type_id = 1; type_id < nr_types; ++type_id) { |
5123 | 0 | t = btf_type_by_id(d->btf, type_id); |
5124 | 0 | kind = btf_kind(t); |
5125 | |
5126 | 0 | if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION) |
5127 | 0 | continue; |
5128 | | |
5129 | | /* Skip non-canonical types */ |
5130 | 0 | if (type_id != d->map[type_id]) |
5131 | 0 | continue; |
5132 | | |
5133 | 0 | err = hashmap__add(names_map, t->name_off, type_id); |
5134 | 0 | if (err == -EEXIST) |
5135 | 0 | err = hashmap__set(names_map, t->name_off, 0, NULL, NULL); |
5136 | |
5137 | 0 | if (err) |
5138 | 0 | return err; |
5139 | 0 | } |
5140 | | |
5141 | 0 | return 0; |
5142 | 0 | } |
5143 | | |
5144 | | static int btf_dedup_resolve_fwd(struct btf_dedup *d, struct hashmap *names_map, __u32 type_id) |
5145 | 0 | { |
5146 | 0 | struct btf_type *t = btf_type_by_id(d->btf, type_id); |
5147 | 0 | enum btf_fwd_kind fwd_kind = btf_kflag(t); |
5148 | 0 | __u16 cand_kind, kind = btf_kind(t); |
5149 | 0 | struct btf_type *cand_t; |
5150 | 0 | uintptr_t cand_id; |
5151 | |
5152 | 0 | if (kind != BTF_KIND_FWD) |
5153 | 0 | return 0; |
5154 | | |
5155 | | /* Skip if this FWD already has a mapping */ |
5156 | 0 | if (type_id != d->map[type_id]) |
5157 | 0 | return 0; |
5158 | | |
5159 | 0 | if (!hashmap__find(names_map, t->name_off, &cand_id)) |
5160 | 0 | return 0; |
5161 | | |
5162 | | /* Zero is a special value indicating that the name is not unique */
5163 | 0 | if (!cand_id) |
5164 | 0 | return 0; |
5165 | | |
5166 | 0 | cand_t = btf_type_by_id(d->btf, cand_id); |
5167 | 0 | cand_kind = btf_kind(cand_t); |
5168 | 0 | if ((cand_kind == BTF_KIND_STRUCT && fwd_kind != BTF_FWD_STRUCT) || |
5169 | 0 | (cand_kind == BTF_KIND_UNION && fwd_kind != BTF_FWD_UNION)) |
5170 | 0 | return 0; |
5171 | | |
5172 | 0 | d->map[type_id] = cand_id; |
5173 | |
5174 | 0 | return 0; |
5175 | 0 | } |
5176 | | |
5177 | | /* |
5178 | | * Resolve unambiguous forward declarations. |
5179 | | * |
5180 | | * The lion's share of all FWD declarations is resolved during the
5181 | | * `btf_dedup_struct_types` phase, when different type graphs are
5182 | | * compared against each other. However, if in some compilation unit a
5183 | | * FWD declaration is not part of any type graph compared against
5184 | | * another type graph, that declaration's canonical type would not be
5185 | | * changed. Example:
5186 | | * |
5187 | | * CU #1: |
5188 | | * |
5189 | | * struct foo; |
5190 | | * struct foo *some_global; |
5191 | | * |
5192 | | * CU #2: |
5193 | | * |
5194 | | * struct foo { int u; }; |
5195 | | * struct foo *another_global; |
5196 | | * |
5197 | | * After `btf_dedup_struct_types` the BTF looks as follows: |
5198 | | * |
5199 | | * [1] STRUCT 'foo' size=4 vlen=1 ... |
5200 | | * [2] INT 'int' size=4 ... |
5201 | | * [3] PTR '(anon)' type_id=1 |
5202 | | * [4] FWD 'foo' fwd_kind=struct |
5203 | | * [5] PTR '(anon)' type_id=4 |
5204 | | * |
5205 | | * This pass assumes that such FWD declarations should be mapped to
5206 | | * structs or unions with an identical name, provided that name is not
5207 | | * ambiguous.
5208 | | */ |
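/* Continuing the example above: 'foo' names exactly one canonical struct,
 * so this pass sets d->map[4] = 1 for the leftover FWD. Compaction then
 * drops the now non-canonical FWD [4], and remapping rewrites PTR [5] to
 * reference STRUCT [1], matching what a single CU would have produced.
 */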
5209 | | static int btf_dedup_resolve_fwds(struct btf_dedup *d) |
5210 | 0 | { |
5211 | 0 | int i, err; |
5212 | 0 | struct hashmap *names_map; |
5213 | |
5214 | 0 | names_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL); |
5215 | 0 | if (IS_ERR(names_map)) |
5216 | 0 | return PTR_ERR(names_map); |
5217 | | |
5218 | 0 | err = btf_dedup_fill_unique_names_map(d, names_map); |
5219 | 0 | if (err < 0) |
5220 | 0 | goto exit; |
5221 | | |
5222 | 0 | for (i = 0; i < d->btf->nr_types; i++) { |
5223 | 0 | err = btf_dedup_resolve_fwd(d, names_map, d->btf->start_id + i); |
5224 | 0 | if (err < 0) |
5225 | 0 | break; |
5226 | 0 | } |
5227 | |
5228 | 0 | exit: |
5229 | 0 | hashmap__free(names_map); |
5230 | 0 | return err; |
5231 | 0 | } |
5232 | | |
5233 | | /* |
5234 | | * Compact types. |
5235 | | * |
5236 | | * Having established for each type its corresponding canonical representative
5237 | | * type, we can now eliminate types that are not canonical and leave only
5238 | | * canonical ones laid out sequentially in memory by copying them over
5239 | | * duplicates. During compaction the btf_dedup->hypot_map array is reused to
5240 | | * store a map from original type ID to a new compacted type ID, which will be
5241 | | * used during the next phase to "fix up" type IDs referenced from struct/union
5242 | | * and reference types.
5243 | | */ |
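/* Worked example (hypothetical IDs, start_id == 1): given four types with
 * d->map = { [1]=1, [2]=2, [3]=1, [4]=4 }, only [1], [2] and [4] are kept.
 * They are copied over the hole left by [3] to stay contiguous, and
 * hypot_map becomes { [1]=1, [2]=2, [4]=3 }: old ID 4 compacts to new ID 3.
 */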
5244 | | static int btf_dedup_compact_types(struct btf_dedup *d) |
5245 | 0 | { |
5246 | 0 | __u32 *new_offs; |
5247 | 0 | __u32 next_type_id = d->btf->start_id; |
5248 | 0 | const struct btf_type *t; |
5249 | 0 | void *p; |
5250 | 0 | int i, id, len; |
5251 | | |
5252 | | /* we are going to reuse hypot_map to store compaction remapping */ |
5253 | 0 | d->hypot_map[0] = 0; |
5254 | | /* base BTF types are not renumbered */ |
5255 | 0 | for (id = 1; id < d->btf->start_id; id++) |
5256 | 0 | d->hypot_map[id] = id; |
5257 | 0 | for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) |
5258 | 0 | d->hypot_map[id] = BTF_UNPROCESSED_ID; |
5259 | |
5260 | 0 | p = d->btf->types_data; |
5261 | |
5262 | 0 | for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) { |
5263 | 0 | if (d->map[id] != id) |
5264 | 0 | continue; |
5265 | | |
5266 | 0 | t = btf__type_by_id(d->btf, id); |
5267 | 0 | len = btf_type_size(t); |
5268 | 0 | if (len < 0) |
5269 | 0 | return len; |
5270 | | |
5271 | 0 | memmove(p, t, len); |
5272 | 0 | d->hypot_map[id] = next_type_id; |
5273 | 0 | d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data; |
5274 | 0 | p += len; |
5275 | 0 | next_type_id++; |
5276 | 0 | } |
5277 | | |
5278 | | /* shrink struct btf's internal types index and update btf_header */ |
5279 | 0 | d->btf->nr_types = next_type_id - d->btf->start_id; |
5280 | 0 | d->btf->type_offs_cap = d->btf->nr_types; |
5281 | 0 | d->btf->hdr->type_len = p - d->btf->types_data; |
5282 | 0 | new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap, |
5283 | 0 | sizeof(*new_offs)); |
5284 | 0 | if (d->btf->type_offs_cap && !new_offs) |
5285 | 0 | return -ENOMEM; |
5286 | 0 | d->btf->type_offs = new_offs; |
5287 | 0 | d->btf->hdr->str_off = d->btf->hdr->type_len; |
5288 | 0 | d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len; |
5289 | 0 | return 0; |
5290 | 0 | } |
5291 | | |
5292 | | /* |
5293 | | * Figure out final (deduplicated and compacted) type ID for provided original |
5294 | | * `type_id` by first resolving it into corresponding canonical type ID and |
5295 | | * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map, |
5296 | | * which is populated during compaction phase. |
5297 | | */ |
5298 | | static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx) |
5299 | 0 | { |
5300 | 0 | struct btf_dedup *d = ctx; |
5301 | 0 | __u32 resolved_type_id, new_type_id; |
5302 | |
5303 | 0 | resolved_type_id = resolve_type_id(d, *type_id); |
5304 | 0 | new_type_id = d->hypot_map[resolved_type_id]; |
5305 | 0 | if (new_type_id > BTF_MAX_NR_TYPES) |
5306 | 0 | return -EINVAL; |
5307 | | |
5308 | 0 | *type_id = new_type_id; |
5309 | 0 | return 0; |
5310 | 0 | } |
5311 | | |
5312 | | /* |
5313 | | * Remap referenced type IDs into deduped type IDs. |
5314 | | * |
5315 | | * After BTF types are deduplicated and compacted, their final type IDs may
5316 | | * differ from the original ones. The map from an original type ID to its
5317 | | * corresponding deduped type ID is stored in btf_dedup->hypot_map and is
5318 | | * populated during the compaction phase. During the remapping phase we rewrite
5319 | | * all type IDs referenced from any BTF type (e.g., struct fields, func proto
5320 | | * args, etc) to their final deduped type IDs.
5321 | | */ |
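/* E.g., continuing the compaction example above: a PTR that referenced old
 * type 3 first resolves it through d->map to canonical ID 1, then looks up
 * hypot_map[1] == 1; one that referenced old type 4 resolves to 4 and is
 * rewritten to hypot_map[4] == 3, the compacted ID.
 */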
5322 | | static int btf_dedup_remap_types(struct btf_dedup *d) |
5323 | 0 | { |
5324 | 0 | int i, r; |
5325 | |
5326 | 0 | for (i = 0; i < d->btf->nr_types; i++) { |
5327 | 0 | struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i); |
5328 | 0 | struct btf_field_iter it; |
5329 | 0 | __u32 *type_id; |
5330 | |
5331 | 0 | r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); |
5332 | 0 | if (r) |
5333 | 0 | return r; |
5334 | | |
5335 | 0 | while ((type_id = btf_field_iter_next(&it))) { |
5336 | 0 | __u32 resolved_id, new_id; |
5337 | |
5338 | 0 | resolved_id = resolve_type_id(d, *type_id); |
5339 | 0 | new_id = d->hypot_map[resolved_id]; |
5340 | 0 | if (new_id > BTF_MAX_NR_TYPES) |
5341 | 0 | return -EINVAL; |
5342 | | |
5343 | 0 | *type_id = new_id; |
5344 | 0 | } |
5345 | 0 | } |
5346 | | |
5347 | 0 | if (!d->btf_ext) |
5348 | 0 | return 0; |
5349 | | |
5350 | 0 | r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d); |
5351 | 0 | if (r) |
5352 | 0 | return r; |
5353 | | |
5354 | 0 | return 0; |
5355 | 0 | } |
5356 | | |
5357 | | /* |
5358 | | * Probe a few well-known locations for the vmlinux kernel image and try to
5359 | | * load BTF data out of it to use as the target BTF.
5360 | | */ |
5361 | | struct btf *btf__load_vmlinux_btf(void) |
5362 | 0 | { |
5363 | 0 | const char *sysfs_btf_path = "/sys/kernel/btf/vmlinux"; |
5364 | | /* fallback locations, trying to find vmlinux on disk */
5365 | 0 | const char *locations[] = { |
5366 | 0 | "/boot/vmlinux-%1$s", |
5367 | 0 | "/lib/modules/%1$s/vmlinux-%1$s", |
5368 | 0 | "/lib/modules/%1$s/build/vmlinux", |
5369 | 0 | "/usr/lib/modules/%1$s/kernel/vmlinux", |
5370 | 0 | "/usr/lib/debug/boot/vmlinux-%1$s", |
5371 | 0 | "/usr/lib/debug/boot/vmlinux-%1$s.debug", |
5372 | 0 | "/usr/lib/debug/lib/modules/%1$s/vmlinux", |
5373 | 0 | }; |
5374 | 0 | char path[PATH_MAX + 1]; |
5375 | 0 | struct utsname buf; |
5376 | 0 | struct btf *btf; |
5377 | 0 | int i, err; |
5378 | | |
5379 | | /* is canonical sysfs location accessible? */ |
5380 | 0 | if (faccessat(AT_FDCWD, sysfs_btf_path, F_OK, AT_EACCESS) < 0) { |
5381 | 0 | pr_warn("kernel BTF is missing at '%s', was CONFIG_DEBUG_INFO_BTF enabled?\n", |
5382 | 0 | sysfs_btf_path); |
5383 | 0 | } else { |
5384 | 0 | btf = btf_parse_raw_mmap(sysfs_btf_path, NULL); |
5385 | 0 | if (IS_ERR(btf)) |
5386 | 0 | btf = btf__parse(sysfs_btf_path, NULL); |
5387 | |
5388 | 0 | if (!btf) { |
5389 | 0 | err = -errno; |
5390 | 0 | pr_warn("failed to read kernel BTF from '%s': %s\n", |
5391 | 0 | sysfs_btf_path, errstr(err)); |
5392 | 0 | return libbpf_err_ptr(err); |
5393 | 0 | } |
5394 | 0 | pr_debug("loaded kernel BTF from '%s'\n", sysfs_btf_path); |
5395 | 0 | return btf; |
5396 | 0 | } |
5397 | | |
5398 | | /* try fallback locations */ |
5399 | 0 | uname(&buf); |
5400 | 0 | for (i = 0; i < ARRAY_SIZE(locations); i++) { |
5401 | 0 | snprintf(path, PATH_MAX, locations[i], buf.release); |
5402 | |
5403 | 0 | if (faccessat(AT_FDCWD, path, R_OK, AT_EACCESS)) |
5404 | 0 | continue; |
5405 | | |
5406 | 0 | btf = btf__parse(path, NULL); |
5407 | 0 | err = libbpf_get_error(btf); |
5408 | 0 | pr_debug("loading kernel BTF '%s': %s\n", path, errstr(err)); |
5409 | 0 | if (err) |
5410 | 0 | continue; |
5411 | | |
5412 | 0 | return btf; |
5413 | 0 | } |
5414 | | |
5415 | 0 | pr_warn("failed to find valid kernel BTF\n"); |
5416 | 0 | return libbpf_err_ptr(-ESRCH); |
5417 | 0 | } |
5418 | | |
5419 | | struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf"))); |
5420 | | |
5421 | | struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf) |
5422 | 0 | { |
5423 | 0 | char path[80]; |
5424 | |
5425 | 0 | snprintf(path, sizeof(path), "/sys/kernel/btf/%s", module_name); |
5426 | 0 | return btf__parse_split(path, vmlinux_btf); |
5427 | 0 | } |
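/* A typical usage sketch for the two loaders above ("nf_conntrack" is just
 * an example module name; error handling follows the libbpf_get_error()
 * convention):
 *
 *	struct btf *vmlinux_btf, *module_btf;
 *	long err;
 *
 *	vmlinux_btf = btf__load_vmlinux_btf();
 *	err = libbpf_get_error(vmlinux_btf);
 *	if (err)
 *		return err;
 *	module_btf = btf__load_module_btf("nf_conntrack", vmlinux_btf);
 *	err = libbpf_get_error(module_btf);
 *	if (err) {
 *		btf__free(vmlinux_btf);
 *		return err;
 *	}
 *	// ... use module_btf (split) on top of vmlinux_btf (base) ...
 *	btf__free(module_btf);
 *	btf__free(vmlinux_btf);
 */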
5428 | | |
5429 | | int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx) |
5430 | 0 | { |
5431 | 0 | const struct btf_ext_info *seg; |
5432 | 0 | struct btf_ext_info_sec *sec; |
5433 | 0 | int i, err; |
5434 | |
5435 | 0 | seg = &btf_ext->func_info; |
5436 | 0 | for_each_btf_ext_sec(seg, sec) { |
5437 | 0 | struct bpf_func_info_min *rec; |
5438 | |
5439 | 0 | for_each_btf_ext_rec(seg, sec, i, rec) { |
5440 | 0 | err = visit(&rec->type_id, ctx); |
5441 | 0 | if (err < 0) |
5442 | 0 | return err; |
5443 | 0 | } |
5444 | 0 | } |
5445 | | |
5446 | 0 | seg = &btf_ext->core_relo_info; |
5447 | 0 | for_each_btf_ext_sec(seg, sec) { |
5448 | 0 | struct bpf_core_relo *rec; |
5449 | |
5450 | 0 | for_each_btf_ext_rec(seg, sec, i, rec) { |
5451 | 0 | err = visit(&rec->type_id, ctx); |
5452 | 0 | if (err < 0) |
5453 | 0 | return err; |
5454 | 0 | } |
5455 | 0 | } |
5456 | | |
5457 | 0 | return 0; |
5458 | 0 | } |
5459 | | |
5460 | | int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx) |
5461 | 0 | { |
5462 | 0 | const struct btf_ext_info *seg; |
5463 | 0 | struct btf_ext_info_sec *sec; |
5464 | 0 | int i, err; |
5465 | |
5466 | 0 | seg = &btf_ext->func_info; |
5467 | 0 | for_each_btf_ext_sec(seg, sec) { |
5468 | 0 | err = visit(&sec->sec_name_off, ctx); |
5469 | 0 | if (err) |
5470 | 0 | return err; |
5471 | 0 | } |
5472 | | |
5473 | 0 | seg = &btf_ext->line_info; |
5474 | 0 | for_each_btf_ext_sec(seg, sec) { |
5475 | 0 | struct bpf_line_info_min *rec; |
5476 | |
5477 | 0 | err = visit(&sec->sec_name_off, ctx); |
5478 | 0 | if (err) |
5479 | 0 | return err; |
5480 | | |
5481 | 0 | for_each_btf_ext_rec(seg, sec, i, rec) { |
5482 | 0 | err = visit(&rec->file_name_off, ctx); |
5483 | 0 | if (err) |
5484 | 0 | return err; |
5485 | 0 | err = visit(&rec->line_off, ctx); |
5486 | 0 | if (err) |
5487 | 0 | return err; |
5488 | 0 | } |
5489 | 0 | } |
5490 | | |
5491 | 0 | seg = &btf_ext->core_relo_info; |
5492 | 0 | for_each_btf_ext_sec(seg, sec) { |
5493 | 0 | struct bpf_core_relo *rec; |
5494 | |
|
5495 | 0 | err = visit(&sec->sec_name_off, ctx); |
5496 | 0 | if (err) |
5497 | 0 | return err; |
5498 | | |
5499 | 0 | for_each_btf_ext_rec(seg, sec, i, rec) { |
5500 | 0 | err = visit(&rec->access_str_off, ctx); |
5501 | 0 | if (err) |
5502 | 0 | return err; |
5503 | 0 | } |
5504 | 0 | } |
5505 | | |
5506 | 0 | return 0; |
5507 | 0 | } |
5508 | | |
5509 | | struct btf_distill { |
5510 | | struct btf_pipe pipe; |
5511 | | int *id_map; |
5512 | | unsigned int split_start_id; |
5513 | | unsigned int split_start_str; |
5514 | | int diff_id; |
5515 | | }; |
5516 | | |
5517 | | static int btf_add_distilled_type_ids(struct btf_distill *dist, __u32 i) |
5518 | 0 | { |
5519 | 0 | struct btf_type *split_t = btf_type_by_id(dist->pipe.src, i); |
5520 | 0 | struct btf_field_iter it; |
5521 | 0 | __u32 *id; |
5522 | 0 | int err; |
5523 | |
|
5524 | 0 | err = btf_field_iter_init(&it, split_t, BTF_FIELD_ITER_IDS); |
5525 | 0 | if (err) |
5526 | 0 | return err; |
5527 | 0 | while ((id = btf_field_iter_next(&it))) { |
5528 | 0 | struct btf_type *base_t; |
5529 | |
5530 | 0 | if (!*id) |
5531 | 0 | continue; |
5532 | | /* split BTF id, not needed */ |
5533 | 0 | if (*id >= dist->split_start_id) |
5534 | 0 | continue; |
5535 | | /* already added? */
5536 | 0 | if (dist->id_map[*id] > 0) |
5537 | 0 | continue; |
5538 | | |
5539 | | /* only a subset of base BTF types should be referenced from |
5540 | | * split BTF; ensure nothing unexpected is referenced. |
5541 | | */ |
5542 | 0 | base_t = btf_type_by_id(dist->pipe.src, *id); |
5543 | 0 | switch (btf_kind(base_t)) { |
5544 | 0 | case BTF_KIND_INT: |
5545 | 0 | case BTF_KIND_FLOAT: |
5546 | 0 | case BTF_KIND_FWD: |
5547 | 0 | case BTF_KIND_ARRAY: |
5548 | 0 | case BTF_KIND_STRUCT: |
5549 | 0 | case BTF_KIND_UNION: |
5550 | 0 | case BTF_KIND_TYPEDEF: |
5551 | 0 | case BTF_KIND_ENUM: |
5552 | 0 | case BTF_KIND_ENUM64: |
5553 | 0 | case BTF_KIND_PTR: |
5554 | 0 | case BTF_KIND_CONST: |
5555 | 0 | case BTF_KIND_RESTRICT: |
5556 | 0 | case BTF_KIND_VOLATILE: |
5557 | 0 | case BTF_KIND_FUNC_PROTO: |
5558 | 0 | case BTF_KIND_TYPE_TAG: |
5559 | 0 | dist->id_map[*id] = *id; |
5560 | 0 | break; |
5561 | 0 | default: |
5562 | 0 | pr_warn("unexpected reference to base type[%u] of kind [%u] when creating distilled base BTF.\n", |
5563 | 0 | *id, btf_kind(base_t)); |
5564 | 0 | return -EINVAL; |
5565 | 0 | } |
5566 | | /* If a base type is used, ensure types it refers to are |
5567 | | * marked as used also; so for example if we find a PTR to INT |
5568 | | * we need both the PTR and INT. |
5569 | | * |
5570 | | * The only exception is named struct/unions, since distilled |
5571 | | * base BTF composite types have no members. |
5572 | | */ |
5573 | 0 | if (btf_is_composite(base_t) && base_t->name_off) |
5574 | 0 | continue; |
5575 | 0 | err = btf_add_distilled_type_ids(dist, *id); |
5576 | 0 | if (err) |
5577 | 0 | return err; |
5578 | 0 | } |
5579 | 0 | return 0; |
5580 | 0 | } |
5581 | | |
5582 | | static int btf_add_distilled_types(struct btf_distill *dist) |
5583 | 0 | { |
5584 | 0 | bool adding_to_base = dist->pipe.dst->start_id == 1; |
5585 | 0 | int id = btf__type_cnt(dist->pipe.dst); |
5586 | 0 | struct btf_type *t; |
5587 | 0 | int i, err = 0; |
5588 | | |
5589 | | |
5590 | | /* Add types for each of the required references to either distilled |
5591 | | * base or split BTF, depending on type characteristics. |
5592 | | */ |
5593 | 0 | for (i = 1; i < dist->split_start_id; i++) { |
5594 | 0 | const char *name; |
5595 | 0 | int kind; |
5596 | |
5597 | 0 | if (!dist->id_map[i]) |
5598 | 0 | continue; |
5599 | 0 | t = btf_type_by_id(dist->pipe.src, i); |
5600 | 0 | kind = btf_kind(t); |
5601 | 0 | name = btf__name_by_offset(dist->pipe.src, t->name_off); |
5602 | |
5603 | 0 | switch (kind) { |
5604 | 0 | case BTF_KIND_INT: |
5605 | 0 | case BTF_KIND_FLOAT: |
5606 | 0 | case BTF_KIND_FWD: |
5607 | | /* Named int, float, fwd are added to base. */ |
5608 | 0 | if (!adding_to_base) |
5609 | 0 | continue; |
5610 | 0 | err = btf_add_type(&dist->pipe, t); |
5611 | 0 | break; |
5612 | 0 | case BTF_KIND_STRUCT: |
5613 | 0 | case BTF_KIND_UNION: |
5614 | | /* Named struct/union are added to base as 0-vlen |
5615 | | * struct/union of same size. Anonymous struct/unions |
5616 | | * are added to split BTF as-is. |
5617 | | */ |
5618 | 0 | if (adding_to_base) { |
5619 | 0 | if (!t->name_off) |
5620 | 0 | continue; |
5621 | 0 | err = btf_add_composite(dist->pipe.dst, kind, name, t->size); |
5622 | 0 | } else { |
5623 | 0 | if (t->name_off) |
5624 | 0 | continue; |
5625 | 0 | err = btf_add_type(&dist->pipe, t); |
5626 | 0 | } |
5627 | 0 | break; |
5628 | 0 | case BTF_KIND_ENUM: |
5629 | 0 | case BTF_KIND_ENUM64: |
5630 | | /* Named enum[64]s are added to base as a sized |
5631 | | * enum; relocation will match with appropriately-named |
5632 | | * and sized enum or enum64. |
5633 | | * |
5634 | | * Anonymous enums are added to split BTF as-is. |
5635 | | */ |
5636 | 0 | if (adding_to_base) { |
5637 | 0 | if (!t->name_off) |
5638 | 0 | continue; |
5639 | 0 | err = btf__add_enum(dist->pipe.dst, name, t->size); |
5640 | 0 | } else { |
5641 | 0 | if (t->name_off) |
5642 | 0 | continue; |
5643 | 0 | err = btf_add_type(&dist->pipe, t); |
5644 | 0 | } |
5645 | 0 | break; |
5646 | 0 | case BTF_KIND_ARRAY: |
5647 | 0 | case BTF_KIND_TYPEDEF: |
5648 | 0 | case BTF_KIND_PTR: |
5649 | 0 | case BTF_KIND_CONST: |
5650 | 0 | case BTF_KIND_RESTRICT: |
5651 | 0 | case BTF_KIND_VOLATILE: |
5652 | 0 | case BTF_KIND_FUNC_PROTO: |
5653 | 0 | case BTF_KIND_TYPE_TAG: |
5654 | | /* All other types are added to split BTF. */ |
5655 | 0 | if (adding_to_base) |
5656 | 0 | continue; |
5657 | 0 | err = btf_add_type(&dist->pipe, t); |
5658 | 0 | break; |
5659 | 0 | default: |
5660 | 0 | pr_warn("unexpected kind when adding base type '%s'[%u] of kind [%u] to distilled base BTF.\n", |
5661 | 0 | name, i, kind); |
5662 | 0 | return -EINVAL; |
5663 | |
5664 | 0 | } |
5665 | 0 | if (err < 0) |
5666 | 0 | break; |
5667 | 0 | dist->id_map[i] = id++; |
5668 | 0 | } |
5669 | 0 | return err; |
5670 | 0 | } |
5671 | | |
5672 | | /* Split BTF ids without a mapping will be shifted downwards since the
5673 | | * distilled base BTF is smaller than the original base BTF. For those that
5674 | | * have a mapping (either to base or updated split BTF), update the id based
5675 | | * on that mapping.
5676 | | */
5677 | | static int btf_update_distilled_type_ids(struct btf_distill *dist, __u32 i) |
5678 | 0 | { |
5679 | 0 | struct btf_type *t = btf_type_by_id(dist->pipe.dst, i); |
5680 | 0 | struct btf_field_iter it; |
5681 | 0 | __u32 *id; |
5682 | 0 | int err; |
5683 | |
5684 | 0 | err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); |
5685 | 0 | if (err) |
5686 | 0 | return err; |
5687 | 0 | while ((id = btf_field_iter_next(&it))) { |
5688 | 0 | if (dist->id_map[*id]) |
5689 | 0 | *id = dist->id_map[*id]; |
5690 | 0 | else if (*id >= dist->split_start_id) |
5691 | 0 | *id -= dist->diff_id; |
5692 | 0 | } |
5693 | 0 | return 0; |
5694 | 0 | } |
5695 | | |
5696 | | /* Create updated split BTF with distilled base BTF; distilled base BTF |
5697 | | * consists of BTF information required to clarify the types that split |
5698 | | * BTF refers to, omitting unneeded details. Specifically it will contain |
5699 | | * base types and memberless definitions of named structs, unions and enumerated |
5700 | | * types. Associated reference types like pointers, arrays and anonymous |
5701 | | * structs, unions and enumerated types will be added to split BTF. |
5702 | | * Size is recorded for named struct/unions to help guide matching to the |
5703 | | * target base BTF during later relocation. |
5704 | | * |
5705 | | * The only case where structs, unions or enumerated types are fully represented |
5706 | | * is when they are anonymous; in such cases, the anonymous type is added to |
5707 | | * split BTF in full. |
5708 | | * |
5709 | | * We return the newly-created split BTF, which refers to a newly-created
5710 | | * distilled base BTF. Both must be freed separately by the caller.
5711 | | */ |
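/* Minimal usage sketch, assuming src_btf is split BTF (e.g., module BTF on
 * top of vmlinux BTF):
 *
 *	struct btf *new_base = NULL, *new_split = NULL;
 *	int err;
 *
 *	err = btf__distill_base(src_btf, &new_base, &new_split);
 *	if (err)
 *		return err;
 *	// ... serialize new_split together with its distilled new_base ...
 *	btf__free(new_split);
 *	btf__free(new_base);
 */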
5712 | | int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf, |
5713 | | struct btf **new_split_btf) |
5714 | 0 | { |
5715 | 0 | struct btf *new_base = NULL, *new_split = NULL; |
5716 | 0 | const struct btf *old_base; |
5717 | 0 | unsigned int n = btf__type_cnt(src_btf); |
5718 | 0 | struct btf_distill dist = {}; |
5719 | 0 | struct btf_type *t; |
5720 | 0 | int i, err = 0; |
5721 | | |
5722 | | /* src BTF must be split BTF. */ |
5723 | 0 | old_base = btf__base_btf(src_btf); |
5724 | 0 | if (!new_base_btf || !new_split_btf || !old_base) |
5725 | 0 | return libbpf_err(-EINVAL); |
5726 | | |
5727 | 0 | new_base = btf__new_empty(); |
5728 | 0 | if (!new_base) |
5729 | 0 | return libbpf_err(-ENOMEM); |
5730 | | |
5731 | 0 | btf__set_endianness(new_base, btf__endianness(src_btf)); |
5732 | |
5733 | 0 | dist.id_map = calloc(n, sizeof(*dist.id_map)); |
5734 | 0 | if (!dist.id_map) { |
5735 | 0 | err = -ENOMEM; |
5736 | 0 | goto done; |
5737 | 0 | } |
5738 | 0 | dist.pipe.src = src_btf; |
5739 | 0 | dist.pipe.dst = new_base; |
5740 | 0 | dist.pipe.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL); |
5741 | 0 | if (IS_ERR(dist.pipe.str_off_map)) { |
5742 | 0 | err = -ENOMEM; |
5743 | 0 | goto done; |
5744 | 0 | } |
5745 | 0 | dist.split_start_id = btf__type_cnt(old_base); |
5746 | 0 | dist.split_start_str = old_base->hdr->str_len; |
5747 | | |
5748 | | /* Pass over src split BTF; generate the list of base BTF type ids it |
5749 | | * references; these will constitute our distilled BTF set to be |
5750 | | * distributed over base and split BTF as appropriate. |
5751 | | */ |
5752 | 0 | for (i = src_btf->start_id; i < n; i++) { |
5753 | 0 | err = btf_add_distilled_type_ids(&dist, i); |
5754 | 0 | if (err < 0) |
5755 | 0 | goto done; |
5756 | 0 | } |
5757 | | /* Next add types for each of the required references to base BTF and split BTF |
5758 | | * in turn. |
5759 | | */ |
5760 | 0 | err = btf_add_distilled_types(&dist); |
5761 | 0 | if (err < 0) |
5762 | 0 | goto done; |
5763 | | |
5764 | | /* Create new split BTF with distilled base BTF as its base; the final |
5765 | | * state is split BTF with distilled base BTF that represents enough |
5766 | | * about its base references to allow it to be relocated with the base |
5767 | | * BTF available. |
5768 | | */ |
5769 | 0 | new_split = btf__new_empty_split(new_base); |
5770 | 0 | if (!new_split) { |
5771 | 0 | err = -errno; |
5772 | 0 | goto done; |
5773 | 0 | } |
5774 | 0 | dist.pipe.dst = new_split; |
5775 | | /* First add all split types */ |
5776 | 0 | for (i = src_btf->start_id; i < n; i++) { |
5777 | 0 | t = btf_type_by_id(src_btf, i); |
5778 | 0 | err = btf_add_type(&dist.pipe, t); |
5779 | 0 | if (err < 0) |
5780 | 0 | goto done; |
5781 | 0 | } |
5782 | | /* Now add distilled types to split BTF that are not added to base. */ |
5783 | 0 | err = btf_add_distilled_types(&dist); |
5784 | 0 | if (err < 0) |
5785 | 0 | goto done; |
5786 | | |
5787 | | /* All split BTF ids will be shifted downwards since there are fewer base
5788 | | * BTF ids in the distilled base BTF.
5789 | | */ |
5790 | 0 | dist.diff_id = dist.split_start_id - btf__type_cnt(new_base); |
5791 | |
5792 | 0 | n = btf__type_cnt(new_split); |
5793 | | /* Now update base/split BTF ids. */ |
5794 | 0 | for (i = 1; i < n; i++) { |
5795 | 0 | err = btf_update_distilled_type_ids(&dist, i); |
5796 | 0 | if (err < 0) |
5797 | 0 | break; |
5798 | 0 | } |
5799 | 0 | done: |
5800 | 0 | free(dist.id_map); |
5801 | 0 | hashmap__free(dist.pipe.str_off_map); |
5802 | 0 | if (err) { |
5803 | 0 | btf__free(new_split); |
5804 | 0 | btf__free(new_base); |
5805 | 0 | return libbpf_err(err); |
5806 | 0 | } |
5807 | 0 | *new_base_btf = new_base; |
5808 | 0 | *new_split_btf = new_split; |
5809 | |
5810 | 0 | return 0; |
5811 | 0 | } |
5812 | | |
5813 | | const struct btf_header *btf_header(const struct btf *btf) |
5814 | 0 | { |
5815 | 0 | return btf->hdr; |
5816 | 0 | } |
5817 | | |
5818 | | void btf_set_base_btf(struct btf *btf, const struct btf *base_btf) |
5819 | 0 | { |
5820 | 0 | btf->base_btf = (struct btf *)base_btf; |
5821 | 0 | btf->start_id = btf__type_cnt(base_btf); |
5822 | 0 | btf->start_str_off = base_btf->hdr->str_len; |
5823 | 0 | } |
5824 | | |
5825 | | int btf__relocate(struct btf *btf, const struct btf *base_btf) |
5826 | 0 | { |
5827 | 0 | int err = btf_relocate(btf, base_btf, NULL); |
5828 | |
|
5829 | 0 | if (!err) |
5830 | 0 | btf->owns_base = false; |
5831 | 0 | return libbpf_err(err); |
5832 | 0 | } |
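/* A hedged end-to-end sketch tying distillation and relocation together:
 * split BTF distilled at build time can later be attached to the running
 * kernel's BTF (loaded as shown earlier):
 *
 *	struct btf *vmlinux_btf = btf__load_vmlinux_btf();
 *	int err = btf__relocate(new_split, vmlinux_btf);
 *	if (!err)
 *		// new_split now references vmlinux_btf as its base
 *
 * On success btf__relocate() clears owns_base, since the relocated BTF no
 * longer owns its old distilled base.
 */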