Line | Count | Source |
1 | | // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) |
2 | | |
3 | | /* |
4 | | * Common eBPF ELF object loading operations. |
5 | | * |
6 | | * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org> |
7 | | * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> |
8 | | * Copyright (C) 2015 Huawei Inc. |
9 | | * Copyright (C) 2017 Nicira, Inc. |
10 | | * Copyright (C) 2019 Isovalent, Inc. |
11 | | */ |
12 | | |
13 | | #ifndef _GNU_SOURCE |
14 | | #define _GNU_SOURCE |
15 | | #endif |
16 | | #include <stdlib.h> |
17 | | #include <stdio.h> |
18 | | #include <stdarg.h> |
19 | | #include <libgen.h> |
20 | | #include <inttypes.h> |
21 | | #include <limits.h> |
22 | | #include <string.h> |
23 | | #include <unistd.h> |
24 | | #include <endian.h> |
25 | | #include <fcntl.h> |
26 | | #include <errno.h> |
27 | | #include <ctype.h> |
28 | | #include <asm/unistd.h> |
29 | | #include <linux/err.h> |
30 | | #include <linux/kernel.h> |
31 | | #include <linux/bpf.h> |
32 | | #include <linux/btf.h> |
33 | | #include <linux/filter.h> |
34 | | #include <linux/limits.h> |
35 | | #include <linux/perf_event.h> |
36 | | #include <linux/bpf_perf_event.h> |
37 | | #include <linux/ring_buffer.h> |
38 | | #include <sys/epoll.h> |
39 | | #include <sys/ioctl.h> |
40 | | #include <sys/mman.h> |
41 | | #include <sys/stat.h> |
42 | | #include <sys/types.h> |
43 | | #include <sys/vfs.h> |
44 | | #include <sys/utsname.h> |
45 | | #include <sys/resource.h> |
46 | | #include <libelf.h> |
47 | | #include <gelf.h> |
48 | | #include <zlib.h> |
49 | | |
50 | | #include "libbpf.h" |
51 | | #include "bpf.h" |
52 | | #include "btf.h" |
53 | | #include "str_error.h" |
54 | | #include "libbpf_internal.h" |
55 | | #include "hashmap.h" |
56 | | #include "bpf_gen_internal.h" |
57 | | #include "zip.h" |
58 | | |
59 | | #ifndef BPF_FS_MAGIC |
60 | 0 | #define BPF_FS_MAGIC 0xcafe4a11 |
61 | | #endif |
62 | | |
63 | | #define MAX_EVENT_NAME_LEN 64 |
64 | | |
65 | 1 | #define BPF_FS_DEFAULT_PATH "/sys/fs/bpf" |
66 | | |
67 | 90.0k | #define BPF_INSN_SZ (sizeof(struct bpf_insn)) |
68 | | |
69 | | /* vfprintf() in __base_pr() uses a nonliteral format string. It may break |
70 | | * compilation if the user enables the corresponding warning. Disable it explicitly. |
71 | | */ |
72 | | #pragma GCC diagnostic ignored "-Wformat-nonliteral" |
73 | | |
74 | | #define __printf(a, b) __attribute__((format(printf, a, b))) |
75 | | |
76 | | static struct bpf_map *bpf_object__add_map(struct bpf_object *obj); |
77 | | static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog); |
78 | | static int map_set_def_max_entries(struct bpf_map *map); |
79 | | |
80 | | static const char * const attach_type_name[] = { |
81 | | [BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress", |
82 | | [BPF_CGROUP_INET_EGRESS] = "cgroup_inet_egress", |
83 | | [BPF_CGROUP_INET_SOCK_CREATE] = "cgroup_inet_sock_create", |
84 | | [BPF_CGROUP_INET_SOCK_RELEASE] = "cgroup_inet_sock_release", |
85 | | [BPF_CGROUP_SOCK_OPS] = "cgroup_sock_ops", |
86 | | [BPF_CGROUP_DEVICE] = "cgroup_device", |
87 | | [BPF_CGROUP_INET4_BIND] = "cgroup_inet4_bind", |
88 | | [BPF_CGROUP_INET6_BIND] = "cgroup_inet6_bind", |
89 | | [BPF_CGROUP_INET4_CONNECT] = "cgroup_inet4_connect", |
90 | | [BPF_CGROUP_INET6_CONNECT] = "cgroup_inet6_connect", |
91 | | [BPF_CGROUP_UNIX_CONNECT] = "cgroup_unix_connect", |
92 | | [BPF_CGROUP_INET4_POST_BIND] = "cgroup_inet4_post_bind", |
93 | | [BPF_CGROUP_INET6_POST_BIND] = "cgroup_inet6_post_bind", |
94 | | [BPF_CGROUP_INET4_GETPEERNAME] = "cgroup_inet4_getpeername", |
95 | | [BPF_CGROUP_INET6_GETPEERNAME] = "cgroup_inet6_getpeername", |
96 | | [BPF_CGROUP_UNIX_GETPEERNAME] = "cgroup_unix_getpeername", |
97 | | [BPF_CGROUP_INET4_GETSOCKNAME] = "cgroup_inet4_getsockname", |
98 | | [BPF_CGROUP_INET6_GETSOCKNAME] = "cgroup_inet6_getsockname", |
99 | | [BPF_CGROUP_UNIX_GETSOCKNAME] = "cgroup_unix_getsockname", |
100 | | [BPF_CGROUP_UDP4_SENDMSG] = "cgroup_udp4_sendmsg", |
101 | | [BPF_CGROUP_UDP6_SENDMSG] = "cgroup_udp6_sendmsg", |
102 | | [BPF_CGROUP_UNIX_SENDMSG] = "cgroup_unix_sendmsg", |
103 | | [BPF_CGROUP_SYSCTL] = "cgroup_sysctl", |
104 | | [BPF_CGROUP_UDP4_RECVMSG] = "cgroup_udp4_recvmsg", |
105 | | [BPF_CGROUP_UDP6_RECVMSG] = "cgroup_udp6_recvmsg", |
106 | | [BPF_CGROUP_UNIX_RECVMSG] = "cgroup_unix_recvmsg", |
107 | | [BPF_CGROUP_GETSOCKOPT] = "cgroup_getsockopt", |
108 | | [BPF_CGROUP_SETSOCKOPT] = "cgroup_setsockopt", |
109 | | [BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser", |
110 | | [BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict", |
111 | | [BPF_SK_SKB_VERDICT] = "sk_skb_verdict", |
112 | | [BPF_SK_MSG_VERDICT] = "sk_msg_verdict", |
113 | | [BPF_LIRC_MODE2] = "lirc_mode2", |
114 | | [BPF_FLOW_DISSECTOR] = "flow_dissector", |
115 | | [BPF_TRACE_RAW_TP] = "trace_raw_tp", |
116 | | [BPF_TRACE_FENTRY] = "trace_fentry", |
117 | | [BPF_TRACE_FEXIT] = "trace_fexit", |
118 | | [BPF_MODIFY_RETURN] = "modify_return", |
119 | | [BPF_LSM_MAC] = "lsm_mac", |
120 | | [BPF_LSM_CGROUP] = "lsm_cgroup", |
121 | | [BPF_SK_LOOKUP] = "sk_lookup", |
122 | | [BPF_TRACE_ITER] = "trace_iter", |
123 | | [BPF_XDP_DEVMAP] = "xdp_devmap", |
124 | | [BPF_XDP_CPUMAP] = "xdp_cpumap", |
125 | | [BPF_XDP] = "xdp", |
126 | | [BPF_SK_REUSEPORT_SELECT] = "sk_reuseport_select", |
127 | | [BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_reuseport_select_or_migrate", |
128 | | [BPF_PERF_EVENT] = "perf_event", |
129 | | [BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi", |
130 | | [BPF_STRUCT_OPS] = "struct_ops", |
131 | | [BPF_NETFILTER] = "netfilter", |
132 | | [BPF_TCX_INGRESS] = "tcx_ingress", |
133 | | [BPF_TCX_EGRESS] = "tcx_egress", |
134 | | [BPF_TRACE_UPROBE_MULTI] = "trace_uprobe_multi", |
135 | | [BPF_NETKIT_PRIMARY] = "netkit_primary", |
136 | | [BPF_NETKIT_PEER] = "netkit_peer", |
137 | | [BPF_TRACE_KPROBE_SESSION] = "trace_kprobe_session", |
138 | | [BPF_TRACE_UPROBE_SESSION] = "trace_uprobe_session", |
139 | | }; |
140 | | |
141 | | static const char * const link_type_name[] = { |
142 | | [BPF_LINK_TYPE_UNSPEC] = "unspec", |
143 | | [BPF_LINK_TYPE_RAW_TRACEPOINT] = "raw_tracepoint", |
144 | | [BPF_LINK_TYPE_TRACING] = "tracing", |
145 | | [BPF_LINK_TYPE_CGROUP] = "cgroup", |
146 | | [BPF_LINK_TYPE_ITER] = "iter", |
147 | | [BPF_LINK_TYPE_NETNS] = "netns", |
148 | | [BPF_LINK_TYPE_XDP] = "xdp", |
149 | | [BPF_LINK_TYPE_PERF_EVENT] = "perf_event", |
150 | | [BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi", |
151 | | [BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops", |
152 | | [BPF_LINK_TYPE_NETFILTER] = "netfilter", |
153 | | [BPF_LINK_TYPE_TCX] = "tcx", |
154 | | [BPF_LINK_TYPE_UPROBE_MULTI] = "uprobe_multi", |
155 | | [BPF_LINK_TYPE_NETKIT] = "netkit", |
156 | | [BPF_LINK_TYPE_SOCKMAP] = "sockmap", |
157 | | }; |
158 | | |
159 | | static const char * const map_type_name[] = { |
160 | | [BPF_MAP_TYPE_UNSPEC] = "unspec", |
161 | | [BPF_MAP_TYPE_HASH] = "hash", |
162 | | [BPF_MAP_TYPE_ARRAY] = "array", |
163 | | [BPF_MAP_TYPE_PROG_ARRAY] = "prog_array", |
164 | | [BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array", |
165 | | [BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash", |
166 | | [BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array", |
167 | | [BPF_MAP_TYPE_STACK_TRACE] = "stack_trace", |
168 | | [BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array", |
169 | | [BPF_MAP_TYPE_LRU_HASH] = "lru_hash", |
170 | | [BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash", |
171 | | [BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie", |
172 | | [BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps", |
173 | | [BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps", |
174 | | [BPF_MAP_TYPE_DEVMAP] = "devmap", |
175 | | [BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash", |
176 | | [BPF_MAP_TYPE_SOCKMAP] = "sockmap", |
177 | | [BPF_MAP_TYPE_CPUMAP] = "cpumap", |
178 | | [BPF_MAP_TYPE_XSKMAP] = "xskmap", |
179 | | [BPF_MAP_TYPE_SOCKHASH] = "sockhash", |
180 | | [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage", |
181 | | [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray", |
182 | | [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage", |
183 | | [BPF_MAP_TYPE_QUEUE] = "queue", |
184 | | [BPF_MAP_TYPE_STACK] = "stack", |
185 | | [BPF_MAP_TYPE_SK_STORAGE] = "sk_storage", |
186 | | [BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops", |
187 | | [BPF_MAP_TYPE_RINGBUF] = "ringbuf", |
188 | | [BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage", |
189 | | [BPF_MAP_TYPE_TASK_STORAGE] = "task_storage", |
190 | | [BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter", |
191 | | [BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf", |
192 | | [BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage", |
193 | | [BPF_MAP_TYPE_ARENA] = "arena", |
194 | | }; |
195 | | |
196 | | static const char * const prog_type_name[] = { |
197 | | [BPF_PROG_TYPE_UNSPEC] = "unspec", |
198 | | [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter", |
199 | | [BPF_PROG_TYPE_KPROBE] = "kprobe", |
200 | | [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls", |
201 | | [BPF_PROG_TYPE_SCHED_ACT] = "sched_act", |
202 | | [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint", |
203 | | [BPF_PROG_TYPE_XDP] = "xdp", |
204 | | [BPF_PROG_TYPE_PERF_EVENT] = "perf_event", |
205 | | [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb", |
206 | | [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock", |
207 | | [BPF_PROG_TYPE_LWT_IN] = "lwt_in", |
208 | | [BPF_PROG_TYPE_LWT_OUT] = "lwt_out", |
209 | | [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit", |
210 | | [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops", |
211 | | [BPF_PROG_TYPE_SK_SKB] = "sk_skb", |
212 | | [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device", |
213 | | [BPF_PROG_TYPE_SK_MSG] = "sk_msg", |
214 | | [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint", |
215 | | [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr", |
216 | | [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local", |
217 | | [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2", |
218 | | [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport", |
219 | | [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector", |
220 | | [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl", |
221 | | [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable", |
222 | | [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt", |
223 | | [BPF_PROG_TYPE_TRACING] = "tracing", |
224 | | [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops", |
225 | | [BPF_PROG_TYPE_EXT] = "ext", |
226 | | [BPF_PROG_TYPE_LSM] = "lsm", |
227 | | [BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup", |
228 | | [BPF_PROG_TYPE_SYSCALL] = "syscall", |
229 | | [BPF_PROG_TYPE_NETFILTER] = "netfilter", |
230 | | }; |
231 | | |
232 | | static int __base_pr(enum libbpf_print_level level, const char *format, |
233 | | va_list args) |
234 | 0 | { |
235 | 0 | const char *env_var = "LIBBPF_LOG_LEVEL"; |
236 | 0 | static enum libbpf_print_level min_level = LIBBPF_INFO; |
237 | 0 | static bool initialized; |
238 | | |
239 | 0 | if (!initialized) { |
240 | 0 | char *verbosity; |
241 | | |
242 | 0 | initialized = true; |
243 | 0 | verbosity = getenv(env_var); |
244 | 0 | if (verbosity) { |
245 | 0 | if (strcasecmp(verbosity, "warn") == 0) |
246 | 0 | min_level = LIBBPF_WARN; |
247 | 0 | else if (strcasecmp(verbosity, "debug") == 0) |
248 | 0 | min_level = LIBBPF_DEBUG; |
249 | 0 | else if (strcasecmp(verbosity, "info") == 0) |
250 | 0 | min_level = LIBBPF_INFO; |
251 | 0 | else |
252 | 0 | fprintf(stderr, "libbpf: unrecognized '%s' envvar value: '%s', should be one of 'warn', 'debug', or 'info'.\n", |
253 | 0 | env_var, verbosity); |
254 | 0 | } |
255 | 0 | } |
256 | | |
257 | | /* if too verbose, skip logging */ |
258 | 0 | if (level > min_level) |
259 | 0 | return 0; |
260 | | |
261 | 0 | return vfprintf(stderr, format, args); |
262 | 0 | } |
263 | | |
264 | | static libbpf_print_fn_t __libbpf_pr = __base_pr; |
265 | | |
266 | | libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn) |
267 | 11.5k | { |
268 | 11.5k | libbpf_print_fn_t old_print_fn; |
269 | | |
270 | 11.5k | old_print_fn = __atomic_exchange_n(&__libbpf_pr, fn, __ATOMIC_RELAXED); |
271 | | |
272 | 11.5k | return old_print_fn; |
273 | 11.5k | } |
274 | | |
275 | | __printf(2, 3) |
276 | | void libbpf_print(enum libbpf_print_level level, const char *format, ...) |
277 | 130k | { |
278 | 130k | va_list args; |
279 | 130k | int old_errno; |
280 | 130k | libbpf_print_fn_t print_fn; |
281 | | |
282 | 130k | print_fn = __atomic_load_n(&__libbpf_pr, __ATOMIC_RELAXED); |
283 | 130k | if (!print_fn) |
284 | 0 | return; |
285 | | |
286 | 130k | old_errno = errno; |
287 | | |
288 | 130k | va_start(args, format); |
289 | 130k | print_fn(level, format, args); |
290 | 130k | va_end(args); |
291 | | |
292 | 130k | errno = old_errno; |
293 | 130k | } |
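
The two functions above are libbpf's entire logging pipeline: libbpf_set_print() atomically swaps the callback pointer, and every pr_warn()/pr_debug() in the library funnels through libbpf_print() into it. A minimal sketch of how an application hooks this path through the public API (the filtering policy shown is illustrative, not libbpf's default):

#include <stdarg.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static int my_print(enum libbpf_print_level level, const char *format, va_list args)
{
	if (level == LIBBPF_DEBUG)	/* drop debug-level chatter */
		return 0;
	return vfprintf(stderr, format, args);
}

int main(void)
{
	libbpf_print_fn_t prev = libbpf_set_print(my_print);
	/* ... open and load BPF objects; warnings now go through my_print() ... */
	libbpf_set_print(prev);		/* restore the previous callback */
	return 0;
}
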
294 | | |
295 | | static void pr_perm_msg(int err) |
296 | 0 | { |
297 | 0 | struct rlimit limit; |
298 | 0 | char buf[100]; |
299 | | |
300 | 0 | if (err != -EPERM || geteuid() != 0) |
301 | 0 | return; |
302 | | |
303 | 0 | err = getrlimit(RLIMIT_MEMLOCK, &limit); |
304 | 0 | if (err) |
305 | 0 | return; |
306 | | |
307 | 0 | if (limit.rlim_cur == RLIM_INFINITY) |
308 | 0 | return; |
309 | | |
310 | 0 | if (limit.rlim_cur < 1024) |
311 | 0 | snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur); |
312 | 0 | else if (limit.rlim_cur < 1024*1024) |
313 | 0 | snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024); |
314 | 0 | else |
315 | 0 | snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024)); |
316 | | |
317 | 0 | pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n", |
318 | 0 | buf); |
319 | 0 | } |
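
pr_perm_msg() hints at 'ulimit -l' because, on kernels before memcg-based accounting (pre-5.11), BPF map and program memory is charged against RLIMIT_MEMLOCK. A hedged sketch of what a loader can do programmatically instead of relying on the shell limit:

#include <sys/resource.h>

/* raise the memlock limit for the current process; the equivalent of
 * 'ulimit -l unlimited' (requires CAP_SYS_RESOURCE or root)
 */
static int bump_memlock_rlimit(void)
{
	struct rlimit rlim = {
		.rlim_cur = RLIM_INFINITY,
		.rlim_max = RLIM_INFINITY,
	};

	return setrlimit(RLIMIT_MEMLOCK, &rlim); /* 0 on success, -1 + errno on failure */
}
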
320 | | |
321 | | #define STRERR_BUFSIZE 128 |
322 | | |
323 | | /* Copied from tools/perf/util/util.h */ |
324 | | #ifndef zfree |
325 | 220k | # define zfree(ptr) ({ free(*ptr); *ptr = NULL; }) |
326 | | #endif |
327 | | |
328 | | #ifndef zclose |
329 | 32.8k | # define zclose(fd) ({ \ |
330 | 32.8k | int ___err = 0; \ |
331 | 32.8k | if ((fd) >= 0) \ |
332 | 32.8k | ___err = close((fd)); \ |
333 | 32.8k | fd = -1; \ |
334 | 32.8k | ___err; }) |
335 | | #endif |
336 | | |
337 | | static inline __u64 ptr_to_u64(const void *ptr) |
338 | 0 | { |
339 | 0 | return (__u64) (unsigned long) ptr; |
340 | 0 | } |
341 | | |
342 | | int libbpf_set_strict_mode(enum libbpf_strict_mode mode) |
343 | 0 | { |
344 | | /* as of v1.0 libbpf_set_strict_mode() is a no-op */ |
345 | 0 | return 0; |
346 | 0 | } |
347 | | |
348 | | __u32 libbpf_major_version(void) |
349 | 0 | { |
350 | 0 | return LIBBPF_MAJOR_VERSION; |
351 | 0 | } |
352 | | |
353 | | __u32 libbpf_minor_version(void) |
354 | 0 | { |
355 | 0 | return LIBBPF_MINOR_VERSION; |
356 | 0 | } |
357 | | |
358 | | const char *libbpf_version_string(void) |
359 | 0 | { |
360 | 0 | #define __S(X) #X |
361 | 0 | #define _S(X) __S(X) |
362 | 0 | return "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION); |
363 | 0 | #undef _S |
364 | 0 | #undef __S |
365 | 0 | } |
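
The two-level __S()/_S() macro pair above is the standard stringification idiom: '#' stringifies its argument before macro expansion, so a single level would yield the macro's name rather than its value. A stand-alone illustration (MAJOR/MINOR are stand-ins for the real version macros):

#define STR2(x) #x
#define STR(x)  STR2(x)		/* expand the argument first, then stringify */

#define MAJOR 1			/* stand-in for LIBBPF_MAJOR_VERSION */
#define MINOR 6			/* stand-in for LIBBPF_MINOR_VERSION */

static const char *ver = "v" STR(MAJOR) "." STR(MINOR);	/* "v1.6" */
static const char *bad = "v" STR2(MAJOR);		/* "vMAJOR": stringified too early */
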
366 | | |
367 | | enum reloc_type { |
368 | | RELO_LD64, |
369 | | RELO_CALL, |
370 | | RELO_DATA, |
371 | | RELO_EXTERN_LD64, |
372 | | RELO_EXTERN_CALL, |
373 | | RELO_SUBPROG_ADDR, |
374 | | RELO_CORE, |
375 | | }; |
376 | | |
377 | | struct reloc_desc { |
378 | | enum reloc_type type; |
379 | | int insn_idx; |
380 | | union { |
381 | | const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */ |
382 | | struct { |
383 | | int map_idx; |
384 | | int sym_off; |
385 | | int ext_idx; |
386 | | }; |
387 | | }; |
388 | | }; |
389 | | |
390 | | /* stored as sec_def->cookie for all libbpf-supported SEC()s */ |
391 | | enum sec_def_flags { |
392 | | SEC_NONE = 0, |
393 | | /* expected_attach_type is optional if the kernel doesn't support it */ |
394 | | SEC_EXP_ATTACH_OPT = 1, |
395 | | /* legacy, only used by libbpf_get_type_names() and |
396 | | * libbpf_attach_type_by_name(), not used by libbpf itself at all. |
397 | | * This used to be associated with cgroup (and a few other) BPF programs |
398 | | * that were attachable through the BPF_PROG_ATTACH command. Pretty |
399 | | * meaningless nowadays, though. |
400 | | */ |
401 | | SEC_ATTACHABLE = 2, |
402 | | SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT, |
403 | | /* attachment target is specified through BTF ID in either kernel or |
404 | | * other BPF program's BTF object |
405 | | */ |
406 | | SEC_ATTACH_BTF = 4, |
407 | | /* BPF program type allows sleeping/blocking in the kernel */ |
408 | | SEC_SLEEPABLE = 8, |
409 | | /* BPF program supports non-linear XDP buffers */ |
410 | | SEC_XDP_FRAGS = 16, |
411 | | /* Set up the proper attach type for USDT probes. */ |
412 | | SEC_USDT = 32, |
413 | | }; |
414 | | |
415 | | struct bpf_sec_def { |
416 | | char *sec; |
417 | | enum bpf_prog_type prog_type; |
418 | | enum bpf_attach_type expected_attach_type; |
419 | | long cookie; |
420 | | int handler_id; |
421 | | |
422 | | libbpf_prog_setup_fn_t prog_setup_fn; |
423 | | libbpf_prog_prepare_load_fn_t prog_prepare_load_fn; |
424 | | libbpf_prog_attach_fn_t prog_attach_fn; |
425 | | }; |
426 | | |
427 | | /* |
428 | | * bpf_prog would be a better name, but it is already used in |
429 | | * linux/filter.h. |
430 | | */ |
431 | | struct bpf_program { |
432 | | char *name; |
433 | | char *sec_name; |
434 | | size_t sec_idx; |
435 | | const struct bpf_sec_def *sec_def; |
436 | | /* this program's instruction offset (in number of instructions) |
437 | | * within its containing ELF section |
438 | | */ |
439 | | size_t sec_insn_off; |
440 | | /* number of original instructions in ELF section belonging to this |
441 | | * program, not taking into account subprogram instructions possibly |
442 | | * appended later during relocation |
443 | | */ |
444 | | size_t sec_insn_cnt; |
445 | | /* Offset (in number of instructions) of the first instruction |
446 | | * belonging to this BPF program within its containing main BPF |
447 | | * program. For the entry-point (main) BPF program, this is always |
448 | | * zero. For a sub-program, this gets reset before each main BPF |
449 | | * program is processed and relocated, and is used to determine |
450 | | * whether the sub-program was already appended to the main program, |
451 | | * and if yes, at which instruction offset. |
452 | | */ |
453 | | size_t sub_insn_off; |
454 | | |
455 | | /* instructions that belong to BPF program; insns[0] is located at |
456 | | * sec_insn_off instruction within its ELF section in ELF file, so |
457 | | * when mapping ELF file instruction index to the local instruction, |
458 | | * one needs to subtract sec_insn_off; and vice versa. |
459 | | */ |
460 | | struct bpf_insn *insns; |
461 | | /* actual number of instructions in this BPF program's image; for |
462 | | * entry-point BPF programs this includes the size of the main program |
463 | | * itself plus all the used sub-programs, appended at the end |
464 | | */ |
465 | | size_t insns_cnt; |
466 | | |
467 | | struct reloc_desc *reloc_desc; |
468 | | int nr_reloc; |
469 | | |
470 | | /* BPF verifier log settings */ |
471 | | char *log_buf; |
472 | | size_t log_size; |
473 | | __u32 log_level; |
474 | | |
475 | | struct bpf_object *obj; |
476 | | |
477 | | int fd; |
478 | | bool autoload; |
479 | | bool autoattach; |
480 | | bool sym_global; |
481 | | bool mark_btf_static; |
482 | | enum bpf_prog_type type; |
483 | | enum bpf_attach_type expected_attach_type; |
484 | | int exception_cb_idx; |
485 | | |
486 | | int prog_ifindex; |
487 | | __u32 attach_btf_obj_fd; |
488 | | __u32 attach_btf_id; |
489 | | __u32 attach_prog_fd; |
490 | | |
491 | | void *func_info; |
492 | | __u32 func_info_rec_size; |
493 | | __u32 func_info_cnt; |
494 | | |
495 | | void *line_info; |
496 | | __u32 line_info_rec_size; |
497 | | __u32 line_info_cnt; |
498 | | __u32 prog_flags; |
499 | | }; |
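
The three offsets above (sec_insn_off, sec_insn_cnt, sub_insn_off) define two index translations that the relocation code relies on. A sketch of the arithmetic, with hypothetical helper names that are not part of libbpf:

/* ELF-section instruction index -> index local to this program */
static size_t sec_to_local_idx(const struct bpf_program *prog, size_t sec_insn_idx)
{
	return sec_insn_idx - prog->sec_insn_off;
}

/* program-local instruction index -> offset within the final main-program
 * image (only meaningful once sub_insn_off has been set during relocation)
 */
static size_t local_to_image_idx(const struct bpf_program *prog, size_t local_idx)
{
	return prog->sub_insn_off + local_idx;
}
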
500 | | |
501 | | struct bpf_struct_ops { |
502 | | struct bpf_program **progs; |
503 | | __u32 *kern_func_off; |
504 | | /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */ |
505 | | void *data; |
506 | | /* e.g. struct bpf_struct_ops_tcp_congestion_ops in |
507 | | * btf_vmlinux's format. |
508 | | * struct bpf_struct_ops_tcp_congestion_ops { |
509 | | * [... some other kernel fields ...] |
510 | | * struct tcp_congestion_ops data; |
511 | | * } |
512 | | * kern_vdata's size == sizeof(struct bpf_struct_ops_tcp_congestion_ops) |
513 | | * bpf_map__init_kern_struct_ops() will populate the "kern_vdata" |
514 | | * from "data". |
515 | | */ |
516 | | void *kern_vdata; |
517 | | __u32 type_id; |
518 | | }; |
519 | | |
520 | 3.14k | #define DATA_SEC ".data" |
521 | 2.27k | #define BSS_SEC ".bss" |
522 | 2.59k | #define RODATA_SEC ".rodata" |
523 | 7.05k | #define KCONFIG_SEC ".kconfig" |
524 | 9.23k | #define KSYMS_SEC ".ksyms" |
525 | 6.20k | #define STRUCT_OPS_SEC ".struct_ops" |
526 | 5.90k | #define STRUCT_OPS_LINK_SEC ".struct_ops.link" |
527 | 1.40k | #define ARENA_SEC ".addr_space.1" |
528 | | |
529 | | enum libbpf_map_type { |
530 | | LIBBPF_MAP_UNSPEC, |
531 | | LIBBPF_MAP_DATA, |
532 | | LIBBPF_MAP_BSS, |
533 | | LIBBPF_MAP_RODATA, |
534 | | LIBBPF_MAP_KCONFIG, |
535 | | }; |
536 | | |
537 | | struct bpf_map_def { |
538 | | unsigned int type; |
539 | | unsigned int key_size; |
540 | | unsigned int value_size; |
541 | | unsigned int max_entries; |
542 | | unsigned int map_flags; |
543 | | }; |
544 | | |
545 | | struct bpf_map { |
546 | | struct bpf_object *obj; |
547 | | char *name; |
548 | | /* real_name is defined for special internal maps (.rodata*, |
549 | | * .data*, .bss, .kconfig) and preserves their original ELF section |
550 | | * name. This is important to be able to find corresponding BTF |
551 | | * DATASEC information. |
552 | | */ |
553 | | char *real_name; |
554 | | int fd; |
555 | | int sec_idx; |
556 | | size_t sec_offset; |
557 | | int map_ifindex; |
558 | | int inner_map_fd; |
559 | | struct bpf_map_def def; |
560 | | __u32 numa_node; |
561 | | __u32 btf_var_idx; |
562 | | int mod_btf_fd; |
563 | | __u32 btf_key_type_id; |
564 | | __u32 btf_value_type_id; |
565 | | __u32 btf_vmlinux_value_type_id; |
566 | | enum libbpf_map_type libbpf_type; |
567 | | void *mmaped; |
568 | | struct bpf_struct_ops *st_ops; |
569 | | struct bpf_map *inner_map; |
570 | | void **init_slots; |
571 | | int init_slots_sz; |
572 | | char *pin_path; |
573 | | bool pinned; |
574 | | bool reused; |
575 | | bool autocreate; |
576 | | bool autoattach; |
577 | | __u64 map_extra; |
578 | | }; |
579 | | |
580 | | enum extern_type { |
581 | | EXT_UNKNOWN, |
582 | | EXT_KCFG, |
583 | | EXT_KSYM, |
584 | | }; |
585 | | |
586 | | enum kcfg_type { |
587 | | KCFG_UNKNOWN, |
588 | | KCFG_CHAR, |
589 | | KCFG_BOOL, |
590 | | KCFG_INT, |
591 | | KCFG_TRISTATE, |
592 | | KCFG_CHAR_ARR, |
593 | | }; |
594 | | |
595 | | struct extern_desc { |
596 | | enum extern_type type; |
597 | | int sym_idx; |
598 | | int btf_id; |
599 | | int sec_btf_id; |
600 | | char *name; |
601 | | char *essent_name; |
602 | | bool is_set; |
603 | | bool is_weak; |
604 | | union { |
605 | | struct { |
606 | | enum kcfg_type type; |
607 | | int sz; |
608 | | int align; |
609 | | int data_off; |
610 | | bool is_signed; |
611 | | } kcfg; |
612 | | struct { |
613 | | unsigned long long addr; |
614 | | |
615 | | /* target btf_id of the corresponding kernel var. */ |
616 | | int kernel_btf_obj_fd; |
617 | | int kernel_btf_id; |
618 | | |
619 | | /* local btf_id of the ksym extern's type. */ |
620 | | __u32 type_id; |
621 | | /* BTF fd index to be patched in for insn->off; this is |
622 | | * 0 for vmlinux BTF, or the index in obj->fd_array for |
623 | | * module BTF |
624 | | */ |
625 | | __s16 btf_fd_idx; |
626 | | } ksym; |
627 | | }; |
628 | | }; |
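
For context, these descriptors are produced from externs declared on the BPF side. A sketch of declarations that would yield EXT_KCFG and EXT_KSYM entries (using the __kconfig/__ksym attributes from bpf/bpf_helpers.h; the specific CONFIG_* symbols are illustrative):

#include <bpf/bpf_helpers.h>

extern unsigned long CONFIG_HZ __kconfig;			/* EXT_KCFG, KCFG_INT */
extern enum libbpf_tristate CONFIG_XFS_FS __kconfig __weak;	/* EXT_KCFG, KCFG_TRISTATE */

extern const void bpf_prog_active __ksym;			/* EXT_KSYM: kernel symbol address */
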
629 | | |
630 | | struct module_btf { |
631 | | struct btf *btf; |
632 | | char *name; |
633 | | __u32 id; |
634 | | int fd; |
635 | | int fd_array_idx; |
636 | | }; |
637 | | |
638 | | enum sec_type { |
639 | | SEC_UNUSED = 0, |
640 | | SEC_RELO, |
641 | | SEC_BSS, |
642 | | SEC_DATA, |
643 | | SEC_RODATA, |
644 | | SEC_ST_OPS, |
645 | | }; |
646 | | |
647 | | struct elf_sec_desc { |
648 | | enum sec_type sec_type; |
649 | | Elf64_Shdr *shdr; |
650 | | Elf_Data *data; |
651 | | }; |
652 | | |
653 | | struct elf_state { |
654 | | int fd; |
655 | | const void *obj_buf; |
656 | | size_t obj_buf_sz; |
657 | | Elf *elf; |
658 | | Elf64_Ehdr *ehdr; |
659 | | Elf_Data *symbols; |
660 | | Elf_Data *arena_data; |
661 | | size_t shstrndx; /* section index for section name strings */ |
662 | | size_t strtabidx; |
663 | | struct elf_sec_desc *secs; |
664 | | size_t sec_cnt; |
665 | | int btf_maps_shndx; |
666 | | __u32 btf_maps_sec_btf_id; |
667 | | int text_shndx; |
668 | | int symbols_shndx; |
669 | | bool has_st_ops; |
670 | | int arena_data_shndx; |
671 | | }; |
672 | | |
673 | | struct usdt_manager; |
674 | | |
675 | | enum bpf_object_state { |
676 | | OBJ_OPEN, |
677 | | OBJ_PREPARED, |
678 | | OBJ_LOADED, |
679 | | }; |
680 | | |
681 | | struct bpf_object { |
682 | | char name[BPF_OBJ_NAME_LEN]; |
683 | | char license[64]; |
684 | | __u32 kern_version; |
685 | | |
686 | | enum bpf_object_state state; |
687 | | struct bpf_program *programs; |
688 | | size_t nr_programs; |
689 | | struct bpf_map *maps; |
690 | | size_t nr_maps; |
691 | | size_t maps_cap; |
692 | | |
693 | | char *kconfig; |
694 | | struct extern_desc *externs; |
695 | | int nr_extern; |
696 | | int kconfig_map_idx; |
697 | | |
698 | | bool has_subcalls; |
699 | | bool has_rodata; |
700 | | |
701 | | struct bpf_gen *gen_loader; |
702 | | |
703 | | /* Information used when doing ELF-related work. Only valid if efile.elf is not NULL */ |
704 | | struct elf_state efile; |
705 | | |
706 | | unsigned char byteorder; |
707 | | |
708 | | struct btf *btf; |
709 | | struct btf_ext *btf_ext; |
710 | | |
711 | | /* Parse and load BTF vmlinux if any of the programs in the object need |
712 | | * it at load time. |
713 | | */ |
714 | | struct btf *btf_vmlinux; |
715 | | /* Path to the custom BTF to be used for BPF CO-RE relocations as an |
716 | | * override for vmlinux BTF. |
717 | | */ |
718 | | char *btf_custom_path; |
719 | | /* vmlinux BTF override for CO-RE relocations */ |
720 | | struct btf *btf_vmlinux_override; |
721 | | /* Lazily initialized kernel module BTFs */ |
722 | | struct module_btf *btf_modules; |
723 | | bool btf_modules_loaded; |
724 | | size_t btf_module_cnt; |
725 | | size_t btf_module_cap; |
726 | | |
727 | | /* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */ |
728 | | char *log_buf; |
729 | | size_t log_size; |
730 | | __u32 log_level; |
731 | | |
732 | | int *fd_array; |
733 | | size_t fd_array_cap; |
734 | | size_t fd_array_cnt; |
735 | | |
736 | | struct usdt_manager *usdt_man; |
737 | | |
738 | | struct bpf_map *arena_map; |
739 | | void *arena_data; |
740 | | size_t arena_data_sz; |
741 | | |
742 | | struct kern_feature_cache *feat_cache; |
743 | | char *token_path; |
744 | | int token_fd; |
745 | | |
746 | | char path[]; |
747 | | }; |
748 | | |
749 | | static const char *elf_sym_str(const struct bpf_object *obj, size_t off); |
750 | | static const char *elf_sec_str(const struct bpf_object *obj, size_t off); |
751 | | static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx); |
752 | | static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name); |
753 | | static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn); |
754 | | static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn); |
755 | | static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn); |
756 | | static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx); |
757 | | static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx); |
758 | | |
759 | | void bpf_program__unload(struct bpf_program *prog) |
760 | 18.5k | { |
761 | 18.5k | if (!prog) |
762 | 0 | return; |
763 | | |
764 | 18.5k | zclose(prog->fd); |
765 | | |
766 | 18.5k | zfree(&prog->func_info); |
767 | 18.5k | zfree(&prog->line_info); |
768 | 18.5k | } |
769 | | |
770 | | static void bpf_program__exit(struct bpf_program *prog) |
771 | 9.26k | { |
772 | 9.26k | if (!prog) |
773 | 0 | return; |
774 | | |
775 | 9.26k | bpf_program__unload(prog); |
776 | 9.26k | zfree(&prog->name); |
777 | 9.26k | zfree(&prog->sec_name); |
778 | 9.26k | zfree(&prog->insns); |
779 | 9.26k | zfree(&prog->reloc_desc); |
780 | | |
781 | 9.26k | prog->nr_reloc = 0; |
782 | 9.26k | prog->insns_cnt = 0; |
783 | 9.26k | prog->sec_idx = -1; |
784 | 9.26k | } |
785 | | |
786 | | static bool insn_is_subprog_call(const struct bpf_insn *insn) |
787 | 0 | { |
788 | 0 | return BPF_CLASS(insn->code) == BPF_JMP && |
789 | 0 | BPF_OP(insn->code) == BPF_CALL && |
790 | 0 | BPF_SRC(insn->code) == BPF_K && |
791 | 0 | insn->src_reg == BPF_PSEUDO_CALL && |
792 | 0 | insn->dst_reg == 0 && |
793 | 0 | insn->off == 0; |
794 | 0 | } |
795 | | |
796 | | static bool is_call_insn(const struct bpf_insn *insn) |
797 | 2.61k | { |
798 | 2.61k | return insn->code == (BPF_JMP | BPF_CALL); |
799 | 2.61k | } |
800 | | |
801 | | static bool insn_is_pseudo_func(struct bpf_insn *insn) |
802 | 0 | { |
803 | 0 | return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC; |
804 | 0 | } |
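
Concretely, the three predicates above split BPF_JMP|BPF_CALL and ld_imm64 instructions into classes. A small sketch using only uapi linux/bpf.h constants:

#include <linux/bpf.h>

struct bpf_insn subprog_call = {
	.code    = BPF_JMP | BPF_CALL,
	.src_reg = BPF_PSEUDO_CALL,	/* marks a bpf-to-bpf call */
	.imm     = 0,			/* relative target, patched during relocation */
};

struct bpf_insn helper_call = {
	.code    = BPF_JMP | BPF_CALL,
	.src_reg = 0,			/* helper calls leave src_reg as 0 */
	.imm     = BPF_FUNC_map_lookup_elem,
};

/* is_call_insn() is true for both; insn_is_subprog_call() only for the
 * first; insn_is_pseudo_func() matches neither (it wants an ld_imm64 with
 * src_reg == BPF_PSEUDO_FUNC, i.e. taking the address of a subprogram).
 */
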
805 | | |
806 | | static int |
807 | | bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog, |
808 | | const char *name, size_t sec_idx, const char *sec_name, |
809 | | size_t sec_off, void *insn_data, size_t insn_data_sz) |
810 | 9.28k | { |
811 | 9.28k | if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) { |
812 | 18 | pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n", |
813 | 18 | sec_name, name, sec_off, insn_data_sz); |
814 | 18 | return -EINVAL; |
815 | 18 | } |
816 | | |
817 | 9.26k | memset(prog, 0, sizeof(*prog)); |
818 | 9.26k | prog->obj = obj; |
819 | | |
820 | 9.26k | prog->sec_idx = sec_idx; |
821 | 9.26k | prog->sec_insn_off = sec_off / BPF_INSN_SZ; |
822 | 9.26k | prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ; |
823 | | /* insns_cnt can later be increased by appending used subprograms */ |
824 | 9.26k | prog->insns_cnt = prog->sec_insn_cnt; |
825 | | |
826 | 9.26k | prog->type = BPF_PROG_TYPE_UNSPEC; |
827 | 9.26k | prog->fd = -1; |
828 | 9.26k | prog->exception_cb_idx = -1; |
829 | | |
830 | | /* libbpf's convention for SEC("?abc...") is that it's just like |
831 | | * SEC("abc...") but the corresponding bpf_program starts out with |
832 | | * autoload set to false. |
833 | | */ |
834 | 9.26k | if (sec_name[0] == '?') { |
835 | 308 | prog->autoload = false; |
836 | | /* from now on forget there was ? in section name */ |
837 | 308 | sec_name++; |
838 | 8.95k | } else { |
839 | 8.95k | prog->autoload = true; |
840 | 8.95k | } |
841 | | |
842 | 9.26k | prog->autoattach = true; |
843 | | |
844 | | /* inherit object's log_level */ |
845 | 9.26k | prog->log_level = obj->log_level; |
846 | | |
847 | 9.26k | prog->sec_name = strdup(sec_name); |
848 | 9.26k | if (!prog->sec_name) |
849 | 0 | goto errout; |
850 | | |
851 | 9.26k | prog->name = strdup(name); |
852 | 9.26k | if (!prog->name) |
853 | 0 | goto errout; |
854 | | |
855 | 9.26k | prog->insns = malloc(insn_data_sz); |
856 | 9.26k | if (!prog->insns) |
857 | 0 | goto errout; |
858 | 9.26k | memcpy(prog->insns, insn_data, insn_data_sz); |
859 | | |
860 | 9.26k | return 0; |
861 | 0 | errout: |
862 | 0 | pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name); |
863 | 0 | bpf_program__exit(prog); |
864 | 0 | return -ENOMEM; |
865 | 9.26k | } |
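
The '?' prefix handled above maps to the following user-visible behavior: SEC("?kprobe/do_unlinkat") makes the program start out with autoload disabled, and userspace can opt it back in before load. A sketch using the public API (program name is illustrative):

#include <bpf/libbpf.h>

static void enable_optional_prog(struct bpf_object *obj)
{
	struct bpf_program *prog;

	prog = bpf_object__find_program_by_name(obj, "maybe_probe");
	if (prog)
		bpf_program__set_autoload(prog, true);	/* opt this one back in */
}
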
866 | | |
867 | | static int |
868 | | bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data, |
869 | | const char *sec_name, int sec_idx) |
870 | 1.33k | { |
871 | 1.33k | Elf_Data *symbols = obj->efile.symbols; |
872 | 1.33k | struct bpf_program *prog, *progs; |
873 | 1.33k | void *data = sec_data->d_buf; |
874 | 1.33k | size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms; |
875 | 1.33k | int nr_progs, err, i; |
876 | 1.33k | const char *name; |
877 | 1.33k | Elf64_Sym *sym; |
878 | | |
879 | 1.33k | progs = obj->programs; |
880 | 1.33k | nr_progs = obj->nr_programs; |
881 | 1.33k | nr_syms = symbols->d_size / sizeof(Elf64_Sym); |
882 | | |
883 | 189k | for (i = 0; i < nr_syms; i++) { |
884 | 187k | sym = elf_sym_by_idx(obj, i); |
885 | | |
886 | 187k | if (sym->st_shndx != sec_idx) |
887 | 176k | continue; |
888 | 11.2k | if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC) |
889 | 1.74k | continue; |
890 | | |
891 | 9.49k | prog_sz = sym->st_size; |
892 | 9.49k | sec_off = sym->st_value; |
893 | | |
894 | 9.49k | name = elf_sym_str(obj, sym->st_name); |
895 | 9.49k | if (!name) { |
896 | 67 | pr_warn("sec '%s': failed to get symbol name for offset %zu\n", |
897 | 67 | sec_name, sec_off); |
898 | 67 | return -LIBBPF_ERRNO__FORMAT; |
899 | 67 | } |
900 | | |
901 | 9.42k | if (sec_off + prog_sz > sec_sz || sec_off + prog_sz < sec_off) { |
902 | 135 | pr_warn("sec '%s': program at offset %zu crosses section boundary\n", |
903 | 135 | sec_name, sec_off); |
904 | 135 | return -LIBBPF_ERRNO__FORMAT; |
905 | 135 | } |
906 | | |
907 | 9.29k | if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) { |
908 | 8 | pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name); |
909 | 8 | return -ENOTSUP; |
910 | 8 | } |
911 | | |
912 | 9.28k | pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n", |
913 | 18.5k | sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz); |
914 | | |
915 | 9.28k | progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs)); |
916 | 9.28k | if (!progs) { |
917 | | /* |
918 | | * In this case the original obj->programs |
919 | | * is still valid, so no special treatment is needed in |
920 | | * bpf_object__close(). |
921 | | */ |
922 | 0 | pr_warn("sec '%s': failed to alloc memory for new program '%s'\n", |
923 | 0 | sec_name, name); |
924 | 0 | return -ENOMEM; |
925 | 0 | } |
926 | 9.28k | obj->programs = progs; |
927 | | |
928 | 9.28k | prog = &progs[nr_progs]; |
929 | | |
930 | 9.28k | err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name, |
931 | 9.28k | sec_off, data + sec_off, prog_sz); |
932 | 9.28k | if (err) |
933 | 18 | return err; |
934 | | |
935 | 9.26k | if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL) |
936 | 9.07k | prog->sym_global = true; |
937 | | |
938 | | /* if the function is a global/weak symbol, but has restricted |
939 | | * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC |
940 | | * as static to enable a more permissive BPF verification mode |
941 | | * with more outside context available to the BPF verifier |
942 | | */ |
943 | 9.26k | if (prog->sym_global && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN |
944 | 9.07k | || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)) |
945 | 6.59k | prog->mark_btf_static = true; |
946 | | |
947 | 9.26k | nr_progs++; |
948 | 9.26k | obj->nr_programs = nr_progs; |
949 | 9.26k | } |
950 | | |
951 | 1.10k | return 0; |
952 | 1.33k | } |
953 | | |
954 | | static void bpf_object_bswap_progs(struct bpf_object *obj) |
955 | 220 | { |
956 | 220 | struct bpf_program *prog = obj->programs; |
957 | 220 | struct bpf_insn *insn; |
958 | 220 | int p, i; |
959 | | |
960 | 602 | for (p = 0; p < obj->nr_programs; p++, prog++) { |
961 | 382 | insn = prog->insns; |
962 | 3.50k | for (i = 0; i < prog->insns_cnt; i++, insn++) |
963 | 3.12k | bpf_insn_bswap(insn); |
964 | 382 | } |
965 | 220 | pr_debug("converted %zu BPF programs to native byte order\n", obj->nr_programs); |
966 | 220 | } |
967 | | |
968 | | static const struct btf_member * |
969 | | find_member_by_offset(const struct btf_type *t, __u32 bit_offset) |
970 | 0 | { |
971 | 0 | struct btf_member *m; |
972 | 0 | int i; |
973 | | |
974 | 0 | for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) { |
975 | 0 | if (btf_member_bit_offset(t, i) == bit_offset) |
976 | 0 | return m; |
977 | 0 | } |
978 | | |
979 | 0 | return NULL; |
980 | 0 | } |
981 | | |
982 | | static const struct btf_member * |
983 | | find_member_by_name(const struct btf *btf, const struct btf_type *t, |
984 | | const char *name) |
985 | 0 | { |
986 | 0 | struct btf_member *m; |
987 | 0 | int i; |
988 | | |
989 | 0 | for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) { |
990 | 0 | if (!strcmp(btf__name_by_offset(btf, m->name_off), name)) |
991 | 0 | return m; |
992 | 0 | } |
993 | | |
994 | 0 | return NULL; |
995 | 0 | } |
996 | | |
997 | | static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, |
998 | | __u16 kind, struct btf **res_btf, |
999 | | struct module_btf **res_mod_btf); |
1000 | | |
1001 | 0 | #define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_" |
1002 | | static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, |
1003 | | const char *name, __u32 kind); |
1004 | | |
1005 | | static int |
1006 | | find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw, |
1007 | | struct module_btf **mod_btf, |
1008 | | const struct btf_type **type, __u32 *type_id, |
1009 | | const struct btf_type **vtype, __u32 *vtype_id, |
1010 | | const struct btf_member **data_member) |
1011 | 0 | { |
1012 | 0 | const struct btf_type *kern_type, *kern_vtype; |
1013 | 0 | const struct btf_member *kern_data_member; |
1014 | 0 | struct btf *btf = NULL; |
1015 | 0 | __s32 kern_vtype_id, kern_type_id; |
1016 | 0 | char tname[256]; |
1017 | 0 | __u32 i; |
1018 | | |
1019 | 0 | snprintf(tname, sizeof(tname), "%.*s", |
1020 | 0 | (int)bpf_core_essential_name_len(tname_raw), tname_raw); |
1021 | | |
1022 | 0 | kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT, |
1023 | 0 | &btf, mod_btf); |
1024 | 0 | if (kern_type_id < 0) { |
1025 | 0 | pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", |
1026 | 0 | tname); |
1027 | 0 | return kern_type_id; |
1028 | 0 | } |
1029 | 0 | kern_type = btf__type_by_id(btf, kern_type_id); |
1030 | | |
1031 | | /* Find the corresponding "map_value" type that will be used |
1032 | | * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example, |
1033 | | * find "struct bpf_struct_ops_tcp_congestion_ops" from the |
1034 | | * btf_vmlinux. |
1035 | | */ |
1036 | 0 | kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX, |
1037 | 0 | tname, BTF_KIND_STRUCT); |
1038 | 0 | if (kern_vtype_id < 0) { |
1039 | 0 | pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n", |
1040 | 0 | STRUCT_OPS_VALUE_PREFIX, tname); |
1041 | 0 | return kern_vtype_id; |
1042 | 0 | } |
1043 | 0 | kern_vtype = btf__type_by_id(btf, kern_vtype_id); |
1044 | | |
1045 | | /* Find "struct tcp_congestion_ops" from |
1046 | | * struct bpf_struct_ops_tcp_congestion_ops { |
1047 | | * [ ... ] |
1048 | | * struct tcp_congestion_ops data; |
1049 | | * } |
1050 | | */ |
1051 | 0 | kern_data_member = btf_members(kern_vtype); |
1052 | 0 | for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) { |
1053 | 0 | if (kern_data_member->type == kern_type_id) |
1054 | 0 | break; |
1055 | 0 | } |
1056 | 0 | if (i == btf_vlen(kern_vtype)) { |
1057 | 0 | pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n", |
1058 | 0 | tname, STRUCT_OPS_VALUE_PREFIX, tname); |
1059 | 0 | return -EINVAL; |
1060 | 0 | } |
1061 | | |
1062 | 0 | *type = kern_type; |
1063 | 0 | *type_id = kern_type_id; |
1064 | 0 | *vtype = kern_vtype; |
1065 | 0 | *vtype_id = kern_vtype_id; |
1066 | 0 | *data_member = kern_data_member; |
1067 | | |
1068 | 0 | return 0; |
1069 | 0 | } |
1070 | | |
1071 | | static bool bpf_map__is_struct_ops(const struct bpf_map *map) |
1072 | 528 | { |
1073 | 528 | return map->def.type == BPF_MAP_TYPE_STRUCT_OPS; |
1074 | 528 | } |
1075 | | |
1076 | | static bool is_valid_st_ops_program(struct bpf_object *obj, |
1077 | | const struct bpf_program *prog) |
1078 | 0 | { |
1079 | 0 | int i; |
1080 | | |
1081 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
1082 | 0 | if (&obj->programs[i] == prog) |
1083 | 0 | return prog->type == BPF_PROG_TYPE_STRUCT_OPS; |
1084 | 0 | } |
1085 | | |
1086 | 0 | return false; |
1087 | 0 | } |
1088 | | |
1089 | | /* For each struct_ops program P, referenced from some struct_ops map M, |
1090 | | * enable P.autoload if there are Ms for which M.autocreate is true, |
1091 | | * disable P.autoload if for all Ms M.autocreate is false. |
1092 | | * Don't change P.autoload for programs that are not referenced from any maps. |
1093 | | */ |
1094 | | static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj) |
1095 | 0 | { |
1096 | 0 | struct bpf_program *prog, *slot_prog; |
1097 | 0 | struct bpf_map *map; |
1098 | 0 | int i, j, k, vlen; |
1099 | | |
1100 | 0 | for (i = 0; i < obj->nr_programs; ++i) { |
1101 | 0 | int should_load = false; |
1102 | 0 | int use_cnt = 0; |
1103 | | |
1104 | 0 | prog = &obj->programs[i]; |
1105 | 0 | if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) |
1106 | 0 | continue; |
1107 | | |
1108 | 0 | for (j = 0; j < obj->nr_maps; ++j) { |
1109 | 0 | const struct btf_type *type; |
1110 | | |
1111 | 0 | map = &obj->maps[j]; |
1112 | 0 | if (!bpf_map__is_struct_ops(map)) |
1113 | 0 | continue; |
1114 | | |
1115 | 0 | type = btf__type_by_id(obj->btf, map->st_ops->type_id); |
1116 | 0 | vlen = btf_vlen(type); |
1117 | 0 | for (k = 0; k < vlen; ++k) { |
1118 | 0 | slot_prog = map->st_ops->progs[k]; |
1119 | 0 | if (prog != slot_prog) |
1120 | 0 | continue; |
1121 | | |
1122 | 0 | use_cnt++; |
1123 | 0 | if (map->autocreate) |
1124 | 0 | should_load = true; |
1125 | 0 | } |
1126 | 0 | } |
1127 | 0 | if (use_cnt) |
1128 | 0 | prog->autoload = should_load; |
1129 | 0 | } |
1130 | | |
1131 | 0 | return 0; |
1132 | 0 | } |
1133 | | |
1134 | | /* Init the map's fields that depend on kern_btf */ |
1135 | | static int bpf_map__init_kern_struct_ops(struct bpf_map *map) |
1136 | 0 | { |
1137 | 0 | const struct btf_member *member, *kern_member, *kern_data_member; |
1138 | 0 | const struct btf_type *type, *kern_type, *kern_vtype; |
1139 | 0 | __u32 i, kern_type_id, kern_vtype_id, kern_data_off; |
1140 | 0 | struct bpf_object *obj = map->obj; |
1141 | 0 | const struct btf *btf = obj->btf; |
1142 | 0 | struct bpf_struct_ops *st_ops; |
1143 | 0 | const struct btf *kern_btf; |
1144 | 0 | struct module_btf *mod_btf = NULL; |
1145 | 0 | void *data, *kern_data; |
1146 | 0 | const char *tname; |
1147 | 0 | int err; |
1148 | | |
1149 | 0 | st_ops = map->st_ops; |
1150 | 0 | type = btf__type_by_id(btf, st_ops->type_id); |
1151 | 0 | tname = btf__name_by_offset(btf, type->name_off); |
1152 | 0 | err = find_struct_ops_kern_types(obj, tname, &mod_btf, |
1153 | 0 | &kern_type, &kern_type_id, |
1154 | 0 | &kern_vtype, &kern_vtype_id, |
1155 | 0 | &kern_data_member); |
1156 | 0 | if (err) |
1157 | 0 | return err; |
1158 | | |
1159 | 0 | kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux; |
1160 | | |
1161 | 0 | pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n", |
1162 | 0 | map->name, st_ops->type_id, kern_type_id, kern_vtype_id); |
1163 | |
|
1164 | 0 | map->mod_btf_fd = mod_btf ? mod_btf->fd : -1; |
1165 | 0 | map->def.value_size = kern_vtype->size; |
1166 | 0 | map->btf_vmlinux_value_type_id = kern_vtype_id; |
1167 | | |
1168 | 0 | st_ops->kern_vdata = calloc(1, kern_vtype->size); |
1169 | 0 | if (!st_ops->kern_vdata) |
1170 | 0 | return -ENOMEM; |
1171 | | |
1172 | 0 | data = st_ops->data; |
1173 | 0 | kern_data_off = kern_data_member->offset / 8; |
1174 | 0 | kern_data = st_ops->kern_vdata + kern_data_off; |
1175 | | |
1176 | 0 | member = btf_members(type); |
1177 | 0 | for (i = 0; i < btf_vlen(type); i++, member++) { |
1178 | 0 | const struct btf_type *mtype, *kern_mtype; |
1179 | 0 | __u32 mtype_id, kern_mtype_id; |
1180 | 0 | void *mdata, *kern_mdata; |
1181 | 0 | struct bpf_program *prog; |
1182 | 0 | __s64 msize, kern_msize; |
1183 | 0 | __u32 moff, kern_moff; |
1184 | 0 | __u32 kern_member_idx; |
1185 | 0 | const char *mname; |
1186 | | |
1187 | 0 | mname = btf__name_by_offset(btf, member->name_off); |
1188 | 0 | moff = member->offset / 8; |
1189 | 0 | mdata = data + moff; |
1190 | 0 | msize = btf__resolve_size(btf, member->type); |
1191 | 0 | if (msize < 0) { |
1192 | 0 | pr_warn("struct_ops init_kern %s: failed to resolve the size of member %s\n", |
1193 | 0 | map->name, mname); |
1194 | 0 | return msize; |
1195 | 0 | } |
1196 | | |
1197 | 0 | kern_member = find_member_by_name(kern_btf, kern_type, mname); |
1198 | 0 | if (!kern_member) { |
1199 | 0 | if (!libbpf_is_mem_zeroed(mdata, msize)) { |
1200 | 0 | pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n", |
1201 | 0 | map->name, mname); |
1202 | 0 | return -ENOTSUP; |
1203 | 0 | } |
1204 | | |
1205 | 0 | if (st_ops->progs[i]) { |
1206 | | /* If we had a declaratively set struct_ops callback, we need to |
1207 | | * force its autoload to false, because it doesn't have |
1208 | | * a chance of succeeding from the POV of the current struct_ops map. |
1209 | | * If this program is still referenced somewhere else, though, |
1210 | | * then bpf_object_adjust_struct_ops_autoload() will update its |
1211 | | * autoload accordingly. |
1212 | | */ |
1213 | 0 | st_ops->progs[i]->autoload = false; |
1214 | 0 | st_ops->progs[i] = NULL; |
1215 | 0 | } |
1216 | | |
1217 | | /* Skip all-zero/NULL fields if they are not present in the kernel BTF */ |
1218 | 0 | pr_info("struct_ops %s: member %s not found in kernel, skipping it as it's set to zero\n", |
1219 | 0 | map->name, mname); |
1220 | 0 | continue; |
1221 | 0 | } |
1222 | | |
1223 | 0 | kern_member_idx = kern_member - btf_members(kern_type); |
1224 | 0 | if (btf_member_bitfield_size(type, i) || |
1225 | 0 | btf_member_bitfield_size(kern_type, kern_member_idx)) { |
1226 | 0 | pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n", |
1227 | 0 | map->name, mname); |
1228 | 0 | return -ENOTSUP; |
1229 | 0 | } |
1230 | | |
1231 | 0 | kern_moff = kern_member->offset / 8; |
1232 | 0 | kern_mdata = kern_data + kern_moff; |
1233 | | |
1234 | 0 | mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id); |
1235 | 0 | kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type, |
1236 | 0 | &kern_mtype_id); |
1237 | 0 | if (BTF_INFO_KIND(mtype->info) != |
1238 | 0 | BTF_INFO_KIND(kern_mtype->info)) { |
1239 | 0 | pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n", |
1240 | 0 | map->name, mname, BTF_INFO_KIND(mtype->info), |
1241 | 0 | BTF_INFO_KIND(kern_mtype->info)); |
1242 | 0 | return -ENOTSUP; |
1243 | 0 | } |
1244 | | |
1245 | 0 | if (btf_is_ptr(mtype)) { |
1246 | 0 | prog = *(void **)mdata; |
1247 | | /* just like for the !kern_member case above, reset the declaratively |
1248 | | * set (at compile time) program's autoload to false, |
1249 | | * if the user replaced it with another program or NULL |
1250 | | */ |
1251 | 0 | if (st_ops->progs[i] && st_ops->progs[i] != prog) |
1252 | 0 | st_ops->progs[i]->autoload = false; |
1253 | | |
1254 | | /* Update the value from the shadow type */ |
1255 | 0 | st_ops->progs[i] = prog; |
1256 | 0 | if (!prog) |
1257 | 0 | continue; |
1258 | | |
1259 | 0 | if (!is_valid_st_ops_program(obj, prog)) { |
1260 | 0 | pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n", |
1261 | 0 | map->name, mname); |
1262 | 0 | return -ENOTSUP; |
1263 | 0 | } |
1264 | | |
1265 | 0 | kern_mtype = skip_mods_and_typedefs(kern_btf, |
1266 | 0 | kern_mtype->type, |
1267 | 0 | &kern_mtype_id); |
1268 | | |
1269 | | /* mtype->type must be a func_proto which was |
1270 | | * guaranteed in bpf_object__collect_st_ops_relos(), |
1271 | | * so only check kern_mtype for func_proto here. |
1272 | | */ |
1273 | 0 | if (!btf_is_func_proto(kern_mtype)) { |
1274 | 0 | pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n", |
1275 | 0 | map->name, mname); |
1276 | 0 | return -ENOTSUP; |
1277 | 0 | } |
1278 | | |
1279 | 0 | if (mod_btf) |
1280 | 0 | prog->attach_btf_obj_fd = mod_btf->fd; |
1281 | | |
1282 | | /* if we haven't yet processed this BPF program, record proper |
1283 | | * attach_btf_id and member_idx |
1284 | | */ |
1285 | 0 | if (!prog->attach_btf_id) { |
1286 | 0 | prog->attach_btf_id = kern_type_id; |
1287 | 0 | prog->expected_attach_type = kern_member_idx; |
1288 | 0 | } |
1289 | | |
1290 | | /* struct_ops BPF prog can be re-used between multiple |
1291 | | * .struct_ops & .struct_ops.link as long as it's the |
1292 | | * same struct_ops struct definition and the same |
1293 | | * function pointer field |
1294 | | */ |
1295 | 0 | if (prog->attach_btf_id != kern_type_id) { |
1296 | 0 | pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n", |
1297 | 0 | map->name, mname, prog->name, prog->sec_name, prog->type, |
1298 | 0 | prog->attach_btf_id, kern_type_id); |
1299 | 0 | return -EINVAL; |
1300 | 0 | } |
1301 | 0 | if (prog->expected_attach_type != kern_member_idx) { |
1302 | 0 | pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n", |
1303 | 0 | map->name, mname, prog->name, prog->sec_name, prog->type, |
1304 | 0 | prog->expected_attach_type, kern_member_idx); |
1305 | 0 | return -EINVAL; |
1306 | 0 | } |
1307 | | |
1308 | 0 | st_ops->kern_func_off[i] = kern_data_off + kern_moff; |
1309 | | |
1310 | 0 | pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n", |
1311 | 0 | map->name, mname, prog->name, moff, |
1312 | 0 | kern_moff); |
1313 | | |
1314 | 0 | continue; |
1315 | 0 | } |
1316 | | |
1317 | 0 | kern_msize = btf__resolve_size(kern_btf, kern_mtype_id); |
1318 | 0 | if (kern_msize < 0 || msize != kern_msize) { |
1319 | 0 | pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n", |
1320 | 0 | map->name, mname, (ssize_t)msize, |
1321 | 0 | (ssize_t)kern_msize); |
1322 | 0 | return -ENOTSUP; |
1323 | 0 | } |
1324 | | |
1325 | 0 | pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n", |
1326 | 0 | map->name, mname, (unsigned int)msize, |
1327 | 0 | moff, kern_moff); |
1328 | 0 | memcpy(kern_mdata, mdata, msize); |
1329 | 0 | } |
1330 | | |
1331 | 0 | return 0; |
1332 | 0 | } |
1333 | | |
1334 | | static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj) |
1335 | 0 | { |
1336 | 0 | struct bpf_map *map; |
1337 | 0 | size_t i; |
1338 | 0 | int err; |
1339 | | |
1340 | 0 | for (i = 0; i < obj->nr_maps; i++) { |
1341 | 0 | map = &obj->maps[i]; |
1342 | | |
1343 | 0 | if (!bpf_map__is_struct_ops(map)) |
1344 | 0 | continue; |
1345 | | |
1346 | 0 | if (!map->autocreate) |
1347 | 0 | continue; |
1348 | | |
1349 | 0 | err = bpf_map__init_kern_struct_ops(map); |
1350 | 0 | if (err) |
1351 | 0 | return err; |
1352 | 0 | } |
1353 | | |
1354 | 0 | return 0; |
1355 | 0 | } |
1356 | | |
1357 | | static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name, |
1358 | | int shndx, Elf_Data *data) |
1359 | 165 | { |
1360 | 165 | const struct btf_type *type, *datasec; |
1361 | 165 | const struct btf_var_secinfo *vsi; |
1362 | 165 | struct bpf_struct_ops *st_ops; |
1363 | 165 | const char *tname, *var_name; |
1364 | 165 | __s32 type_id, datasec_id; |
1365 | 165 | const struct btf *btf; |
1366 | 165 | struct bpf_map *map; |
1367 | 165 | __u32 i; |
1368 | | |
1369 | 165 | if (shndx == -1) |
1370 | 0 | return 0; |
1371 | | |
1372 | 165 | btf = obj->btf; |
1373 | 165 | datasec_id = btf__find_by_name_kind(btf, sec_name, |
1374 | 165 | BTF_KIND_DATASEC); |
1375 | 165 | if (datasec_id < 0) { |
1376 | 37 | pr_warn("struct_ops init: DATASEC %s not found\n", |
1377 | 37 | sec_name); |
1378 | 37 | return -EINVAL; |
1379 | 37 | } |
1380 | | |
1381 | 128 | datasec = btf__type_by_id(btf, datasec_id); |
1382 | 128 | vsi = btf_var_secinfos(datasec); |
1383 | 178 | for (i = 0; i < btf_vlen(datasec); i++, vsi++) { |
1384 | 79 | type = btf__type_by_id(obj->btf, vsi->type); |
1385 | 79 | var_name = btf__name_by_offset(obj->btf, type->name_off); |
1386 | | |
1387 | 79 | type_id = btf__resolve_type(obj->btf, vsi->type); |
1388 | 79 | if (type_id < 0) { |
1389 | 11 | pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n", |
1390 | 11 | vsi->type, sec_name); |
1391 | 11 | return -EINVAL; |
1392 | 11 | } |
1393 | | |
1394 | 68 | type = btf__type_by_id(obj->btf, type_id); |
1395 | 68 | tname = btf__name_by_offset(obj->btf, type->name_off); |
1396 | 68 | if (!tname[0]) { |
1397 | 3 | pr_warn("struct_ops init: anonymous type is not supported\n"); |
1398 | 3 | return -ENOTSUP; |
1399 | 3 | } |
1400 | 65 | if (!btf_is_struct(type)) { |
1401 | 5 | pr_warn("struct_ops init: %s is not a struct\n", tname); |
1402 | 5 | return -EINVAL; |
1403 | 5 | } |
1404 | | |
1405 | 60 | map = bpf_object__add_map(obj); |
1406 | 60 | if (IS_ERR(map)) |
1407 | 0 | return PTR_ERR(map); |
1408 | | |
1409 | 60 | map->sec_idx = shndx; |
1410 | 60 | map->sec_offset = vsi->offset; |
1411 | 60 | map->name = strdup(var_name); |
1412 | 60 | if (!map->name) |
1413 | 0 | return -ENOMEM; |
1414 | 60 | map->btf_value_type_id = type_id; |
1415 | | |
1416 | | /* Follow the same convention as for program autoload: |
1417 | | * SEC("?.struct_ops") means the map is not created by default. |
1418 | | */ |
1419 | 60 | if (sec_name[0] == '?') { |
1420 | 17 | map->autocreate = false; |
1421 | | /* from now on forget there was ? in section name */ |
1422 | 17 | sec_name++; |
1423 | 17 | } |
1424 | | |
1425 | 60 | map->def.type = BPF_MAP_TYPE_STRUCT_OPS; |
1426 | 60 | map->def.key_size = sizeof(int); |
1427 | 60 | map->def.value_size = type->size; |
1428 | 60 | map->def.max_entries = 1; |
1429 | 60 | map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0; |
1430 | 60 | map->autoattach = true; |
1431 | | |
1432 | 60 | map->st_ops = calloc(1, sizeof(*map->st_ops)); |
1433 | 60 | if (!map->st_ops) |
1434 | 0 | return -ENOMEM; |
1435 | 60 | st_ops = map->st_ops; |
1436 | 60 | st_ops->data = malloc(type->size); |
1437 | 60 | st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs)); |
1438 | 60 | st_ops->kern_func_off = malloc(btf_vlen(type) * |
1439 | 60 | sizeof(*st_ops->kern_func_off)); |
1440 | 60 | if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off) |
1441 | 0 | return -ENOMEM; |
1442 | | |
1443 | 60 | if (vsi->offset + type->size > data->d_size) { |
1444 | 10 | pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n", |
1445 | 10 | var_name, sec_name); |
1446 | 10 | return -EINVAL; |
1447 | 10 | } |
1448 | | |
1449 | 50 | memcpy(st_ops->data, |
1450 | 50 | data->d_buf + vsi->offset, |
1451 | 50 | type->size); |
1452 | 50 | st_ops->type_id = type_id; |
1453 | | |
1454 | 50 | pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n", |
1455 | 50 | tname, type_id, var_name, vsi->offset); |
1456 | 50 | } |
1457 | | |
1458 | 99 | return 0; |
1459 | 128 | } |
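
For reference, the DATASEC this function parses comes from a BPF-side declaration like the following sketch (modeled on the bpf_dctcp selftest; SEC() and BPF_PROG() come from bpf_helpers.h/bpf_tracing.h, and struct tcp_congestion_ops is assumed to come from vmlinux.h):

SEC("struct_ops/dctcp_init")
void BPF_PROG(dctcp_init, struct sock *sk)
{
	/* ... initialize per-socket state ... */
}

SEC(".struct_ops")
struct tcp_congestion_ops dctcp = {
	.init = (void *)dctcp_init,
	.name = "bpf_dctcp",	/* the map itself is named after the variable, 'dctcp' */
};
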
1460 | | |
1461 | | static int bpf_object_init_struct_ops(struct bpf_object *obj) |
1462 | 2.28k | { |
1463 | 2.28k | const char *sec_name; |
1464 | 2.28k | int sec_idx, err; |
1465 | | |
1466 | 20.4k | for (sec_idx = 0; sec_idx < obj->efile.sec_cnt; ++sec_idx) { |
1467 | 18.2k | struct elf_sec_desc *desc = &obj->efile.secs[sec_idx]; |
1468 | | |
1469 | 18.2k | if (desc->sec_type != SEC_ST_OPS) |
1470 | 18.0k | continue; |
1471 | | |
1472 | 165 | sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); |
1473 | 165 | if (!sec_name) |
1474 | 0 | return -LIBBPF_ERRNO__FORMAT; |
1475 | | |
1476 | 165 | err = init_struct_ops_maps(obj, sec_name, sec_idx, desc->data); |
1477 | 165 | if (err) |
1478 | 66 | return err; |
1479 | 165 | } |
1480 | | |
1481 | 2.21k | return 0; |
1482 | 2.28k | } |
1483 | | |
1484 | | static struct bpf_object *bpf_object__new(const char *path, |
1485 | | const void *obj_buf, |
1486 | | size_t obj_buf_sz, |
1487 | | const char *obj_name) |
1488 | 11.5k | { |
1489 | 11.5k | struct bpf_object *obj; |
1490 | 11.5k | char *end; |
1491 | | |
1492 | 11.5k | obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1); |
1493 | 11.5k | if (!obj) { |
1494 | 0 | pr_warn("alloc memory failed for %s\n", path); |
1495 | 0 | return ERR_PTR(-ENOMEM); |
1496 | 0 | } |
1497 | | |
1498 | 11.5k | strcpy(obj->path, path); |
1499 | 11.5k | if (obj_name) { |
1500 | 11.5k | libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name)); |
1501 | 11.5k | } else { |
1502 | | /* Use the GNU version of basename(), which doesn't modify its argument. */ |
1503 | 0 | libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name)); |
1504 | 0 | end = strchr(obj->name, '.'); |
1505 | 0 | if (end) |
1506 | 0 | *end = 0; |
1507 | 0 | } |
1508 | | |
1509 | 11.5k | obj->efile.fd = -1; |
1510 | | /* |
1511 | | * The caller of this function should also call |
1512 | | * bpf_object__elf_finish() after data collection to return |
1513 | | * obj_buf to the user. Otherwise we would have to duplicate the buffer |
1514 | | * to avoid the user freeing it before ELF processing finishes. |
1515 | | */ |
1516 | 11.5k | obj->efile.obj_buf = obj_buf; |
1517 | 11.5k | obj->efile.obj_buf_sz = obj_buf_sz; |
1518 | 11.5k | obj->efile.btf_maps_shndx = -1; |
1519 | 11.5k | obj->kconfig_map_idx = -1; |
1520 | | |
1521 | 11.5k | obj->kern_version = get_kernel_version(); |
1522 | 11.5k | obj->state = OBJ_OPEN; |
1523 | | |
1524 | 11.5k | return obj; |
1525 | 11.5k | } |
1526 | | |
1527 | | static void bpf_object__elf_finish(struct bpf_object *obj) |
1528 | 15.5k | { |
1529 | 15.5k | if (!obj->efile.elf) |
1530 | 4.02k | return; |
1531 | | |
1532 | 11.4k | elf_end(obj->efile.elf); |
1533 | 11.4k | obj->efile.elf = NULL; |
1534 | 11.4k | obj->efile.ehdr = NULL; |
1535 | 11.4k | obj->efile.symbols = NULL; |
1536 | 11.4k | obj->efile.arena_data = NULL; |
1537 | | |
1538 | 11.4k | zfree(&obj->efile.secs); |
1539 | 11.4k | obj->efile.sec_cnt = 0; |
1540 | 11.4k | zclose(obj->efile.fd); |
1541 | 11.4k | obj->efile.obj_buf = NULL; |
1542 | 11.4k | obj->efile.obj_buf_sz = 0; |
1543 | 11.4k | } |
1544 | | |
1545 | | static int bpf_object__elf_init(struct bpf_object *obj) |
1546 | 11.5k | { |
1547 | 11.5k | Elf64_Ehdr *ehdr; |
1548 | 11.5k | int err = 0; |
1549 | 11.5k | Elf *elf; |
1550 | | |
1551 | 11.5k | if (obj->efile.elf) { |
1552 | 0 | pr_warn("elf: init internal error\n"); |
1553 | 0 | return -LIBBPF_ERRNO__LIBELF; |
1554 | 0 | } |
1555 | | |
1556 | 11.5k | if (obj->efile.obj_buf_sz > 0) { |
1557 | | /* obj_buf should have been validated by bpf_object__open_mem(). */ |
1558 | 11.5k | elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz); |
1559 | 11.5k | } else { |
1560 | 0 | obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC); |
1561 | 0 | if (obj->efile.fd < 0) { |
1562 | 0 | err = -errno; |
1563 | 0 | pr_warn("elf: failed to open %s: %s\n", obj->path, errstr(err)); |
1564 | 0 | return err; |
1565 | 0 | } |
1566 | | |
1567 | 0 | elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL); |
1568 | 0 | } |
1569 | | |
1570 | 11.5k | if (!elf) { |
1571 | 86 | pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1)); |
1572 | 86 | err = -LIBBPF_ERRNO__LIBELF; |
1573 | 86 | goto errout; |
1574 | 86 | } |
1575 | | |
1576 | 11.4k | obj->efile.elf = elf; |
1577 | | |
1578 | 11.4k | if (elf_kind(elf) != ELF_K_ELF) { |
1579 | 120 | err = -LIBBPF_ERRNO__FORMAT; |
1580 | 120 | pr_warn("elf: '%s' is not a proper ELF object\n", obj->path); |
1581 | 120 | goto errout; |
1582 | 120 | } |
1583 | | |
1584 | 11.3k | if (gelf_getclass(elf) != ELFCLASS64) { |
1585 | 507 | err = -LIBBPF_ERRNO__FORMAT; |
1586 | 507 | pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path); |
1587 | 507 | goto errout; |
1588 | 507 | } |
1589 | | |
1590 | 10.8k | obj->efile.ehdr = ehdr = elf64_getehdr(elf); |
1591 | 10.8k | if (!obj->efile.ehdr) { |
1592 | 0 | pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1)); |
1593 | 0 | err = -LIBBPF_ERRNO__FORMAT; |
1594 | 0 | goto errout; |
1595 | 0 | } |
1596 | | |
1597 | | /* Validate ELF object endianness... */ |
1598 | 10.8k | if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB && |
1599 | 10.8k | ehdr->e_ident[EI_DATA] != ELFDATA2MSB) { |
1600 | 0 | err = -LIBBPF_ERRNO__ENDIAN; |
1601 | 0 | pr_warn("elf: '%s' has unknown byte order\n", obj->path); |
1602 | 0 | goto errout; |
1603 | 0 | } |
1604 | | /* and save after bpf_object_open() frees ELF data */ |
1605 | 10.8k | obj->byteorder = ehdr->e_ident[EI_DATA]; |
1606 | | |
1607 | 10.8k | if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) { |
1608 | 28 | pr_warn("elf: failed to get section names section index for %s: %s\n", |
1609 | 28 | obj->path, elf_errmsg(-1)); |
1610 | 28 | err = -LIBBPF_ERRNO__FORMAT; |
1611 | 28 | goto errout; |
1612 | 28 | } |
1613 | | |
1614 | | /* ELF is corrupted/truncated, avoid calling elf_strptr. */ |
1615 | 10.8k | if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) { |
1616 | 1.10k | pr_warn("elf: failed to get section names strings from %s: %s\n", |
1617 | 1.10k | obj->path, elf_errmsg(-1)); |
1618 | 1.10k | err = -LIBBPF_ERRNO__FORMAT; |
1619 | 1.10k | goto errout; |
1620 | 1.10k | } |
1621 | | |
1622 | | /* Old LLVM set e_machine to EM_NONE */ |
1623 | 9.73k | if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) { |
1624 | 380 | pr_warn("elf: %s is not a valid eBPF object file\n", obj->path); |
1625 | 380 | err = -LIBBPF_ERRNO__FORMAT; |
1626 | 380 | goto errout; |
1627 | 380 | } |
1628 | | |
1629 | 9.35k | return 0; |
1630 | 2.22k | errout: |
1631 | 2.22k | bpf_object__elf_finish(obj); |
1632 | 2.22k | return err; |
1633 | 9.73k | } |
1634 | | |
1635 | | static bool is_native_endianness(struct bpf_object *obj) |
1636 | 6.23k | { |
1637 | 6.23k | #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ |
1638 | 6.23k | return obj->byteorder == ELFDATA2LSB; |
1639 | | #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ |
1640 | | return obj->byteorder == ELFDATA2MSB; |
1641 | | #else |
1642 | | # error "Unrecognized __BYTE_ORDER__" |
1643 | | #endif |
1644 | 6.23k | } |
1645 | | |
1646 | | static int |
1647 | | bpf_object__init_license(struct bpf_object *obj, void *data, size_t size) |
1648 | 807 | { |
1649 | 807 | if (!data) { |
1650 | 1 | pr_warn("invalid license section in %s\n", obj->path); |
1651 | 1 | return -LIBBPF_ERRNO__FORMAT; |
1652 | 1 | } |
1653 | | /* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't |
1654 | | * go over allowed ELF data section buffer |
1655 | | */ |
1656 | 806 | libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license))); |
1657 | 806 | pr_debug("license of %s is %s\n", obj->path, obj->license); |
1658 | 806 | return 0; |
1659 | 807 | } |
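
As context for what this function parses: on the BPF side, the "license" ELF section is produced by an ordinary global variable placed there via SEC(). A minimal sketch, assuming the SEC() macro from bpf_helpers.h (the variable name _license is conventional, not required):

    #include <bpf/bpf_helpers.h>

    /* Ends up in the "license" ELF section; bpf_object__init_license()
     * above copies this string into obj->license.
     */
    char _license[] SEC("license") = "GPL";
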
1660 | | |
1661 | | static int |
1662 | | bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size) |
1663 | 52 | { |
1664 | 52 | __u32 kver; |
1665 | | |
1666 | 52 | if (!data || size != sizeof(kver)) { |
1667 | 10 | pr_warn("invalid kver section in %s\n", obj->path); |
1668 | 10 | return -LIBBPF_ERRNO__FORMAT; |
1669 | 10 | } |
1670 | 42 | memcpy(&kver, data, sizeof(kver)); |
1671 | 42 | obj->kern_version = kver; |
1672 | 42 | pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version); |
1673 | 42 | return 0; |
1674 | 52 | } |
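
The "version" section is the legacy counterpart: exactly sizeof(__u32) bytes holding a kernel version code, which only old kernels required (for kprobe programs). A sketch of the BPF-side definition, assuming bpf_helpers.h and linux/version.h are available:

    #include <linux/version.h>
    #include <bpf/bpf_helpers.h>

    /* Must be exactly 4 bytes to pass the size check in
     * bpf_object__init_kversion() above.
     */
    __u32 _version SEC("version") = LINUX_VERSION_CODE;
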
1675 | | |
1676 | | static bool bpf_map_type__is_map_in_map(enum bpf_map_type type) |
1677 | 105 | { |
1678 | 105 | if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS || |
1679 | 105 | type == BPF_MAP_TYPE_HASH_OF_MAPS) |
1680 | 58 | return true; |
1681 | 47 | return false; |
1682 | 105 | } |
1683 | | |
1684 | | static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size) |
1685 | 513 | { |
1686 | 513 | Elf_Data *data; |
1687 | 513 | Elf_Scn *scn; |
1688 | | |
1689 | 513 | if (!name) |
1690 | 0 | return -EINVAL; |
1691 | | |
1692 | 513 | scn = elf_sec_by_name(obj, name); |
1693 | 513 | data = elf_sec_data(obj, scn); |
1694 | 513 | if (data) { |
1695 | 361 | *size = data->d_size; |
1696 | 361 | return 0; /* found it */ |
1697 | 361 | } |
1698 | | |
1699 | 152 | return -ENOENT; |
1700 | 513 | } |
1701 | | |
1702 | | static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name) |
1703 | 2.09k | { |
1704 | 2.09k | Elf_Data *symbols = obj->efile.symbols; |
1705 | 2.09k | const char *sname; |
1706 | 2.09k | size_t si; |
1707 | | |
1708 | 49.2k | for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) { |
1709 | 49.0k | Elf64_Sym *sym = elf_sym_by_idx(obj, si); |
1710 | | |
1711 | 49.0k | if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT) |
1712 | 42.5k | continue; |
1713 | | |
1714 | 6.54k | if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL && |
1715 | 6.54k | ELF64_ST_BIND(sym->st_info) != STB_WEAK) |
1716 | 3.92k | continue; |
1717 | | |
1718 | 2.61k | sname = elf_sym_str(obj, sym->st_name); |
1719 | 2.61k | if (!sname) { |
1720 | 21 | pr_warn("failed to get sym name string for var %s\n", name); |
1721 | 21 | return ERR_PTR(-EIO); |
1722 | 21 | } |
1723 | 2.59k | if (strcmp(name, sname) == 0) |
1724 | 1.84k | return sym; |
1725 | 2.59k | } |
1726 | | |
1727 | 235 | return ERR_PTR(-ENOENT); |
1728 | 2.09k | } |
1729 | | |
1730 | | #ifndef MFD_CLOEXEC |
1731 | | #define MFD_CLOEXEC 0x0001U |
1732 | | #endif |
1733 | | #ifndef MFD_NOEXEC_SEAL |
1734 | 5.69k | #define MFD_NOEXEC_SEAL 0x0008U |
1735 | | #endif |
1736 | | |
1737 | | static int create_placeholder_fd(void) |
1738 | 2.84k | { |
1739 | 2.84k | unsigned int flags = MFD_CLOEXEC | MFD_NOEXEC_SEAL; |
1740 | 2.84k | const char *name = "libbpf-placeholder-fd"; |
1741 | 2.84k | int fd; |
1742 | | |
1743 | 2.84k | fd = ensure_good_fd(sys_memfd_create(name, flags)); |
1744 | 2.84k | if (fd >= 0) |
1745 | 0 | return fd; |
1746 | 2.84k | else if (errno != EINVAL) |
1747 | 0 | return -errno; |
1748 | | |
1749 | | /* Possibly running on kernel without MFD_NOEXEC_SEAL */ |
1750 | 2.84k | fd = ensure_good_fd(sys_memfd_create(name, flags & ~MFD_NOEXEC_SEAL)); |
1751 | 2.84k | if (fd < 0) |
1752 | 0 | return -errno; |
1753 | 2.84k | return fd; |
1754 | 2.84k | } |
1755 | | |
1756 | | static struct bpf_map *bpf_object__add_map(struct bpf_object *obj) |
1757 | 2.84k | { |
1758 | 2.84k | struct bpf_map *map; |
1759 | 2.84k | int err; |
1760 | | |
1761 | 2.84k | err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap, |
1762 | 2.84k | sizeof(*obj->maps), obj->nr_maps + 1); |
1763 | 2.84k | if (err) |
1764 | 0 | return ERR_PTR(err); |
1765 | | |
1766 | 2.84k | map = &obj->maps[obj->nr_maps++]; |
1767 | 2.84k | map->obj = obj; |
1768 | | /* Preallocate map FD without actually creating BPF map just yet. |
1769 | | * These map FD "placeholders" will be reused later without changing |
1770 | | * FD value when map is actually created in the kernel. |
1771 | | * |
1772 | | * This is useful to be able to perform BPF program relocations |
1773 | | * without having to create BPF maps before that step. This allows us |
1774 | | * to finalize and load BTF very late in BPF object's loading phase, |
1775 | | * right before BPF maps have to be created and BPF programs have to |
1776 | | * be loaded. By having these map FD placeholders we can perform all |
1777 | | * the sanitizations, relocations, and any other adjustments before we |
1778 | | * start creating actual BPF kernel objects (BTF, maps, progs). |
1779 | | */ |
1780 | 2.84k | map->fd = create_placeholder_fd(); |
1781 | 2.84k | if (map->fd < 0) |
1782 | 0 | return ERR_PTR(map->fd); |
1783 | 2.84k | map->inner_map_fd = -1; |
1784 | 2.84k | map->autocreate = true; |
1785 | | |
1786 | 2.84k | return map; |
1787 | 2.84k | } |
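
The placeholder works because an FD number can later be rebound, in place, to the real BPF map once it is created. A minimal sketch of that reuse step, assuming dup3() as the rebinding mechanism (fill_placeholder_fd is a hypothetical helper, not a libbpf API):

    #define _GNU_SOURCE /* for dup3() */
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Rebind placeholder_fd to the freshly created map in real_fd
     * without changing its numeric value, so anything that already
     * recorded the placeholder number stays valid.
     */
    static int fill_placeholder_fd(int placeholder_fd, int real_fd)
    {
            if (dup3(real_fd, placeholder_fd, O_CLOEXEC) < 0)
                    return -errno;
            close(real_fd); /* placeholder_fd now owns the reference */
            return placeholder_fd;
    }
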
1788 | | |
1789 | | static size_t array_map_mmap_sz(unsigned int value_sz, unsigned int max_entries) |
1790 | 3.65k | { |
1791 | 3.65k | const long page_sz = sysconf(_SC_PAGE_SIZE); |
1792 | 3.65k | size_t map_sz; |
1793 | | |
1794 | 3.65k | map_sz = (size_t)roundup(value_sz, 8) * max_entries; |
1795 | 3.65k | map_sz = roundup(map_sz, page_sz); |
1796 | 3.65k | return map_sz; |
1797 | 3.65k | } |
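
Worked example: with value_sz = 28 and max_entries = 100 on a system with 4096-byte pages, roundup(28, 8) * 100 = 32 * 100 = 3200 bytes of data, which then rounds up to a single 4096-byte page.
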
1798 | | |
1799 | | static size_t bpf_map_mmap_sz(const struct bpf_map *map) |
1800 | 3.65k | { |
1801 | 3.65k | const long page_sz = sysconf(_SC_PAGE_SIZE); |
1802 | | |
1803 | 3.65k | switch (map->def.type) { |
1804 | 3.65k | case BPF_MAP_TYPE_ARRAY: |
1805 | 3.65k | return array_map_mmap_sz(map->def.value_size, map->def.max_entries); |
1806 | 0 | case BPF_MAP_TYPE_ARENA: |
1807 | 0 | return page_sz * map->def.max_entries; |
1808 | 0 | default: |
1809 | 0 | return 0; /* not supported */ |
1810 | 3.65k | } |
1811 | 3.65k | } |
1812 | | |
1813 | | static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz) |
1814 | 0 | { |
1815 | 0 | void *mmaped; |
1816 | |
1817 | 0 | if (!map->mmaped) |
1818 | 0 | return -EINVAL; |
1819 | | |
1820 | 0 | if (old_sz == new_sz) |
1821 | 0 | return 0; |
1822 | | |
1823 | 0 | mmaped = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0); |
1824 | 0 | if (mmaped == MAP_FAILED) |
1825 | 0 | return -errno; |
1826 | | |
1827 | 0 | memcpy(mmaped, map->mmaped, min(old_sz, new_sz)); |
1828 | 0 | munmap(map->mmaped, old_sz); |
1829 | 0 | map->mmaped = mmaped; |
1830 | 0 | return 0; |
1831 | 0 | } |
1832 | | |
1833 | | static char *internal_map_name(struct bpf_object *obj, const char *real_name) |
1834 | 1.85k | { |
1835 | 1.85k | char map_name[BPF_OBJ_NAME_LEN], *p; |
1836 | 1.85k | int pfx_len, sfx_len = max((size_t)7, strlen(real_name)); |
1837 | | |
1838 | | /* This is one of the more confusing parts of libbpf for various |
1839 | | * reasons, some of which are historical. The original idea for naming |
1840 | | * internal names was to include as much of BPF object name prefix as |
1841 | | * possible, so that it can be distinguished from similar internal |
1842 | | * maps of a different BPF object. |
1843 | | * As an example, let's say we have bpf_object named 'my_object_name' |
1844 | | * and internal map corresponding to '.rodata' ELF section. The final |
1845 | | * map name advertised to user and to the kernel will be |
1846 | | * 'my_objec.rodata', taking first 8 characters of object name and |
1847 | | * entire 7 characters of '.rodata'. |
1848 | | * Somewhat confusingly, if internal map ELF section name is shorter |
1849 | | * than 7 characters, e.g., '.bss', we still reserve 7 characters |
1850 | | * for the suffix, even though we only have 4 actual characters, and |
1851 | | * resulting map will be called 'my_objec.bss', not even using all 15 |
1852 | | * characters allowed by the kernel. Oh well, at least the truncated |
1853 | | * object name is somewhat consistent in this case. But if the map |
1854 | | * name is '.kconfig', we'll still have entirety of '.kconfig' added |
1855 | | * (8 chars) and thus will be left with only first 7 characters of the |
1856 | | * object name ('my_obje'). Happy guessing, user, that the final map |
1857 | | * name will be "my_obje.kconfig". |
1858 | | * Now, with libbpf starting to support arbitrarily named .rodata.* |
1859 | | * and .data.* data sections, it's possible that ELF section name is |
1860 | | * longer than allowed 15 chars, so we now need to be careful to take |
1861 | | * only up to 15 first characters of ELF name, taking no BPF object |
1862 | | * name characters at all. So '.rodata.abracadabra' will result in |
1863 | | * '.rodata.abracad' kernel and user-visible name. |
1864 | | * We need to keep this convoluted logic intact for .data, .bss and |
1865 | | * .rodata maps, but for new custom .data.custom and .rodata.custom |
1866 | | * maps we use their ELF names as is, not prepending bpf_object name |
1867 | | * in front. We still need to truncate them to 15 characters for the |
1868 | | * kernel. Full name can be recovered for such maps by using DATASEC |
1869 | | * BTF type associated with such map's value type, though. |
1870 | | */ |
1871 | 1.85k | if (sfx_len >= BPF_OBJ_NAME_LEN) |
1872 | 656 | sfx_len = BPF_OBJ_NAME_LEN - 1; |
1873 | | |
1874 | | /* if there are two or more dots in map name, it's a custom dot map */ |
1875 | 1.85k | if (strchr(real_name + 1, '.') != NULL) |
1876 | 1.13k | pfx_len = 0; |
1877 | 726 | else |
1878 | 726 | pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name)); |
1879 | | |
1880 | 1.85k | snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name, |
1881 | 1.85k | sfx_len, real_name); |
1882 | | |
1883 | | /* sanitize map name to characters allowed by kernel */ |
1884 | 25.7k | for (p = map_name; *p && p < map_name + sizeof(map_name); p++) |
1885 | 23.8k | if (!isalnum(*p) && *p != '_' && *p != '.') |
1886 | 3.03k | *p = '_'; |
1887 | | |
1888 | 1.85k | return strdup(map_name); |
1889 | 1.85k | } |
1890 | | |
1891 | | static int |
1892 | | map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map); |
1893 | | |
1894 | | /* Internal BPF map is mmap()'able only if at least one of corresponding |
1895 | | * DATASEC's VARs are to be exposed through BPF skeleton. I.e., it's a GLOBAL |
1896 | | * variable and it's not marked as __hidden (which turns it into, effectively, |
1897 | | * a STATIC variable). |
1898 | | */ |
1899 | | static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map) |
1900 | 1.85k | { |
1901 | 1.85k | const struct btf_type *t, *vt; |
1902 | 1.85k | struct btf_var_secinfo *vsi; |
1903 | 1.85k | int i, n; |
1904 | | |
1905 | 1.85k | if (!map->btf_value_type_id) |
1906 | 1.68k | return false; |
1907 | | |
1908 | 169 | t = btf__type_by_id(obj->btf, map->btf_value_type_id); |
1909 | 169 | if (!btf_is_datasec(t)) |
1910 | 38 | return false; |
1911 | | |
1912 | 131 | vsi = btf_var_secinfos(t); |
1913 | 199 | for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) { |
1914 | 155 | vt = btf__type_by_id(obj->btf, vsi->type); |
1915 | 155 | if (!btf_is_var(vt)) |
1916 | 43 | continue; |
1917 | | |
1918 | 112 | if (btf_var(vt)->linkage != BTF_VAR_STATIC) |
1919 | 87 | return true; |
1920 | 112 | } |
1921 | | |
1922 | 44 | return false; |
1923 | 131 | } |
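
Illustration of the two cases the loop distinguishes, assuming the __hidden macro from bpf_helpers.h (which lowers a global's BTF linkage to static, as the comment above describes):

    #include <bpf/bpf_helpers.h>

    int exposed_cnt;           /* global VAR -> map becomes mmap()'able */
    __hidden int internal_cnt; /* static linkage -> not exposed */
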
1924 | | |
1925 | | static int |
1926 | | bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, |
1927 | | const char *real_name, int sec_idx, void *data, size_t data_sz) |
1928 | 1.85k | { |
1929 | 1.85k | struct bpf_map_def *def; |
1930 | 1.85k | struct bpf_map *map; |
1931 | 1.85k | size_t mmap_sz; |
1932 | 1.85k | int err; |
1933 | | |
1934 | 1.85k | map = bpf_object__add_map(obj); |
1935 | 1.85k | if (IS_ERR(map)) |
1936 | 0 | return PTR_ERR(map); |
1937 | | |
1938 | 1.85k | map->libbpf_type = type; |
1939 | 1.85k | map->sec_idx = sec_idx; |
1940 | 1.85k | map->sec_offset = 0; |
1941 | 1.85k | map->real_name = strdup(real_name); |
1942 | 1.85k | map->name = internal_map_name(obj, real_name); |
1943 | 1.85k | if (!map->real_name || !map->name) { |
1944 | 0 | zfree(&map->real_name); |
1945 | 0 | zfree(&map->name); |
1946 | 0 | return -ENOMEM; |
1947 | 0 | } |
1948 | | |
1949 | 1.85k | def = &map->def; |
1950 | 1.85k | def->type = BPF_MAP_TYPE_ARRAY; |
1951 | 1.85k | def->key_size = sizeof(int); |
1952 | 1.85k | def->value_size = data_sz; |
1953 | 1.85k | def->max_entries = 1; |
1954 | 1.85k | def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG |
1955 | 1.85k | ? BPF_F_RDONLY_PROG : 0; |
1956 | | |
1957 | | /* failures are fine because of maps like .rodata.str1.1 */ |
1958 | 1.85k | (void) map_fill_btf_type_info(obj, map); |
1959 | | |
1960 | 1.85k | if (map_is_mmapable(obj, map)) |
1961 | 87 | def->map_flags |= BPF_F_MMAPABLE; |
1962 | | |
1963 | 1.85k | pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n", |
1964 | 1.85k | map->name, map->sec_idx, map->sec_offset, def->map_flags); |
1965 | | |
1966 | 1.85k | mmap_sz = bpf_map_mmap_sz(map); |
1967 | 1.85k | map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, |
1968 | 1.85k | MAP_SHARED | MAP_ANONYMOUS, -1, 0); |
1969 | 1.85k | if (map->mmaped == MAP_FAILED) { |
1970 | 57 | err = -errno; |
1971 | 57 | map->mmaped = NULL; |
1972 | 57 | pr_warn("failed to alloc map '%s' content buffer: %s\n", map->name, errstr(err)); |
1973 | 57 | zfree(&map->real_name); |
1974 | 57 | zfree(&map->name); |
1975 | 57 | return err; |
1976 | 57 | } |
1977 | | |
1978 | 1.80k | if (data) |
1979 | 988 | memcpy(map->mmaped, data, data_sz); |
1980 | | |
1981 | 1.80k | pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name); |
1982 | 1.80k | return 0; |
1983 | 1.85k | } |
1984 | | |
1985 | | static int bpf_object__init_global_data_maps(struct bpf_object *obj) |
1986 | 2.33k | { |
1987 | 2.33k | struct elf_sec_desc *sec_desc; |
1988 | 2.33k | const char *sec_name; |
1989 | 2.33k | int err = 0, sec_idx; |
1990 | | |
1991 | | /* |
1992 | | * Populate obj->maps with libbpf internal maps. |
1993 | | */ |
1994 | 19.4k | for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) { |
1995 | 17.1k | sec_desc = &obj->efile.secs[sec_idx]; |
1996 | | |
1997 | | /* Skip recognized sections with size 0. */ |
1998 | 17.1k | if (!sec_desc->data || sec_desc->data->d_size == 0) |
1999 | 14.0k | continue; |
2000 | | |
2001 | 3.09k | switch (sec_desc->sec_type) { |
2002 | 494 | case SEC_DATA: |
2003 | 494 | sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); |
2004 | 494 | err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA, |
2005 | 494 | sec_name, sec_idx, |
2006 | 494 | sec_desc->data->d_buf, |
2007 | 494 | sec_desc->data->d_size); |
2008 | 494 | break; |
2009 | 494 | case SEC_RODATA: |
2010 | 494 | obj->has_rodata = true; |
2011 | 494 | sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); |
2012 | 494 | err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA, |
2013 | 494 | sec_name, sec_idx, |
2014 | 494 | sec_desc->data->d_buf, |
2015 | 494 | sec_desc->data->d_size); |
2016 | 494 | break; |
2017 | 810 | case SEC_BSS: |
2018 | 810 | sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); |
2019 | 810 | err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS, |
2020 | 810 | sec_name, sec_idx, |
2021 | 810 | NULL, |
2022 | 810 | sec_desc->data->d_size); |
2023 | 810 | break; |
2024 | 1.29k | default: |
2025 | | /* skip */ |
2026 | 1.29k | break; |
2027 | 3.09k | } |
2028 | 3.09k | if (err) |
2029 | 22 | return err; |
2030 | 3.09k | } |
2031 | 2.31k | return 0; |
2032 | 2.33k | } |
2033 | | |
2034 | | |
2035 | | static struct extern_desc *find_extern_by_name(const struct bpf_object *obj, |
2036 | | const void *name) |
2037 | 1.11k | { |
2038 | 1.11k | int i; |
2039 | | |
2040 | 2.99k | for (i = 0; i < obj->nr_extern; i++) { |
2041 | 2.56k | if (strcmp(obj->externs[i].name, name) == 0) |
2042 | 673 | return &obj->externs[i]; |
2043 | 2.56k | } |
2044 | 437 | return NULL; |
2045 | 1.11k | } |
2046 | | |
2047 | | static struct extern_desc *find_extern_by_name_with_len(const struct bpf_object *obj, |
2048 | | const void *name, int len) |
2049 | 0 | { |
2050 | 0 | const char *ext_name; |
2051 | 0 | int i; |
2052 | |
2053 | 0 | for (i = 0; i < obj->nr_extern; i++) { |
2054 | 0 | ext_name = obj->externs[i].name; |
2055 | 0 | if (strlen(ext_name) == len && strncmp(ext_name, name, len) == 0) |
2056 | 0 | return &obj->externs[i]; |
2057 | 0 | } |
2058 | 0 | return NULL; |
2059 | 0 | } |
2060 | | |
2061 | | static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val, |
2062 | | char value) |
2063 | 0 | { |
2064 | 0 | switch (ext->kcfg.type) { |
2065 | 0 | case KCFG_BOOL: |
2066 | 0 | if (value == 'm') { |
2067 | 0 | pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n", |
2068 | 0 | ext->name, value); |
2069 | 0 | return -EINVAL; |
2070 | 0 | } |
2071 | 0 | *(bool *)ext_val = value == 'y' ? true : false; |
2072 | 0 | break; |
2073 | 0 | case KCFG_TRISTATE: |
2074 | 0 | if (value == 'y') |
2075 | 0 | *(enum libbpf_tristate *)ext_val = TRI_YES; |
2076 | 0 | else if (value == 'm') |
2077 | 0 | *(enum libbpf_tristate *)ext_val = TRI_MODULE; |
2078 | 0 | else /* value == 'n' */ |
2079 | 0 | *(enum libbpf_tristate *)ext_val = TRI_NO; |
2080 | 0 | break; |
2081 | 0 | case KCFG_CHAR: |
2082 | 0 | *(char *)ext_val = value; |
2083 | 0 | break; |
2084 | 0 | case KCFG_UNKNOWN: |
2085 | 0 | case KCFG_INT: |
2086 | 0 | case KCFG_CHAR_ARR: |
2087 | 0 | default: |
2088 | 0 | pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n", |
2089 | 0 | ext->name, value); |
2090 | 0 | return -EINVAL; |
2091 | 0 | } |
2092 | 0 | ext->is_set = true; |
2093 | 0 | return 0; |
2094 | 0 | } |
2095 | | |
2096 | | static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val, |
2097 | | const char *value) |
2098 | 0 | { |
2099 | 0 | size_t len; |
2100 | |
2101 | 0 | if (ext->kcfg.type != KCFG_CHAR_ARR) { |
2102 | 0 | pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n", |
2103 | 0 | ext->name, value); |
2104 | 0 | return -EINVAL; |
2105 | 0 | } |
2106 | | |
2107 | 0 | len = strlen(value); |
2108 | 0 | if (len < 2 || value[len - 1] != '"') { |
2109 | 0 | pr_warn("extern (kcfg) '%s': invalid string config '%s'\n", |
2110 | 0 | ext->name, value); |
2111 | 0 | return -EINVAL; |
2112 | 0 | } |
2113 | | |
2114 | | /* strip quotes */ |
2115 | 0 | len -= 2; |
2116 | 0 | if (len >= ext->kcfg.sz) { |
2117 | 0 | pr_warn("extern (kcfg) '%s': long string '%s' of (%zu bytes) truncated to %d bytes\n", |
2118 | 0 | ext->name, value, len, ext->kcfg.sz - 1); |
2119 | 0 | len = ext->kcfg.sz - 1; |
2120 | 0 | } |
2121 | 0 | memcpy(ext_val, value + 1, len); |
2122 | 0 | ext_val[len] = '\0'; |
2123 | 0 | ext->is_set = true; |
2124 | 0 | return 0; |
2125 | 0 | } |
2126 | | |
2127 | | static int parse_u64(const char *value, __u64 *res) |
2128 | 0 | { |
2129 | 0 | char *value_end; |
2130 | 0 | int err; |
2131 | |
2132 | 0 | errno = 0; |
2133 | 0 | *res = strtoull(value, &value_end, 0); |
2134 | 0 | if (errno) { |
2135 | 0 | err = -errno; |
2136 | 0 | pr_warn("failed to parse '%s': %s\n", value, errstr(err)); |
2137 | 0 | return err; |
2138 | 0 | } |
2139 | 0 | if (*value_end) { |
2140 | 0 | pr_warn("failed to parse '%s' as integer completely\n", value); |
2141 | 0 | return -EINVAL; |
2142 | 0 | } |
2143 | 0 | return 0; |
2144 | 0 | } |
2145 | | |
2146 | | static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v) |
2147 | 0 | { |
2148 | 0 | int bit_sz = ext->kcfg.sz * 8; |
2149 | |
2150 | 0 | if (ext->kcfg.sz == 8) |
2151 | 0 | return true; |
2152 | | |
2153 | | /* Validate that value stored in u64 fits in integer of `ext->sz` |
2154 | | * bytes size without any loss of information. If the target integer |
2155 | | * is signed, we rely on the following limits of integer type of |
2156 | | * Y bits and subsequent transformation: |
2157 | | * |
2158 | | * -2^(Y-1) <= X <= 2^(Y-1) - 1 |
2159 | | * 0 <= X + 2^(Y-1) <= 2^Y - 1 |
2160 | | * 0 <= X + 2^(Y-1) < 2^Y |
2161 | | * |
2162 | | * For unsigned target integer, check that all the (64 - Y) bits are |
2163 | | * zero. |
2164 | | */ |
2165 | 0 | if (ext->kcfg.is_signed) |
2166 | 0 | return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz); |
2167 | 0 | else |
2168 | 0 | return (v >> bit_sz) == 0; |
2169 | 0 | } |
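
Worked example for a signed 2-byte extern (bit_sz = 16): v = 32767 yields 32767 + 32768 = 65535 < 65536 and is accepted; v = 32768 yields exactly 65536 and is rejected; v = -32768, stored sign-extended in the u64, wraps to 0 under the addition and is accepted.
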
2170 | | |
2171 | | static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val, |
2172 | | __u64 value) |
2173 | 0 | { |
2174 | 0 | if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR && |
2175 | 0 | ext->kcfg.type != KCFG_BOOL) { |
2176 | 0 | pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n", |
2177 | 0 | ext->name, (unsigned long long)value); |
2178 | 0 | return -EINVAL; |
2179 | 0 | } |
2180 | 0 | if (ext->kcfg.type == KCFG_BOOL && value > 1) { |
2181 | 0 | pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n", |
2182 | 0 | ext->name, (unsigned long long)value); |
2183 | 0 | return -EINVAL; |
2184 | |
2185 | 0 | } |
2186 | 0 | if (!is_kcfg_value_in_range(ext, value)) { |
2187 | 0 | pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n", |
2188 | 0 | ext->name, (unsigned long long)value, ext->kcfg.sz); |
2189 | 0 | return -ERANGE; |
2190 | 0 | } |
2191 | 0 | switch (ext->kcfg.sz) { |
2192 | 0 | case 1: |
2193 | 0 | *(__u8 *)ext_val = value; |
2194 | 0 | break; |
2195 | 0 | case 2: |
2196 | 0 | *(__u16 *)ext_val = value; |
2197 | 0 | break; |
2198 | 0 | case 4: |
2199 | 0 | *(__u32 *)ext_val = value; |
2200 | 0 | break; |
2201 | 0 | case 8: |
2202 | 0 | *(__u64 *)ext_val = value; |
2203 | 0 | break; |
2204 | 0 | default: |
2205 | 0 | return -EINVAL; |
2206 | 0 | } |
2207 | 0 | ext->is_set = true; |
2208 | 0 | return 0; |
2209 | 0 | } |
2210 | | |
2211 | | static int bpf_object__process_kconfig_line(struct bpf_object *obj, |
2212 | | char *buf, void *data) |
2213 | 0 | { |
2214 | 0 | struct extern_desc *ext; |
2215 | 0 | char *sep, *value; |
2216 | 0 | int len, err = 0; |
2217 | 0 | void *ext_val; |
2218 | 0 | __u64 num; |
2219 | |
|
2220 | 0 | if (!str_has_pfx(buf, "CONFIG_")) |
2221 | 0 | return 0; |
2222 | | |
2223 | 0 | sep = strchr(buf, '='); |
2224 | 0 | if (!sep) { |
2225 | 0 | pr_warn("failed to parse '%s': no separator\n", buf); |
2226 | 0 | return -EINVAL; |
2227 | 0 | } |
2228 | | |
2229 | | /* Trim ending '\n' */ |
2230 | 0 | len = strlen(buf); |
2231 | 0 | if (buf[len - 1] == '\n') |
2232 | 0 | buf[len - 1] = '\0'; |
2233 | | /* Split on '=' and ensure that a value is present. */ |
2234 | 0 | *sep = '\0'; |
2235 | 0 | if (!sep[1]) { |
2236 | 0 | *sep = '='; |
2237 | 0 | pr_warn("failed to parse '%s': no value\n", buf); |
2238 | 0 | return -EINVAL; |
2239 | 0 | } |
2240 | | |
2241 | 0 | ext = find_extern_by_name(obj, buf); |
2242 | 0 | if (!ext || ext->is_set) |
2243 | 0 | return 0; |
2244 | | |
2245 | 0 | ext_val = data + ext->kcfg.data_off; |
2246 | 0 | value = sep + 1; |
2247 | |
2248 | 0 | switch (*value) { |
2249 | 0 | case 'y': case 'n': case 'm': |
2250 | 0 | err = set_kcfg_value_tri(ext, ext_val, *value); |
2251 | 0 | break; |
2252 | 0 | case '"': |
2253 | 0 | err = set_kcfg_value_str(ext, ext_val, value); |
2254 | 0 | break; |
2255 | 0 | default: |
2256 | | /* assume integer */ |
2257 | 0 | err = parse_u64(value, &num); |
2258 | 0 | if (err) { |
2259 | 0 | pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value); |
2260 | 0 | return err; |
2261 | 0 | } |
2262 | 0 | if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) { |
2263 | 0 | pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value); |
2264 | 0 | return -EINVAL; |
2265 | 0 | } |
2266 | 0 | err = set_kcfg_value_num(ext, ext_val, num); |
2267 | 0 | break; |
2268 | 0 | } |
2269 | 0 | if (err) |
2270 | 0 | return err; |
2271 | 0 | pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value); |
2272 | 0 | return 0; |
2273 | 0 | } |
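
For illustration, the three value shapes the switch above distinguishes look like this in a Kconfig file (option names are examples only):

    CONFIG_BPF=y                      # 'y'/'n'/'m' -> set_kcfg_value_tri()
    CONFIG_DEFAULT_HOSTNAME="(none)"  # quoted      -> set_kcfg_value_str()
    CONFIG_HZ=250                     # otherwise   -> parse_u64() + set_kcfg_value_num()
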
2274 | | |
2275 | | static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data) |
2276 | 0 | { |
2277 | 0 | char buf[PATH_MAX]; |
2278 | 0 | struct utsname uts; |
2279 | 0 | int len, err = 0; |
2280 | 0 | gzFile file; |
2281 | |
2282 | 0 | uname(&uts); |
2283 | 0 | len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release); |
2284 | 0 | if (len < 0) |
2285 | 0 | return -EINVAL; |
2286 | 0 | else if (len >= PATH_MAX) |
2287 | 0 | return -ENAMETOOLONG; |
2288 | | |
2289 | | /* gzopen also accepts uncompressed files. */ |
2290 | 0 | file = gzopen(buf, "re"); |
2291 | 0 | if (!file) |
2292 | 0 | file = gzopen("/proc/config.gz", "re"); |
2293 | |
2294 | 0 | if (!file) { |
2295 | 0 | pr_warn("failed to open system Kconfig\n"); |
2296 | 0 | return -ENOENT; |
2297 | 0 | } |
2298 | | |
2299 | 0 | while (gzgets(file, buf, sizeof(buf))) { |
2300 | 0 | err = bpf_object__process_kconfig_line(obj, buf, data); |
2301 | 0 | if (err) { |
2302 | 0 | pr_warn("error parsing system Kconfig line '%s': %s\n", |
2303 | 0 | buf, errstr(err)); |
2304 | 0 | goto out; |
2305 | 0 | } |
2306 | 0 | } |
2307 | | |
2308 | 0 | out: |
2309 | 0 | gzclose(file); |
2310 | 0 | return err; |
2311 | 0 | } |
2312 | | |
2313 | | static int bpf_object__read_kconfig_mem(struct bpf_object *obj, |
2314 | | const char *config, void *data) |
2315 | 0 | { |
2316 | 0 | char buf[PATH_MAX]; |
2317 | 0 | int err = 0; |
2318 | 0 | FILE *file; |
2319 | |
2320 | 0 | file = fmemopen((void *)config, strlen(config), "r"); |
2321 | 0 | if (!file) { |
2322 | 0 | err = -errno; |
2323 | 0 | pr_warn("failed to open in-memory Kconfig: %s\n", errstr(err)); |
2324 | 0 | return err; |
2325 | 0 | } |
2326 | | |
2327 | 0 | while (fgets(buf, sizeof(buf), file)) { |
2328 | 0 | err = bpf_object__process_kconfig_line(obj, buf, data); |
2329 | 0 | if (err) { |
2330 | 0 | pr_warn("error parsing in-memory Kconfig line '%s': %s\n", |
2331 | 0 | buf, errstr(err)); |
2332 | 0 | break; |
2333 | 0 | } |
2334 | 0 | } |
2335 | |
2336 | 0 | fclose(file); |
2337 | 0 | return err; |
2338 | 0 | } |
2339 | | |
2340 | | static int bpf_object__init_kconfig_map(struct bpf_object *obj) |
2341 | 2.31k | { |
2342 | 2.31k | struct extern_desc *last_ext = NULL, *ext; |
2343 | 2.31k | size_t map_sz; |
2344 | 2.31k | int i, err; |
2345 | | |
2346 | 3.43k | for (i = 0; i < obj->nr_extern; i++) { |
2347 | 1.12k | ext = &obj->externs[i]; |
2348 | 1.12k | if (ext->type == EXT_KCFG) |
2349 | 267 | last_ext = ext; |
2350 | 1.12k | } |
2351 | | |
2352 | 2.31k | if (!last_ext) |
2353 | 2.25k | return 0; |
2354 | | |
2355 | 60 | map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz; |
2356 | 60 | err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG, |
2357 | 60 | ".kconfig", obj->efile.symbols_shndx, |
2358 | 60 | NULL, map_sz); |
2359 | 60 | if (err) |
2360 | 35 | return err; |
2361 | | |
2362 | 25 | obj->kconfig_map_idx = obj->nr_maps - 1; |
2363 | | |
2364 | 25 | return 0; |
2365 | 60 | } |
2366 | | |
2367 | | const struct btf_type * |
2368 | | skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id) |
2369 | 5.07k | { |
2370 | 5.07k | const struct btf_type *t = btf__type_by_id(btf, id); |
2371 | | |
2372 | 5.07k | if (res_id) |
2373 | 2.16k | *res_id = id; |
2374 | | |
2375 | 6.81k | while (btf_is_mod(t) || btf_is_typedef(t)) { |
2376 | 1.73k | if (res_id) |
2377 | 832 | *res_id = t->type; |
2378 | 1.73k | t = btf__type_by_id(btf, t->type); |
2379 | 1.73k | } |
2380 | | |
2381 | 5.07k | return t; |
2382 | 5.07k | } |
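
For example, a variable declared 'const volatile __u32 x' is encoded in BTF as CONST -> VOLATILE -> TYPEDEF(__u32) -> INT; the helper unwraps all three wrappers, returns the underlying INT type, and leaves that type's ID in *res_id.
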
2383 | | |
2384 | | static const struct btf_type * |
2385 | | resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id) |
2386 | 0 | { |
2387 | 0 | const struct btf_type *t; |
2388 | |
2389 | 0 | t = skip_mods_and_typedefs(btf, id, NULL); |
2390 | 0 | if (!btf_is_ptr(t)) |
2391 | 0 | return NULL; |
2392 | | |
2393 | 0 | t = skip_mods_and_typedefs(btf, t->type, res_id); |
2394 | |
2395 | 0 | return btf_is_func_proto(t) ? t : NULL; |
2396 | 0 | } |
2397 | | |
2398 | | static const char *__btf_kind_str(__u16 kind) |
2399 | 342 | { |
2400 | 342 | switch (kind) { |
2401 | 46 | case BTF_KIND_UNKN: return "void"; |
2402 | 11 | case BTF_KIND_INT: return "int"; |
2403 | 1 | case BTF_KIND_PTR: return "ptr"; |
2404 | 6 | case BTF_KIND_ARRAY: return "array"; |
2405 | 5 | case BTF_KIND_STRUCT: return "struct"; |
2406 | 2 | case BTF_KIND_UNION: return "union"; |
2407 | 2 | case BTF_KIND_ENUM: return "enum"; |
2408 | 2 | case BTF_KIND_FWD: return "fwd"; |
2409 | 1 | case BTF_KIND_TYPEDEF: return "typedef"; |
2410 | 6 | case BTF_KIND_VOLATILE: return "volatile"; |
2411 | 4 | case BTF_KIND_CONST: return "const"; |
2412 | 5 | case BTF_KIND_RESTRICT: return "restrict"; |
2413 | 142 | case BTF_KIND_FUNC: return "func"; |
2414 | 6 | case BTF_KIND_FUNC_PROTO: return "func_proto"; |
2415 | 35 | case BTF_KIND_VAR: return "var"; |
2416 | 14 | case BTF_KIND_DATASEC: return "datasec"; |
2417 | 45 | case BTF_KIND_FLOAT: return "float"; |
2418 | 2 | case BTF_KIND_DECL_TAG: return "decl_tag"; |
2419 | 4 | case BTF_KIND_TYPE_TAG: return "type_tag"; |
2420 | 3 | case BTF_KIND_ENUM64: return "enum64"; |
2421 | 0 | default: return "unknown"; |
2422 | 342 | } |
2423 | 342 | } |
2424 | | |
2425 | | const char *btf_kind_str(const struct btf_type *t) |
2426 | 342 | { |
2427 | 342 | return __btf_kind_str(btf_kind(t)); |
2428 | 342 | } |
2429 | | |
2430 | | /* |
2431 | | * Fetch integer attribute of BTF map definition. Such attributes are |
2432 | | * represented using a pointer to an array, in which dimensionality of array |
2433 | | * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY]; |
2434 | | * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF |
2435 | | * type definition, while using only sizeof(void *) space in ELF data section. |
2436 | | */ |
2437 | | static bool get_map_field_int(const char *map_name, const struct btf *btf, |
2438 | | const struct btf_member *m, __u32 *res) |
2439 | 702 | { |
2440 | 702 | const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); |
2441 | 702 | const char *name = btf__name_by_offset(btf, m->name_off); |
2442 | 702 | const struct btf_array *arr_info; |
2443 | 702 | const struct btf_type *arr_t; |
2444 | | |
2445 | 702 | if (!btf_is_ptr(t)) { |
2446 | 14 | pr_warn("map '%s': attr '%s': expected PTR, got %s.\n", |
2447 | 14 | map_name, name, btf_kind_str(t)); |
2448 | 14 | return false; |
2449 | 14 | } |
2450 | | |
2451 | 688 | arr_t = btf__type_by_id(btf, t->type); |
2452 | 688 | if (!arr_t) { |
2453 | 0 | pr_warn("map '%s': attr '%s': type [%u] not found.\n", |
2454 | 0 | map_name, name, t->type); |
2455 | 0 | return false; |
2456 | 0 | } |
2457 | 688 | if (!btf_is_array(arr_t)) { |
2458 | 8 | pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n", |
2459 | 8 | map_name, name, btf_kind_str(arr_t)); |
2460 | 8 | return false; |
2461 | 8 | } |
2462 | 680 | arr_info = btf_array(arr_t); |
2463 | 680 | *res = arr_info->nelems; |
2464 | 680 | return true; |
2465 | 688 | } |
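
This encoding is what the __uint()/__type() convenience macros from bpf_helpers.h emit: __uint(name, val) expands to 'int (*name)[val]', so the value survives purely as a BTF array dimension. A sketch of a map definition whose 'type' and 'max_entries' members this function decodes ('counters' is an example name):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_ARRAY); /* nelems == BPF_MAP_TYPE_ARRAY */
            __uint(max_entries, 64);          /* nelems == 64 */
            __type(key, __u32);
            __type(value, __u64);
    } counters SEC(".maps");
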
2466 | | |
2467 | | static bool get_map_field_long(const char *map_name, const struct btf *btf, |
2468 | | const struct btf_member *m, __u64 *res) |
2469 | 60 | { |
2470 | 60 | const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); |
2471 | 60 | const char *name = btf__name_by_offset(btf, m->name_off); |
2472 | | |
2473 | 60 | if (btf_is_ptr(t)) { |
2474 | 30 | __u32 res32; |
2475 | 30 | bool ret; |
2476 | | |
2477 | 30 | ret = get_map_field_int(map_name, btf, m, &res32); |
2478 | 30 | if (ret) |
2479 | 29 | *res = (__u64)res32; |
2480 | 30 | return ret; |
2481 | 30 | } |
2482 | | |
2483 | 30 | if (!btf_is_enum(t) && !btf_is_enum64(t)) { |
2484 | 11 | pr_warn("map '%s': attr '%s': expected ENUM or ENUM64, got %s.\n", |
2485 | 11 | map_name, name, btf_kind_str(t)); |
2486 | 11 | return false; |
2487 | 11 | } |
2488 | | |
2489 | 19 | if (btf_vlen(t) != 1) { |
2490 | 2 | pr_warn("map '%s': attr '%s': invalid __ulong\n", |
2491 | 2 | map_name, name); |
2492 | 2 | return false; |
2493 | 2 | } |
2494 | | |
2495 | 17 | if (btf_is_enum(t)) { |
2496 | 9 | const struct btf_enum *e = btf_enum(t); |
2497 | | |
2498 | 9 | *res = e->val; |
2499 | 9 | } else { |
2500 | 8 | const struct btf_enum64 *e = btf_enum64(t); |
2501 | | |
2502 | 8 | *res = btf_enum64_value(e); |
2503 | 8 | } |
2504 | 17 | return true; |
2505 | 19 | } |
2506 | | |
2507 | | static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name) |
2508 | 1 | { |
2509 | 1 | int len; |
2510 | | |
2511 | 1 | len = snprintf(buf, buf_sz, "%s/%s", path, name); |
2512 | 1 | if (len < 0) |
2513 | 0 | return -EINVAL; |
2514 | 1 | if (len >= buf_sz) |
2515 | 0 | return -ENAMETOOLONG; |
2516 | | |
2517 | 1 | return 0; |
2518 | 1 | } |
2519 | | |
2520 | | static int build_map_pin_path(struct bpf_map *map, const char *path) |
2521 | 1 | { |
2522 | 1 | char buf[PATH_MAX]; |
2523 | 1 | int err; |
2524 | | |
2525 | 1 | if (!path) |
2526 | 1 | path = BPF_FS_DEFAULT_PATH; |
2527 | | |
2528 | 1 | err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map)); |
2529 | 1 | if (err) |
2530 | 0 | return err; |
2531 | | |
2532 | 1 | return bpf_map__set_pin_path(map, buf); |
2533 | 1 | } |
2534 | | |
2535 | | /* should match definition in bpf_helpers.h */ |
2536 | | enum libbpf_pin_type { |
2537 | | LIBBPF_PIN_NONE, |
2538 | | /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ |
2539 | | LIBBPF_PIN_BY_NAME, |
2540 | | }; |
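
On the BPF side, pinning is requested through the same pointer-to-array encoding. A sketch, assuming bpf_helpers.h (which carries the matching libbpf_pin_type definition); with no pin_root_path override, build_map_pin_path() above yields /sys/fs/bpf/pinned_map ('pinned_map' is an example name):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_HASH);
            __uint(max_entries, 16);
            __type(key, __u32);
            __type(value, __u64);
            __uint(pinning, LIBBPF_PIN_BY_NAME);
    } pinned_map SEC(".maps");
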
2541 | | |
2542 | | int parse_btf_map_def(const char *map_name, struct btf *btf, |
2543 | | const struct btf_type *def_t, bool strict, |
2544 | | struct btf_map_def *map_def, struct btf_map_def *inner_def) |
2545 | 932 | { |
2546 | 932 | const struct btf_type *t; |
2547 | 932 | const struct btf_member *m; |
2548 | 932 | bool is_inner = inner_def == NULL; |
2549 | 932 | int vlen, i; |
2550 | | |
2551 | 932 | vlen = btf_vlen(def_t); |
2552 | 932 | m = btf_members(def_t); |
2553 | 2.09k | for (i = 0; i < vlen; i++, m++) { |
2554 | 1.97k | const char *name = btf__name_by_offset(btf, m->name_off); |
2555 | | |
2556 | 1.97k | if (!name) { |
2557 | 0 | pr_warn("map '%s': invalid field #%d.\n", map_name, i); |
2558 | 0 | return -EINVAL; |
2559 | 0 | } |
2560 | 1.97k | if (strcmp(name, "type") == 0) { |
2561 | 278 | if (!get_map_field_int(map_name, btf, m, &map_def->map_type)) |
2562 | 6 | return -EINVAL; |
2563 | 272 | map_def->parts |= MAP_DEF_MAP_TYPE; |
2564 | 1.70k | } else if (strcmp(name, "max_entries") == 0) { |
2565 | 70 | if (!get_map_field_int(map_name, btf, m, &map_def->max_entries)) |
2566 | 3 | return -EINVAL; |
2567 | 67 | map_def->parts |= MAP_DEF_MAX_ENTRIES; |
2568 | 1.63k | } else if (strcmp(name, "map_flags") == 0) { |
2569 | 9 | if (!get_map_field_int(map_name, btf, m, &map_def->map_flags)) |
2570 | 1 | return -EINVAL; |
2571 | 8 | map_def->parts |= MAP_DEF_MAP_FLAGS; |
2572 | 1.62k | } else if (strcmp(name, "numa_node") == 0) { |
2573 | 17 | if (!get_map_field_int(map_name, btf, m, &map_def->numa_node)) |
2574 | 1 | return -EINVAL; |
2575 | 16 | map_def->parts |= MAP_DEF_NUMA_NODE; |
2576 | 1.60k | } else if (strcmp(name, "key_size") == 0) { |
2577 | 94 | __u32 sz; |
2578 | | |
2579 | 94 | if (!get_map_field_int(map_name, btf, m, &sz)) |
2580 | 2 | return -EINVAL; |
2581 | 92 | if (map_def->key_size && map_def->key_size != sz) { |
2582 | 36 | pr_warn("map '%s': conflicting key size %u != %u.\n", |
2583 | 36 | map_name, map_def->key_size, sz); |
2584 | 36 | return -EINVAL; |
2585 | 36 | } |
2586 | 56 | map_def->key_size = sz; |
2587 | 56 | map_def->parts |= MAP_DEF_KEY_SIZE; |
2588 | 1.51k | } else if (strcmp(name, "key") == 0) { |
2589 | 327 | __s64 sz; |
2590 | | |
2591 | 327 | t = btf__type_by_id(btf, m->type); |
2592 | 327 | if (!t) { |
2593 | 0 | pr_warn("map '%s': key type [%d] not found.\n", |
2594 | 0 | map_name, m->type); |
2595 | 0 | return -EINVAL; |
2596 | 0 | } |
2597 | 327 | if (!btf_is_ptr(t)) { |
2598 | 7 | pr_warn("map '%s': key spec is not PTR: %s.\n", |
2599 | 7 | map_name, btf_kind_str(t)); |
2600 | 7 | return -EINVAL; |
2601 | 7 | } |
2602 | 320 | sz = btf__resolve_size(btf, t->type); |
2603 | 320 | if (sz < 0) { |
2604 | 15 | pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n", |
2605 | 15 | map_name, t->type, (ssize_t)sz); |
2606 | 15 | return sz; |
2607 | 15 | } |
2608 | 305 | if (map_def->key_size && map_def->key_size != sz) { |
2609 | 33 | pr_warn("map '%s': conflicting key size %u != %zd.\n", |
2610 | 33 | map_name, map_def->key_size, (ssize_t)sz); |
2611 | 33 | return -EINVAL; |
2612 | 33 | } |
2613 | 272 | map_def->key_size = sz; |
2614 | 272 | map_def->key_type_id = t->type; |
2615 | 272 | map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE; |
2616 | 1.18k | } else if (strcmp(name, "value_size") == 0) { |
2617 | 151 | __u32 sz; |
2618 | | |
2619 | 151 | if (!get_map_field_int(map_name, btf, m, &sz)) |
2620 | 6 | return -EINVAL; |
2621 | 145 | if (map_def->value_size && map_def->value_size != sz) { |
2622 | 33 | pr_warn("map '%s': conflicting value size %u != %u.\n", |
2623 | 33 | map_name, map_def->value_size, sz); |
2624 | 33 | return -EINVAL; |
2625 | 33 | } |
2626 | 112 | map_def->value_size = sz; |
2627 | 112 | map_def->parts |= MAP_DEF_VALUE_SIZE; |
2628 | 1.03k | } else if (strcmp(name, "value") == 0) { |
2629 | 373 | __s64 sz; |
2630 | | |
2631 | 373 | t = btf__type_by_id(btf, m->type); |
2632 | 373 | if (!t) { |
2633 | 0 | pr_warn("map '%s': value type [%d] not found.\n", |
2634 | 0 | map_name, m->type); |
2635 | 0 | return -EINVAL; |
2636 | 0 | } |
2637 | 373 | if (!btf_is_ptr(t)) { |
2638 | 12 | pr_warn("map '%s': value spec is not PTR: %s.\n", |
2639 | 12 | map_name, btf_kind_str(t)); |
2640 | 12 | return -EINVAL; |
2641 | 12 | } |
2642 | 361 | sz = btf__resolve_size(btf, t->type); |
2643 | 361 | if (sz < 0) { |
2644 | 27 | pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n", |
2645 | 27 | map_name, t->type, (ssize_t)sz); |
2646 | 27 | return sz; |
2647 | 27 | } |
2648 | 334 | if (map_def->value_size && map_def->value_size != sz) { |
2649 | 36 | pr_warn("map '%s': conflicting value size %u != %zd.\n", |
2650 | 36 | map_name, map_def->value_size, (ssize_t)sz); |
2651 | 36 | return -EINVAL; |
2652 | 36 | } |
2653 | 298 | map_def->value_size = sz; |
2654 | 298 | map_def->value_type_id = t->type; |
2655 | 298 | map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE; |
2656 | 298 | } |
2657 | 660 | else if (strcmp(name, "values") == 0) { |
2658 | 105 | bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type); |
2659 | 105 | bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY; |
2660 | 105 | const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value"; |
2661 | 105 | char inner_map_name[128]; |
2662 | 105 | int err; |
2663 | | |
2664 | 105 | if (is_inner) { |
2665 | 2 | pr_warn("map '%s': multi-level inner maps not supported.\n", |
2666 | 2 | map_name); |
2667 | 2 | return -ENOTSUP; |
2668 | 2 | } |
2669 | 103 | if (i != vlen - 1) { |
2670 | 2 | pr_warn("map '%s': '%s' member should be last.\n", |
2671 | 2 | map_name, name); |
2672 | 2 | return -EINVAL; |
2673 | 2 | } |
2674 | 101 | if (!is_map_in_map && !is_prog_array) { |
2675 | 36 | pr_warn("map '%s': should be map-in-map or prog-array.\n", |
2676 | 36 | map_name); |
2677 | 36 | return -ENOTSUP; |
2678 | 36 | } |
2679 | 65 | if (map_def->value_size && map_def->value_size != 4) { |
2680 | 30 | pr_warn("map '%s': conflicting value size %u != 4.\n", |
2681 | 30 | map_name, map_def->value_size); |
2682 | 30 | return -EINVAL; |
2683 | 30 | } |
2684 | 35 | map_def->value_size = 4; |
2685 | 35 | t = btf__type_by_id(btf, m->type); |
2686 | 35 | if (!t) { |
2687 | 0 | pr_warn("map '%s': %s type [%d] not found.\n", |
2688 | 0 | map_name, desc, m->type); |
2689 | 0 | return -EINVAL; |
2690 | 0 | } |
2691 | 35 | if (!btf_is_array(t) || btf_array(t)->nelems) { |
2692 | 27 | pr_warn("map '%s': %s spec is not a zero-sized array.\n", |
2693 | 27 | map_name, desc); |
2694 | 27 | return -EINVAL; |
2695 | 27 | } |
2696 | 8 | t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL); |
2697 | 8 | if (!btf_is_ptr(t)) { |
2698 | 3 | pr_warn("map '%s': %s def is of unexpected kind %s.\n", |
2699 | 3 | map_name, desc, btf_kind_str(t)); |
2700 | 3 | return -EINVAL; |
2701 | 3 | } |
2702 | 5 | t = skip_mods_and_typedefs(btf, t->type, NULL); |
2703 | 5 | if (is_prog_array) { |
2704 | 2 | if (!btf_is_func_proto(t)) { |
2705 | 1 | pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n", |
2706 | 1 | map_name, btf_kind_str(t)); |
2707 | 1 | return -EINVAL; |
2708 | 1 | } |
2709 | 1 | continue; |
2710 | 2 | } |
2711 | 3 | if (!btf_is_struct(t)) { |
2712 | 1 | pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", |
2713 | 1 | map_name, btf_kind_str(t)); |
2714 | 1 | return -EINVAL; |
2715 | 1 | } |
2716 | | |
2717 | 2 | snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name); |
2718 | 2 | err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL); |
2719 | 2 | if (err) |
2720 | 2 | return err; |
2721 | | |
2722 | 0 | map_def->parts |= MAP_DEF_INNER_MAP; |
2723 | 555 | } else if (strcmp(name, "pinning") == 0) { |
2724 | 53 | __u32 val; |
2725 | | |
2726 | 53 | if (is_inner) { |
2727 | 0 | pr_warn("map '%s': inner def can't be pinned.\n", map_name); |
2728 | 0 | return -EINVAL; |
2729 | 0 | } |
2730 | 53 | if (!get_map_field_int(map_name, btf, m, &val)) |
2731 | 2 | return -EINVAL; |
2732 | 51 | if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) { |
2733 | 35 | pr_warn("map '%s': invalid pinning value %u.\n", |
2734 | 35 | map_name, val); |
2735 | 35 | return -EINVAL; |
2736 | 35 | } |
2737 | 16 | map_def->pinning = val; |
2738 | 16 | map_def->parts |= MAP_DEF_PINNING; |
2739 | 502 | } else if (strcmp(name, "map_extra") == 0) { |
2740 | 60 | __u64 map_extra; |
2741 | | |
2742 | 60 | if (!get_map_field_long(map_name, btf, m, &map_extra)) |
2743 | 14 | return -EINVAL; |
2744 | 46 | map_def->map_extra = map_extra; |
2745 | 46 | map_def->parts |= MAP_DEF_MAP_EXTRA; |
2746 | 442 | } else { |
2747 | 442 | if (strict) { |
2748 | 442 | pr_warn("map '%s': unknown field '%s'.\n", map_name, name); |
2749 | 442 | return -ENOTSUP; |
2750 | 442 | } |
2751 | 0 | pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name); |
2752 | 0 | } |
2753 | 1.97k | } |
2754 | | |
2755 | 117 | if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) { |
2756 | 31 | pr_warn("map '%s': map type isn't specified.\n", map_name); |
2757 | 31 | return -EINVAL; |
2758 | 31 | } |
2759 | | |
2760 | 86 | return 0; |
2761 | 117 | } |
2762 | | |
2763 | | static size_t adjust_ringbuf_sz(size_t sz) |
2764 | 26 | { |
2765 | 26 | __u32 page_sz = sysconf(_SC_PAGE_SIZE); |
2766 | 26 | __u32 mul; |
2767 | | |
2768 | | /* if user forgot to set any size, make sure they see error */ |
2769 | 26 | if (sz == 0) |
2770 | 1 | return 0; |
2771 | | /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be |
2772 | | * a power-of-2 multiple of kernel's page size. If user diligently |
2773 | | * satisfied these conditions, pass the size through. |
2774 | | */ |
2775 | 25 | if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz)) |
2776 | 1 | return sz; |
2777 | | |
2778 | | /* Otherwise find closest (page_sz * power_of_2) product bigger than |
2779 | | * user-set size to satisfy both user size request and kernel |
2780 | | * requirements and substitute correct max_entries for map creation. |
2781 | | */ |
2782 | 373 | for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) { |
2783 | 364 | if (mul * page_sz > sz) |
2784 | 15 | return mul * page_sz; |
2785 | 364 | } |
2786 | | |
2787 | | /* if it's impossible to satisfy the conditions (i.e., user size is |
2788 | | * very close to UINT_MAX but is not a power-of-2 multiple of |
2789 | | * page_size) then just return original size and let kernel reject it |
2790 | | */ |
2791 | 9 | return sz; |
2792 | 24 | } |
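
Worked example with a 4096-byte page: a requested size of 16000 is neither zero nor a power-of-2 multiple of the page size, so the loop stops at mul = 4 (4 * 4096 = 16384 > 16000) and returns 16384; a request of exactly 16384 passes through unchanged.
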
2793 | | |
2794 | | static bool map_is_ringbuf(const struct bpf_map *map) |
2795 | 86 | { |
2796 | 86 | return map->def.type == BPF_MAP_TYPE_RINGBUF || |
2797 | 86 | map->def.type == BPF_MAP_TYPE_USER_RINGBUF; |
2798 | 86 | } |
2799 | | |
2800 | | static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def) |
2801 | 86 | { |
2802 | 86 | map->def.type = def->map_type; |
2803 | 86 | map->def.key_size = def->key_size; |
2804 | 86 | map->def.value_size = def->value_size; |
2805 | 86 | map->def.max_entries = def->max_entries; |
2806 | 86 | map->def.map_flags = def->map_flags; |
2807 | 86 | map->map_extra = def->map_extra; |
2808 | | |
2809 | 86 | map->numa_node = def->numa_node; |
2810 | 86 | map->btf_key_type_id = def->key_type_id; |
2811 | 86 | map->btf_value_type_id = def->value_type_id; |
2812 | | |
2813 | | /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */ |
2814 | 86 | if (map_is_ringbuf(map)) |
2815 | 26 | map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries); |
2816 | | |
2817 | 86 | if (def->parts & MAP_DEF_MAP_TYPE) |
2818 | 86 | pr_debug("map '%s': found type = %u.\n", map->name, def->map_type); |
2819 | | |
2820 | 86 | if (def->parts & MAP_DEF_KEY_TYPE) |
2821 | 86 | pr_debug("map '%s': found key [%u], sz = %u.\n", |
2822 | 77 | map->name, def->key_type_id, def->key_size); |
2823 | 77 | else if (def->parts & MAP_DEF_KEY_SIZE) |
2824 | 3 | pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size); |
2825 | | |
2826 | 86 | if (def->parts & MAP_DEF_VALUE_TYPE) |
2827 | 86 | pr_debug("map '%s': found value [%u], sz = %u.\n", |
2828 | 50 | map->name, def->value_type_id, def->value_size); |
2829 | 50 | else if (def->parts & MAP_DEF_VALUE_SIZE) |
2830 | 5 | pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size); |
2831 | | |
2832 | 86 | if (def->parts & MAP_DEF_MAX_ENTRIES) |
2833 | 86 | pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries); |
2834 | 86 | if (def->parts & MAP_DEF_MAP_FLAGS) |
2835 | 86 | pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags); |
2836 | 86 | if (def->parts & MAP_DEF_MAP_EXTRA) |
2837 | 86 | pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name, |
2838 | 86 | (unsigned long long)def->map_extra); |
2839 | 86 | if (def->parts & MAP_DEF_PINNING) |
2840 | 86 | pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning); |
2841 | 86 | if (def->parts & MAP_DEF_NUMA_NODE) |
2842 | 86 | pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node); |
2843 | | |
2844 | 86 | if (def->parts & MAP_DEF_INNER_MAP) |
2845 | 86 | pr_debug("map '%s': found inner map definition.\n", map->name); |
2846 | 86 | } |
2847 | | |
2848 | | static const char *btf_var_linkage_str(__u32 linkage) |
2849 | 56 | { |
2850 | 56 | switch (linkage) { |
2851 | 1 | case BTF_VAR_STATIC: return "static"; |
2852 | 0 | case BTF_VAR_GLOBAL_ALLOCATED: return "global"; |
2853 | 2 | case BTF_VAR_GLOBAL_EXTERN: return "extern"; |
2854 | 53 | default: return "unknown"; |
2855 | 56 | } |
2856 | 56 | } |
2857 | | |
2858 | | static int bpf_object__init_user_btf_map(struct bpf_object *obj, |
2859 | | const struct btf_type *sec, |
2860 | | int var_idx, int sec_idx, |
2861 | | const Elf_Data *data, bool strict, |
2862 | | const char *pin_root_path) |
2863 | 1.08k | { |
2864 | 1.08k | struct btf_map_def map_def = {}, inner_def = {}; |
2865 | 1.08k | const struct btf_type *var, *def; |
2866 | 1.08k | const struct btf_var_secinfo *vi; |
2867 | 1.08k | const struct btf_var *var_extra; |
2868 | 1.08k | const char *map_name; |
2869 | 1.08k | struct bpf_map *map; |
2870 | 1.08k | int err; |
2871 | | |
2872 | 1.08k | vi = btf_var_secinfos(sec) + var_idx; |
2873 | 1.08k | var = btf__type_by_id(obj->btf, vi->type); |
2874 | 1.08k | var_extra = btf_var(var); |
2875 | 1.08k | map_name = btf__name_by_offset(obj->btf, var->name_off); |
2876 | | |
2877 | 1.08k | if (map_name == NULL || map_name[0] == '\0') { |
2878 | 1 | pr_warn("map #%d: empty name.\n", var_idx); |
2879 | 1 | return -EINVAL; |
2880 | 1 | } |
2881 | 1.08k | if ((__u64)vi->offset + vi->size > data->d_size) { |
2882 | 41 | pr_warn("map '%s' BTF data is corrupted.\n", map_name); |
2883 | 41 | return -EINVAL; |
2884 | 41 | } |
2885 | 1.04k | if (!btf_is_var(var)) { |
2886 | 0 | pr_warn("map '%s': unexpected var kind %s.\n", |
2887 | 0 | map_name, btf_kind_str(var)); |
2888 | 0 | return -EINVAL; |
2889 | 0 | } |
2890 | 1.04k | if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) { |
2891 | 56 | pr_warn("map '%s': unsupported map linkage %s.\n", |
2892 | 56 | map_name, btf_var_linkage_str(var_extra->linkage)); |
2893 | 56 | return -EOPNOTSUPP; |
2894 | 56 | } |
2895 | | |
2896 | 986 | def = skip_mods_and_typedefs(obj->btf, var->type, NULL); |
2897 | 986 | if (!btf_is_struct(def)) { |
2898 | 18 | pr_warn("map '%s': unexpected def kind %s.\n", |
2899 | 18 | map_name, btf_kind_str(var)); |
2900 | 18 | return -EINVAL; |
2901 | 18 | } |
2902 | 968 | if (def->size > vi->size) { |
2903 | 38 | pr_warn("map '%s': invalid def size.\n", map_name); |
2904 | 38 | return -EINVAL; |
2905 | 38 | } |
2906 | | |
2907 | 930 | map = bpf_object__add_map(obj); |
2908 | 930 | if (IS_ERR(map)) |
2909 | 0 | return PTR_ERR(map); |
2910 | 930 | map->name = strdup(map_name); |
2911 | 930 | if (!map->name) { |
2912 | 0 | pr_warn("map '%s': failed to alloc map name.\n", map_name); |
2913 | 0 | return -ENOMEM; |
2914 | 0 | } |
2915 | 930 | map->libbpf_type = LIBBPF_MAP_UNSPEC; |
2916 | 930 | map->def.type = BPF_MAP_TYPE_UNSPEC; |
2917 | 930 | map->sec_idx = sec_idx; |
2918 | 930 | map->sec_offset = vi->offset; |
2919 | 930 | map->btf_var_idx = var_idx; |
2920 | 930 | pr_debug("map '%s': at sec_idx %d, offset %zu.\n", |
2921 | 930 | map_name, map->sec_idx, map->sec_offset); |
2922 | | |
2923 | 930 | err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def); |
2924 | 930 | if (err) |
2925 | 844 | return err; |
2926 | | |
2927 | 86 | fill_map_from_def(map, &map_def); |
2928 | | |
2929 | 86 | if (map_def.pinning == LIBBPF_PIN_BY_NAME) { |
2930 | 1 | err = build_map_pin_path(map, pin_root_path); |
2931 | 1 | if (err) { |
2932 | 0 | pr_warn("map '%s': couldn't build pin path.\n", map->name); |
2933 | 0 | return err; |
2934 | 0 | } |
2935 | 1 | } |
2936 | | |
2937 | 86 | if (map_def.parts & MAP_DEF_INNER_MAP) { |
2938 | 0 | map->inner_map = calloc(1, sizeof(*map->inner_map)); |
2939 | 0 | if (!map->inner_map) |
2940 | 0 | return -ENOMEM; |
2941 | 0 | map->inner_map->fd = create_placeholder_fd(); |
2942 | 0 | if (map->inner_map->fd < 0) |
2943 | 0 | return map->inner_map->fd; |
2944 | 0 | map->inner_map->sec_idx = sec_idx; |
2945 | 0 | map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1); |
2946 | 0 | if (!map->inner_map->name) |
2947 | 0 | return -ENOMEM; |
2948 | 0 | sprintf(map->inner_map->name, "%s.inner", map_name); |
2949 | |
2950 | 0 | fill_map_from_def(map->inner_map, &inner_def); |
2951 | 0 | } |
2952 | | |
2953 | 86 | err = map_fill_btf_type_info(obj, map); |
2954 | 86 | if (err) |
2955 | 0 | return err; |
2956 | | |
2957 | 86 | return 0; |
2958 | 86 | } |
2959 | | |
2960 | | static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map, |
2961 | | const char *sec_name, int sec_idx, |
2962 | | void *data, size_t data_sz) |
2963 | 0 | { |
2964 | 0 | const long page_sz = sysconf(_SC_PAGE_SIZE); |
2965 | 0 | size_t mmap_sz; |
2966 | |
2967 | 0 | mmap_sz = bpf_map_mmap_sz(obj->arena_map); |
2968 | 0 | if (roundup(data_sz, page_sz) > mmap_sz) { |
2969 | 0 | pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n", |
2970 | 0 | sec_name, mmap_sz, data_sz); |
2971 | 0 | return -E2BIG; |
2972 | 0 | } |
2973 | | |
2974 | 0 | obj->arena_data = malloc(data_sz); |
2975 | 0 | if (!obj->arena_data) |
2976 | 0 | return -ENOMEM; |
2977 | 0 | memcpy(obj->arena_data, data, data_sz); |
2978 | 0 | obj->arena_data_sz = data_sz; |
2979 | | |
2980 | | /* make bpf_map__init_value() work for ARENA maps */ |
2981 | 0 | map->mmaped = obj->arena_data; |
2982 | |
2983 | 0 | return 0; |
2984 | 0 | } |
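/* Illustration (assumes the selftests-style __arena macro): the BPF-side
 * pattern that reaches this function is an explicit ARENA map plus globals
 * placed in the arena address space:
 *
 *     struct {
 *         __uint(type, BPF_MAP_TYPE_ARENA);
 *         __uint(map_flags, BPF_F_MMAPABLE);
 *         __uint(max_entries, 10);   // size of the arena in pages
 *     } arena SEC(".maps");
 *
 *     int __arena global_cnt;        // emitted into ARENA_SEC
 *
 * The initial image of such globals is what gets copied into
 * obj->arena_data above.
 */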
2985 | | |
2986 | | static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict, |
2987 | | const char *pin_root_path) |
2988 | 3.38k | { |
2989 | 3.38k | const struct btf_type *sec = NULL; |
2990 | 3.38k | int nr_types, i, vlen, err; |
2991 | 3.38k | const struct btf_type *t; |
2992 | 3.38k | const char *name; |
2993 | 3.38k | Elf_Data *data; |
2994 | 3.38k | Elf_Scn *scn; |
2995 | | |
2996 | 3.38k | if (obj->efile.btf_maps_shndx < 0) |
2997 | 2.22k | return 0; |
2998 | | |
2999 | 1.16k | scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx); |
3000 | 1.16k | data = elf_sec_data(obj, scn); |
3001 | 1.16k | if (!scn || !data) { |
3002 | 0 | pr_warn("elf: failed to get %s map definitions for %s\n", |
3003 | 0 | MAPS_ELF_SEC, obj->path); |
3004 | 0 | return -EINVAL; |
3005 | 0 | } |
3006 | | |
3007 | 1.16k | nr_types = btf__type_cnt(obj->btf); |
3008 | 14.2k | for (i = 1; i < nr_types; i++) { |
3009 | 14.1k | t = btf__type_by_id(obj->btf, i); |
3010 | 14.1k | if (!btf_is_datasec(t)) |
3011 | 12.3k | continue; |
3012 | 1.85k | name = btf__name_by_offset(obj->btf, t->name_off); |
3013 | 1.85k | if (strcmp(name, MAPS_ELF_SEC) == 0) { |
3014 | 1.11k | sec = t; |
3015 | 1.11k | obj->efile.btf_maps_sec_btf_id = i; |
3016 | 1.11k | break; |
3017 | 1.11k | } |
3018 | 1.85k | } |
3019 | | |
3020 | 1.16k | if (!sec) { |
3021 | 51 | pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC); |
3022 | 51 | return -ENOENT; |
3023 | 51 | } |
3024 | | |
3025 | 1.11k | vlen = btf_vlen(sec); |
3026 | 1.19k | for (i = 0; i < vlen; i++) { |
3027 | 1.08k | err = bpf_object__init_user_btf_map(obj, sec, i, |
3028 | 1.08k | obj->efile.btf_maps_shndx, |
3029 | 1.08k | data, strict, |
3030 | 1.08k | pin_root_path); |
3031 | 1.08k | if (err) |
3032 | 998 | return err; |
3033 | 1.08k | } |
3034 | | |
3035 | 199 | for (i = 0; i < obj->nr_maps; i++) { |
3036 | 86 | struct bpf_map *map = &obj->maps[i]; |
3037 | | |
3038 | 86 | if (map->def.type != BPF_MAP_TYPE_ARENA) |
3039 | 85 | continue; |
3040 | | |
3041 | 1 | if (obj->arena_map) { |
3042 | 0 | pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n", |
3043 | 0 | map->name, obj->arena_map->name); |
3044 | 0 | return -EINVAL; |
3045 | 0 | } |
3046 | 1 | obj->arena_map = map; |
3047 | | |
3048 | 1 | if (obj->efile.arena_data) { |
3049 | 0 | err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx, |
3050 | 0 | obj->efile.arena_data->d_buf, |
3051 | 0 | obj->efile.arena_data->d_size); |
3052 | 0 | if (err) |
3053 | 0 | return err; |
3054 | 0 | } |
3055 | 1 | } |
3056 | 113 | if (obj->efile.arena_data && !obj->arena_map) { |
3057 | 1 | pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n", |
3058 | 1 | ARENA_SEC); |
3059 | 1 | return -ENOENT; |
3060 | 1 | } |
3061 | | |
3062 | 112 | return 0; |
3063 | 113 | } |
3064 | | |
3065 | | static int bpf_object__init_maps(struct bpf_object *obj, |
3066 | | const struct bpf_object_open_opts *opts) |
3067 | 3.38k | { |
3068 | 3.38k | const char *pin_root_path; |
3069 | 3.38k | bool strict; |
3070 | 3.38k | int err = 0; |
3071 | | |
3072 | 3.38k | strict = !OPTS_GET(opts, relaxed_maps, false); |
3073 | 3.38k | pin_root_path = OPTS_GET(opts, pin_root_path, NULL); |
3074 | | |
3075 | 3.38k | err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path); |
3076 | 3.38k | err = err ?: bpf_object__init_global_data_maps(obj); |
3077 | 3.38k | err = err ?: bpf_object__init_kconfig_map(obj); |
3078 | 2.33k | err = err ?: bpf_object_init_struct_ops(obj); |
3079 | | |
3080 | 1.26k | return err; |
3081 | 1.26k | } |
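/* Usage sketch (caller side, not from this file): the two options read
 * above come from bpf_object_open_opts:
 *
 *     LIBBPF_OPTS(bpf_object_open_opts, opts,
 *         .relaxed_maps = true,                  // strict == false
 *         .pin_root_path = "/sys/fs/bpf/myapp",  // hypothetical path
 *     );
 *     struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", &opts);
 */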
3082 | | |
3083 | | static bool section_have_execinstr(struct bpf_object *obj, int idx) |
3084 | 2.48k | { |
3085 | 2.48k | Elf64_Shdr *sh; |
3086 | | |
3087 | 2.48k | sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx)); |
3088 | 2.48k | if (!sh) |
3089 | 0 | return false; |
3090 | | |
3091 | 2.48k | return sh->sh_flags & SHF_EXECINSTR; |
3092 | 2.48k | } |
3093 | | |
3094 | | static bool starts_with_qmark(const char *s) |
3095 | 0 | { |
3096 | 0 | return s && s[0] == '?'; |
3097 | 0 | } |
3098 | | |
3099 | | static bool btf_needs_sanitization(struct bpf_object *obj) |
3100 | 0 | { |
3101 | 0 | bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC); |
3102 | 0 | bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); |
3103 | 0 | bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); |
3104 | 0 | bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); |
3105 | 0 | bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); |
3106 | 0 | bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG); |
3107 | 0 | bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64); |
3108 | 0 | bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC); |
3109 | |
3110 | 0 | return !has_func || !has_datasec || !has_func_global || !has_float || |
3111 | 0 | !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec; |
3112 | 0 | } |
3113 | | |
3114 | | static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) |
3115 | 0 | { |
3116 | 0 | bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC); |
3117 | 0 | bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); |
3118 | 0 | bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); |
3119 | 0 | bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); |
3120 | 0 | bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); |
3121 | 0 | bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG); |
3122 | 0 | bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64); |
3123 | 0 | bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC); |
3124 | 0 | int enum64_placeholder_id = 0; |
3125 | 0 | struct btf_type *t; |
3126 | 0 | int i, j, vlen; |
3127 | |
3128 | 0 | for (i = 1; i < btf__type_cnt(btf); i++) { |
3129 | 0 | t = (struct btf_type *)btf__type_by_id(btf, i); |
3130 | |
3131 | 0 | if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) { |
3132 | | /* replace VAR/DECL_TAG with INT */ |
3133 | 0 | t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0); |
3134 | | /* |
3135 | | * using size = 1 is the safest choice, 4 will be too |
3136 | | * big and cause kernel BTF validation failure if |
3137 | | * original variable took less than 4 bytes |
3138 | | */ |
3139 | 0 | t->size = 1; |
3140 | 0 | *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8); |
3141 | 0 | } else if (!has_datasec && btf_is_datasec(t)) { |
3142 | | /* replace DATASEC with STRUCT */ |
3143 | 0 | const struct btf_var_secinfo *v = btf_var_secinfos(t); |
3144 | 0 | struct btf_member *m = btf_members(t); |
3145 | 0 | struct btf_type *vt; |
3146 | 0 | char *name; |
3147 | |
3148 | 0 | name = (char *)btf__name_by_offset(btf, t->name_off); |
3149 | 0 | while (*name) { |
3150 | 0 | if (*name == '.' || *name == '?') |
3151 | 0 | *name = '_'; |
3152 | 0 | name++; |
3153 | 0 | } |
3154 | |
3155 | 0 | vlen = btf_vlen(t); |
3156 | 0 | t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen); |
3157 | 0 | for (j = 0; j < vlen; j++, v++, m++) { |
3158 | | /* order of field assignments is important */ |
3159 | 0 | m->offset = v->offset * 8; |
3160 | 0 | m->type = v->type; |
3161 | | /* preserve variable name as member name */ |
3162 | 0 | vt = (void *)btf__type_by_id(btf, v->type); |
3163 | 0 | m->name_off = vt->name_off; |
3164 | 0 | } |
3165 | 0 | } else if (!has_qmark_datasec && btf_is_datasec(t) && |
3166 | 0 | starts_with_qmark(btf__name_by_offset(btf, t->name_off))) { |
3167 | | /* replace '?' prefix with '_' for DATASEC names */ |
3168 | 0 | char *name; |
3169 | |
3170 | 0 | name = (char *)btf__name_by_offset(btf, t->name_off); |
3171 | 0 | if (name[0] == '?') |
3172 | 0 | name[0] = '_'; |
3173 | 0 | } else if (!has_func && btf_is_func_proto(t)) { |
3174 | | /* replace FUNC_PROTO with ENUM */ |
3175 | 0 | vlen = btf_vlen(t); |
3176 | 0 | t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen); |
3177 | 0 | t->size = sizeof(__u32); /* kernel enforced */ |
3178 | 0 | } else if (!has_func && btf_is_func(t)) { |
3179 | | /* replace FUNC with TYPEDEF */ |
3180 | 0 | t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0); |
3181 | 0 | } else if (!has_func_global && btf_is_func(t)) { |
3182 | | /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */ |
3183 | 0 | t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0); |
3184 | 0 | } else if (!has_float && btf_is_float(t)) { |
3185 | | /* replace FLOAT with an equally-sized empty STRUCT; |
3186 | | * since C compilers do not accept e.g. "float" as a |
3187 | | * valid struct name, make it anonymous |
3188 | | */ |
3189 | 0 | t->name_off = 0; |
3190 | 0 | t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0); |
3191 | 0 | } else if (!has_type_tag && btf_is_type_tag(t)) { |
3192 | | /* replace TYPE_TAG with a CONST */ |
3193 | 0 | t->name_off = 0; |
3194 | 0 | t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0); |
3195 | 0 | } else if (!has_enum64 && btf_is_enum(t)) { |
3196 | | /* clear the kflag */ |
3197 | 0 | t->info = btf_type_info(btf_kind(t), btf_vlen(t), false); |
3198 | 0 | } else if (!has_enum64 && btf_is_enum64(t)) { |
3199 | | /* replace ENUM64 with a union */ |
3200 | 0 | struct btf_member *m; |
3201 | |
3202 | 0 | if (enum64_placeholder_id == 0) { |
3203 | 0 | enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0); |
3204 | 0 | if (enum64_placeholder_id < 0) |
3205 | 0 | return enum64_placeholder_id; |
3206 | | |
3207 | 0 | t = (struct btf_type *)btf__type_by_id(btf, i); |
3208 | 0 | } |
3209 | | |
3210 | 0 | m = btf_members(t); |
3211 | 0 | vlen = btf_vlen(t); |
3212 | 0 | t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen); |
3213 | 0 | for (j = 0; j < vlen; j++, m++) { |
3214 | 0 | m->type = enum64_placeholder_id; |
3215 | 0 | m->offset = 0; |
3216 | 0 | } |
3217 | 0 | } |
3218 | 0 | } |
3219 | | |
3220 | 0 | return 0; |
3221 | 0 | } |
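/* Worked illustration of the DATASEC -> STRUCT downgrade above (made-up
 * type IDs): on a kernel without BTF_KIND_DATASEC support,
 *
 *     DATASEC '.data' size=8 vlen=1
 *         type_id=5 offset=0 size=8        // VAR 'counter'
 *
 * is rewritten in place into an equally laid out struct, with '.'/'?'
 * in the name replaced by '_' and byte offsets converted to bit offsets:
 *
 *     STRUCT '_data' size=8 vlen=1
 *         'counter' type_id=5 bits_offset=0
 */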
3222 | | |
3223 | | static bool libbpf_needs_btf(const struct bpf_object *obj) |
3224 | 2.94k | { |
3225 | 2.94k | return obj->efile.btf_maps_shndx >= 0 || |
3226 | 2.94k | obj->efile.has_st_ops || |
3227 | 2.94k | obj->nr_extern > 0; |
3228 | 2.94k | } |
3229 | | |
3230 | | static bool kernel_needs_btf(const struct bpf_object *obj) |
3231 | 0 | { |
3232 | 0 | return obj->efile.has_st_ops; |
3233 | 0 | } |
3234 | | |
3235 | | static int bpf_object__init_btf(struct bpf_object *obj, |
3236 | | Elf_Data *btf_data, |
3237 | | Elf_Data *btf_ext_data) |
3238 | 6.23k | { |
3239 | 6.23k | int err = -ENOENT; |
3240 | | |
3241 | 6.23k | if (btf_data) { |
3242 | 4.57k | obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); |
3243 | 4.57k | err = libbpf_get_error(obj->btf); |
3244 | 4.57k | if (err) { |
3245 | 946 | obj->btf = NULL; |
3246 | 946 | pr_warn("Error loading ELF section %s: %s.\n", BTF_ELF_SEC, errstr(err)); |
3247 | 946 | goto out; |
3248 | 946 | } |
3249 | | /* enforce 8-byte pointers for BPF-targeted BTFs */ |
3250 | 3.62k | btf__set_pointer_size(obj->btf, 8); |
3251 | 3.62k | } |
3252 | 5.29k | if (btf_ext_data) { |
3253 | 433 | struct btf_ext_info *ext_segs[3]; |
3254 | 433 | int seg_num, sec_num; |
3255 | | |
3256 | 433 | if (!obj->btf) { |
3257 | 5 | pr_debug("Ignoring ELF section %s because the ELF section it depends on, %s, is not found.\n",
3258 | 5 | BTF_EXT_ELF_SEC, BTF_ELF_SEC);
3259 | 5 | goto out; |
3260 | 5 | } |
3261 | 428 | obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); |
3262 | 428 | err = libbpf_get_error(obj->btf_ext); |
3263 | 428 | if (err) { |
3264 | 332 | pr_warn("Error loading ELF section %s: %s. Ignoring it and continuing.\n",
3265 | 332 | BTF_EXT_ELF_SEC, errstr(err));
3266 | 332 | obj->btf_ext = NULL; |
3267 | 332 | goto out; |
3268 | 332 | } |
3269 | | |
3270 | | /* setup .BTF.ext to ELF section mapping */ |
3271 | 96 | ext_segs[0] = &obj->btf_ext->func_info; |
3272 | 96 | ext_segs[1] = &obj->btf_ext->line_info; |
3273 | 96 | ext_segs[2] = &obj->btf_ext->core_relo_info; |
3274 | 384 | for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) { |
3275 | 288 | struct btf_ext_info *seg = ext_segs[seg_num]; |
3276 | 288 | const struct btf_ext_info_sec *sec; |
3277 | 288 | const char *sec_name; |
3278 | 288 | Elf_Scn *scn; |
3279 | | |
3280 | 288 | if (seg->sec_cnt == 0) |
3281 | 183 | continue; |
3282 | | |
3283 | 105 | seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs)); |
3284 | 105 | if (!seg->sec_idxs) { |
3285 | 0 | err = -ENOMEM; |
3286 | 0 | goto out; |
3287 | 0 | } |
3288 | | |
3289 | 105 | sec_num = 0; |
3290 | 264 | for_each_btf_ext_sec(seg, sec) { |
3291 | | /* preventively increment index to avoid doing |
3292 | | * this before every continue below |
3293 | | */ |
3294 | 264 | sec_num++; |
3295 | | |
3296 | 264 | sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); |
3297 | 264 | if (str_is_empty(sec_name)) |
3298 | 129 | continue; |
3299 | 135 | scn = elf_sec_by_name(obj, sec_name); |
3300 | 135 | if (!scn) |
3301 | 121 | continue; |
3302 | | |
3303 | 14 | seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn); |
3304 | 14 | } |
3305 | 105 | } |
3306 | 96 | } |
3307 | 6.23k | out: |
3308 | 6.23k | if (err && libbpf_needs_btf(obj)) { |
3309 | 48 | pr_warn("BTF is required, but is missing or corrupted.\n"); |
3310 | 48 | return err; |
3311 | 48 | } |
3312 | 6.18k | return 0; |
3313 | 6.23k | } |
3314 | | |
3315 | | static int compare_vsi_off(const void *_a, const void *_b) |
3316 | 1.92k | { |
3317 | 1.92k | const struct btf_var_secinfo *a = _a; |
3318 | 1.92k | const struct btf_var_secinfo *b = _b; |
3319 | | |
3320 | 1.92k | return a->offset - b->offset; |
3321 | 1.92k | } |
3322 | | |
3323 | | static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf, |
3324 | | struct btf_type *t) |
3325 | 3.67k | { |
3326 | 3.67k | __u32 size = 0, i, vars = btf_vlen(t); |
3327 | 3.67k | const char *sec_name = btf__name_by_offset(btf, t->name_off); |
3328 | 3.67k | struct btf_var_secinfo *vsi; |
3329 | 3.67k | bool fixup_offsets = false; |
3330 | 3.67k | int err; |
3331 | | |
3332 | 3.67k | if (!sec_name) { |
3333 | 0 | pr_debug("No name found in string section for DATASEC kind.\n"); |
3334 | 0 | return -ENOENT; |
3335 | 0 | } |
3336 | | |
3337 | | /* Extern-backing datasecs (.ksyms, .kconfig) have their size and |
3338 | | * variable offsets set at the previous step. Further, not every |
3339 | | * extern BTF VAR has corresponding ELF symbol preserved, so we skip |
3340 | | * all fixups altogether for such sections and go straight to sorting |
3341 | | * VARs within their DATASEC. |
3342 | | */ |
3343 | 3.67k | if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0) |
3344 | 414 | goto sort_vars; |
3345 | | |
3346 | | /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to |
3347 | | * fix this up. But BPF static linker already fixes this up and fills |
3348 | | * all the sizes and offsets during static linking. So this step has |
3349 | | * to be optional. But the STV_HIDDEN handling is non-optional for any |
3350 | | * non-extern DATASEC, so the variable fixup loop below handles both |
3351 | | * functions at the same time, paying the cost of BTF VAR <-> ELF |
3352 | | * symbol matching just once. |
3353 | | */ |
3354 | 3.26k | if (t->size == 0) { |
3355 | 513 | err = find_elf_sec_sz(obj, sec_name, &size); |
3356 | 513 | if (err || !size) { |
3357 | 158 | pr_debug("sec '%s': failed to determine size from ELF: size %u, err %s\n", |
3358 | 158 | sec_name, size, errstr(err)); |
3359 | 158 | return -ENOENT; |
3360 | 158 | } |
3361 | | |
3362 | 355 | t->size = size; |
3363 | 355 | fixup_offsets = true; |
3364 | 355 | } |
3365 | | |
3366 | 5.22k | for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) { |
3367 | 2.46k | const struct btf_type *t_var; |
3368 | 2.46k | struct btf_var *var; |
3369 | 2.46k | const char *var_name; |
3370 | 2.46k | Elf64_Sym *sym; |
3371 | | |
3372 | 2.46k | t_var = btf__type_by_id(btf, vsi->type); |
3373 | 2.46k | if (!t_var || !btf_is_var(t_var)) { |
3374 | 88 | pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name); |
3375 | 88 | return -EINVAL; |
3376 | 88 | } |
3377 | | |
3378 | 2.38k | var = btf_var(t_var); |
3379 | 2.38k | if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN) |
3380 | 284 | continue; |
3381 | | |
3382 | 2.09k | var_name = btf__name_by_offset(btf, t_var->name_off); |
3383 | 2.09k | if (!var_name) { |
3384 | 0 | pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n", |
3385 | 0 | sec_name, i); |
3386 | 0 | return -ENOENT; |
3387 | 0 | } |
3388 | | |
3389 | 2.09k | sym = find_elf_var_sym(obj, var_name); |
3390 | 2.09k | if (IS_ERR(sym)) { |
3391 | 256 | pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n", |
3392 | 256 | sec_name, var_name); |
3393 | 256 | return -ENOENT; |
3394 | 256 | } |
3395 | | |
3396 | 1.84k | if (fixup_offsets) |
3397 | 208 | vsi->offset = sym->st_value; |
3398 | | |
3399 | | /* if variable is a global/weak symbol, but has restricted |
3400 | | * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR |
3401 | | * as static. This follows similar logic for functions (BPF |
3402 | | * subprogs) and influences libbpf's further decisions about |
3403 | | * whether to make global data BPF array maps as |
3404 | | * BPF_F_MMAPABLE. |
3405 | | */ |
3406 | 1.84k | if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN |
3407 | 1.84k | || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL) |
3408 | 138 | var->linkage = BTF_VAR_STATIC; |
3409 | 1.84k | } |
3410 | | |
3411 | 3.17k | sort_vars: |
3412 | 3.17k | qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off); |
3413 | 3.17k | return 0; |
3414 | 3.10k | } |
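/* Example of the visibility handling above (illustrative BPF C):
 *
 *     __attribute__((visibility("hidden"))) int internal_cnt = 0;
 *
 * The symbol stays global in ELF, but its BTF VAR is downgraded to static
 * here, which in turn lets libbpf treat a global-data map whose variables
 * are all hidden or static as not needing BPF_F_MMAPABLE.
 */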
3415 | | |
3416 | | static int bpf_object_fixup_btf(struct bpf_object *obj) |
3417 | 3.88k | { |
3418 | 3.88k | int i, n, err = 0; |
3419 | | |
3420 | 3.88k | if (!obj->btf) |
3421 | 1.60k | return 0; |
3422 | | |
3423 | 2.28k | n = btf__type_cnt(obj->btf); |
3424 | 25.9k | for (i = 1; i < n; i++) { |
3425 | 24.1k | struct btf_type *t = btf_type_by_id(obj->btf, i); |
3426 | | |
3427 | | /* Loader needs to fix up some of the things compiler |
3428 | | * couldn't get its hands on while emitting BTF. This |
3429 | | * is section size and global variable offset. We use |
3430 | | * the info from the ELF itself for this purpose. |
3431 | | */ |
3432 | 24.1k | if (btf_is_datasec(t)) { |
3433 | 3.67k | err = btf_fixup_datasec(obj, obj->btf, t); |
3434 | 3.67k | if (err) |
3435 | 502 | return err; |
3436 | 3.67k | } |
3437 | 24.1k | } |
3438 | | |
3439 | 1.78k | return 0; |
3440 | 2.28k | } |
3441 | | |
3442 | | static bool prog_needs_vmlinux_btf(struct bpf_program *prog) |
3443 | 0 | { |
3444 | 0 | if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || |
3445 | 0 | prog->type == BPF_PROG_TYPE_LSM) |
3446 | 0 | return true; |
3447 | | |
3448 | | /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs |
3449 | | * also need vmlinux BTF |
3450 | | */ |
3451 | 0 | if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd) |
3452 | 0 | return true; |
3453 | | |
3454 | 0 | return false; |
3455 | 0 | } |
3456 | | |
3457 | | static bool map_needs_vmlinux_btf(struct bpf_map *map) |
3458 | 0 | { |
3459 | 0 | return bpf_map__is_struct_ops(map); |
3460 | 0 | } |
3461 | | |
3462 | | static bool obj_needs_vmlinux_btf(const struct bpf_object *obj) |
3463 | 0 | { |
3464 | 0 | struct bpf_program *prog; |
3465 | 0 | struct bpf_map *map; |
3466 | 0 | int i; |
3467 | | |
3468 | | /* CO-RE relocations need kernel BTF, but only when btf_custom_path
3469 | | * is not specified |
3470 | | */ |
3471 | 0 | if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path) |
3472 | 0 | return true; |
3473 | | |
3474 | | /* Support for typed ksyms needs kernel BTF */ |
3475 | 0 | for (i = 0; i < obj->nr_extern; i++) { |
3476 | 0 | const struct extern_desc *ext; |
3477 | |
3478 | 0 | ext = &obj->externs[i]; |
3479 | 0 | if (ext->type == EXT_KSYM && ext->ksym.type_id) |
3480 | 0 | return true; |
3481 | 0 | } |
3482 | | |
3483 | 0 | bpf_object__for_each_program(prog, obj) { |
3484 | 0 | if (!prog->autoload) |
3485 | 0 | continue; |
3486 | 0 | if (prog_needs_vmlinux_btf(prog)) |
3487 | 0 | return true; |
3488 | 0 | } |
3489 | | |
3490 | 0 | bpf_object__for_each_map(map, obj) { |
3491 | 0 | if (map_needs_vmlinux_btf(map)) |
3492 | 0 | return true; |
3493 | 0 | } |
3494 | | |
3495 | 0 | return false; |
3496 | 0 | } |
3497 | | |
3498 | | static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force) |
3499 | 0 | { |
3500 | 0 | int err; |
3501 | | |
3502 | | /* btf_vmlinux could be loaded earlier */ |
3503 | 0 | if (obj->btf_vmlinux || obj->gen_loader) |
3504 | 0 | return 0; |
3505 | | |
3506 | 0 | if (!force && !obj_needs_vmlinux_btf(obj)) |
3507 | 0 | return 0; |
3508 | | |
3509 | 0 | obj->btf_vmlinux = btf__load_vmlinux_btf(); |
3510 | 0 | err = libbpf_get_error(obj->btf_vmlinux); |
3511 | 0 | if (err) { |
3512 | 0 | pr_warn("Error loading vmlinux BTF: %s\n", errstr(err)); |
3513 | 0 | obj->btf_vmlinux = NULL; |
3514 | 0 | return err; |
3515 | 0 | } |
3516 | 0 | return 0; |
3517 | 0 | } |
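/* Caller-side sketch (hypothetical user code): the same loading can be
 * done directly via the public API, mirroring the error handling above:
 *
 *     struct btf *vmlinux_btf = btf__load_vmlinux_btf();
 *     int err = libbpf_get_error(vmlinux_btf);
 *
 *     if (err)
 *         fprintf(stderr, "no vmlinux BTF: %d\n", err);
 *     ...
 *     btf__free(vmlinux_btf);
 */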
3518 | | |
3519 | | static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) |
3520 | 0 | { |
3521 | 0 | struct btf *kern_btf = obj->btf; |
3522 | 0 | bool btf_mandatory, sanitize; |
3523 | 0 | int i, err = 0; |
3524 | |
|
3525 | 0 | if (!obj->btf) |
3526 | 0 | return 0; |
3527 | | |
3528 | 0 | if (!kernel_supports(obj, FEAT_BTF)) { |
3529 | 0 | if (kernel_needs_btf(obj)) { |
3530 | 0 | err = -EOPNOTSUPP; |
3531 | 0 | goto report; |
3532 | 0 | } |
3533 | 0 | pr_debug("Kernel doesn't support BTF, skipping uploading it.\n"); |
3534 | 0 | return 0; |
3535 | 0 | } |
3536 | | |
3537 | | /* Even though some subprogs are global/weak, user might prefer more |
3538 | | * permissive BPF verification process that BPF verifier performs for |
3539 | | * static functions, taking into account more context from the caller |
3540 | | * functions. In such case, they need to mark such subprogs with |
3541 | | * __attribute__((visibility("hidden"))) and libbpf will adjust |
3542 | | * corresponding FUNC BTF type to be marked as static and trigger more |
3543 | | * involved BPF verification process. |
3544 | | */ |
3545 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
3546 | 0 | struct bpf_program *prog = &obj->programs[i]; |
3547 | 0 | struct btf_type *t; |
3548 | 0 | const char *name; |
3549 | 0 | int j, n; |
3550 | |
|
3551 | 0 | if (!prog->mark_btf_static || !prog_is_subprog(obj, prog)) |
3552 | 0 | continue; |
3553 | | |
3554 | 0 | n = btf__type_cnt(obj->btf); |
3555 | 0 | for (j = 1; j < n; j++) { |
3556 | 0 | t = btf_type_by_id(obj->btf, j); |
3557 | 0 | if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) |
3558 | 0 | continue; |
3559 | | |
3560 | 0 | name = btf__str_by_offset(obj->btf, t->name_off); |
3561 | 0 | if (strcmp(name, prog->name) != 0) |
3562 | 0 | continue; |
3563 | | |
3564 | 0 | t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0); |
3565 | 0 | break; |
3566 | 0 | } |
3567 | 0 | } |
3568 | |
3569 | 0 | sanitize = btf_needs_sanitization(obj); |
3570 | 0 | if (sanitize) { |
3571 | 0 | const void *raw_data; |
3572 | 0 | __u32 sz; |
3573 | | |
3574 | | /* clone BTF to sanitize a copy and leave the original intact */ |
3575 | 0 | raw_data = btf__raw_data(obj->btf, &sz); |
3576 | 0 | kern_btf = btf__new(raw_data, sz); |
3577 | 0 | err = libbpf_get_error(kern_btf); |
3578 | 0 | if (err) |
3579 | 0 | return err; |
3580 | | |
3581 | | /* enforce 8-byte pointers for BPF-targeted BTFs */ |
3582 | 0 | btf__set_pointer_size(obj->btf, 8); |
3583 | 0 | err = bpf_object__sanitize_btf(obj, kern_btf); |
3584 | 0 | if (err) |
3585 | 0 | return err; |
3586 | 0 | } |
3587 | | |
3588 | 0 | if (obj->gen_loader) { |
3589 | 0 | __u32 raw_size = 0; |
3590 | 0 | const void *raw_data = btf__raw_data(kern_btf, &raw_size); |
3591 | |
3592 | 0 | if (!raw_data) |
3593 | 0 | return -ENOMEM; |
3594 | 0 | bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size); |
3595 | | /* Pretend to have valid FD to pass various fd >= 0 checks. |
3596 | | * This fd == 0 will not be used with any syscall and will be reset to -1 eventually. |
3597 | | */ |
3598 | 0 | btf__set_fd(kern_btf, 0); |
3599 | 0 | } else { |
3600 | | /* currently BPF_BTF_LOAD only supports log_level 1 */ |
3601 | 0 | err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size, |
3602 | 0 | obj->log_level ? 1 : 0, obj->token_fd); |
3603 | 0 | } |
3604 | 0 | if (sanitize) { |
3605 | 0 | if (!err) { |
3606 | | /* move fd to libbpf's BTF */ |
3607 | 0 | btf__set_fd(obj->btf, btf__fd(kern_btf)); |
3608 | 0 | btf__set_fd(kern_btf, -1); |
3609 | 0 | } |
3610 | 0 | btf__free(kern_btf); |
3611 | 0 | } |
3612 | 0 | report: |
3613 | 0 | if (err) { |
3614 | 0 | btf_mandatory = kernel_needs_btf(obj); |
3615 | 0 | if (btf_mandatory) { |
3616 | 0 | pr_warn("Error loading .BTF into kernel: %s. BTF is mandatory, can't proceed.\n", |
3617 | 0 | errstr(err)); |
3618 | 0 | } else { |
3619 | 0 | pr_info("Error loading .BTF into kernel: %s. BTF is optional, ignoring.\n", |
3620 | 0 | errstr(err)); |
3621 | 0 | err = 0; |
3622 | 0 | } |
3623 | 0 | } |
3624 | 0 | return err; |
3625 | 0 | } |
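/* Example of the "hidden" subprog case described above (illustrative;
 * subfunc and its signature are made up):
 *
 *     __attribute__((visibility("hidden")))
 *     int subfunc(int x)
 *     {
 *         return x + 1;
 *     }
 *
 * Such a subprog is global in ELF, but the loop above rewrites its BTF
 * FUNC linkage from BTF_FUNC_GLOBAL to BTF_FUNC_STATIC, opting it into
 * the static-function verification that uses caller context.
 */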
3626 | | |
3627 | | static const char *elf_sym_str(const struct bpf_object *obj, size_t off) |
3628 | 25.5k | { |
3629 | 25.5k | const char *name; |
3630 | | |
3631 | 25.5k | name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off); |
3632 | 25.5k | if (!name) { |
3633 | 8.20k | pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", |
3634 | 8.20k | off, obj->path, elf_errmsg(-1)); |
3635 | 8.20k | return NULL; |
3636 | 8.20k | } |
3637 | | |
3638 | 17.3k | return name; |
3639 | 25.5k | } |
3640 | | |
3641 | | static const char *elf_sec_str(const struct bpf_object *obj, size_t off) |
3642 | 57.6k | { |
3643 | 57.6k | const char *name; |
3644 | | |
3645 | 57.6k | name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off); |
3646 | 57.6k | if (!name) { |
3647 | 1.14k | pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", |
3648 | 1.14k | off, obj->path, elf_errmsg(-1)); |
3649 | 1.14k | return NULL; |
3650 | 1.14k | } |
3651 | | |
3652 | 56.5k | return name; |
3653 | 57.6k | } |
3654 | | |
3655 | | static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx) |
3656 | 15.2k | { |
3657 | 15.2k | Elf_Scn *scn; |
3658 | | |
3659 | 15.2k | scn = elf_getscn(obj->efile.elf, idx); |
3660 | 15.2k | if (!scn) { |
3661 | 0 | pr_warn("elf: failed to get section(%zu) from %s: %s\n", |
3662 | 0 | idx, obj->path, elf_errmsg(-1)); |
3663 | 0 | return NULL; |
3664 | 0 | } |
3665 | 15.2k | return scn; |
3666 | 15.2k | } |
3667 | | |
3668 | | static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name) |
3669 | 648 | { |
3670 | 648 | Elf_Scn *scn = NULL; |
3671 | 648 | Elf *elf = obj->efile.elf; |
3672 | 648 | const char *sec_name; |
3673 | | |
3674 | 4.99k | while ((scn = elf_nextscn(elf, scn)) != NULL) { |
3675 | 4.72k | sec_name = elf_sec_name(obj, scn); |
3676 | 4.72k | if (!sec_name) |
3677 | 0 | return NULL; |
3678 | | |
3679 | 4.72k | if (strcmp(sec_name, name) != 0) |
3680 | 4.34k | continue; |
3681 | | |
3682 | 380 | return scn; |
3683 | 4.72k | } |
3684 | 268 | return NULL; |
3685 | 648 | } |
3686 | | |
3687 | | static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn) |
3688 | 123k | { |
3689 | 123k | Elf64_Shdr *shdr; |
3690 | | |
3691 | 123k | if (!scn) |
3692 | 0 | return NULL; |
3693 | | |
3694 | 123k | shdr = elf64_getshdr(scn); |
3695 | 123k | if (!shdr) { |
3696 | 0 | pr_warn("elf: failed to get section(%zu) header from %s: %s\n", |
3697 | 0 | elf_ndxscn(scn), obj->path, elf_errmsg(-1)); |
3698 | 0 | return NULL; |
3699 | 0 | } |
3700 | | |
3701 | 123k | return shdr; |
3702 | 123k | } |
3703 | | |
3704 | | static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn) |
3705 | 10.7k | { |
3706 | 10.7k | const char *name; |
3707 | 10.7k | Elf64_Shdr *sh; |
3708 | | |
3709 | 10.7k | if (!scn) |
3710 | 0 | return NULL; |
3711 | | |
3712 | 10.7k | sh = elf_sec_hdr(obj, scn); |
3713 | 10.7k | if (!sh) |
3714 | 0 | return NULL; |
3715 | | |
3716 | 10.7k | name = elf_sec_str(obj, sh->sh_name); |
3717 | 10.7k | if (!name) { |
3718 | 777 | pr_warn("elf: failed to get section(%zu) name from %s: %s\n", |
3719 | 777 | elf_ndxscn(scn), obj->path, elf_errmsg(-1)); |
3720 | 777 | return NULL; |
3721 | 777 | } |
3722 | | |
3723 | 9.93k | return name; |
3724 | 10.7k | } |
3725 | | |
3726 | | static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn) |
3727 | 48.3k | { |
3728 | 48.3k | Elf_Data *data; |
3729 | | |
3730 | 48.3k | if (!scn) |
3731 | 147 | return NULL; |
3732 | | |
3733 | 48.1k | data = elf_getdata(scn, 0); |
3734 | 48.1k | if (!data) { |
3735 | 551 | pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n", |
3736 | 551 | elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>", |
3737 | 551 | obj->path, elf_errmsg(-1)); |
3738 | 551 | return NULL; |
3739 | 551 | } |
3740 | | |
3741 | 47.6k | return data; |
3742 | 48.1k | } |
3743 | | |
3744 | | static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx) |
3745 | 507k | { |
3746 | 507k | if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym)) |
3747 | 144 | return NULL; |
3748 | | |
3749 | 507k | return (Elf64_Sym *)obj->efile.symbols->d_buf + idx; |
3750 | 507k | } |
3751 | | |
3752 | | static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx) |
3753 | 8.16k | { |
3754 | 8.16k | if (idx >= data->d_size / sizeof(Elf64_Rel)) |
3755 | 0 | return NULL; |
3756 | | |
3757 | 8.16k | return (Elf64_Rel *)data->d_buf + idx; |
3758 | 8.16k | } |
3759 | | |
3760 | | static bool is_sec_name_dwarf(const char *name) |
3761 | 42.7k | { |
3762 | | /* approximation, but the actual list is too long */ |
3763 | 42.7k | return str_has_pfx(name, ".debug_"); |
3764 | 42.7k | } |
3765 | | |
3766 | | static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name) |
3767 | 45.5k | { |
3768 | | /* no special handling of .strtab */ |
3769 | 45.5k | if (hdr->sh_type == SHT_STRTAB) |
3770 | 5.40k | return true; |
3771 | | |
3772 | | /* ignore .llvm_addrsig section as well */ |
3773 | 40.1k | if (hdr->sh_type == SHT_LLVM_ADDRSIG) |
3774 | 334 | return true; |
3775 | | |
3776 | | /* no subprograms will lead to an empty .text section, ignore it */ |
3777 | 39.8k | if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 && |
3778 | 39.8k | strcmp(name, ".text") == 0) |
3779 | 148 | return true; |
3780 | | |
3781 | | /* DWARF sections */ |
3782 | 39.7k | if (is_sec_name_dwarf(name)) |
3783 | 2.17k | return true; |
3784 | | |
3785 | 37.5k | if (str_has_pfx(name, ".rel")) { |
3786 | 3.02k | name += sizeof(".rel") - 1; |
3787 | | /* DWARF section relocations */ |
3788 | 3.02k | if (is_sec_name_dwarf(name)) |
3789 | 371 | return true; |
3790 | | |
3791 | | /* .BTF and .BTF.ext don't need relocations */ |
3792 | 2.65k | if (strcmp(name, BTF_ELF_SEC) == 0 || |
3793 | 2.65k | strcmp(name, BTF_EXT_ELF_SEC) == 0) |
3794 | 773 | return true; |
3795 | 2.65k | } |
3796 | | |
3797 | 36.3k | return false; |
3798 | 37.5k | } |
3799 | | |
3800 | | static int cmp_progs(const void *_a, const void *_b) |
3801 | 34.8k | { |
3802 | 34.8k | const struct bpf_program *a = _a; |
3803 | 34.8k | const struct bpf_program *b = _b; |
3804 | | |
3805 | 34.8k | if (a->sec_idx != b->sec_idx) |
3806 | 698 | return a->sec_idx < b->sec_idx ? -1 : 1; |
3807 | | |
3808 | | /* sec_insn_off can't be the same within the section */ |
3809 | 34.1k | return a->sec_insn_off < b->sec_insn_off ? -1 : 1; |
3810 | 34.8k | } |
3811 | | |
3812 | | static int bpf_object__elf_collect(struct bpf_object *obj) |
3813 | 9.35k | { |
3814 | 9.35k | struct elf_sec_desc *sec_desc; |
3815 | 9.35k | Elf *elf = obj->efile.elf; |
3816 | 9.35k | Elf_Data *btf_ext_data = NULL; |
3817 | 9.35k | Elf_Data *btf_data = NULL; |
3818 | 9.35k | int idx = 0, err = 0; |
3819 | 9.35k | const char *name; |
3820 | 9.35k | Elf_Data *data; |
3821 | 9.35k | Elf_Scn *scn; |
3822 | 9.35k | Elf64_Shdr *sh; |
3823 | | |
3824 | | /* ELF section indices are 0-based, but sec #0 is special "invalid" |
3825 | | * section. Since section count retrieved by elf_getshdrnum() does |
3826 | | * include sec #0, it is already the necessary size of an array to keep |
3827 | | * all the sections. |
3828 | | */ |
3829 | 9.35k | if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) { |
3830 | 0 | pr_warn("elf: failed to get the number of sections for %s: %s\n", |
3831 | 0 | obj->path, elf_errmsg(-1)); |
3832 | 0 | return -LIBBPF_ERRNO__FORMAT; |
3833 | 0 | } |
3834 | 9.35k | obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs)); |
3835 | 9.35k | if (!obj->efile.secs) |
3836 | 0 | return -ENOMEM; |
3837 | | |
3838 | | /* a bunch of ELF parsing functionality depends on processing symbols, |
3839 | | * so do the first pass and find the symbol table |
3840 | | */ |
3841 | 9.35k | scn = NULL; |
3842 | 67.1k | while ((scn = elf_nextscn(elf, scn)) != NULL) { |
3843 | 58.0k | sh = elf_sec_hdr(obj, scn); |
3844 | 58.0k | if (!sh) |
3845 | 0 | return -LIBBPF_ERRNO__FORMAT; |
3846 | | |
3847 | 58.0k | if (sh->sh_type == SHT_SYMTAB) { |
3848 | 9.28k | if (obj->efile.symbols) { |
3849 | 5 | pr_warn("elf: multiple symbol tables in %s\n", obj->path); |
3850 | 5 | return -LIBBPF_ERRNO__FORMAT; |
3851 | 5 | } |
3852 | | |
3853 | 9.27k | data = elf_sec_data(obj, scn); |
3854 | 9.27k | if (!data) |
3855 | 244 | return -LIBBPF_ERRNO__FORMAT; |
3856 | | |
3857 | 9.03k | idx = elf_ndxscn(scn); |
3858 | | |
3859 | 9.03k | obj->efile.symbols = data; |
3860 | 9.03k | obj->efile.symbols_shndx = idx; |
3861 | 9.03k | obj->efile.strtabidx = sh->sh_link; |
3862 | 9.03k | } |
3863 | 58.0k | } |
3864 | | |
3865 | 9.10k | if (!obj->efile.symbols) { |
3866 | 78 | pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n", |
3867 | 78 | obj->path); |
3868 | 78 | return -ENOENT; |
3869 | 78 | } |
3870 | | |
3871 | 9.02k | scn = NULL; |
3872 | 53.0k | while ((scn = elf_nextscn(elf, scn)) != NULL) { |
3873 | 45.9k | idx = elf_ndxscn(scn); |
3874 | 45.9k | sec_desc = &obj->efile.secs[idx]; |
3875 | | |
3876 | 45.9k | sh = elf_sec_hdr(obj, scn); |
3877 | 45.9k | if (!sh) |
3878 | 0 | return -LIBBPF_ERRNO__FORMAT; |
3879 | | |
3880 | 45.9k | name = elf_sec_str(obj, sh->sh_name); |
3881 | 45.9k | if (!name) |
3882 | 369 | return -LIBBPF_ERRNO__FORMAT; |
3883 | | |
3884 | 45.5k | if (ignore_elf_section(sh, name)) |
3885 | 9.20k | continue; |
3886 | | |
3887 | 36.3k | data = elf_sec_data(obj, scn); |
3888 | 36.3k | if (!data) |
3889 | 297 | return -LIBBPF_ERRNO__FORMAT; |
3890 | | |
3891 | 36.0k | pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", |
3892 | 36.0k | idx, name, (unsigned long)data->d_size, |
3893 | 36.0k | (int)sh->sh_link, (unsigned long)sh->sh_flags, |
3894 | 36.0k | (int)sh->sh_type); |
3895 | | |
3896 | 36.0k | if (strcmp(name, "license") == 0) { |
3897 | 807 | err = bpf_object__init_license(obj, data->d_buf, data->d_size); |
3898 | 807 | if (err) |
3899 | 1 | return err; |
3900 | 35.2k | } else if (strcmp(name, "version") == 0) { |
3901 | 52 | err = bpf_object__init_kversion(obj, data->d_buf, data->d_size); |
3902 | 52 | if (err) |
3903 | 10 | return err; |
3904 | 35.2k | } else if (strcmp(name, "maps") == 0) { |
3905 | 8 | pr_warn("elf: legacy map definitions in 'maps' section are not supported by libbpf v1.0+\n"); |
3906 | 8 | return -ENOTSUP; |
3907 | 35.2k | } else if (strcmp(name, MAPS_ELF_SEC) == 0) { |
3908 | 1.47k | obj->efile.btf_maps_shndx = idx; |
3909 | 33.7k | } else if (strcmp(name, BTF_ELF_SEC) == 0) { |
3910 | 4.74k | if (sh->sh_type != SHT_PROGBITS) |
3911 | 56 | return -LIBBPF_ERRNO__FORMAT; |
3912 | 4.68k | btf_data = data; |
3913 | 29.0k | } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { |
3914 | 581 | if (sh->sh_type != SHT_PROGBITS) |
3915 | 46 | return -LIBBPF_ERRNO__FORMAT; |
3916 | 535 | btf_ext_data = data; |
3917 | 28.4k | } else if (sh->sh_type == SHT_SYMTAB) { |
3918 | | /* already processed during the first pass above */ |
3919 | 21.0k | } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) { |
3920 | 4.48k | if (sh->sh_flags & SHF_EXECINSTR) { |
3921 | 1.33k | if (strcmp(name, ".text") == 0) |
3922 | 223 | obj->efile.text_shndx = idx; |
3923 | 1.33k | err = bpf_object__add_programs(obj, data, name, idx); |
3924 | 1.33k | if (err) |
3925 | 228 | return err; |
3926 | 3.14k | } else if (strcmp(name, DATA_SEC) == 0 || |
3927 | 3.14k | str_has_pfx(name, DATA_SEC ".")) { |
3928 | 555 | sec_desc->sec_type = SEC_DATA; |
3929 | 555 | sec_desc->shdr = sh; |
3930 | 555 | sec_desc->data = data; |
3931 | 2.59k | } else if (strcmp(name, RODATA_SEC) == 0 || |
3932 | 2.59k | str_has_pfx(name, RODATA_SEC ".")) { |
3933 | 608 | sec_desc->sec_type = SEC_RODATA; |
3934 | 608 | sec_desc->shdr = sh; |
3935 | 608 | sec_desc->data = data; |
3936 | 1.98k | } else if (strcmp(name, STRUCT_OPS_SEC) == 0 || |
3937 | 1.98k | strcmp(name, STRUCT_OPS_LINK_SEC) == 0 || |
3938 | 1.98k | strcmp(name, "?" STRUCT_OPS_SEC) == 0 || |
3939 | 1.98k | strcmp(name, "?" STRUCT_OPS_LINK_SEC) == 0) { |
3940 | 585 | sec_desc->sec_type = SEC_ST_OPS; |
3941 | 585 | sec_desc->shdr = sh; |
3942 | 585 | sec_desc->data = data; |
3943 | 585 | obj->efile.has_st_ops = true; |
3944 | 1.40k | } else if (strcmp(name, ARENA_SEC) == 0) { |
3945 | 85 | obj->efile.arena_data = data; |
3946 | 85 | obj->efile.arena_data_shndx = idx; |
3947 | 1.31k | } else { |
3948 | 1.31k | pr_info("elf: skipping unrecognized data section(%d) %s\n", |
3949 | 1.31k | idx, name); |
3950 | 1.31k | } |
3951 | 16.5k | } else if (sh->sh_type == SHT_REL) { |
3952 | 2.66k | int targ_sec_idx = sh->sh_info; /* points to other section */ |
3953 | | |
3954 | 2.66k | if (sh->sh_entsize != sizeof(Elf64_Rel) || |
3955 | 2.66k | targ_sec_idx >= obj->efile.sec_cnt) |
3956 | 178 | return -LIBBPF_ERRNO__FORMAT; |
3957 | | |
3958 | | /* Only do relo for section with exec instructions */ |
3959 | 2.48k | if (!section_have_execinstr(obj, targ_sec_idx) && |
3960 | 2.48k | strcmp(name, ".rel" STRUCT_OPS_SEC) && |
3961 | 2.48k | strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) && |
3962 | 2.48k | strcmp(name, ".rel?" STRUCT_OPS_SEC) && |
3963 | 2.48k | strcmp(name, ".rel?" STRUCT_OPS_LINK_SEC) && |
3964 | 2.48k | strcmp(name, ".rel" MAPS_ELF_SEC)) { |
3965 | 994 | pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n", |
3966 | 994 | idx, name, targ_sec_idx, |
3967 | 994 | elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>"); |
3968 | 994 | continue; |
3969 | 994 | } |
3970 | | |
3971 | 1.49k | sec_desc->sec_type = SEC_RELO; |
3972 | 1.49k | sec_desc->shdr = sh; |
3973 | 1.49k | sec_desc->data = data; |
3974 | 13.8k | } else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 || |
3975 | 2.27k | str_has_pfx(name, BSS_SEC "."))) { |
3976 | 1.50k | sec_desc->sec_type = SEC_BSS; |
3977 | 1.50k | sec_desc->shdr = sh; |
3978 | 1.50k | sec_desc->data = data; |
3979 | 12.3k | } else { |
3980 | 12.3k | pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name, |
3981 | 12.3k | (size_t)sh->sh_size); |
3982 | 12.3k | } |
3983 | 36.0k | } |
3984 | | |
3985 | 7.63k | if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) { |
3986 | 1.59k | pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path); |
3987 | 1.59k | return -LIBBPF_ERRNO__FORMAT; |
3988 | 1.59k | } |
3989 | | |
3990 | | /* change BPF program insns to native endianness for introspection */ |
3991 | 5.49k | if (!is_native_endianness(obj)) |
3992 | 220 | bpf_object_bswap_progs(obj); |
3993 | | |
3994 | | /* sort BPF programs by section name and in-section instruction offset |
3995 | | * for faster search |
3996 | | */ |
3997 | 5.49k | if (obj->nr_programs) |
3998 | 556 | qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs); |
3999 | | |
4000 | 5.49k | return bpf_object__init_btf(obj, btf_data, btf_ext_data); |
4001 | 7.09k | } |
4002 | | |
4003 | | static bool sym_is_extern(const Elf64_Sym *sym) |
4004 | 263k | { |
4005 | 263k | int bind = ELF64_ST_BIND(sym->st_info); |
4006 | | /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */ |
4007 | 263k | return sym->st_shndx == SHN_UNDEF && |
4008 | 263k | (bind == STB_GLOBAL || bind == STB_WEAK) && |
4009 | 263k | ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE; |
4010 | 263k | } |
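/* What such externs look like in BPF C (illustrative declarations, using
 * the __kconfig/__ksym macros from <bpf/bpf_helpers.h>):
 *
 *     extern int CONFIG_HZ __kconfig;                  // .kconfig extern
 *     extern void bpf_rcu_read_lock(void) __ksym;      // .ksyms extern
 *
 * Both compile to GLOBAL (or WEAK) NOTYPE symbols in section SHN_UNDEF,
 * which is exactly the triple this predicate tests.
 */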
4011 | | |
4012 | | static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx) |
4013 | 1.02k | { |
4014 | 1.02k | int bind = ELF64_ST_BIND(sym->st_info); |
4015 | 1.02k | int type = ELF64_ST_TYPE(sym->st_info); |
4016 | | |
4017 | | /* in .text section */ |
4018 | 1.02k | if (sym->st_shndx != text_shndx) |
4019 | 434 | return false; |
4020 | | |
4021 | | /* local function */ |
4022 | 586 | if (bind == STB_LOCAL && type == STT_SECTION) |
4023 | 363 | return true; |
4024 | | |
4025 | | /* global function */ |
4026 | 223 | return (bind == STB_GLOBAL || bind == STB_WEAK) && type == STT_FUNC; |
4027 | 586 | } |
4028 | | |
4029 | | static int find_extern_btf_id(const struct btf *btf, const char *ext_name) |
4030 | 3.56k | { |
4031 | 3.56k | const struct btf_type *t; |
4032 | 3.56k | const char *tname; |
4033 | 3.56k | int i, n; |
4034 | | |
4035 | 3.56k | if (!btf) |
4036 | 28 | return -ESRCH; |
4037 | | |
4038 | 3.53k | n = btf__type_cnt(btf); |
4039 | 24.5k | for (i = 1; i < n; i++) { |
4040 | 24.4k | t = btf__type_by_id(btf, i); |
4041 | | |
4042 | 24.4k | if (!btf_is_var(t) && !btf_is_func(t)) |
4043 | 17.7k | continue; |
4044 | | |
4045 | 6.74k | tname = btf__name_by_offset(btf, t->name_off); |
4046 | 6.74k | if (strcmp(tname, ext_name)) |
4047 | 3.28k | continue; |
4048 | | |
4049 | 3.46k | if (btf_is_var(t) && |
4050 | 3.46k | btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN) |
4051 | 48 | return -EINVAL; |
4052 | | |
4053 | 3.41k | if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN) |
4054 | 16 | return -EINVAL; |
4055 | | |
4056 | 3.39k | return i; |
4057 | 3.41k | } |
4058 | | |
4059 | 78 | return -ENOENT; |
4060 | 3.53k | } |
4061 | | |
4062 | 3.39k | static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) { |
4063 | 3.39k | const struct btf_var_secinfo *vs; |
4064 | 3.39k | const struct btf_type *t; |
4065 | 3.39k | int i, j, n; |
4066 | | |
4067 | 3.39k | if (!btf) |
4068 | 0 | return -ESRCH; |
4069 | | |
4070 | 3.39k | n = btf__type_cnt(btf); |
4071 | 28.1k | for (i = 1; i < n; i++) { |
4072 | 28.0k | t = btf__type_by_id(btf, i); |
4073 | | |
4074 | 28.0k | if (!btf_is_datasec(t)) |
4075 | 19.4k | continue; |
4076 | | |
4077 | 8.60k | vs = btf_var_secinfos(t); |
4078 | 19.8k | for (j = 0; j < btf_vlen(t); j++, vs++) { |
4079 | 14.6k | if (vs->type == ext_btf_id) |
4080 | 3.37k | return i; |
4081 | 14.6k | } |
4082 | 8.60k | } |
4083 | | |
4084 | 23 | return -ENOENT; |
4085 | 3.39k | } |
4086 | | |
4087 | | static enum kcfg_type find_kcfg_type(const struct btf *btf, int id, |
4088 | | bool *is_signed) |
4089 | 1.14k | { |
4090 | 1.14k | const struct btf_type *t; |
4091 | 1.14k | const char *name; |
4092 | | |
4093 | 1.14k | t = skip_mods_and_typedefs(btf, id, NULL); |
4094 | 1.14k | name = btf__name_by_offset(btf, t->name_off); |
4095 | | |
4096 | 1.14k | if (is_signed) |
4097 | 993 | *is_signed = false; |
4098 | 1.14k | switch (btf_kind(t)) { |
4099 | 739 | case BTF_KIND_INT: { |
4100 | 739 | int enc = btf_int_encoding(t); |
4101 | | |
4102 | 739 | if (enc & BTF_INT_BOOL) |
4103 | 336 | return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN; |
4104 | 403 | if (is_signed) |
4105 | 319 | *is_signed = enc & BTF_INT_SIGNED; |
4106 | 403 | if (t->size == 1) |
4107 | 253 | return KCFG_CHAR; |
4108 | 150 | if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1))) |
4109 | 34 | return KCFG_UNKNOWN; |
4110 | 116 | return KCFG_INT; |
4111 | 150 | } |
4112 | 119 | case BTF_KIND_ENUM: |
4113 | 119 | if (t->size != 4) |
4114 | 34 | return KCFG_UNKNOWN; |
4115 | 85 | if (strcmp(name, "libbpf_tristate")) |
4116 | 75 | return KCFG_UNKNOWN; |
4117 | 10 | return KCFG_TRISTATE; |
4118 | 100 | case BTF_KIND_ENUM64: |
4119 | 100 | if (strcmp(name, "libbpf_tristate")) |
4120 | 90 | return KCFG_UNKNOWN; |
4121 | 10 | return KCFG_TRISTATE; |
4122 | 156 | case BTF_KIND_ARRAY: |
4123 | 156 | if (btf_array(t)->nelems == 0) |
4124 | 0 | return KCFG_UNKNOWN; |
4125 | 156 | if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR) |
4126 | 74 | return KCFG_UNKNOWN; |
4127 | 82 | return KCFG_CHAR_ARR; |
4128 | 35 | default: |
4129 | 35 | return KCFG_UNKNOWN; |
4130 | 1.14k | } |
4131 | 1.14k | } |
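/* The kcfg type mapping, seen from the BPF-C side (config names are
 * illustrative):
 *
 *     extern bool CONFIG_A __kconfig;                  // KCFG_BOOL
 *     extern char CONFIG_B __kconfig;                  // KCFG_CHAR
 *     extern int CONFIG_C __kconfig;                   // KCFG_INT
 *     extern enum libbpf_tristate CONFIG_D __kconfig;  // KCFG_TRISTATE
 *     extern const char CONFIG_E[32] __kconfig;        // KCFG_CHAR_ARR
 *
 * Anything else (odd-sized ints, non-char arrays, structs, ...) is
 * KCFG_UNKNOWN and rejected by the caller.
 */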
4132 | | |
4133 | | static int cmp_externs(const void *_a, const void *_b) |
4134 | 6.26k | { |
4135 | 6.26k | const struct extern_desc *a = _a; |
4136 | 6.26k | const struct extern_desc *b = _b; |
4137 | | |
4138 | 6.26k | if (a->type != b->type) |
4139 | 0 | return a->type < b->type ? -1 : 1; |
4140 | | |
4141 | 6.26k | if (a->type == EXT_KCFG) { |
4142 | | /* descending order by alignment requirements */ |
4143 | 1.12k | if (a->kcfg.align != b->kcfg.align) |
4144 | 0 | return a->kcfg.align > b->kcfg.align ? -1 : 1; |
4145 | | /* ascending order by size, within same alignment class */ |
4146 | 1.12k | if (a->kcfg.sz != b->kcfg.sz) |
4147 | 0 | return a->kcfg.sz < b->kcfg.sz ? -1 : 1; |
4148 | 1.12k | } |
4149 | | |
4150 | | /* resolve ties by name */ |
4151 | 6.26k | return strcmp(a->name, b->name); |
4152 | 6.26k | } |
4153 | | |
4154 | | static int find_int_btf_id(const struct btf *btf) |
4155 | 790 | { |
4156 | 790 | const struct btf_type *t; |
4157 | 790 | int i, n; |
4158 | | |
4159 | 790 | n = btf__type_cnt(btf); |
4160 | 8.19k | for (i = 1; i < n; i++) { |
4161 | 7.46k | t = btf__type_by_id(btf, i); |
4162 | | |
4163 | 7.46k | if (btf_is_int(t) && btf_int_bits(t) == 32) |
4164 | 57 | return i; |
4165 | 7.46k | } |
4166 | | |
4167 | 733 | return 0; |
4168 | 790 | } |
4169 | | |
4170 | | static int add_dummy_ksym_var(struct btf *btf) |
4171 | 4.98k | { |
4172 | 4.98k | int i, int_btf_id, sec_btf_id, dummy_var_btf_id; |
4173 | 4.98k | const struct btf_var_secinfo *vs; |
4174 | 4.98k | const struct btf_type *sec; |
4175 | | |
4176 | 4.98k | if (!btf) |
4177 | 1.63k | return 0; |
4178 | | |
4179 | 3.35k | sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC, |
4180 | 3.35k | BTF_KIND_DATASEC); |
4181 | 3.35k | if (sec_btf_id < 0) |
4182 | 2.72k | return 0; |
4183 | | |
4184 | 625 | sec = btf__type_by_id(btf, sec_btf_id); |
4185 | 625 | vs = btf_var_secinfos(sec); |
4186 | 1.50k | for (i = 0; i < btf_vlen(sec); i++, vs++) { |
4187 | 1.36k | const struct btf_type *vt; |
4188 | | |
4189 | 1.36k | vt = btf__type_by_id(btf, vs->type); |
4190 | 1.36k | if (btf_is_func(vt)) |
4191 | 480 | break; |
4192 | 1.36k | } |
4193 | | |
4194 | | /* No func in ksyms sec. No need to add dummy var. */ |
4195 | 625 | if (i == btf_vlen(sec)) |
4196 | 145 | return 0; |
4197 | | |
4198 | 480 | int_btf_id = find_int_btf_id(btf); |
4199 | 480 | dummy_var_btf_id = btf__add_var(btf, |
4200 | 480 | "dummy_ksym", |
4201 | 480 | BTF_VAR_GLOBAL_ALLOCATED, |
4202 | 480 | int_btf_id); |
4203 | 480 | if (dummy_var_btf_id < 0) |
4204 | 480 | pr_warn("cannot create a dummy_ksym var\n"); |
4205 | | |
4206 | 480 | return dummy_var_btf_id; |
4207 | 625 | } |
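/* The FUNC-in-.ksyms case handled here comes from kfunc externs, e.g.
 * (real kfunc, illustrative declaration):
 *
 *     extern struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
 *
 * A FUNC can't stay in a DATASEC as-is, so bpf_object__collect_externs()
 * below later points the corresponding secinfo slot at the dummy VAR
 * created here.
 */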
4208 | | |
4209 | | static int bpf_object__collect_externs(struct bpf_object *obj) |
4210 | 6.18k | { |
4211 | 6.18k | struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL; |
4212 | 6.18k | const struct btf_type *t; |
4213 | 6.18k | struct extern_desc *ext; |
4214 | 6.18k | int i, n, off, dummy_var_btf_id; |
4215 | 6.18k | const char *ext_name, *sec_name; |
4216 | 6.18k | size_t ext_essent_len; |
4217 | 6.18k | Elf_Scn *scn; |
4218 | 6.18k | Elf64_Shdr *sh; |
4219 | | |
4220 | 6.18k | if (!obj->efile.symbols) |
4221 | 0 | return 0; |
4222 | | |
4223 | 6.18k | scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx); |
4224 | 6.18k | sh = elf_sec_hdr(obj, scn); |
4225 | 6.18k | if (!sh || sh->sh_entsize != sizeof(Elf64_Sym)) |
4226 | 1.20k | return -LIBBPF_ERRNO__FORMAT; |
4227 | | |
4228 | 4.98k | dummy_var_btf_id = add_dummy_ksym_var(obj->btf); |
4229 | 4.98k | if (dummy_var_btf_id < 0) |
4230 | 0 | return dummy_var_btf_id; |
4231 | | |
4232 | 4.98k | n = sh->sh_size / sh->sh_entsize; |
4233 | 4.98k | pr_debug("looking for externs among %d symbols...\n", n); |
4234 | | |
4235 | 266k | for (i = 0; i < n; i++) { |
4236 | 262k | Elf64_Sym *sym = elf_sym_by_idx(obj, i); |
4237 | | |
4238 | 262k | if (!sym) |
4239 | 0 | return -LIBBPF_ERRNO__FORMAT; |
4240 | 262k | if (!sym_is_extern(sym)) |
4241 | 255k | continue; |
4242 | 6.76k | ext_name = elf_sym_str(obj, sym->st_name); |
4243 | 6.76k | if (!ext_name || !ext_name[0]) |
4244 | 3.19k | continue; |
4245 | | |
4246 | 3.56k | ext = obj->externs; |
4247 | 3.56k | ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext)); |
4248 | 3.56k | if (!ext) |
4249 | 0 | return -ENOMEM; |
4250 | 3.56k | obj->externs = ext; |
4251 | 3.56k | ext = &ext[obj->nr_extern]; |
4252 | 3.56k | memset(ext, 0, sizeof(*ext)); |
4253 | 3.56k | obj->nr_extern++; |
4254 | | |
4255 | 3.56k | ext->btf_id = find_extern_btf_id(obj->btf, ext_name); |
4256 | 3.56k | if (ext->btf_id <= 0) { |
4257 | 170 | pr_warn("failed to find BTF for extern '%s': %d\n", |
4258 | 170 | ext_name, ext->btf_id); |
4259 | 170 | return ext->btf_id; |
4260 | 170 | } |
4261 | 3.39k | t = btf__type_by_id(obj->btf, ext->btf_id); |
4262 | 3.39k | ext->name = strdup(btf__name_by_offset(obj->btf, t->name_off)); |
4263 | 3.39k | if (!ext->name) |
4264 | 0 | return -ENOMEM; |
4265 | 3.39k | ext->sym_idx = i; |
4266 | 3.39k | ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK; |
4267 | | |
4268 | 3.39k | ext_essent_len = bpf_core_essential_name_len(ext->name); |
4269 | 3.39k | ext->essent_name = NULL; |
4270 | 3.39k | if (ext_essent_len != strlen(ext->name)) { |
4271 | 365 | ext->essent_name = strndup(ext->name, ext_essent_len); |
4272 | 365 | if (!ext->essent_name) |
4273 | 0 | return -ENOMEM; |
4274 | 365 | } |
4275 | | |
4276 | 3.39k | ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id); |
4277 | 3.39k | if (ext->sec_btf_id <= 0) { |
4278 | 23 | pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n", |
4279 | 23 | ext_name, ext->btf_id, ext->sec_btf_id); |
4280 | 23 | return ext->sec_btf_id; |
4281 | 23 | } |
4282 | 3.37k | sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id); |
4283 | 3.37k | sec_name = btf__name_by_offset(obj->btf, sec->name_off); |
4284 | | |
4285 | 3.37k | if (strcmp(sec_name, KCONFIG_SEC) == 0) { |
4286 | 1.09k | if (btf_is_func(t)) { |
4287 | 1 | pr_warn("extern function %s is unsupported under %s section\n", |
4288 | 1 | ext->name, KCONFIG_SEC); |
4289 | 1 | return -ENOTSUP; |
4290 | 1 | } |
4291 | 1.08k | kcfg_sec = sec; |
4292 | 1.08k | ext->type = EXT_KCFG; |
4293 | 1.08k | ext->kcfg.sz = btf__resolve_size(obj->btf, t->type); |
4294 | 1.08k | if (ext->kcfg.sz <= 0) { |
4295 | 87 | pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n", |
4296 | 87 | ext_name, ext->kcfg.sz); |
4297 | 87 | return ext->kcfg.sz; |
4298 | 87 | } |
4299 | 1.00k | ext->kcfg.align = btf__align_of(obj->btf, t->type); |
4300 | 1.00k | if (ext->kcfg.align <= 0) { |
4301 | 9 | pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n", |
4302 | 9 | ext_name, ext->kcfg.align); |
4303 | 9 | return -EINVAL; |
4304 | 9 | } |
4305 | 993 | ext->kcfg.type = find_kcfg_type(obj->btf, t->type, |
4306 | 993 | &ext->kcfg.is_signed); |
4307 | 993 | if (ext->kcfg.type == KCFG_UNKNOWN) { |
4308 | 285 | pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name); |
4309 | 285 | return -ENOTSUP; |
4310 | 285 | } |
4311 | 2.28k | } else if (strcmp(sec_name, KSYMS_SEC) == 0) { |
4312 | 2.16k | ksym_sec = sec; |
4313 | 2.16k | ext->type = EXT_KSYM; |
4314 | 2.16k | skip_mods_and_typedefs(obj->btf, t->type, |
4315 | 2.16k | &ext->ksym.type_id); |
4316 | 2.16k | } else { |
4317 | 116 | pr_warn("unrecognized extern section '%s'\n", sec_name); |
4318 | 116 | return -ENOTSUP; |
4319 | 116 | } |
4320 | 3.37k | } |
4321 | 4.29k | pr_debug("collected %d externs total\n", obj->nr_extern); |
4322 | | |
4323 | 4.29k | if (!obj->nr_extern) |
4324 | 3.78k | return 0; |
4325 | | |
4326 | | /* sort externs by type, for kcfg ones also by (align, size, name) */ |
4327 | 506 | qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs); |
4328 | | |
4329 | | /* for .ksyms section, we need to turn all externs into allocated |
4330 | | * variables in BTF to pass kernel verification; we do this by |
4331 | | * pretending that each extern is an int-sized variable
4332 | | */ |
4333 | 506 | if (ksym_sec) { |
4334 | | /* find existing 4-byte integer type in BTF to use for fake |
4335 | | * extern variables in DATASEC |
4336 | | */ |
4337 | 310 | int int_btf_id = find_int_btf_id(obj->btf); |
4338 | | /* For an extern function, the dummy_var added
4339 | | * earlier will be used to replace vs->type, and
4340 | | * its name string will be used to fill in
4341 | | * any missing param names.
4342 | | */ |
4343 | 310 | const struct btf_type *dummy_var; |
4344 | | |
4345 | 310 | dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id); |
4346 | 2.45k | for (i = 0; i < obj->nr_extern; i++) { |
4347 | 2.14k | ext = &obj->externs[i]; |
4348 | 2.14k | if (ext->type != EXT_KSYM) |
4349 | 0 | continue; |
4350 | 2.14k | pr_debug("extern (ksym) #%d: symbol %d, name %s\n", |
4351 | 2.14k | i, ext->sym_idx, ext->name); |
4352 | 2.14k | } |
4353 | | |
4354 | 310 | sec = ksym_sec; |
4355 | 310 | n = btf_vlen(sec); |
4356 | 704 | for (i = 0, off = 0; i < n; i++, off += sizeof(int)) { |
4357 | 661 | struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; |
4358 | 661 | struct btf_type *vt; |
4359 | | |
4360 | 661 | vt = (void *)btf__type_by_id(obj->btf, vs->type); |
4361 | 661 | ext_name = btf__name_by_offset(obj->btf, vt->name_off); |
4362 | 661 | ext = find_extern_by_name(obj, ext_name); |
4363 | 661 | if (!ext) { |
4364 | 267 | pr_warn("failed to find extern definition for BTF %s '%s'\n", |
4365 | 267 | btf_kind_str(vt), ext_name); |
4366 | 267 | return -ESRCH; |
4367 | 267 | } |
4368 | 394 | if (btf_is_func(vt)) { |
4369 | 68 | const struct btf_type *func_proto; |
4370 | 68 | struct btf_param *param; |
4371 | 68 | int j; |
4372 | | |
4373 | 68 | func_proto = btf__type_by_id(obj->btf, |
4374 | 68 | vt->type); |
4375 | 68 | param = btf_params(func_proto); |
4376 | | /* Reuse the dummy_var string if the |
4377 | | * func proto does not have a param name.
4378 | | */ |
4379 | 300 | for (j = 0; j < btf_vlen(func_proto); j++) |
4380 | 232 | if (param[j].type && !param[j].name_off) |
4381 | 39 | param[j].name_off = |
4382 | 39 | dummy_var->name_off; |
4383 | 68 | vs->type = dummy_var_btf_id; |
4384 | 68 | vt->info &= ~0xffff; |
4385 | 68 | vt->info |= BTF_FUNC_GLOBAL; |
4386 | 326 | } else { |
4387 | 326 | btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED; |
4388 | 326 | vt->type = int_btf_id; |
4389 | 326 | } |
4390 | 394 | vs->offset = off; |
4391 | 394 | vs->size = sizeof(int); |
4392 | 394 | } |
4393 | 43 | sec->size = off; |
4394 | 43 | } |
4395 | | |
4396 | 239 | if (kcfg_sec) { |
4397 | 196 | sec = kcfg_sec; |
4398 | | /* for kcfg externs calculate their offsets within a .kconfig map */ |
4399 | 196 | off = 0; |
4400 | 904 | for (i = 0; i < obj->nr_extern; i++) { |
4401 | 708 | ext = &obj->externs[i]; |
4402 | 708 | if (ext->type != EXT_KCFG) |
4403 | 0 | continue; |
4404 | | |
4405 | 708 | ext->kcfg.data_off = roundup(off, ext->kcfg.align); |
4406 | 708 | off = ext->kcfg.data_off + ext->kcfg.sz; |
4407 | 708 | pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n", |
4408 | 708 | i, ext->sym_idx, ext->kcfg.data_off, ext->name); |
4409 | 708 | } |
4410 | 196 | sec->size = off; |
4411 | 196 | n = btf_vlen(sec); |
4412 | 475 | for (i = 0; i < n; i++) { |
4413 | 449 | struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; |
4414 | | |
4415 | 449 | t = btf__type_by_id(obj->btf, vs->type); |
4416 | 449 | ext_name = btf__name_by_offset(obj->btf, t->name_off); |
4417 | 449 | ext = find_extern_by_name(obj, ext_name); |
4418 | 449 | if (!ext) { |
4419 | 170 | pr_warn("failed to find extern definition for BTF var '%s'\n", |
4420 | 170 | ext_name); |
4421 | 170 | return -ESRCH; |
4422 | 170 | } |
4423 | 279 | btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; |
4424 | 279 | vs->offset = ext->kcfg.data_off; |
4425 | 279 | } |
4426 | 196 | } |
4427 | 69 | return 0; |
4428 | 239 | } |
4429 | | |
4430 | | static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog) |
4431 | 7.53k | { |
4432 | 7.53k | return prog->sec_idx == obj->efile.text_shndx; |
4433 | 7.53k | } |
4434 | | |
4435 | | struct bpf_program * |
4436 | | bpf_object__find_program_by_name(const struct bpf_object *obj, |
4437 | | const char *name) |
4438 | 0 | { |
4439 | 0 | struct bpf_program *prog; |
4440 | |
4441 | 0 | bpf_object__for_each_program(prog, obj) { |
4442 | 0 | if (prog_is_subprog(obj, prog)) |
4443 | 0 | continue; |
4444 | 0 | if (!strcmp(prog->name, name)) |
4445 | 0 | return prog; |
4446 | 0 | } |
4447 | 0 | return errno = ENOENT, NULL; |
4448 | 0 | } |
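
A minimal usage sketch for the lookup above; "prog.bpf.o" and "handle_exec" are hypothetical names. Note the errno-style error reporting (NULL result with errno = ENOENT) implemented above:

	#include <stdio.h>
	#include <errno.h>
	#include <string.h>
	#include <bpf/libbpf.h>

	int find_prog_example(void)
	{
		struct bpf_object *obj = bpf_object__open("prog.bpf.o");
		struct bpf_program *prog;

		if (!obj)
			return -errno;
		/* subprograms in .text are skipped, as implemented above */
		prog = bpf_object__find_program_by_name(obj, "handle_exec");
		if (!prog)
			fprintf(stderr, "lookup failed: %s\n", strerror(errno));
		bpf_object__close(obj);
		return prog ? 0 : -ENOENT;
	}
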
4449 | | |
4450 | | static bool bpf_object__shndx_is_data(const struct bpf_object *obj, |
4451 | | int shndx) |
4452 | 387 | { |
4453 | 387 | switch (obj->efile.secs[shndx].sec_type) { |
4454 | 90 | case SEC_BSS: |
4455 | 280 | case SEC_DATA: |
4456 | 386 | case SEC_RODATA: |
4457 | 386 | return true; |
4458 | 1 | default: |
4459 | 1 | return false; |
4460 | 387 | } |
4461 | 387 | } |
4462 | | |
4463 | | static bool bpf_object__shndx_is_maps(const struct bpf_object *obj, |
4464 | | int shndx) |
4465 | 15 | { |
4466 | 15 | return shndx == obj->efile.btf_maps_shndx; |
4467 | 15 | } |
4468 | | |
4469 | | static enum libbpf_map_type |
4470 | | bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx) |
4471 | 441 | { |
4472 | 441 | if (shndx == obj->efile.symbols_shndx) |
4473 | 1 | return LIBBPF_MAP_KCONFIG; |
4474 | | |
4475 | 440 | switch (obj->efile.secs[shndx].sec_type) { |
4476 | 90 | case SEC_BSS: |
4477 | 90 | return LIBBPF_MAP_BSS; |
4478 | 190 | case SEC_DATA: |
4479 | 190 | return LIBBPF_MAP_DATA; |
4480 | 106 | case SEC_RODATA: |
4481 | 106 | return LIBBPF_MAP_RODATA; |
4482 | 54 | default: |
4483 | 54 | return LIBBPF_MAP_UNSPEC; |
4484 | 440 | } |
4485 | 440 | } |
4486 | | |
4487 | | static int bpf_program__record_reloc(struct bpf_program *prog, |
4488 | | struct reloc_desc *reloc_desc, |
4489 | | __u32 insn_idx, const char *sym_name, |
4490 | | const Elf64_Sym *sym, const Elf64_Rel *rel) |
4491 | 1.32k | { |
4492 | 1.32k | struct bpf_insn *insn = &prog->insns[insn_idx]; |
4493 | 1.32k | size_t map_idx, nr_maps = prog->obj->nr_maps; |
4494 | 1.32k | struct bpf_object *obj = prog->obj; |
4495 | 1.32k | __u32 shdr_idx = sym->st_shndx; |
4496 | 1.32k | enum libbpf_map_type type; |
4497 | 1.32k | const char *sym_sec_name; |
4498 | 1.32k | struct bpf_map *map; |
4499 | | |
4500 | 1.32k | if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) { |
4501 | 31 | pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n", |
4502 | 31 | prog->name, sym_name, insn_idx, insn->code); |
4503 | 31 | return -LIBBPF_ERRNO__RELOC; |
4504 | 31 | } |
4505 | | |
4506 | 1.29k | if (sym_is_extern(sym)) { |
4507 | 1 | int sym_idx = ELF64_R_SYM(rel->r_info); |
4508 | 1 | int i, n = obj->nr_extern; |
4509 | 1 | struct extern_desc *ext; |
4510 | | |
4511 | 1 | for (i = 0; i < n; i++) { |
4512 | 0 | ext = &obj->externs[i]; |
4513 | 0 | if (ext->sym_idx == sym_idx) |
4514 | 0 | break; |
4515 | 0 | } |
4516 | 1 | if (i >= n) { |
4517 | 1 | pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n", |
4518 | 1 | prog->name, sym_name, sym_idx); |
4519 | 1 | return -LIBBPF_ERRNO__RELOC; |
4520 | 1 | } |
4521 | 0 | pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n", |
4522 | 0 | prog->name, i, ext->name, ext->sym_idx, insn_idx); |
4523 | 0 | if (insn->code == (BPF_JMP | BPF_CALL)) |
4524 | 0 | reloc_desc->type = RELO_EXTERN_CALL; |
4525 | 0 | else |
4526 | 0 | reloc_desc->type = RELO_EXTERN_LD64; |
4527 | 0 | reloc_desc->insn_idx = insn_idx; |
4528 | 0 | reloc_desc->ext_idx = i; |
4529 | 0 | return 0; |
4530 | 1 | } |
4531 | | |
4532 | | /* sub-program call relocation */ |
4533 | 1.28k | if (is_call_insn(insn)) { |
4534 | 262 | if (insn->src_reg != BPF_PSEUDO_CALL) { |
4535 | 7 | pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name); |
4536 | 7 | return -LIBBPF_ERRNO__RELOC; |
4537 | 7 | } |
4538 | | /* text_shndx can be 0, if no default "main" program exists */ |
4539 | 255 | if (!shdr_idx || shdr_idx != obj->efile.text_shndx) { |
4540 | 3 | sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); |
4541 | 3 | pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n", |
4542 | 3 | prog->name, sym_name, sym_sec_name); |
4543 | 3 | return -LIBBPF_ERRNO__RELOC; |
4544 | 3 | } |
4545 | 252 | if (sym->st_value % BPF_INSN_SZ) { |
4546 | 3 | pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n", |
4547 | 3 | prog->name, sym_name, (size_t)sym->st_value); |
4548 | 3 | return -LIBBPF_ERRNO__RELOC; |
4549 | 3 | } |
4550 | 249 | reloc_desc->type = RELO_CALL; |
4551 | 249 | reloc_desc->insn_idx = insn_idx; |
4552 | 249 | reloc_desc->sym_off = sym->st_value; |
4553 | 249 | return 0; |
4554 | 252 | } |
4555 | | |
4556 | 1.02k | if (!shdr_idx || shdr_idx >= SHN_LORESERVE) { |
4557 | 7 | pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n", |
4558 | 7 | prog->name, sym_name, shdr_idx); |
4559 | 7 | return -LIBBPF_ERRNO__RELOC; |
4560 | 7 | } |
4561 | | |
4562 | | /* loading subprog addresses */ |
4563 | 1.02k | if (sym_is_subprog(sym, obj->efile.text_shndx)) { |
4564 | | /* global_func: sym->st_value = offset in the section, insn->imm = 0. |
4565 | | * local_func: sym->st_value = 0, insn->imm = offset in the section. |
4566 | | */ |
4567 | 579 | if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) { |
4568 | 5 | pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n", |
4569 | 5 | prog->name, sym_name, (size_t)sym->st_value, insn->imm); |
4570 | 5 | return -LIBBPF_ERRNO__RELOC; |
4571 | 5 | } |
4572 | | |
4573 | 574 | reloc_desc->type = RELO_SUBPROG_ADDR; |
4574 | 574 | reloc_desc->insn_idx = insn_idx; |
4575 | 574 | reloc_desc->sym_off = sym->st_value; |
4576 | 574 | return 0; |
4577 | 579 | } |
4578 | | |
4579 | 441 | type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx); |
4580 | 441 | sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); |
4581 | | |
4582 | | /* arena data relocation */ |
4583 | 441 | if (shdr_idx == obj->efile.arena_data_shndx) { |
4584 | 39 | reloc_desc->type = RELO_DATA; |
4585 | 39 | reloc_desc->insn_idx = insn_idx; |
4586 | 39 | reloc_desc->map_idx = obj->arena_map - obj->maps; |
4587 | 39 | reloc_desc->sym_off = sym->st_value; |
4588 | 39 | return 0; |
4589 | 39 | } |
4590 | | |
4591 | | /* generic map reference relocation */ |
4592 | 402 | if (type == LIBBPF_MAP_UNSPEC) { |
4593 | 15 | if (!bpf_object__shndx_is_maps(obj, shdr_idx)) { |
4594 | 15 | pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n", |
4595 | 15 | prog->name, sym_name, sym_sec_name); |
4596 | 15 | return -LIBBPF_ERRNO__RELOC; |
4597 | 15 | } |
4598 | 0 | for (map_idx = 0; map_idx < nr_maps; map_idx++) { |
4599 | 0 | map = &obj->maps[map_idx]; |
4600 | 0 | if (map->libbpf_type != type || |
4601 | 0 | map->sec_idx != sym->st_shndx || |
4602 | 0 | map->sec_offset != sym->st_value) |
4603 | 0 | continue; |
4604 | 0 | pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n", |
4605 | 0 | prog->name, map_idx, map->name, map->sec_idx, |
4606 | 0 | map->sec_offset, insn_idx); |
4607 | 0 | break; |
4608 | 0 | } |
4609 | 0 | if (map_idx >= nr_maps) { |
4610 | 0 | pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n", |
4611 | 0 | prog->name, sym_sec_name, (size_t)sym->st_value); |
4612 | 0 | return -LIBBPF_ERRNO__RELOC; |
4613 | 0 | } |
4614 | 0 | reloc_desc->type = RELO_LD64; |
4615 | 0 | reloc_desc->insn_idx = insn_idx; |
4616 | 0 | reloc_desc->map_idx = map_idx; |
4617 | 0 | reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */ |
4618 | 0 | return 0; |
4619 | 0 | } |
4620 | | |
4621 | | /* global data map relocation */ |
4622 | 387 | if (!bpf_object__shndx_is_data(obj, shdr_idx)) { |
4623 | 1 | pr_warn("prog '%s': bad data relo against section '%s'\n", |
4624 | 1 | prog->name, sym_sec_name); |
4625 | 1 | return -LIBBPF_ERRNO__RELOC; |
4626 | 1 | } |
4627 | 537 | for (map_idx = 0; map_idx < nr_maps; map_idx++) { |
4628 | 534 | map = &obj->maps[map_idx]; |
4629 | 534 | if (map->libbpf_type != type || map->sec_idx != sym->st_shndx) |
4630 | 151 | continue; |
4631 | 383 | pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n", |
4632 | 383 | prog->name, map_idx, map->name, map->sec_idx, |
4633 | 383 | map->sec_offset, insn_idx); |
4634 | 383 | break; |
4635 | 534 | } |
4636 | 386 | if (map_idx >= nr_maps) { |
4637 | 3 | pr_warn("prog '%s': data relo failed to find map for section '%s'\n", |
4638 | 3 | prog->name, sym_sec_name); |
4639 | 3 | return -LIBBPF_ERRNO__RELOC; |
4640 | 3 | } |
4641 | | |
4642 | 383 | reloc_desc->type = RELO_DATA; |
4643 | 383 | reloc_desc->insn_idx = insn_idx; |
4644 | 383 | reloc_desc->map_idx = map_idx; |
4645 | 383 | reloc_desc->sym_off = sym->st_value; |
4646 | 383 | return 0; |
4647 | 386 | } |
4648 | | |
4649 | | static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx) |
4650 | 1.77k | { |
4651 | 1.77k | return insn_idx >= prog->sec_insn_off && |
4652 | 1.77k | insn_idx < prog->sec_insn_off + prog->sec_insn_cnt; |
4653 | 1.77k | } |
4654 | | |
4655 | | static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj, |
4656 | | size_t sec_idx, size_t insn_idx) |
4657 | 7.76k | { |
4658 | 7.76k | int l = 0, r = obj->nr_programs - 1, m; |
4659 | 7.76k | struct bpf_program *prog; |
4660 | | |
4661 | 7.76k | if (!obj->nr_programs) |
4662 | 5.34k | return NULL; |
4663 | | |
4664 | 3.77k | while (l < r) { |
4665 | 1.35k | m = l + (r - l + 1) / 2; |
4666 | 1.35k | prog = &obj->programs[m]; |
4667 | | |
4668 | 1.35k | if (prog->sec_idx < sec_idx || |
4669 | 1.35k | (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx)) |
4670 | 621 | l = m; |
4671 | 735 | else |
4672 | 735 | r = m - 1; |
4673 | 1.35k | } |
4674 | | /* matching program could be at index l, but it still might be the |
4675 | | * wrong one, so we need to double check conditions for the last time |
4676 | | */ |
4677 | 2.41k | prog = &obj->programs[l]; |
4678 | 2.41k | if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx)) |
4679 | 1.32k | return prog; |
4680 | 1.09k | return NULL; |
4681 | 2.41k | } |
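
The loop above is a binary search biased toward the rightmost program whose (sec_idx, sec_insn_off) is <= the target; rounding m up is what guarantees progress when l = m is kept. A standalone sketch of the same pattern over a plain sorted int array:

	/* Find the rightmost index with a[i] <= key in a sorted array,
	 * or -1 if none exists; mirrors the search logic above. */
	static int rightmost_le(const int *a, int n, int key)
	{
		int l = 0, r = n - 1, m;

		if (n == 0)
			return -1;
		while (l < r) {
			m = l + (r - l + 1) / 2; /* round up: avoids infinite loop */
			if (a[m] <= key)
				l = m;           /* a[m] is still a valid candidate */
			else
				r = m - 1;
		}
		return a[l] <= key ? l : -1; /* final re-check, as above */
	}
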
4682 | | |
4683 | | static int |
4684 | | bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data) |
4685 | 991 | { |
4686 | 991 | const char *relo_sec_name, *sec_name; |
4687 | 991 | size_t sec_idx = shdr->sh_info, sym_idx; |
4688 | 991 | struct bpf_program *prog; |
4689 | 991 | struct reloc_desc *relos; |
4690 | 991 | int err, i, nrels; |
4691 | 991 | const char *sym_name; |
4692 | 991 | __u32 insn_idx; |
4693 | 991 | Elf_Scn *scn; |
4694 | 991 | Elf_Data *scn_data; |
4695 | 991 | Elf64_Sym *sym; |
4696 | 991 | Elf64_Rel *rel; |
4697 | | |
4698 | 991 | if (sec_idx >= obj->efile.sec_cnt) |
4699 | 0 | return -EINVAL; |
4700 | | |
4701 | 991 | scn = elf_sec_by_idx(obj, sec_idx); |
4702 | 991 | scn_data = elf_sec_data(obj, scn); |
4703 | 991 | if (!scn_data) |
4704 | 5 | return -LIBBPF_ERRNO__FORMAT; |
4705 | | |
4706 | 986 | relo_sec_name = elf_sec_str(obj, shdr->sh_name); |
4707 | 986 | sec_name = elf_sec_name(obj, scn); |
4708 | 986 | if (!relo_sec_name || !sec_name) |
4709 | 11 | return -EINVAL; |
4710 | | |
4711 | 975 | pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n", |
4712 | 975 | relo_sec_name, sec_idx, sec_name); |
4713 | 975 | nrels = shdr->sh_size / shdr->sh_entsize; |
4714 | | |
4715 | 8.13k | for (i = 0; i < nrels; i++) { |
4716 | 8.13k | rel = elf_rel_by_idx(data, i); |
4717 | 8.13k | if (!rel) { |
4718 | 0 | pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i); |
4719 | 0 | return -LIBBPF_ERRNO__FORMAT; |
4720 | 0 | } |
4721 | | |
4722 | 8.13k | sym_idx = ELF64_R_SYM(rel->r_info); |
4723 | 8.13k | sym = elf_sym_by_idx(obj, sym_idx); |
4724 | 8.13k | if (!sym) { |
4725 | 129 | pr_warn("sec '%s': symbol #%zu not found for relo #%d\n", |
4726 | 129 | relo_sec_name, sym_idx, i); |
4727 | 129 | return -LIBBPF_ERRNO__FORMAT; |
4728 | 129 | } |
4729 | | |
4730 | 8.00k | if (sym->st_shndx >= obj->efile.sec_cnt) { |
4731 | 27 | pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n", |
4732 | 27 | relo_sec_name, sym_idx, (size_t)sym->st_shndx, i); |
4733 | 27 | return -LIBBPF_ERRNO__FORMAT; |
4734 | 27 | } |
4735 | | |
4736 | 7.98k | if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) { |
4737 | 219 | pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n", |
4738 | 219 | relo_sec_name, (size_t)rel->r_offset, i); |
4739 | 219 | return -LIBBPF_ERRNO__FORMAT; |
4740 | 219 | } |
4741 | | |
4742 | 7.76k | insn_idx = rel->r_offset / BPF_INSN_SZ; |
4743 | | /* relocations against static functions are recorded as |
4744 | | * relocations against the section that contains a function; |
4745 | | * in such case, symbol will be STT_SECTION and sym.st_name |
4746 | | * will point to empty string (0), so fetch section name |
4747 | | * instead |
4748 | | */ |
4749 | 7.76k | if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0) |
4750 | 1.05k | sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx)); |
4751 | 6.71k | else |
4752 | 6.71k | sym_name = elf_sym_str(obj, sym->st_name); |
4753 | 7.76k | sym_name = sym_name ?: "<?"; |
4754 | | |
4755 | 7.76k | pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n", |
4756 | 5.82k | relo_sec_name, i, insn_idx, sym_name); |
4757 | | |
4758 | 5.82k | prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); |
4759 | 6.44k | if (!prog) { |
4760 | 6.44k | pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n", |
4761 | 6.44k | relo_sec_name, i, sec_name, insn_idx); |
4762 | 6.44k | continue; |
4763 | 6.44k | } |
4764 | | |
4765 | 18.4E | relos = libbpf_reallocarray(prog->reloc_desc, |
4766 | 18.4E | prog->nr_reloc + 1, sizeof(*relos)); |
4767 | 18.4E | if (!relos) |
4768 | 0 | return -ENOMEM; |
4769 | 18.4E | prog->reloc_desc = relos; |
4770 | | |
4771 | | /* adjust insn_idx to local BPF program frame of reference */ |
4772 | 18.4E | insn_idx -= prog->sec_insn_off; |
4773 | 18.4E | err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc], |
4774 | 18.4E | insn_idx, sym_name, sym, rel); |
4775 | 18.4E | if (err) |
4776 | 76 | return err; |
4777 | | |
4778 | 18.4E | prog->nr_reloc++; |
4779 | 18.4E | } |
4780 | 18.4E | return 0; |
4781 | 975 | } |
4782 | | |
4783 | | static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map) |
4784 | 1.94k | { |
4785 | 1.94k | int id; |
4786 | | |
4787 | 1.94k | if (!obj->btf) |
4788 | 1.34k | return -ENOENT; |
4789 | | |
4790 | | /* if it's a BTF-defined map, we don't need to search for type IDs. |
4791 | | * For struct_ops map, it does not need btf_key_type_id and |
4792 | | * btf_value_type_id. |
4793 | | */ |
4794 | 599 | if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map)) |
4795 | 86 | return 0; |
4796 | | |
4797 | | /* |
4798 | | * LLVM annotates global data differently in BTF, that is, |
4799 | | * only as '.data', '.bss' or '.rodata'. |
4800 | | */ |
4801 | 513 | if (!bpf_map__is_internal(map)) |
4802 | 0 | return -ENOENT; |
4803 | | |
4804 | 513 | id = btf__find_by_name(obj->btf, map->real_name); |
4805 | 513 | if (id < 0) |
4806 | 344 | return id; |
4807 | | |
4808 | 169 | map->btf_key_type_id = 0; |
4809 | 169 | map->btf_value_type_id = id; |
4810 | 169 | return 0; |
4811 | 513 | } |
4812 | | |
4813 | | static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info) |
4814 | 0 | { |
4815 | 0 | char file[PATH_MAX], buff[4096]; |
4816 | 0 | FILE *fp; |
4817 | 0 | __u32 val; |
4818 | 0 | int err; |
4819 | |
4820 | 0 | snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd); |
4821 | 0 | memset(info, 0, sizeof(*info)); |
4822 | |
4823 | 0 | fp = fopen(file, "re"); |
4824 | 0 | if (!fp) { |
4825 | 0 | err = -errno; |
4826 | 0 | pr_warn("failed to open %s: %s. No procfs support?\n", file, |
4827 | 0 | errstr(err)); |
4828 | 0 | return err; |
4829 | 0 | } |
4830 | | |
4831 | 0 | while (fgets(buff, sizeof(buff), fp)) { |
4832 | 0 | if (sscanf(buff, "map_type:\t%u", &val) == 1) |
4833 | 0 | info->type = val; |
4834 | 0 | else if (sscanf(buff, "key_size:\t%u", &val) == 1) |
4835 | 0 | info->key_size = val; |
4836 | 0 | else if (sscanf(buff, "value_size:\t%u", &val) == 1) |
4837 | 0 | info->value_size = val; |
4838 | 0 | else if (sscanf(buff, "max_entries:\t%u", &val) == 1) |
4839 | 0 | info->max_entries = val; |
4840 | 0 | else if (sscanf(buff, "map_flags:\t%i", &val) == 1) |
4841 | 0 | info->map_flags = val; |
4842 | 0 | } |
4843 | |
4844 | 0 | fclose(fp); |
4845 | |
4846 | 0 | return 0; |
4847 | 0 | } |
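
For reference, the /proc/<pid>/fdinfo/<fd> content parsed above looks roughly like this for a BPF map FD (values are illustrative):

	pos:	0
	flags:	02000002
	mnt_id:	15
	map_type:	1
	key_size:	4
	value_size:	8
	max_entries:	128
	map_flags:	0x0
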
4848 | | |
4849 | | static bool map_is_created(const struct bpf_map *map) |
4850 | 0 | { |
4851 | 0 | return map->obj->state >= OBJ_PREPARED || map->reused; |
4852 | 0 | } |
4853 | | |
4854 | | bool bpf_map__autocreate(const struct bpf_map *map) |
4855 | 0 | { |
4856 | 0 | return map->autocreate; |
4857 | 0 | } |
4858 | | |
4859 | | int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate) |
4860 | 0 | { |
4861 | 0 | if (map_is_created(map)) |
4862 | 0 | return libbpf_err(-EBUSY); |
4863 | | |
4864 | 0 | map->autocreate = autocreate; |
4865 | 0 | return 0; |
4866 | 0 | } |
4867 | | |
4868 | | int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach) |
4869 | 0 | { |
4870 | 0 | if (!bpf_map__is_struct_ops(map)) |
4871 | 0 | return libbpf_err(-EINVAL); |
4872 | | |
4873 | 0 | map->autoattach = autoattach; |
4874 | 0 | return 0; |
4875 | 0 | } |
4876 | | |
4877 | | bool bpf_map__autoattach(const struct bpf_map *map) |
4878 | 0 | { |
4879 | 0 | return map->autoattach; |
4880 | 0 | } |
4881 | | |
4882 | | int bpf_map__reuse_fd(struct bpf_map *map, int fd) |
4883 | 0 | { |
4884 | 0 | struct bpf_map_info info; |
4885 | 0 | __u32 len = sizeof(info), name_len; |
4886 | 0 | int new_fd, err; |
4887 | 0 | char *new_name; |
4888 | |
4889 | 0 | memset(&info, 0, len); |
4890 | 0 | err = bpf_map_get_info_by_fd(fd, &info, &len); |
4891 | 0 | if (err && errno == EINVAL) |
4892 | 0 | err = bpf_get_map_info_from_fdinfo(fd, &info); |
4893 | 0 | if (err) |
4894 | 0 | return libbpf_err(err); |
4895 | | |
4896 | 0 | name_len = strlen(info.name); |
4897 | 0 | if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0) |
4898 | 0 | new_name = strdup(map->name); |
4899 | 0 | else |
4900 | 0 | new_name = strdup(info.name); |
4901 | |
4902 | 0 | if (!new_name) |
4903 | 0 | return libbpf_err(-errno); |
4904 | | |
4905 | | /* |
4906 | | * Like dup(), but make sure new FD is >= 3 and has O_CLOEXEC set. |
4907 | | * This is similar to what we do in ensure_good_fd(), but without |
4908 | | * closing original FD. |
4909 | | */ |
4910 | 0 | new_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3); |
4911 | 0 | if (new_fd < 0) { |
4912 | 0 | err = -errno; |
4913 | 0 | goto err_free_new_name; |
4914 | 0 | } |
4915 | | |
4916 | 0 | err = reuse_fd(map->fd, new_fd); |
4917 | 0 | if (err) |
4918 | 0 | goto err_free_new_name; |
4919 | | |
4920 | 0 | free(map->name); |
4921 | |
4922 | 0 | map->name = new_name; |
4923 | 0 | map->def.type = info.type; |
4924 | 0 | map->def.key_size = info.key_size; |
4925 | 0 | map->def.value_size = info.value_size; |
4926 | 0 | map->def.max_entries = info.max_entries; |
4927 | 0 | map->def.map_flags = info.map_flags; |
4928 | 0 | map->btf_key_type_id = info.btf_key_type_id; |
4929 | 0 | map->btf_value_type_id = info.btf_value_type_id; |
4930 | 0 | map->reused = true; |
4931 | 0 | map->map_extra = info.map_extra; |
4932 | |
4933 | 0 | return 0; |
4934 | | |
4935 | 0 | err_free_new_name: |
4936 | 0 | free(new_name); |
4937 | 0 | return libbpf_err(err); |
4938 | 0 | } |
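
A usage sketch for FD reuse against a map pinned in bpffs; the pin path is hypothetical. This is the manual version of what bpf_object__reuse_map() below does automatically when map->pin_path is set:

	#include <errno.h>
	#include <unistd.h>
	#include <bpf/bpf.h>
	#include <bpf/libbpf.h>

	int reuse_pinned(struct bpf_map *map)
	{
		int pin_fd = bpf_obj_get("/sys/fs/bpf/my_map"); /* hypothetical path */
		int err;

		if (pin_fd < 0)
			return -errno;
		err = bpf_map__reuse_fd(map, pin_fd); /* dups the FD internally */
		close(pin_fd); /* our copy is no longer needed */
		return err;
	}
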
4939 | | |
4940 | | __u32 bpf_map__max_entries(const struct bpf_map *map) |
4941 | 0 | { |
4942 | 0 | return map->def.max_entries; |
4943 | 0 | } |
4944 | | |
4945 | | struct bpf_map *bpf_map__inner_map(struct bpf_map *map) |
4946 | 0 | { |
4947 | 0 | if (!bpf_map_type__is_map_in_map(map->def.type)) |
4948 | 0 | return errno = EINVAL, NULL; |
4949 | | |
4950 | 0 | return map->inner_map; |
4951 | 0 | } |
4952 | | |
4953 | | int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) |
4954 | 0 | { |
4955 | 0 | if (map_is_created(map)) |
4956 | 0 | return libbpf_err(-EBUSY); |
4957 | | |
4958 | 0 | map->def.max_entries = max_entries; |
4959 | | |
4960 | | /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */ |
4961 | 0 | if (map_is_ringbuf(map)) |
4962 | 0 | map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries); |
4963 | |
4964 | 0 | return 0; |
4965 | 0 | } |
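
A pre-load sizing sketch; "events" is a hypothetical ring buffer map name. Per the comment above, the requested size is rounded up to a multiple of the page size:

	#include <errno.h>
	#include <bpf/libbpf.h>

	int size_ringbuf(struct bpf_object *obj)
	{
		struct bpf_map *rb = bpf_object__find_map_by_name(obj, "events");

		if (!rb)
			return -ENOENT;
		/* must be called before load; returns -EBUSY afterwards */
		return bpf_map__set_max_entries(rb, 256 * 1024);
	}
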
4966 | | |
4967 | | static int bpf_object_prepare_token(struct bpf_object *obj) |
4968 | 0 | { |
4969 | 0 | const char *bpffs_path; |
4970 | 0 | int bpffs_fd = -1, token_fd, err; |
4971 | 0 | bool mandatory; |
4972 | 0 | enum libbpf_print_level level; |
4973 | | |
4974 | | /* token is explicitly prevented */ |
4975 | 0 | if (obj->token_path && obj->token_path[0] == '\0') { |
4976 | 0 | pr_debug("object '%s': token is prevented, skipping...\n", obj->name); |
4977 | 0 | return 0; |
4978 | 0 | } |
4979 | | |
4980 | 0 | mandatory = obj->token_path != NULL; |
4981 | 0 | level = mandatory ? LIBBPF_WARN : LIBBPF_DEBUG; |
4982 | |
4983 | 0 | bpffs_path = obj->token_path ?: BPF_FS_DEFAULT_PATH; |
4984 | 0 | bpffs_fd = open(bpffs_path, O_DIRECTORY); |
4985 | 0 | if (bpffs_fd < 0) { |
4986 | 0 | err = -errno; |
4987 | 0 | __pr(level, "object '%s': failed (%s) to open BPF FS mount at '%s'%s\n", |
4988 | 0 | obj->name, errstr(err), bpffs_path, |
4989 | 0 | mandatory ? "" : ", skipping optional step..."); |
4990 | 0 | return mandatory ? err : 0; |
4991 | 0 | } |
4992 | | |
4993 | 0 | token_fd = bpf_token_create(bpffs_fd, 0); |
4994 | 0 | close(bpffs_fd); |
4995 | 0 | if (token_fd < 0) { |
4996 | 0 | if (!mandatory && token_fd == -ENOENT) { |
4997 | 0 | pr_debug("object '%s': BPF FS at '%s' doesn't have BPF token delegation set up, skipping...\n", |
4998 | 0 | obj->name, bpffs_path); |
4999 | 0 | return 0; |
5000 | 0 | } |
5001 | 0 | __pr(level, "object '%s': failed (%d) to create BPF token from '%s'%s\n", |
5002 | 0 | obj->name, token_fd, bpffs_path, |
5003 | 0 | mandatory ? "" : ", skipping optional step..."); |
5004 | 0 | return mandatory ? token_fd : 0; |
5005 | 0 | } |
5006 | | |
5007 | 0 | obj->feat_cache = calloc(1, sizeof(*obj->feat_cache)); |
5008 | 0 | if (!obj->feat_cache) { |
5009 | 0 | close(token_fd); |
5010 | 0 | return -ENOMEM; |
5011 | 0 | } |
5012 | | |
5013 | 0 | obj->token_fd = token_fd; |
5014 | 0 | obj->feat_cache->token_fd = token_fd; |
5015 | |
5016 | 0 | return 0; |
5017 | 0 | } |
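
The token path consulted above is normally supplied at open time. A sketch, inside a function, assuming a libbpf version with BPF token support (the bpf_token_path open option):

	LIBBPF_OPTS(bpf_object_open_opts, opts,
		/* a non-NULL path makes the token mandatory, per the logic
		 * above; an empty string "" explicitly disables the token */
		.bpf_token_path = "/sys/fs/bpf",
	);
	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", &opts);
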
5018 | | |
5019 | | static int |
5020 | | bpf_object__probe_loading(struct bpf_object *obj) |
5021 | 0 | { |
5022 | 0 | struct bpf_insn insns[] = { |
5023 | 0 | BPF_MOV64_IMM(BPF_REG_0, 0), |
5024 | 0 | BPF_EXIT_INSN(), |
5025 | 0 | }; |
5026 | 0 | int ret, insn_cnt = ARRAY_SIZE(insns); |
5027 | 0 | LIBBPF_OPTS(bpf_prog_load_opts, opts, |
5028 | 0 | .token_fd = obj->token_fd, |
5029 | 0 | .prog_flags = obj->token_fd ? BPF_F_TOKEN_FD : 0, |
5030 | 0 | ); |
5031 | |
5032 | 0 | if (obj->gen_loader) |
5033 | 0 | return 0; |
5034 | | |
5035 | 0 | ret = bump_rlimit_memlock(); |
5036 | 0 | if (ret) |
5037 | 0 | pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %s), you might need to do it explicitly!\n", |
5038 | 0 | errstr(ret)); |
5039 | | |
5040 | | /* make sure basic loading works */ |
5041 | 0 | ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts); |
5042 | 0 | if (ret < 0) |
5043 | 0 | ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts); |
5044 | 0 | if (ret < 0) { |
5045 | 0 | ret = errno; |
5046 | 0 | pr_warn("Error in %s(): %s. Couldn't load trivial BPF program. Make sure your kernel supports BPF (CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is set to big enough value.\n", |
5047 | 0 | __func__, errstr(ret)); |
5048 | 0 | return -ret; |
5049 | 0 | } |
5050 | 0 | close(ret); |
5051 | |
5052 | 0 | return 0; |
5053 | 0 | } |
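
A standalone equivalent of the probe above, built only on the public API:

	#include <unistd.h>
	#include <linux/filter.h>
	#include <bpf/bpf.h>

	int can_load_bpf(void)
	{
		struct bpf_insn insns[] = {
			BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */
			BPF_EXIT_INSN(),             /* return r0 */
		};
		int fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
				       insns, 2, NULL);

		if (fd < 0)
			return 0; /* no BPF support or insufficient privileges */
		close(fd);
		return 1;
	}
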
5054 | | |
5055 | | bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id) |
5056 | 0 | { |
5057 | 0 | if (obj->gen_loader) |
5058 | | /* To generate a loader program, assume the latest kernel |
5059 | | * to avoid doing extra prog_load, map_create syscalls. |
5060 | | */ |
5061 | 0 | return true; |
5062 | | |
5063 | 0 | if (obj->token_fd) |
5064 | 0 | return feat_supported(obj->feat_cache, feat_id); |
5065 | | |
5066 | 0 | return feat_supported(NULL, feat_id); |
5067 | 0 | } |
5068 | | |
5069 | | static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) |
5070 | 0 | { |
5071 | 0 | struct bpf_map_info map_info; |
5072 | 0 | __u32 map_info_len = sizeof(map_info); |
5073 | 0 | int err; |
5074 | |
5075 | 0 | memset(&map_info, 0, map_info_len); |
5076 | 0 | err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len); |
5077 | 0 | if (err && errno == EINVAL) |
5078 | 0 | err = bpf_get_map_info_from_fdinfo(map_fd, &map_info); |
5079 | 0 | if (err) { |
5080 | 0 | pr_warn("failed to get map info for map FD %d: %s\n", map_fd, |
5081 | 0 | errstr(err)); |
5082 | 0 | return false; |
5083 | 0 | } |
5084 | | |
5085 | 0 | return (map_info.type == map->def.type && |
5086 | 0 | map_info.key_size == map->def.key_size && |
5087 | 0 | map_info.value_size == map->def.value_size && |
5088 | 0 | map_info.max_entries == map->def.max_entries && |
5089 | 0 | map_info.map_flags == map->def.map_flags && |
5090 | 0 | map_info.map_extra == map->map_extra); |
5091 | 0 | } |
5092 | | |
5093 | | static int |
5094 | | bpf_object__reuse_map(struct bpf_map *map) |
5095 | 0 | { |
5096 | 0 | int err, pin_fd; |
5097 | |
5098 | 0 | pin_fd = bpf_obj_get(map->pin_path); |
5099 | 0 | if (pin_fd < 0) { |
5100 | 0 | err = -errno; |
5101 | 0 | if (err == -ENOENT) { |
5102 | 0 | pr_debug("found no pinned map to reuse at '%s'\n", |
5103 | 0 | map->pin_path); |
5104 | 0 | return 0; |
5105 | 0 | } |
5106 | | |
5107 | 0 | pr_warn("couldn't retrieve pinned map '%s': %s\n", |
5108 | 0 | map->pin_path, errstr(err)); |
5109 | 0 | return err; |
5110 | 0 | } |
5111 | | |
5112 | 0 | if (!map_is_reuse_compat(map, pin_fd)) { |
5113 | 0 | pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n", |
5114 | 0 | map->pin_path); |
5115 | 0 | close(pin_fd); |
5116 | 0 | return -EINVAL; |
5117 | 0 | } |
5118 | | |
5119 | 0 | err = bpf_map__reuse_fd(map, pin_fd); |
5120 | 0 | close(pin_fd); |
5121 | 0 | if (err) |
5122 | 0 | return err; |
5123 | | |
5124 | 0 | map->pinned = true; |
5125 | 0 | pr_debug("reused pinned map at '%s'\n", map->pin_path); |
5126 | |
5127 | 0 | return 0; |
5128 | 0 | } |
5129 | | |
5130 | | static int |
5131 | | bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) |
5132 | 0 | { |
5133 | 0 | enum libbpf_map_type map_type = map->libbpf_type; |
5134 | 0 | int err, zero = 0; |
5135 | 0 | size_t mmap_sz; |
5136 | |
5137 | 0 | if (obj->gen_loader) { |
5138 | 0 | bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps, |
5139 | 0 | map->mmaped, map->def.value_size); |
5140 | 0 | if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) |
5141 | 0 | bpf_gen__map_freeze(obj->gen_loader, map - obj->maps); |
5142 | 0 | return 0; |
5143 | 0 | } |
5144 | | |
5145 | 0 | err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0); |
5146 | 0 | if (err) { |
5147 | 0 | err = -errno; |
5148 | 0 | pr_warn("map '%s': failed to set initial contents: %s\n", |
5149 | 0 | bpf_map__name(map), errstr(err)); |
5150 | 0 | return err; |
5151 | 0 | } |
5152 | | |
5153 | | /* Freeze .rodata and .kconfig map as read-only from syscall side. */ |
5154 | 0 | if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) { |
5155 | 0 | err = bpf_map_freeze(map->fd); |
5156 | 0 | if (err) { |
5157 | 0 | err = -errno; |
5158 | 0 | pr_warn("map '%s': failed to freeze as read-only: %s\n", |
5159 | 0 | bpf_map__name(map), errstr(err)); |
5160 | 0 | return err; |
5161 | 0 | } |
5162 | 0 | } |
5163 | | |
5164 | | /* Remap anonymous mmap()-ed "map initialization image" as |
5165 | | * a BPF map-backed mmap()-ed memory, but preserving the same |
5166 | | * memory address. This will cause kernel to change process' |
5167 | | * page table to point to a different piece of kernel memory, |
5168 | | * but from the userspace point of view the memory address (and its |
5169 | | * contents, being identical at this point) will stay the |
5170 | | * same. This mapping will be released by bpf_object__close() |
5171 | | * as per normal clean up procedure. |
5172 | | */ |
5173 | 0 | mmap_sz = bpf_map_mmap_sz(map); |
5174 | 0 | if (map->def.map_flags & BPF_F_MMAPABLE) { |
5175 | 0 | void *mmaped; |
5176 | 0 | int prot; |
5177 | |
5178 | 0 | if (map->def.map_flags & BPF_F_RDONLY_PROG) |
5179 | 0 | prot = PROT_READ; |
5180 | 0 | else |
5181 | 0 | prot = PROT_READ | PROT_WRITE; |
5182 | 0 | mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map->fd, 0); |
5183 | 0 | if (mmaped == MAP_FAILED) { |
5184 | 0 | err = -errno; |
5185 | 0 | pr_warn("map '%s': failed to re-mmap() contents: %s\n", |
5186 | 0 | bpf_map__name(map), errstr(err)); |
5187 | 0 | return err; |
5188 | 0 | } |
5189 | 0 | map->mmaped = mmaped; |
5190 | 0 | } else if (map->mmaped) { |
5191 | 0 | munmap(map->mmaped, mmap_sz); |
5192 | 0 | map->mmaped = NULL; |
5193 | 0 | } |
5194 | | |
5195 | 0 | return 0; |
5196 | 0 | } |
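
User code can rely on the stable mapping described above; bpf_map__initial_value() exposes the mmap()-ed region. A sketch (treating the start of .data as a single counter is a hypothetical layout):

	#include <errno.h>
	#include <bpf/libbpf.h>

	int reset_counter(struct bpf_map *data_map)
	{
		size_t sz;
		long *counter = bpf_map__initial_value(data_map, &sz);

		if (!counter || sz < sizeof(*counter))
			return -EINVAL;
		*counter = 0; /* visible to BPF programs through the shared mapping */
		return 0;
	}
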
5197 | | |
5198 | | static void bpf_map__destroy(struct bpf_map *map); |
5199 | | |
5200 | | static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner) |
5201 | 0 | { |
5202 | 0 | LIBBPF_OPTS(bpf_map_create_opts, create_attr); |
5203 | 0 | struct bpf_map_def *def = &map->def; |
5204 | 0 | const char *map_name = NULL; |
5205 | 0 | int err = 0, map_fd; |
5206 | |
5207 | 0 | if (kernel_supports(obj, FEAT_PROG_NAME)) |
5208 | 0 | map_name = map->name; |
5209 | 0 | create_attr.map_ifindex = map->map_ifindex; |
5210 | 0 | create_attr.map_flags = def->map_flags; |
5211 | 0 | create_attr.numa_node = map->numa_node; |
5212 | 0 | create_attr.map_extra = map->map_extra; |
5213 | 0 | create_attr.token_fd = obj->token_fd; |
5214 | 0 | if (obj->token_fd) |
5215 | 0 | create_attr.map_flags |= BPF_F_TOKEN_FD; |
5216 | |
5217 | 0 | if (bpf_map__is_struct_ops(map)) { |
5218 | 0 | create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; |
5219 | 0 | if (map->mod_btf_fd >= 0) { |
5220 | 0 | create_attr.value_type_btf_obj_fd = map->mod_btf_fd; |
5221 | 0 | create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD; |
5222 | 0 | } |
5223 | 0 | } |
5224 | |
5225 | 0 | if (obj->btf && btf__fd(obj->btf) >= 0) { |
5226 | 0 | create_attr.btf_fd = btf__fd(obj->btf); |
5227 | 0 | create_attr.btf_key_type_id = map->btf_key_type_id; |
5228 | 0 | create_attr.btf_value_type_id = map->btf_value_type_id; |
5229 | 0 | } |
5230 | |
5231 | 0 | if (bpf_map_type__is_map_in_map(def->type)) { |
5232 | 0 | if (map->inner_map) { |
5233 | 0 | err = map_set_def_max_entries(map->inner_map); |
5234 | 0 | if (err) |
5235 | 0 | return err; |
5236 | 0 | err = bpf_object__create_map(obj, map->inner_map, true); |
5237 | 0 | if (err) { |
5238 | 0 | pr_warn("map '%s': failed to create inner map: %s\n", |
5239 | 0 | map->name, errstr(err)); |
5240 | 0 | return err; |
5241 | 0 | } |
5242 | 0 | map->inner_map_fd = map->inner_map->fd; |
5243 | 0 | } |
5244 | 0 | if (map->inner_map_fd >= 0) |
5245 | 0 | create_attr.inner_map_fd = map->inner_map_fd; |
5246 | 0 | } |
5247 | | |
5248 | 0 | switch (def->type) { |
5249 | 0 | case BPF_MAP_TYPE_PERF_EVENT_ARRAY: |
5250 | 0 | case BPF_MAP_TYPE_CGROUP_ARRAY: |
5251 | 0 | case BPF_MAP_TYPE_STACK_TRACE: |
5252 | 0 | case BPF_MAP_TYPE_ARRAY_OF_MAPS: |
5253 | 0 | case BPF_MAP_TYPE_HASH_OF_MAPS: |
5254 | 0 | case BPF_MAP_TYPE_DEVMAP: |
5255 | 0 | case BPF_MAP_TYPE_DEVMAP_HASH: |
5256 | 0 | case BPF_MAP_TYPE_CPUMAP: |
5257 | 0 | case BPF_MAP_TYPE_XSKMAP: |
5258 | 0 | case BPF_MAP_TYPE_SOCKMAP: |
5259 | 0 | case BPF_MAP_TYPE_SOCKHASH: |
5260 | 0 | case BPF_MAP_TYPE_QUEUE: |
5261 | 0 | case BPF_MAP_TYPE_STACK: |
5262 | 0 | case BPF_MAP_TYPE_ARENA: |
5263 | 0 | create_attr.btf_fd = 0; |
5264 | 0 | create_attr.btf_key_type_id = 0; |
5265 | 0 | create_attr.btf_value_type_id = 0; |
5266 | 0 | map->btf_key_type_id = 0; |
5267 | 0 | map->btf_value_type_id = 0; |
5268 | 0 | break; |
5269 | 0 | case BPF_MAP_TYPE_STRUCT_OPS: |
5270 | 0 | create_attr.btf_value_type_id = 0; |
5271 | 0 | break; |
5272 | 0 | default: |
5273 | 0 | break; |
5274 | 0 | } |
5275 | | |
5276 | 0 | if (obj->gen_loader) { |
5277 | 0 | bpf_gen__map_create(obj->gen_loader, def->type, map_name, |
5278 | 0 | def->key_size, def->value_size, def->max_entries, |
5279 | 0 | &create_attr, is_inner ? -1 : map - obj->maps); |
5280 | | /* We keep pretending we have a valid FD to pass various fd >= 0 |
5281 | | * checks by just keeping original placeholder FDs in place. |
5282 | | * See bpf_object__add_map() comment. |
5283 | | * This placeholder fd will not be used with any syscall and |
5284 | | * will be reset to -1 eventually. |
5285 | | */ |
5286 | 0 | map_fd = map->fd; |
5287 | 0 | } else { |
5288 | 0 | map_fd = bpf_map_create(def->type, map_name, |
5289 | 0 | def->key_size, def->value_size, |
5290 | 0 | def->max_entries, &create_attr); |
5291 | 0 | } |
5292 | 0 | if (map_fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) { |
5293 | 0 | err = -errno; |
5294 | 0 | pr_warn("Error in bpf_create_map_xattr(%s): %s. Retrying without BTF.\n", |
5295 | 0 | map->name, errstr(err)); |
5296 | 0 | create_attr.btf_fd = 0; |
5297 | 0 | create_attr.btf_key_type_id = 0; |
5298 | 0 | create_attr.btf_value_type_id = 0; |
5299 | 0 | map->btf_key_type_id = 0; |
5300 | 0 | map->btf_value_type_id = 0; |
5301 | 0 | map_fd = bpf_map_create(def->type, map_name, |
5302 | 0 | def->key_size, def->value_size, |
5303 | 0 | def->max_entries, &create_attr); |
5304 | 0 | } |
5305 | |
5306 | 0 | if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) { |
5307 | 0 | if (obj->gen_loader) |
5308 | 0 | map->inner_map->fd = -1; |
5309 | 0 | bpf_map__destroy(map->inner_map); |
5310 | 0 | zfree(&map->inner_map); |
5311 | 0 | } |
5312 | |
5313 | 0 | if (map_fd < 0) |
5314 | 0 | return map_fd; |
5315 | | |
5316 | | /* obj->gen_loader case, prevent reuse_fd() from closing map_fd */ |
5317 | 0 | if (map->fd == map_fd) |
5318 | 0 | return 0; |
5319 | | |
5320 | | /* Keep placeholder FD value but now point it to the BPF map object. |
5321 | | * This way everything that relied on this map's FD (e.g., relocated |
5322 | | * ldimm64 instructions) will stay valid and won't need adjustments. |
5323 | | * map->fd stays valid but now points to what map_fd points to. |
5324 | | */ |
5325 | 0 | return reuse_fd(map->fd, map_fd); |
5326 | 0 | } |
5327 | | |
5328 | | static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map) |
5329 | 0 | { |
5330 | 0 | const struct bpf_map *targ_map; |
5331 | 0 | unsigned int i; |
5332 | 0 | int fd, err = 0; |
5333 | |
5334 | 0 | for (i = 0; i < map->init_slots_sz; i++) { |
5335 | 0 | if (!map->init_slots[i]) |
5336 | 0 | continue; |
5337 | | |
5338 | 0 | targ_map = map->init_slots[i]; |
5339 | 0 | fd = targ_map->fd; |
5340 | |
5341 | 0 | if (obj->gen_loader) { |
5342 | 0 | bpf_gen__populate_outer_map(obj->gen_loader, |
5343 | 0 | map - obj->maps, i, |
5344 | 0 | targ_map - obj->maps); |
5345 | 0 | } else { |
5346 | 0 | err = bpf_map_update_elem(map->fd, &i, &fd, 0); |
5347 | 0 | } |
5348 | 0 | if (err) { |
5349 | 0 | err = -errno; |
5350 | 0 | pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %s\n", |
5351 | 0 | map->name, i, targ_map->name, fd, errstr(err)); |
5352 | 0 | return err; |
5353 | 0 | } |
5354 | 0 | pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n", |
5355 | 0 | map->name, i, targ_map->name, fd); |
5356 | 0 | } |
5357 | | |
5358 | 0 | zfree(&map->init_slots); |
5359 | 0 | map->init_slots_sz = 0; |
5360 | |
5361 | 0 | return 0; |
5362 | 0 | } |
5363 | | |
5364 | | static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map) |
5365 | 0 | { |
5366 | 0 | const struct bpf_program *targ_prog; |
5367 | 0 | unsigned int i; |
5368 | 0 | int fd, err; |
5369 | |
5370 | 0 | if (obj->gen_loader) |
5371 | 0 | return -ENOTSUP; |
5372 | | |
5373 | 0 | for (i = 0; i < map->init_slots_sz; i++) { |
5374 | 0 | if (!map->init_slots[i]) |
5375 | 0 | continue; |
5376 | | |
5377 | 0 | targ_prog = map->init_slots[i]; |
5378 | 0 | fd = bpf_program__fd(targ_prog); |
5379 | |
5380 | 0 | err = bpf_map_update_elem(map->fd, &i, &fd, 0); |
5381 | 0 | if (err) { |
5382 | 0 | err = -errno; |
5383 | 0 | pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %s\n", |
5384 | 0 | map->name, i, targ_prog->name, fd, errstr(err)); |
5385 | 0 | return err; |
5386 | 0 | } |
5387 | 0 | pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n", |
5388 | 0 | map->name, i, targ_prog->name, fd); |
5389 | 0 | } |
5390 | | |
5391 | 0 | zfree(&map->init_slots); |
5392 | 0 | map->init_slots_sz = 0; |
5393 | |
5394 | 0 | return 0; |
5395 | 0 | } |
5396 | | |
5397 | | static int bpf_object_init_prog_arrays(struct bpf_object *obj) |
5398 | 0 | { |
5399 | 0 | struct bpf_map *map; |
5400 | 0 | int i, err; |
5401 | |
5402 | 0 | for (i = 0; i < obj->nr_maps; i++) { |
5403 | 0 | map = &obj->maps[i]; |
5404 | |
5405 | 0 | if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY) |
5406 | 0 | continue; |
5407 | | |
5408 | 0 | err = init_prog_array_slots(obj, map); |
5409 | 0 | if (err < 0) |
5410 | 0 | return err; |
5411 | 0 | } |
5412 | 0 | return 0; |
5413 | 0 | } |
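
The init_slots consumed above come from declarative initialization on the BPF side of the object. A sketch of such a declaration ("do_tail_call" is a hypothetical BPF program defined elsewhere in the same object):

	/* BPF-side C, using bpf_helpers.h macros: */
	struct {
		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
		__uint(max_entries, 4);
		__uint(key_size, sizeof(__u32));
		__array(values, int (void *));
	} jmp_table SEC(".maps") = {
		.values = {
			[1] = (void *)&do_tail_call,
		},
	};
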
5414 | | |
5415 | | static int map_set_def_max_entries(struct bpf_map *map) |
5416 | 0 | { |
5417 | 0 | if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) { |
5418 | 0 | int nr_cpus; |
5419 | |
5420 | 0 | nr_cpus = libbpf_num_possible_cpus(); |
5421 | 0 | if (nr_cpus < 0) { |
5422 | 0 | pr_warn("map '%s': failed to determine number of system CPUs: %d\n", |
5423 | 0 | map->name, nr_cpus); |
5424 | 0 | return nr_cpus; |
5425 | 0 | } |
5426 | 0 | pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus); |
5427 | 0 | map->def.max_entries = nr_cpus; |
5428 | 0 | } |
5429 | | |
5430 | 0 | return 0; |
5431 | 0 | } |
5432 | | |
5433 | | static int |
5434 | | bpf_object__create_maps(struct bpf_object *obj) |
5435 | 0 | { |
5436 | 0 | struct bpf_map *map; |
5437 | 0 | unsigned int i, j; |
5438 | 0 | int err; |
5439 | 0 | bool retried; |
5440 | |
5441 | 0 | for (i = 0; i < obj->nr_maps; i++) { |
5442 | 0 | map = &obj->maps[i]; |
5443 | | |
5444 | | /* To support old kernels, we skip creating global data maps |
5445 | | * (.rodata, .data, .kconfig, etc); later on, during program |
5446 | | * loading, if we detect that at least one of the to-be-loaded |
5447 | | * programs is referencing any global data map, we'll error |
5448 | | * out with program name and relocation index logged. |
5449 | | * This approach accommodates Clang emitting unnecessary |
5450 | | * .rodata.str1.1 sections for string literals, and also |
5451 | | * allows CO-RE applications to use global variables in |
5452 | | * some BPF programs but not others. |
5453 | | * If those global variable-using programs are not loaded at |
5454 | | * runtime due to bpf_program__set_autoload(prog, false), |
5455 | | * bpf_object loading will succeed just fine even on old |
5456 | | * kernels. |
5457 | | */ |
5458 | 0 | if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA)) |
5459 | 0 | map->autocreate = false; |
5460 | |
5461 | 0 | if (!map->autocreate) { |
5462 | 0 | pr_debug("map '%s': skipped auto-creating...\n", map->name); |
5463 | 0 | continue; |
5464 | 0 | } |
5465 | | |
5466 | 0 | err = map_set_def_max_entries(map); |
5467 | 0 | if (err) |
5468 | 0 | goto err_out; |
5469 | | |
5470 | 0 | retried = false; |
5471 | 0 | retry: |
5472 | 0 | if (map->pin_path) { |
5473 | 0 | err = bpf_object__reuse_map(map); |
5474 | 0 | if (err) { |
5475 | 0 | pr_warn("map '%s': error reusing pinned map\n", |
5476 | 0 | map->name); |
5477 | 0 | goto err_out; |
5478 | 0 | } |
5479 | 0 | if (retried && map->fd < 0) { |
5480 | 0 | pr_warn("map '%s': cannot find pinned map\n", |
5481 | 0 | map->name); |
5482 | 0 | err = -ENOENT; |
5483 | 0 | goto err_out; |
5484 | 0 | } |
5485 | 0 | } |
5486 | | |
5487 | 0 | if (map->reused) { |
5488 | 0 | pr_debug("map '%s': skipping creation (preset fd=%d)\n", |
5489 | 0 | map->name, map->fd); |
5490 | 0 | } else { |
5491 | 0 | err = bpf_object__create_map(obj, map, false); |
5492 | 0 | if (err) |
5493 | 0 | goto err_out; |
5494 | | |
5495 | 0 | pr_debug("map '%s': created successfully, fd=%d\n", |
5496 | 0 | map->name, map->fd); |
5497 | |
5498 | 0 | if (bpf_map__is_internal(map)) { |
5499 | 0 | err = bpf_object__populate_internal_map(obj, map); |
5500 | 0 | if (err < 0) |
5501 | 0 | goto err_out; |
5502 | 0 | } else if (map->def.type == BPF_MAP_TYPE_ARENA) { |
5503 | 0 | map->mmaped = mmap((void *)(long)map->map_extra, |
5504 | 0 | bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE, |
5505 | 0 | map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED, |
5506 | 0 | map->fd, 0); |
5507 | 0 | if (map->mmaped == MAP_FAILED) { |
5508 | 0 | err = -errno; |
5509 | 0 | map->mmaped = NULL; |
5510 | 0 | pr_warn("map '%s': failed to mmap arena: %s\n", |
5511 | 0 | map->name, errstr(err)); |
5512 | 0 | return err; |
5513 | 0 | } |
5514 | 0 | if (obj->arena_data) { |
5515 | 0 | memcpy(map->mmaped, obj->arena_data, obj->arena_data_sz); |
5516 | 0 | zfree(&obj->arena_data); |
5517 | 0 | } |
5518 | 0 | } |
5519 | 0 | if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) { |
5520 | 0 | err = init_map_in_map_slots(obj, map); |
5521 | 0 | if (err < 0) |
5522 | 0 | goto err_out; |
5523 | 0 | } |
5524 | 0 | } |
5525 | | |
5526 | 0 | if (map->pin_path && !map->pinned) { |
5527 | 0 | err = bpf_map__pin(map, NULL); |
5528 | 0 | if (err) { |
5529 | 0 | if (!retried && err == -EEXIST) { |
5530 | 0 | retried = true; |
5531 | 0 | goto retry; |
5532 | 0 | } |
5533 | 0 | pr_warn("map '%s': failed to auto-pin at '%s': %s\n", |
5534 | 0 | map->name, map->pin_path, errstr(err)); |
5535 | 0 | goto err_out; |
5536 | 0 | } |
5537 | 0 | } |
5538 | 0 | } |
5539 | | |
5540 | 0 | return 0; |
5541 | | |
5542 | 0 | err_out: |
5543 | 0 | pr_warn("map '%s': failed to create: %s\n", map->name, errstr(err)); |
5544 | 0 | pr_perm_msg(err); |
5545 | 0 | for (j = 0; j < i; j++) |
5546 | 0 | zclose(obj->maps[j].fd); |
5547 | 0 | return err; |
5548 | 0 | } |
5549 | | |
5550 | | static bool bpf_core_is_flavor_sep(const char *s) |
5551 | 51.0k | { |
5552 | | /* check X___Y name pattern, where X and Y are not underscores */ |
5553 | 51.0k | return s[0] != '_' && /* X */ |
5554 | 51.0k | s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */ |
5555 | 51.0k | s[4] != '_'; /* Y */ |
5556 | 51.0k | } |
5557 | | |
5558 | | /* Given 'some_struct_name___with_flavor' return the length of a name prefix |
5559 | | * before last triple underscore. Struct name part after last triple |
5560 | | * underscore is ignored by BPF CO-RE relocation during relocation matching. |
5561 | | */ |
5562 | | size_t bpf_core_essential_name_len(const char *name) |
5563 | 3.39k | { |
5564 | 3.39k | size_t n = strlen(name); |
5565 | 3.39k | int i; |
5566 | | |
5567 | 54.1k | for (i = n - 5; i >= 0; i--) { |
5568 | 51.0k | if (bpf_core_is_flavor_sep(name + i)) |
5569 | 365 | return i + 1; |
5570 | 51.0k | } |
5571 | 3.03k | return n; |
5572 | 3.39k | } |
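
Worked examples for the two helpers above:

	/* bpf_core_essential_name_len():
	 *   "task_struct"       -> 11 (no flavor separator)
	 *   "task_struct___old" -> 11 ("___old" flavor suffix ignored)
	 *   "my___weird___name" -> 10 (only the LAST "___" starts the
	 *                              flavor, so "my___weird" remains)
	 */
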
5573 | | |
5574 | | void bpf_core_free_cands(struct bpf_core_cand_list *cands) |
5575 | 0 | { |
5576 | 0 | if (!cands) |
5577 | 0 | return; |
5578 | | |
5579 | 0 | free(cands->cands); |
5580 | 0 | free(cands); |
5581 | 0 | } |
5582 | | |
5583 | | int bpf_core_add_cands(struct bpf_core_cand *local_cand, |
5584 | | size_t local_essent_len, |
5585 | | const struct btf *targ_btf, |
5586 | | const char *targ_btf_name, |
5587 | | int targ_start_id, |
5588 | | struct bpf_core_cand_list *cands) |
5589 | 0 | { |
5590 | 0 | struct bpf_core_cand *new_cands, *cand; |
5591 | 0 | const struct btf_type *t, *local_t; |
5592 | 0 | const char *targ_name, *local_name; |
5593 | 0 | size_t targ_essent_len; |
5594 | 0 | int n, i; |
5595 | |
5596 | 0 | local_t = btf__type_by_id(local_cand->btf, local_cand->id); |
5597 | 0 | local_name = btf__str_by_offset(local_cand->btf, local_t->name_off); |
5598 | |
5599 | 0 | n = btf__type_cnt(targ_btf); |
5600 | 0 | for (i = targ_start_id; i < n; i++) { |
5601 | 0 | t = btf__type_by_id(targ_btf, i); |
5602 | 0 | if (!btf_kind_core_compat(t, local_t)) |
5603 | 0 | continue; |
5604 | | |
5605 | 0 | targ_name = btf__name_by_offset(targ_btf, t->name_off); |
5606 | 0 | if (str_is_empty(targ_name)) |
5607 | 0 | continue; |
5608 | | |
5609 | 0 | targ_essent_len = bpf_core_essential_name_len(targ_name); |
5610 | 0 | if (targ_essent_len != local_essent_len) |
5611 | 0 | continue; |
5612 | | |
5613 | 0 | if (strncmp(local_name, targ_name, local_essent_len) != 0) |
5614 | 0 | continue; |
5615 | | |
5616 | 0 | pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n", |
5617 | 0 | local_cand->id, btf_kind_str(local_t), |
5618 | 0 | local_name, i, btf_kind_str(t), targ_name, |
5619 | 0 | targ_btf_name); |
5620 | 0 | new_cands = libbpf_reallocarray(cands->cands, cands->len + 1, |
5621 | 0 | sizeof(*cands->cands)); |
5622 | 0 | if (!new_cands) |
5623 | 0 | return -ENOMEM; |
5624 | | |
5625 | 0 | cand = &new_cands[cands->len]; |
5626 | 0 | cand->btf = targ_btf; |
5627 | 0 | cand->id = i; |
5628 | |
5629 | 0 | cands->cands = new_cands; |
5630 | 0 | cands->len++; |
5631 | 0 | } |
5632 | 0 | return 0; |
5633 | 0 | } |
5634 | | |
5635 | | static int load_module_btfs(struct bpf_object *obj) |
5636 | 0 | { |
5637 | 0 | struct bpf_btf_info info; |
5638 | 0 | struct module_btf *mod_btf; |
5639 | 0 | struct btf *btf; |
5640 | 0 | char name[64]; |
5641 | 0 | __u32 id = 0, len; |
5642 | 0 | int err, fd; |
5643 | |
5644 | 0 | if (obj->btf_modules_loaded) |
5645 | 0 | return 0; |
5646 | | |
5647 | 0 | if (obj->gen_loader) |
5648 | 0 | return 0; |
5649 | | |
5650 | | /* don't do this again, even if we find no module BTFs */ |
5651 | 0 | obj->btf_modules_loaded = true; |
5652 | | |
5653 | | /* kernel too old to support module BTFs */ |
5654 | 0 | if (!kernel_supports(obj, FEAT_MODULE_BTF)) |
5655 | 0 | return 0; |
5656 | | |
5657 | 0 | while (true) { |
5658 | 0 | err = bpf_btf_get_next_id(id, &id); |
5659 | 0 | if (err && errno == ENOENT) |
5660 | 0 | return 0; |
5661 | 0 | if (err && errno == EPERM) { |
5662 | 0 | pr_debug("skipping module BTFs loading, missing privileges\n"); |
5663 | 0 | return 0; |
5664 | 0 | } |
5665 | 0 | if (err) { |
5666 | 0 | err = -errno; |
5667 | 0 | pr_warn("failed to iterate BTF objects: %s\n", errstr(err)); |
5668 | 0 | return err; |
5669 | 0 | } |
5670 | | |
5671 | 0 | fd = bpf_btf_get_fd_by_id(id); |
5672 | 0 | if (fd < 0) { |
5673 | 0 | if (errno == ENOENT) |
5674 | 0 | continue; /* expected race: BTF was unloaded */ |
5675 | 0 | err = -errno; |
5676 | 0 | pr_warn("failed to get BTF object #%d FD: %s\n", id, errstr(err)); |
5677 | 0 | return err; |
5678 | 0 | } |
5679 | | |
5680 | 0 | len = sizeof(info); |
5681 | 0 | memset(&info, 0, sizeof(info)); |
5682 | 0 | info.name = ptr_to_u64(name); |
5683 | 0 | info.name_len = sizeof(name); |
5684 | |
|
5685 | 0 | err = bpf_btf_get_info_by_fd(fd, &info, &len); |
5686 | 0 | if (err) { |
5687 | 0 | err = -errno; |
5688 | 0 | pr_warn("failed to get BTF object #%d info: %s\n", id, errstr(err)); |
5689 | 0 | goto err_out; |
5690 | 0 | } |
5691 | | |
5692 | | /* ignore non-module BTFs */ |
5693 | 0 | if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) { |
5694 | 0 | close(fd); |
5695 | 0 | continue; |
5696 | 0 | } |
5697 | | |
5698 | 0 | btf = btf_get_from_fd(fd, obj->btf_vmlinux); |
5699 | 0 | err = libbpf_get_error(btf); |
5700 | 0 | if (err) { |
5701 | 0 | pr_warn("failed to load module [%s]'s BTF object #%d: %s\n", |
5702 | 0 | name, id, errstr(err)); |
5703 | 0 | goto err_out; |
5704 | 0 | } |
5705 | | |
5706 | 0 | err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap, |
5707 | 0 | sizeof(*obj->btf_modules), obj->btf_module_cnt + 1); |
5708 | 0 | if (err) |
5709 | 0 | goto err_out; |
5710 | | |
5711 | 0 | mod_btf = &obj->btf_modules[obj->btf_module_cnt++]; |
5712 | |
5713 | 0 | mod_btf->btf = btf; |
5714 | 0 | mod_btf->id = id; |
5715 | 0 | mod_btf->fd = fd; |
5716 | 0 | mod_btf->name = strdup(name); |
5717 | 0 | if (!mod_btf->name) { |
5718 | 0 | err = -ENOMEM; |
5719 | 0 | goto err_out; |
5720 | 0 | } |
5721 | 0 | continue; |
5722 | | |
5723 | 0 | err_out: |
5724 | 0 | close(fd); |
5725 | 0 | return err; |
5726 | 0 | } |
5727 | | |
5728 | 0 | return 0; |
5729 | 0 | } |
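
The same module BTFs are reachable through the public API; a sketch (the module name is hypothetical):

	#include <bpf/btf.h>

	struct btf *load_module_btf_example(void)
	{
		struct btf *vmlinux = btf__load_vmlinux_btf();

		if (!vmlinux)
			return NULL;
		/* split BTF: module types reference vmlinux base types */
		return btf__load_module_btf("nf_conntrack", vmlinux);
	}
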
5730 | | |
5731 | | static struct bpf_core_cand_list * |
5732 | | bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id) |
5733 | 0 | { |
5734 | 0 | struct bpf_core_cand local_cand = {}; |
5735 | 0 | struct bpf_core_cand_list *cands; |
5736 | 0 | const struct btf *main_btf; |
5737 | 0 | const struct btf_type *local_t; |
5738 | 0 | const char *local_name; |
5739 | 0 | size_t local_essent_len; |
5740 | 0 | int err, i; |
5741 | |
5742 | 0 | local_cand.btf = local_btf; |
5743 | 0 | local_cand.id = local_type_id; |
5744 | 0 | local_t = btf__type_by_id(local_btf, local_type_id); |
5745 | 0 | if (!local_t) |
5746 | 0 | return ERR_PTR(-EINVAL); |
5747 | | |
5748 | 0 | local_name = btf__name_by_offset(local_btf, local_t->name_off); |
5749 | 0 | if (str_is_empty(local_name)) |
5750 | 0 | return ERR_PTR(-EINVAL); |
5751 | 0 | local_essent_len = bpf_core_essential_name_len(local_name); |
5752 | |
5753 | 0 | cands = calloc(1, sizeof(*cands)); |
5754 | 0 | if (!cands) |
5755 | 0 | return ERR_PTR(-ENOMEM); |
5756 | | |
5757 | | /* Attempt to find target candidates in vmlinux BTF first */ |
5758 | 0 | main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux; |
5759 | 0 | err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands); |
5760 | 0 | if (err) |
5761 | 0 | goto err_out; |
5762 | | |
5763 | | /* if vmlinux BTF has any candidate, don't go for module BTFs */ |
5764 | 0 | if (cands->len) |
5765 | 0 | return cands; |
5766 | | |
5767 | | /* if vmlinux BTF was overridden, don't attempt to load module BTFs */ |
5768 | 0 | if (obj->btf_vmlinux_override) |
5769 | 0 | return cands; |
5770 | | |
5771 | | /* now look through module BTFs, trying to still find candidates */ |
5772 | 0 | err = load_module_btfs(obj); |
5773 | 0 | if (err) |
5774 | 0 | goto err_out; |
5775 | | |
5776 | 0 | for (i = 0; i < obj->btf_module_cnt; i++) { |
5777 | 0 | err = bpf_core_add_cands(&local_cand, local_essent_len, |
5778 | 0 | obj->btf_modules[i].btf, |
5779 | 0 | obj->btf_modules[i].name, |
5780 | 0 | btf__type_cnt(obj->btf_vmlinux), |
5781 | 0 | cands); |
5782 | 0 | if (err) |
5783 | 0 | goto err_out; |
5784 | 0 | } |
5785 | | |
5786 | 0 | return cands; |
5787 | 0 | err_out: |
5788 | 0 | bpf_core_free_cands(cands); |
5789 | 0 | return ERR_PTR(err); |
5790 | 0 | } |
5791 | | |
5792 | | /* Check local and target types for compatibility. This check is used for |
5793 | | * type-based CO-RE relocations and follows slightly different rules than |
5794 | | * field-based relocations. This function assumes that root types were already |
5795 | | * checked for name match. Beyond that initial root-level name check, names |
5796 | | * are completely ignored. Compatibility rules are as follows: |
5797 | | * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but |
5798 | | * kind should match for local and target types (i.e., STRUCT is not |
5799 | | * compatible with UNION); |
5800 | | * - for ENUMs, the size is ignored; |
5801 | | * - for INT, size and signedness are ignored; |
5802 | | * - for ARRAY, dimensionality is ignored, element types are checked for |
5803 | | * compatibility recursively; |
5804 | | * - CONST/VOLATILE/RESTRICT modifiers are ignored; |
5805 | | * - TYPEDEFs/PTRs are compatible if the types they point to are compatible; |
5806 | | * - FUNC_PROTOs are compatible if they have compatible signature: same |
5807 | | * number of input args and compatible return and argument types. |
5808 | | * These rules are not set in stone and probably will be adjusted as we get |
5809 | | * more experience with using BPF CO-RE relocations. |
5810 | | */ |
5811 | | int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, |
5812 | | const struct btf *targ_btf, __u32 targ_id) |
5813 | 0 | { |
5814 | 0 | return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32); |
5815 | 0 | } |
5816 | | |
5817 | | int bpf_core_types_match(const struct btf *local_btf, __u32 local_id, |
5818 | | const struct btf *targ_btf, __u32 targ_id) |
5819 | 0 | { |
5820 | 0 | return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32); |
5821 | 0 | } |
5822 | | |
5823 | | static size_t bpf_core_hash_fn(const long key, void *ctx) |
5824 | 0 | { |
5825 | 0 | return key; |
5826 | 0 | } |
5827 | | |
5828 | | static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx) |
5829 | 0 | { |
5830 | 0 | return k1 == k2; |
5831 | 0 | } |
5832 | | |
5833 | | static int record_relo_core(struct bpf_program *prog, |
5834 | | const struct bpf_core_relo *core_relo, int insn_idx) |
5835 | 0 | { |
5836 | 0 | struct reloc_desc *relos, *relo; |
5837 | |
5838 | 0 | relos = libbpf_reallocarray(prog->reloc_desc, |
5839 | 0 | prog->nr_reloc + 1, sizeof(*relos)); |
5840 | 0 | if (!relos) |
5841 | 0 | return -ENOMEM; |
5842 | 0 | relo = &relos[prog->nr_reloc]; |
5843 | 0 | relo->type = RELO_CORE; |
5844 | 0 | relo->insn_idx = insn_idx; |
5845 | 0 | relo->core_relo = core_relo; |
5846 | 0 | prog->reloc_desc = relos; |
5847 | 0 | prog->nr_reloc++; |
5848 | 0 | return 0; |
5849 | 0 | } |
5850 | | |
5851 | | static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx) |
5852 | 0 | { |
5853 | 0 | struct reloc_desc *relo; |
5854 | 0 | int i; |
5855 | |
5856 | 0 | for (i = 0; i < prog->nr_reloc; i++) { |
5857 | 0 | relo = &prog->reloc_desc[i]; |
5858 | 0 | if (relo->type != RELO_CORE || relo->insn_idx != insn_idx) |
5859 | 0 | continue; |
5860 | | |
5861 | 0 | return relo->core_relo; |
5862 | 0 | } |
5863 | | |
5864 | 0 | return NULL; |
5865 | 0 | } |
5866 | | |
5867 | | static int bpf_core_resolve_relo(struct bpf_program *prog, |
5868 | | const struct bpf_core_relo *relo, |
5869 | | int relo_idx, |
5870 | | const struct btf *local_btf, |
5871 | | struct hashmap *cand_cache, |
5872 | | struct bpf_core_relo_res *targ_res) |
5873 | 0 | { |
5874 | 0 | struct bpf_core_spec specs_scratch[3] = {}; |
5875 | 0 | struct bpf_core_cand_list *cands = NULL; |
5876 | 0 | const char *prog_name = prog->name; |
5877 | 0 | const struct btf_type *local_type; |
5878 | 0 | const char *local_name; |
5879 | 0 | __u32 local_id = relo->type_id; |
5880 | 0 | int err; |
5881 | |
5882 | 0 | local_type = btf__type_by_id(local_btf, local_id); |
5883 | 0 | if (!local_type) |
5884 | 0 | return -EINVAL; |
5885 | | |
5886 | 0 | local_name = btf__name_by_offset(local_btf, local_type->name_off); |
5887 | 0 | if (!local_name) |
5888 | 0 | return -EINVAL; |
5889 | | |
5890 | 0 | if (relo->kind != BPF_CORE_TYPE_ID_LOCAL && |
5891 | 0 | !hashmap__find(cand_cache, local_id, &cands)) { |
5892 | 0 | cands = bpf_core_find_cands(prog->obj, local_btf, local_id); |
5893 | 0 | if (IS_ERR(cands)) { |
5894 | 0 | pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n", |
5895 | 0 | prog_name, relo_idx, local_id, btf_kind_str(local_type), |
5896 | 0 | local_name, PTR_ERR(cands)); |
5897 | 0 | return PTR_ERR(cands); |
5898 | 0 | } |
5899 | 0 | err = hashmap__set(cand_cache, local_id, cands, NULL, NULL); |
5900 | 0 | if (err) { |
5901 | 0 | bpf_core_free_cands(cands); |
5902 | 0 | return err; |
5903 | 0 | } |
5904 | 0 | } |
5905 | | |
5906 | 0 | return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch, |
5907 | 0 | targ_res); |
5908 | 0 | } |
5909 | | |
5910 | | static int |
5911 | | bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) |
5912 | 0 | { |
5913 | 0 | const struct btf_ext_info_sec *sec; |
5914 | 0 | struct bpf_core_relo_res targ_res; |
5915 | 0 | const struct bpf_core_relo *rec; |
5916 | 0 | const struct btf_ext_info *seg; |
5917 | 0 | struct hashmap_entry *entry; |
5918 | 0 | struct hashmap *cand_cache = NULL; |
5919 | 0 | struct bpf_program *prog; |
5920 | 0 | struct bpf_insn *insn; |
5921 | 0 | const char *sec_name; |
5922 | 0 | int i, err = 0, insn_idx, sec_idx, sec_num; |
5923 | |
5924 | 0 | if (obj->btf_ext->core_relo_info.len == 0) |
5925 | 0 | return 0; |
5926 | | |
5927 | 0 | if (targ_btf_path) { |
5928 | 0 | obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL); |
5929 | 0 | err = libbpf_get_error(obj->btf_vmlinux_override); |
5930 | 0 | if (err) { |
5931 | 0 | pr_warn("failed to parse target BTF: %s\n", errstr(err)); |
5932 | 0 | return err; |
5933 | 0 | } |
5934 | 0 | } |
5935 | | |
5936 | 0 | cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL); |
5937 | 0 | if (IS_ERR(cand_cache)) { |
5938 | 0 | err = PTR_ERR(cand_cache); |
5939 | 0 | goto out; |
5940 | 0 | } |
5941 | | |
5942 | 0 | seg = &obj->btf_ext->core_relo_info; |
5943 | 0 | sec_num = 0; |
5944 | 0 | for_each_btf_ext_sec(seg, sec) { |
5945 | 0 | sec_idx = seg->sec_idxs[sec_num]; |
5946 | 0 | sec_num++; |
5947 | |
5948 | 0 | sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); |
5949 | 0 | if (str_is_empty(sec_name)) { |
5950 | 0 | err = -EINVAL; |
5951 | 0 | goto out; |
5952 | 0 | } |
5953 | | |
5954 | 0 | pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info); |
5955 | |
5956 | 0 | for_each_btf_ext_rec(seg, sec, i, rec) { |
5957 | 0 | if (rec->insn_off % BPF_INSN_SZ) |
5958 | 0 | return -EINVAL; |
5959 | 0 | insn_idx = rec->insn_off / BPF_INSN_SZ; |
5960 | 0 | prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); |
5961 | 0 | if (!prog) { |
5962 | | /* When __weak subprog is "overridden" by another instance |
5963 | | * of the subprog from a different object file, linker still |
5964 | | * appends all the .BTF.ext info that used to belong to that |
5965 | | * eliminated subprogram. |
5966 | | * This is similar to what x86-64 linker does for relocations. |
5967 | | * So just ignore such relocations, the same way we ignore
5968 | | * subprog instructions when discovering subprograms. |
5969 | | */ |
5970 | 0 | pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n", |
5971 | 0 | sec_name, i, insn_idx); |
5972 | 0 | continue; |
5973 | 0 | } |
5974 | | /* no need to apply CO-RE relocation if the program is |
5975 | | * not going to be loaded |
5976 | | */ |
5977 | 0 | if (!prog->autoload) |
5978 | 0 | continue; |
5979 | | |
5980 | | /* adjust insn_idx from section frame of reference to the local |
5981 | | * program's frame of reference; (sub-)program code is not yet |
5982 | | * relocated, so it's enough to just subtract in-section offset |
5983 | | */ |
5984 | 0 | insn_idx = insn_idx - prog->sec_insn_off; |
5985 | 0 | if (insn_idx >= prog->insns_cnt) |
5986 | 0 | return -EINVAL; |
5987 | 0 | insn = &prog->insns[insn_idx]; |
5988 | |
5989 | 0 | err = record_relo_core(prog, rec, insn_idx); |
5990 | 0 | if (err) { |
5991 | 0 | pr_warn("prog '%s': relo #%d: failed to record relocation: %s\n", |
5992 | 0 | prog->name, i, errstr(err)); |
5993 | 0 | goto out; |
5994 | 0 | } |
5995 | | |
5996 | 0 | if (prog->obj->gen_loader) |
5997 | 0 | continue; |
5998 | | |
5999 | 0 | err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res); |
6000 | 0 | if (err) { |
6001 | 0 | pr_warn("prog '%s': relo #%d: failed to relocate: %s\n", |
6002 | 0 | prog->name, i, errstr(err)); |
6003 | 0 | goto out; |
6004 | 0 | } |
6005 | | |
6006 | 0 | err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res); |
6007 | 0 | if (err) { |
6008 | 0 | pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %s\n", |
6009 | 0 | prog->name, i, insn_idx, errstr(err)); |
6010 | 0 | goto out; |
6011 | 0 | } |
6012 | 0 | } |
6013 | 0 | } |
6014 | | |
6015 | 0 | out: |
6016 | | /* obj->btf_vmlinux and module BTFs are freed after object load */ |
6017 | 0 | btf__free(obj->btf_vmlinux_override); |
6018 | 0 | obj->btf_vmlinux_override = NULL; |
6019 | |
6020 | 0 | if (!IS_ERR_OR_NULL(cand_cache)) { |
6021 | 0 | hashmap__for_each_entry(cand_cache, entry, i) { |
6022 | 0 | bpf_core_free_cands(entry->pvalue); |
6023 | 0 | } |
6024 | 0 | hashmap__free(cand_cache); |
6025 | 0 | } |
6026 | 0 | return err; |
6027 | 0 | } |
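 | | /* Usage sketch (not part of this file): targ_btf_path is supplied by the
 | |  * caller; through the public API it corresponds to the btf_custom_path
 | |  * open option. The .btf file path below is purely illustrative:
 | |  *
 | |  *	LIBBPF_OPTS(bpf_object_open_opts, opts,
 | |  *		    .btf_custom_path = "/tmp/vmlinux.btf");
 | |  *	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", &opts);
 | |  */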
6028 | | |
6029 | | /* base map load ldimm64 special constant, used also for log fixup logic */ |
6030 | 0 | #define POISON_LDIMM64_MAP_BASE 2001000000 |
6031 | | #define POISON_LDIMM64_MAP_PFX "200100" |
6032 | | |
6033 | | static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx, |
6034 | | int insn_idx, struct bpf_insn *insn, |
6035 | | int map_idx, const struct bpf_map *map) |
6036 | 0 | { |
6037 | 0 | int i; |
6038 | |
6039 | 0 | pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n", |
6040 | 0 | prog->name, relo_idx, insn_idx, map_idx, map->name); |
6041 | | |
6042 | | /* we turn single ldimm64 into two identical invalid calls */ |
6043 | 0 | for (i = 0; i < 2; i++) { |
6044 | 0 | insn->code = BPF_JMP | BPF_CALL; |
6045 | 0 | insn->dst_reg = 0; |
6046 | 0 | insn->src_reg = 0; |
6047 | 0 | insn->off = 0; |
6048 | | /* if this instruction is reachable (not dead code),
6049 | | * verifier will complain with something like: |
6050 | | * invalid func unknown#2001000123 |
6051 | | * where lower 123 is map index into obj->maps[] array |
6052 | | */ |
6053 | 0 | insn->imm = POISON_LDIMM64_MAP_BASE + map_idx; |
6054 | |
6055 | 0 | insn++; |
6056 | 0 | } |
6057 | 0 | } |
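 | | /* Sketch of the inverse mapping used by log fixup: from the verifier's
 | |  * "invalid func unknown#2001000123" complaint, the offending map is
 | |  *
 | |  *	int map_idx = 2001000123 - POISON_LDIMM64_MAP_BASE;	// == 123
 | |  *	struct bpf_map *map = &obj->maps[map_idx];
 | |  */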
6058 | | |
6059 | | /* unresolved kfunc call special constant, used also for log fixup logic */ |
6060 | 0 | #define POISON_CALL_KFUNC_BASE 2002000000 |
6061 | | #define POISON_CALL_KFUNC_PFX "2002" |
6062 | | |
6063 | | static void poison_kfunc_call(struct bpf_program *prog, int relo_idx, |
6064 | | int insn_idx, struct bpf_insn *insn, |
6065 | | int ext_idx, const struct extern_desc *ext) |
6066 | 0 | { |
6067 | 0 | pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n", |
6068 | 0 | prog->name, relo_idx, insn_idx, ext->name); |
6069 | | |
6070 | | /* we turn kfunc call into invalid helper call with identifiable constant */ |
6071 | 0 | insn->code = BPF_JMP | BPF_CALL; |
6072 | 0 | insn->dst_reg = 0; |
6073 | 0 | insn->src_reg = 0; |
6074 | 0 | insn->off = 0; |
6075 | | /* if this instruction is reachable (not dead code),
6076 | | * verifier will complain with something like:
6077 | | * invalid func unknown#2002000123
6078 | | * where lower 123 is extern index into obj->externs[] array |
6079 | | */ |
6080 | 0 | insn->imm = POISON_CALL_KFUNC_BASE + ext_idx; |
6081 | 0 | } |
6082 | | |
6083 | | /* Relocate data references within program code: |
6084 | | * - map references; |
6085 | | * - global variable references; |
6086 | | * - extern references. |
6087 | | */ |
6088 | | static int |
6089 | | bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog) |
6090 | 0 | { |
6091 | 0 | int i; |
6092 | |
6093 | 0 | for (i = 0; i < prog->nr_reloc; i++) { |
6094 | 0 | struct reloc_desc *relo = &prog->reloc_desc[i]; |
6095 | 0 | struct bpf_insn *insn = &prog->insns[relo->insn_idx]; |
6096 | 0 | const struct bpf_map *map; |
6097 | 0 | struct extern_desc *ext; |
6098 | |
6099 | 0 | switch (relo->type) { |
6100 | 0 | case RELO_LD64: |
6101 | 0 | map = &obj->maps[relo->map_idx]; |
6102 | 0 | if (obj->gen_loader) { |
6103 | 0 | insn[0].src_reg = BPF_PSEUDO_MAP_IDX; |
6104 | 0 | insn[0].imm = relo->map_idx; |
6105 | 0 | } else if (map->autocreate) { |
6106 | 0 | insn[0].src_reg = BPF_PSEUDO_MAP_FD; |
6107 | 0 | insn[0].imm = map->fd; |
6108 | 0 | } else { |
6109 | 0 | poison_map_ldimm64(prog, i, relo->insn_idx, insn, |
6110 | 0 | relo->map_idx, map); |
6111 | 0 | } |
6112 | 0 | break; |
6113 | 0 | case RELO_DATA: |
6114 | 0 | map = &obj->maps[relo->map_idx]; |
6115 | 0 | insn[1].imm = insn[0].imm + relo->sym_off; |
6116 | 0 | if (obj->gen_loader) { |
6117 | 0 | insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE; |
6118 | 0 | insn[0].imm = relo->map_idx; |
6119 | 0 | } else if (map->autocreate) { |
6120 | 0 | insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; |
6121 | 0 | insn[0].imm = map->fd; |
6122 | 0 | } else { |
6123 | 0 | poison_map_ldimm64(prog, i, relo->insn_idx, insn, |
6124 | 0 | relo->map_idx, map); |
6125 | 0 | } |
6126 | 0 | break; |
6127 | 0 | case RELO_EXTERN_LD64: |
6128 | 0 | ext = &obj->externs[relo->ext_idx]; |
6129 | 0 | if (ext->type == EXT_KCFG) { |
6130 | 0 | if (obj->gen_loader) { |
6131 | 0 | insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE; |
6132 | 0 | insn[0].imm = obj->kconfig_map_idx; |
6133 | 0 | } else { |
6134 | 0 | insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; |
6135 | 0 | insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; |
6136 | 0 | } |
6137 | 0 | insn[1].imm = ext->kcfg.data_off; |
6138 | 0 | } else /* EXT_KSYM */ { |
6139 | 0 | if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */ |
6140 | 0 | insn[0].src_reg = BPF_PSEUDO_BTF_ID; |
6141 | 0 | insn[0].imm = ext->ksym.kernel_btf_id; |
6142 | 0 | insn[1].imm = ext->ksym.kernel_btf_obj_fd; |
6143 | 0 | } else { /* typeless ksyms or unresolved typed ksyms */ |
6144 | 0 | insn[0].imm = (__u32)ext->ksym.addr; |
6145 | 0 | insn[1].imm = ext->ksym.addr >> 32; |
6146 | 0 | } |
6147 | 0 | } |
6148 | 0 | break; |
6149 | 0 | case RELO_EXTERN_CALL: |
6150 | 0 | ext = &obj->externs[relo->ext_idx]; |
6151 | 0 | insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL; |
6152 | 0 | if (ext->is_set) { |
6153 | 0 | insn[0].imm = ext->ksym.kernel_btf_id; |
6154 | 0 | insn[0].off = ext->ksym.btf_fd_idx; |
6155 | 0 | } else { /* unresolved weak kfunc call */ |
6156 | 0 | poison_kfunc_call(prog, i, relo->insn_idx, insn, |
6157 | 0 | relo->ext_idx, ext); |
6158 | 0 | } |
6159 | 0 | break; |
6160 | 0 | case RELO_SUBPROG_ADDR: |
6161 | 0 | if (insn[0].src_reg != BPF_PSEUDO_FUNC) { |
6162 | 0 | pr_warn("prog '%s': relo #%d: bad insn\n", |
6163 | 0 | prog->name, i); |
6164 | 0 | return -EINVAL; |
6165 | 0 | } |
6166 | | /* handled already */ |
6167 | 0 | break; |
6168 | 0 | case RELO_CALL: |
6169 | | /* handled already */ |
6170 | 0 | break; |
6171 | 0 | case RELO_CORE: |
6172 | | /* will be handled by bpf_program_record_relos() */ |
6173 | 0 | break; |
6174 | 0 | default: |
6175 | 0 | pr_warn("prog '%s': relo #%d: bad relo type %d\n", |
6176 | 0 | prog->name, i, relo->type); |
6177 | 0 | return -EINVAL; |
6178 | 0 | } |
6179 | 0 | } |
6180 | | |
6181 | 0 | return 0; |
6182 | 0 | } |
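 | | /* For reference: ldimm64 occupies two struct bpf_insn slots, which is
 | |  * why the RELO_DATA case above patches both insn[0] and insn[1].
 | |  * Schematically, for a map-value reference:
 | |  *
 | |  *	insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
 | |  *	insn[0].imm = map_fd;		// low half: which map
 | |  *	insn[1].imm = value_off;	// high half: offset inside the value
 | |  */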
6183 | | |
6184 | | static int adjust_prog_btf_ext_info(const struct bpf_object *obj, |
6185 | | const struct bpf_program *prog, |
6186 | | const struct btf_ext_info *ext_info, |
6187 | | void **prog_info, __u32 *prog_rec_cnt, |
6188 | | __u32 *prog_rec_sz) |
6189 | 0 | { |
6190 | 0 | void *copy_start = NULL, *copy_end = NULL; |
6191 | 0 | void *rec, *rec_end, *new_prog_info; |
6192 | 0 | const struct btf_ext_info_sec *sec; |
6193 | 0 | size_t old_sz, new_sz; |
6194 | 0 | int i, sec_num, sec_idx, off_adj; |
6195 | |
6196 | 0 | sec_num = 0; |
6197 | 0 | for_each_btf_ext_sec(ext_info, sec) { |
6198 | 0 | sec_idx = ext_info->sec_idxs[sec_num]; |
6199 | 0 | sec_num++; |
6200 | 0 | if (prog->sec_idx != sec_idx) |
6201 | 0 | continue; |
6202 | | |
6203 | 0 | for_each_btf_ext_rec(ext_info, sec, i, rec) { |
6204 | 0 | __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ; |
6205 | |
6206 | 0 | if (insn_off < prog->sec_insn_off) |
6207 | 0 | continue; |
6208 | 0 | if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt) |
6209 | 0 | break; |
6210 | | |
6211 | 0 | if (!copy_start) |
6212 | 0 | copy_start = rec; |
6213 | 0 | copy_end = rec + ext_info->rec_size; |
6214 | 0 | } |
6215 | |
6216 | 0 | if (!copy_start) |
6217 | 0 | return -ENOENT; |
6218 | | |
6219 | | /* append func/line info of a given (sub-)program to the main |
6220 | | * program func/line info |
6221 | | */ |
6222 | 0 | old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size; |
6223 | 0 | new_sz = old_sz + (copy_end - copy_start); |
6224 | 0 | new_prog_info = realloc(*prog_info, new_sz); |
6225 | 0 | if (!new_prog_info) |
6226 | 0 | return -ENOMEM; |
6227 | 0 | *prog_info = new_prog_info; |
6228 | 0 | *prog_rec_cnt = new_sz / ext_info->rec_size; |
6229 | 0 | memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start); |
6230 | | |
6231 | | /* Kernel instruction offsets are in units of 8-byte |
6232 | | * instructions, while .BTF.ext instruction offsets generated |
6233 | | * by Clang are in units of bytes. So convert Clang offsets |
6234 | | * into kernel offsets and adjust offset according to program |
6235 | | * relocated position. |
6236 | | */ |
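 | | /* E.g., a record at Clang byte offset 64 in a subprog relocated such
 | |  * that off_adj == 100 ends up at kernel insn offset 64/8 + 100 == 108.
 | |  */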
6237 | 0 | off_adj = prog->sub_insn_off - prog->sec_insn_off; |
6238 | 0 | rec = new_prog_info + old_sz; |
6239 | 0 | rec_end = new_prog_info + new_sz; |
6240 | 0 | for (; rec < rec_end; rec += ext_info->rec_size) { |
6241 | 0 | __u32 *insn_off = rec; |
6242 | |
6243 | 0 | *insn_off = *insn_off / BPF_INSN_SZ + off_adj; |
6244 | 0 | } |
6245 | 0 | *prog_rec_sz = ext_info->rec_size; |
6246 | 0 | return 0; |
6247 | 0 | } |
6248 | | |
6249 | 0 | return -ENOENT; |
6250 | 0 | } |
6251 | | |
6252 | | static int |
6253 | | reloc_prog_func_and_line_info(const struct bpf_object *obj, |
6254 | | struct bpf_program *main_prog, |
6255 | | const struct bpf_program *prog) |
6256 | 0 | { |
6257 | 0 | int err; |
6258 | | |
6259 | | /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't |
6260 | | * support func/line info |
6261 | | */ |
6262 | 0 | if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC)) |
6263 | 0 | return 0; |
6264 | | |
6265 | | /* only attempt func info relocation if main program's func_info |
6266 | | * relocation was successful |
6267 | | */ |
6268 | 0 | if (main_prog != prog && !main_prog->func_info) |
6269 | 0 | goto line_info; |
6270 | | |
6271 | 0 | err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info, |
6272 | 0 | &main_prog->func_info, |
6273 | 0 | &main_prog->func_info_cnt, |
6274 | 0 | &main_prog->func_info_rec_size); |
6275 | 0 | if (err) { |
6276 | 0 | if (err != -ENOENT) { |
6277 | 0 | pr_warn("prog '%s': error relocating .BTF.ext function info: %s\n", |
6278 | 0 | prog->name, errstr(err)); |
6279 | 0 | return err; |
6280 | 0 | } |
6281 | 0 | if (main_prog->func_info) { |
6282 | | /* |
6283 | | * Some info has already been found, but the last
6284 | | * btf_ext reloc had a problem, so we must error out.
6285 | | */ |
6286 | 0 | pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name); |
6287 | 0 | return err; |
6288 | 0 | } |
6289 | | /* Failed to load the very first info. Ignore the rest. */
6290 | 0 | pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n", |
6291 | 0 | prog->name); |
6292 | 0 | } |
6293 | | |
6294 | 0 | line_info: |
6295 | | /* don't relocate line info if main program's relocation failed */ |
6296 | 0 | if (main_prog != prog && !main_prog->line_info) |
6297 | 0 | return 0; |
6298 | | |
6299 | 0 | err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info, |
6300 | 0 | &main_prog->line_info, |
6301 | 0 | &main_prog->line_info_cnt, |
6302 | 0 | &main_prog->line_info_rec_size); |
6303 | 0 | if (err) { |
6304 | 0 | if (err != -ENOENT) { |
6305 | 0 | pr_warn("prog '%s': error relocating .BTF.ext line info: %s\n", |
6306 | 0 | prog->name, errstr(err)); |
6307 | 0 | return err; |
6308 | 0 | } |
6309 | 0 | if (main_prog->line_info) { |
6310 | | /* |
6311 | | * Some info has already been found, but the last
6312 | | * btf_ext reloc had a problem, so we must error out.
6313 | | */ |
6314 | 0 | pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name); |
6315 | 0 | return err; |
6316 | 0 | } |
6317 | | /* Failed to load the very first info. Ignore the rest. */
6318 | 0 | pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n", |
6319 | 0 | prog->name); |
6320 | 0 | } |
6321 | 0 | return 0; |
6322 | 0 | } |
6323 | | |
6324 | | static int cmp_relo_by_insn_idx(const void *key, const void *elem) |
6325 | 0 | { |
6326 | 0 | size_t insn_idx = *(const size_t *)key; |
6327 | 0 | const struct reloc_desc *relo = elem; |
6328 | |
6329 | 0 | if (insn_idx == relo->insn_idx) |
6330 | 0 | return 0; |
6331 | 0 | return insn_idx < relo->insn_idx ? -1 : 1; |
6332 | 0 | } |
6333 | | |
6334 | | static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx) |
6335 | 0 | { |
6336 | 0 | if (!prog->nr_reloc) |
6337 | 0 | return NULL; |
6338 | 0 | return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc, |
6339 | 0 | sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx); |
6340 | 0 | } |
6341 | | |
6342 | | static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog) |
6343 | 0 | { |
6344 | 0 | int new_cnt = main_prog->nr_reloc + subprog->nr_reloc; |
6345 | 0 | struct reloc_desc *relos; |
6346 | 0 | int i; |
6347 | |
6348 | 0 | if (main_prog == subprog) |
6349 | 0 | return 0; |
6350 | 0 | relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos)); |
6351 | | /* if new count is zero, reallocarray can return a valid NULL result; |
6352 | | * in this case the previous pointer will be freed, so we *have to* |
6353 | | * reassign old pointer to the new value (even if it's NULL) |
6354 | | */ |
6355 | 0 | if (!relos && new_cnt) |
6356 | 0 | return -ENOMEM; |
6357 | 0 | if (subprog->nr_reloc) |
6358 | 0 | memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc, |
6359 | 0 | sizeof(*relos) * subprog->nr_reloc); |
6360 | |
6361 | 0 | for (i = main_prog->nr_reloc; i < new_cnt; i++) |
6362 | 0 | relos[i].insn_idx += subprog->sub_insn_off; |
6363 | | /* After insn_idx adjustment the 'relos' array is still sorted |
6364 | | * by insn_idx and doesn't break bsearch. |
6365 | | */ |
6366 | 0 | main_prog->reloc_desc = relos; |
6367 | 0 | main_prog->nr_reloc = new_cnt; |
6368 | 0 | return 0; |
6369 | 0 | } |
6370 | | |
6371 | | static int |
6372 | | bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog, |
6373 | | struct bpf_program *subprog) |
6374 | 0 | { |
6375 | 0 | struct bpf_insn *insns; |
6376 | 0 | size_t new_cnt; |
6377 | 0 | int err; |
6378 | |
6379 | 0 | subprog->sub_insn_off = main_prog->insns_cnt; |
6380 | |
6381 | 0 | new_cnt = main_prog->insns_cnt + subprog->insns_cnt; |
6382 | 0 | insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns)); |
6383 | 0 | if (!insns) { |
6384 | 0 | pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name); |
6385 | 0 | return -ENOMEM; |
6386 | 0 | } |
6387 | 0 | main_prog->insns = insns; |
6388 | 0 | main_prog->insns_cnt = new_cnt; |
6389 | |
6390 | 0 | memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns, |
6391 | 0 | subprog->insns_cnt * sizeof(*insns)); |
6392 | |
6393 | 0 | pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n", |
6394 | 0 | main_prog->name, subprog->insns_cnt, subprog->name); |
6395 | | |
6396 | | /* The subprog insns are now appended. Append its relos too. */ |
6397 | 0 | err = append_subprog_relos(main_prog, subprog); |
6398 | 0 | if (err) |
6399 | 0 | return err; |
6400 | 0 | return 0; |
6401 | 0 | } |
6402 | | |
6403 | | static int |
6404 | | bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog, |
6405 | | struct bpf_program *prog) |
6406 | 0 | { |
6407 | 0 | size_t sub_insn_idx, insn_idx; |
6408 | 0 | struct bpf_program *subprog; |
6409 | 0 | struct reloc_desc *relo; |
6410 | 0 | struct bpf_insn *insn; |
6411 | 0 | int err; |
6412 | |
|
6413 | 0 | err = reloc_prog_func_and_line_info(obj, main_prog, prog); |
6414 | 0 | if (err) |
6415 | 0 | return err; |
6416 | | |
6417 | 0 | for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) { |
6418 | 0 | insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; |
6419 | 0 | if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn)) |
6420 | 0 | continue; |
6421 | | |
6422 | 0 | relo = find_prog_insn_relo(prog, insn_idx); |
6423 | 0 | if (relo && relo->type == RELO_EXTERN_CALL) |
6424 | | /* kfunc relocations will be handled later |
6425 | | * in bpf_object__relocate_data() |
6426 | | */ |
6427 | 0 | continue; |
6428 | 0 | if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) { |
6429 | 0 | pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n", |
6430 | 0 | prog->name, insn_idx, relo->type); |
6431 | 0 | return -LIBBPF_ERRNO__RELOC; |
6432 | 0 | } |
6433 | 0 | if (relo) { |
6434 | | /* sub-program instruction index is a combination of |
6435 | | * an offset of a symbol pointed to by relocation and |
6436 | | * call instruction's imm field; for global functions, |
6437 | | * call always has imm = -1, but for static functions |
6438 | | * relocation is against STT_SECTION and insn->imm |
6439 | | * points to a start of a static function |
6440 | | * |
6441 | | * for subprog addr relocation, the relo->sym_off + insn->imm is |
6442 | | * the byte offset in the corresponding section. |
6443 | | */ |
6444 | 0 | if (relo->type == RELO_CALL) |
6445 | 0 | sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1; |
6446 | 0 | else |
6447 | 0 | sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ; |
6448 | 0 | } else if (insn_is_pseudo_func(insn)) { |
6449 | | /* |
6450 | | * RELO_SUBPROG_ADDR relo is always emitted even if both |
6451 | | * functions are in the same section, so it shouldn't reach here. |
6452 | | */ |
6453 | 0 | pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n", |
6454 | 0 | prog->name, insn_idx); |
6455 | 0 | return -LIBBPF_ERRNO__RELOC; |
6456 | 0 | } else { |
6457 | | /* if subprogram call is to a static function within |
6458 | | * the same ELF section, there won't be any relocation |
6459 | | * emitted, but it also means there is no additional |
6460 | | * offset necessary, insns->imm is relative to |
6461 | | * instruction's original position within the section |
6462 | | */ |
6463 | 0 | sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1; |
6464 | 0 | } |
6465 | | |
6466 | | /* we enforce that sub-programs should be in .text section */ |
6467 | 0 | subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx); |
6468 | 0 | if (!subprog) { |
6469 | 0 | pr_warn("prog '%s': no .text section found yet sub-program call exists\n", |
6470 | 0 | prog->name); |
6471 | 0 | return -LIBBPF_ERRNO__RELOC; |
6472 | 0 | } |
6473 | | |
6474 | | /* if it's the first call instruction calling into this |
6475 | | * subprogram (meaning this subprog hasn't been processed |
6476 | | * yet) within the context of current main program: |
6477 | | * - append it at the end of main program's instruction block;
6478 | | * - process it recursively, while current program is put on hold;
6479 | | * - if that subprogram calls some other not yet processed
6480 | | * subprogram, same thing will happen recursively until
6481 | | * there are no more unprocessed subprograms left to append
6482 | | * and relocate. |
6483 | | */ |
6484 | 0 | if (subprog->sub_insn_off == 0) { |
6485 | 0 | err = bpf_object__append_subprog_code(obj, main_prog, subprog); |
6486 | 0 | if (err) |
6487 | 0 | return err; |
6488 | 0 | err = bpf_object__reloc_code(obj, main_prog, subprog); |
6489 | 0 | if (err) |
6490 | 0 | return err; |
6491 | 0 | } |
6492 | | |
6493 | | /* main_prog->insns memory could have been re-allocated, so |
6494 | | * calculate pointer again |
6495 | | */ |
6496 | 0 | insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; |
6497 | | /* calculate correct instruction position within current main |
6498 | | * prog; each main prog can have a different set of |
6499 | | * subprograms appended (potentially in different order as |
6500 | | * well), so position of any subprog can be different for |
6501 | | * different main programs |
6502 | | */ |
6503 | 0 | insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1; |
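 | | /* Worked example: if mainA occupies insns [0..9] and subA was appended
 | |  * at sub_insn_off == 10, then a call at insn #3 of mainA gets
 | |  * imm = 10 - (0 + 3) - 1 = 6; at runtime the call target is
 | |  * pc + imm + 1 = 3 + 6 + 1 = 10, i.e. the first insn of subA.
 | |  */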
6504 | |
6505 | 0 | pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n", |
6506 | 0 | prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off); |
6507 | 0 | } |
6508 | | |
6509 | 0 | return 0; |
6510 | 0 | } |
6511 | | |
6512 | | /* |
6513 | | * Relocate sub-program calls. |
6514 | | * |
6515 | | * Algorithm operates as follows. Each entry-point BPF program (referred to as |
6516 | | * main prog) is processed separately. Each subprog (a non-entry function
6517 | | * that can be called from either entry progs or other subprogs) gets its
6518 | | * sub_insn_off reset to zero. This serves as an indicator that this subprogram
6519 | | * hasn't yet been appended and relocated within current main prog. Once it's
6520 | | * relocated, sub_insn_off will point at the position within current main prog |
6521 | | * where given subprog was appended. This will further be used to relocate all |
6522 | | * the call instructions jumping into this subprog. |
6523 | | * |
6524 | | * We start with main program and process all call instructions. If the call |
6525 | | * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off |
6526 | | * is zero), subprog instructions are appended at the end of main program's |
6527 | | * instruction array. Then main program is "put on hold" while we recursively |
6528 | | * process newly appended subprogram. If that subprogram calls into another |
6529 | | * subprogram that hasn't been appended, new subprogram is appended again to |
6530 | | * the *main* prog's instructions (subprog's instructions are always left |
6531 | | * untouched, as they need to be in unmodified state for subsequent main progs |
6532 | | * and subprog instructions are always sent only as part of a main prog) and |
6533 | | * the process continues recursively. Once all the subprogs called from a main |
6534 | | * prog or any of its subprogs are appended (and relocated), all their |
6535 | | * positions within finalized instructions array are known, so it's easy to |
6536 | | * rewrite call instructions with correct relative offsets, corresponding to |
6537 | | * desired target subprog. |
6538 | | * |
6539 | | * It's important to realize that some subprogs might not be called from a
6540 | | * given main prog or any of its called/used subprogs. Those will keep their
6541 | | * subprog->sub_insn_off as zero at all times and won't be appended to current |
6542 | | * main prog and won't be relocated within the context of current main prog. |
6543 | | * They might still be used from other main progs later. |
6544 | | * |
6545 | | * Visually this process can be shown as below. Suppose we have two main |
6546 | | * programs mainA and mainB and BPF object contains three subprogs: subA, |
6547 | | * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and |
6548 | | * subC both call subB: |
6549 | | * |
6550 | | * +--------+ +-------+ |
6551 | | * | v v | |
6552 | | * +--+---+ +--+-+-+ +---+--+ |
6553 | | * | subA | | subB | | subC | |
6554 | | * +--+---+ +------+ +---+--+ |
6555 | | * ^ ^ |
6556 | | * | | |
6557 | | * +---+-------+ +------+----+ |
6558 | | * | mainA | | mainB | |
6559 | | * +-----------+ +-----------+ |
6560 | | * |
6561 | | * We'll start relocating mainA, will find subA, append it and start |
6562 | | * processing subA recursively:
6563 | | * |
6564 | | * +-----------+------+ |
6565 | | * | mainA | subA | |
6566 | | * +-----------+------+ |
6567 | | * |
6568 | | * At this point we notice that subB is used from subA, so we append it and |
6569 | | * relocate (there are no further subcalls from subB): |
6570 | | * |
6571 | | * +-----------+------+------+ |
6572 | | * | mainA | subA | subB | |
6573 | | * +-----------+------+------+ |
6574 | | * |
6575 | | * At this point, we relocate subA calls, then go one level up and finish with |
6576 | | * relocating mainA calls. mainA is done.
6577 | | * |
6578 | | * For mainB the process is similar but results in a different order. We start with
6579 | | * mainB and skip subA and subB, as mainB never calls them (at least |
6580 | | * directly), but we see subC is needed, so we append and start processing it: |
6581 | | * |
6582 | | * +-----------+------+ |
6583 | | * | mainB | subC | |
6584 | | * +-----------+------+ |
6585 | | * Now we see subC needs subB, so we go back to it, append and relocate it: |
6586 | | * |
6587 | | * +-----------+------+------+ |
6588 | | * | mainB | subC | subB | |
6589 | | * +-----------+------+------+ |
6590 | | * |
6591 | | * At this point we unwind recursion, relocate calls in subC, then in mainB. |
6592 | | */ |
6593 | | static int |
6594 | | bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog) |
6595 | 0 | { |
6596 | 0 | struct bpf_program *subprog; |
6597 | 0 | int i, err; |
6598 | | |
6599 | | /* mark all subprogs as not relocated (yet) within the context of |
6600 | | * current main program |
6601 | | */ |
6602 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
6603 | 0 | subprog = &obj->programs[i]; |
6604 | 0 | if (!prog_is_subprog(obj, subprog)) |
6605 | 0 | continue; |
6606 | | |
6607 | 0 | subprog->sub_insn_off = 0; |
6608 | 0 | } |
6609 | |
6610 | 0 | err = bpf_object__reloc_code(obj, prog, prog); |
6611 | 0 | if (err) |
6612 | 0 | return err; |
6613 | | |
6614 | 0 | return 0; |
6615 | 0 | } |
6616 | | |
6617 | | static void |
6618 | | bpf_object__free_relocs(struct bpf_object *obj) |
6619 | 0 | { |
6620 | 0 | struct bpf_program *prog; |
6621 | 0 | int i; |
6622 | | |
6623 | | /* free up relocation descriptors */ |
6624 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
6625 | 0 | prog = &obj->programs[i]; |
6626 | 0 | zfree(&prog->reloc_desc); |
6627 | 0 | prog->nr_reloc = 0; |
6628 | 0 | } |
6629 | 0 | } |
6630 | | |
6631 | | static int cmp_relocs(const void *_a, const void *_b) |
6632 | 2.83k | { |
6633 | 2.83k | const struct reloc_desc *a = _a; |
6634 | 2.83k | const struct reloc_desc *b = _b; |
6635 | | |
6636 | 2.83k | if (a->insn_idx != b->insn_idx) |
6637 | 292 | return a->insn_idx < b->insn_idx ? -1 : 1; |
6638 | | |
6639 | | /* no two relocations should have the same insn_idx, but ... */ |
6640 | 2.54k | if (a->type != b->type) |
6641 | 224 | return a->type < b->type ? -1 : 1; |
6642 | | |
6643 | 2.31k | return 0; |
6644 | 2.54k | } |
6645 | | |
6646 | | static void bpf_object__sort_relos(struct bpf_object *obj) |
6647 | 1.72k | { |
6648 | 1.72k | int i; |
6649 | | |
6650 | 8.48k | for (i = 0; i < obj->nr_programs; i++) { |
6651 | 6.76k | struct bpf_program *p = &obj->programs[i]; |
6652 | | |
6653 | 6.76k | if (!p->nr_reloc) |
6654 | 6.72k | continue; |
6655 | | |
6656 | 46 | qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); |
6657 | 46 | } |
6658 | 1.72k | } |
6659 | | |
6660 | | static int bpf_prog_assign_exc_cb(struct bpf_object *obj, struct bpf_program *prog) |
6661 | 0 | { |
6662 | 0 | const char *str = "exception_callback:"; |
6663 | 0 | size_t pfx_len = strlen(str); |
6664 | 0 | int i, j, n; |
6665 | |
6666 | 0 | if (!obj->btf || !kernel_supports(obj, FEAT_BTF_DECL_TAG)) |
6667 | 0 | return 0; |
6668 | | |
6669 | 0 | n = btf__type_cnt(obj->btf); |
6670 | 0 | for (i = 1; i < n; i++) { |
6671 | 0 | const char *name; |
6672 | 0 | struct btf_type *t; |
6673 | |
6674 | 0 | t = btf_type_by_id(obj->btf, i); |
6675 | 0 | if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1) |
6676 | 0 | continue; |
6677 | | |
6678 | 0 | name = btf__str_by_offset(obj->btf, t->name_off); |
6679 | 0 | if (strncmp(name, str, pfx_len) != 0) |
6680 | 0 | continue; |
6681 | | |
6682 | 0 | t = btf_type_by_id(obj->btf, t->type); |
6683 | 0 | if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) { |
6684 | 0 | pr_warn("prog '%s': exception_callback:<value> decl tag not applied to the main program\n", |
6685 | 0 | prog->name); |
6686 | 0 | return -EINVAL; |
6687 | 0 | } |
6688 | 0 | if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)) != 0) |
6689 | 0 | continue; |
6690 | | /* Multiple callbacks are specified for the same prog;
6691 | | * the verifier will eventually return an error for this
6692 | | * case, hence simply skip appending a subprog.
6693 | | */ |
6694 | 0 | if (prog->exception_cb_idx >= 0) { |
6695 | 0 | prog->exception_cb_idx = -1; |
6696 | 0 | break; |
6697 | 0 | } |
6698 | | |
6699 | 0 | name += pfx_len; |
6700 | 0 | if (str_is_empty(name)) { |
6701 | 0 | pr_warn("prog '%s': exception_callback:<value> decl tag contains empty value\n", |
6702 | 0 | prog->name); |
6703 | 0 | return -EINVAL; |
6704 | 0 | } |
6705 | | |
6706 | 0 | for (j = 0; j < obj->nr_programs; j++) { |
6707 | 0 | struct bpf_program *subprog = &obj->programs[j]; |
6708 | |
6709 | 0 | if (!prog_is_subprog(obj, subprog)) |
6710 | 0 | continue; |
6711 | 0 | if (strcmp(name, subprog->name) != 0) |
6712 | 0 | continue; |
6713 | | /* Enforce non-hidden, as from the verifier's point
6714 | | * of view it expects global functions, whereas
6715 | | * mark_btf_static fixes up the linkage as static.
6716 | | */ |
6717 | 0 | if (!subprog->sym_global || subprog->mark_btf_static) { |
6718 | 0 | pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n", |
6719 | 0 | prog->name, subprog->name); |
6720 | 0 | return -EINVAL; |
6721 | 0 | } |
6722 | | /* Let's see if we already saw a static exception callback with the same name */ |
6723 | 0 | if (prog->exception_cb_idx >= 0) { |
6724 | 0 | pr_warn("prog '%s': multiple subprogs with same name as exception callback '%s'\n", |
6725 | 0 | prog->name, subprog->name); |
6726 | 0 | return -EINVAL; |
6727 | 0 | } |
6728 | 0 | prog->exception_cb_idx = j; |
6729 | 0 | break; |
6730 | 0 | } |
6731 | | |
6732 | 0 | if (prog->exception_cb_idx >= 0) |
6733 | 0 | continue; |
6734 | | |
6735 | 0 | pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name); |
6736 | 0 | return -ENOENT; |
6737 | 0 | } |
6738 | | |
6739 | 0 | return 0; |
6740 | 0 | } |
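 | | /* BPF-side sketch of the "exception_callback:<value>" decl tag handled
 | |  * above, assuming vmlinux.h plus the __exception_cb() macro from
 | |  * selftests' bpf_experimental.h, which expands to
 | |  * __attribute__((btf_decl_tag("exception_callback:" #name))); names are
 | |  * illustrative:
 | |  *
 | |  *	__noinline int my_exc_cb(u64 cookie)
 | |  *	{
 | |  *		return 0;
 | |  *	}
 | |  *
 | |  *	SEC("tc")
 | |  *	__exception_cb(my_exc_cb)
 | |  *	int main_prog(struct __sk_buff *ctx)
 | |  *	{
 | |  *		...
 | |  *	}
 | |  */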
6741 | | |
6742 | | static struct { |
6743 | | enum bpf_prog_type prog_type; |
6744 | | const char *ctx_name; |
6745 | | } global_ctx_map[] = { |
6746 | | { BPF_PROG_TYPE_CGROUP_DEVICE, "bpf_cgroup_dev_ctx" }, |
6747 | | { BPF_PROG_TYPE_CGROUP_SKB, "__sk_buff" }, |
6748 | | { BPF_PROG_TYPE_CGROUP_SOCK, "bpf_sock" }, |
6749 | | { BPF_PROG_TYPE_CGROUP_SOCK_ADDR, "bpf_sock_addr" }, |
6750 | | { BPF_PROG_TYPE_CGROUP_SOCKOPT, "bpf_sockopt" }, |
6751 | | { BPF_PROG_TYPE_CGROUP_SYSCTL, "bpf_sysctl" }, |
6752 | | { BPF_PROG_TYPE_FLOW_DISSECTOR, "__sk_buff" }, |
6753 | | { BPF_PROG_TYPE_KPROBE, "bpf_user_pt_regs_t" }, |
6754 | | { BPF_PROG_TYPE_LWT_IN, "__sk_buff" }, |
6755 | | { BPF_PROG_TYPE_LWT_OUT, "__sk_buff" }, |
6756 | | { BPF_PROG_TYPE_LWT_SEG6LOCAL, "__sk_buff" }, |
6757 | | { BPF_PROG_TYPE_LWT_XMIT, "__sk_buff" }, |
6758 | | { BPF_PROG_TYPE_NETFILTER, "bpf_nf_ctx" }, |
6759 | | { BPF_PROG_TYPE_PERF_EVENT, "bpf_perf_event_data" }, |
6760 | | { BPF_PROG_TYPE_RAW_TRACEPOINT, "bpf_raw_tracepoint_args" }, |
6761 | | { BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, "bpf_raw_tracepoint_args" }, |
6762 | | { BPF_PROG_TYPE_SCHED_ACT, "__sk_buff" }, |
6763 | | { BPF_PROG_TYPE_SCHED_CLS, "__sk_buff" }, |
6764 | | { BPF_PROG_TYPE_SK_LOOKUP, "bpf_sk_lookup" }, |
6765 | | { BPF_PROG_TYPE_SK_MSG, "sk_msg_md" }, |
6766 | | { BPF_PROG_TYPE_SK_REUSEPORT, "sk_reuseport_md" }, |
6767 | | { BPF_PROG_TYPE_SK_SKB, "__sk_buff" }, |
6768 | | { BPF_PROG_TYPE_SOCK_OPS, "bpf_sock_ops" }, |
6769 | | { BPF_PROG_TYPE_SOCKET_FILTER, "__sk_buff" }, |
6770 | | { BPF_PROG_TYPE_XDP, "xdp_md" }, |
6771 | | /* all other program types don't have "named" context structs */ |
6772 | | }; |
6773 | | |
6774 | | /* forward declarations for arch-specific underlying types of bpf_user_pt_regs_t typedef, |
6775 | | * for below __builtin_types_compatible_p() checks; |
6776 | | * with this approach we don't need any extra arch-specific #ifdef guards |
6777 | | */ |
6778 | | struct pt_regs; |
6779 | | struct user_pt_regs; |
6780 | | struct user_regs_struct; |
6781 | | |
6782 | | static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_program *prog, |
6783 | | const char *subprog_name, int arg_idx, |
6784 | | int arg_type_id, const char *ctx_name) |
6785 | 0 | { |
6786 | 0 | const struct btf_type *t; |
6787 | 0 | const char *tname; |
6788 | | |
6789 | | /* check if existing parameter already matches verifier expectations */ |
6790 | 0 | t = skip_mods_and_typedefs(btf, arg_type_id, NULL); |
6791 | 0 | if (!btf_is_ptr(t)) |
6792 | 0 | goto out_warn; |
6793 | | |
6794 | | /* typedef bpf_user_pt_regs_t is a special PITA case, valid for kprobe |
6795 | | * and perf_event programs, so check this case early on and forget |
6796 | | * about it for subsequent checks |
6797 | | */ |
6798 | 0 | while (btf_is_mod(t)) |
6799 | 0 | t = btf__type_by_id(btf, t->type); |
6800 | 0 | if (btf_is_typedef(t) && |
6801 | 0 | (prog->type == BPF_PROG_TYPE_KPROBE || prog->type == BPF_PROG_TYPE_PERF_EVENT)) { |
6802 | 0 | tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>"; |
6803 | 0 | if (strcmp(tname, "bpf_user_pt_regs_t") == 0) |
6804 | 0 | return false; /* canonical type for kprobe/perf_event */ |
6805 | 0 | } |
6806 | | |
6807 | | /* now we can ignore typedefs moving forward */ |
6808 | 0 | t = skip_mods_and_typedefs(btf, t->type, NULL); |
6809 | | |
6810 | | /* if it's `void *`, definitely fix up BTF info */ |
6811 | 0 | if (btf_is_void(t)) |
6812 | 0 | return true; |
6813 | | |
6814 | | /* if it's already proper canonical type, no need to fix up */ |
6815 | 0 | tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>"; |
6816 | 0 | if (btf_is_struct(t) && strcmp(tname, ctx_name) == 0) |
6817 | 0 | return false; |
6818 | | |
6819 | | /* special cases */ |
6820 | 0 | switch (prog->type) { |
6821 | 0 | case BPF_PROG_TYPE_KPROBE: |
6822 | | /* `struct pt_regs *` is expected, but we need to fix up */ |
6823 | 0 | if (btf_is_struct(t) && strcmp(tname, "pt_regs") == 0) |
6824 | 0 | return true; |
6825 | 0 | break; |
6826 | 0 | case BPF_PROG_TYPE_PERF_EVENT: |
6827 | 0 | if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) && |
6828 | 0 | btf_is_struct(t) && strcmp(tname, "pt_regs") == 0) |
6829 | 0 | return true; |
6830 | 0 | if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) && |
6831 | 0 | btf_is_struct(t) && strcmp(tname, "user_pt_regs") == 0) |
6832 | 0 | return true; |
6833 | 0 | if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) && |
6834 | 0 | btf_is_struct(t) && strcmp(tname, "user_regs_struct") == 0) |
6835 | 0 | return true; |
6836 | 0 | break; |
6837 | 0 | case BPF_PROG_TYPE_RAW_TRACEPOINT: |
6838 | 0 | case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: |
6839 | | /* allow u64* as ctx */ |
6840 | 0 | if (btf_is_int(t) && t->size == 8) |
6841 | 0 | return true; |
6842 | 0 | break; |
6843 | 0 | default: |
6844 | 0 | break; |
6845 | 0 | } |
6846 | | |
6847 | 0 | out_warn: |
6848 | 0 | pr_warn("prog '%s': subprog '%s' arg#%d is expected to be of `struct %s *` type\n", |
6849 | 0 | prog->name, subprog_name, arg_idx, ctx_name); |
6850 | 0 | return false; |
6851 | 0 | } |
6852 | | |
6853 | | static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_program *prog) |
6854 | 0 | { |
6855 | 0 | int fn_id, fn_proto_id, ret_type_id, orig_proto_id; |
6856 | 0 | int i, err, arg_cnt, fn_name_off, linkage; |
6857 | 0 | struct btf_type *fn_t, *fn_proto_t, *t; |
6858 | 0 | struct btf_param *p; |
6859 | | |
6860 | | /* caller already validated FUNC -> FUNC_PROTO validity */ |
6861 | 0 | fn_t = btf_type_by_id(btf, orig_fn_id); |
6862 | 0 | fn_proto_t = btf_type_by_id(btf, fn_t->type); |
6863 | | |
6864 | | /* Note that each btf__add_xxx() operation invalidates |
6865 | | * all btf_type and string pointers, so we need to be |
6866 | | * very careful when cloning BTF types. BTF type |
6867 | | * pointers have to be always refetched. And to avoid |
6868 | | * problems with invalidated string pointers, we |
6869 | | * add empty strings initially, then just fix up |
6870 | | * name_off offsets in place. Offsets are stable for |
6871 | | * existing strings, so that works out. |
6872 | | */ |
6873 | 0 | fn_name_off = fn_t->name_off; /* we are about to invalidate fn_t */ |
6874 | 0 | linkage = btf_func_linkage(fn_t); |
6875 | 0 | orig_proto_id = fn_t->type; /* original FUNC_PROTO ID */ |
6876 | 0 | ret_type_id = fn_proto_t->type; /* fn_proto_t will be invalidated */ |
6877 | 0 | arg_cnt = btf_vlen(fn_proto_t); |
6878 | | |
6879 | | /* clone FUNC_PROTO and its params */ |
6880 | 0 | fn_proto_id = btf__add_func_proto(btf, ret_type_id); |
6881 | 0 | if (fn_proto_id < 0) |
6882 | 0 | return -EINVAL; |
6883 | | |
6884 | 0 | for (i = 0; i < arg_cnt; i++) { |
6885 | 0 | int name_off; |
6886 | | |
6887 | | /* copy original parameter data */ |
6888 | 0 | t = btf_type_by_id(btf, orig_proto_id); |
6889 | 0 | p = &btf_params(t)[i]; |
6890 | 0 | name_off = p->name_off; |
6891 | |
6892 | 0 | err = btf__add_func_param(btf, "", p->type); |
6893 | 0 | if (err) |
6894 | 0 | return err; |
6895 | | |
6896 | 0 | fn_proto_t = btf_type_by_id(btf, fn_proto_id); |
6897 | 0 | p = &btf_params(fn_proto_t)[i]; |
6898 | 0 | p->name_off = name_off; /* use remembered str offset */ |
6899 | 0 | } |
6900 | | |
6901 | | /* clone FUNC now, btf__add_func() enforces non-empty name, so use |
6902 | | * entry program's name as a placeholder, which we replace immediately |
6903 | | * with original name_off |
6904 | | */ |
6905 | 0 | fn_id = btf__add_func(btf, prog->name, linkage, fn_proto_id); |
6906 | 0 | if (fn_id < 0) |
6907 | 0 | return -EINVAL; |
6908 | | |
6909 | 0 | fn_t = btf_type_by_id(btf, fn_id); |
6910 | 0 | fn_t->name_off = fn_name_off; /* reuse original string */ |
6911 | |
6912 | 0 | return fn_id; |
6913 | 0 | } |
6914 | | |
6915 | | /* Check if main program or global subprog's function prototype has `arg:ctx` |
6916 | | * argument tags, and, if necessary, substitute correct type to match what BPF |
6917 | | * verifier would expect, taking into account specific program type. This |
6918 | | * allows supporting the __arg_ctx tag transparently on old kernels that don't
6919 | | * yet have native support for it in the verifier, making the user's life much
6920 | | * easier. |
6921 | | */ |
6922 | | static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_program *prog) |
6923 | 0 | { |
6924 | 0 | const char *ctx_name = NULL, *ctx_tag = "arg:ctx", *fn_name; |
6925 | 0 | struct bpf_func_info_min *func_rec; |
6926 | 0 | struct btf_type *fn_t, *fn_proto_t; |
6927 | 0 | struct btf *btf = obj->btf; |
6928 | 0 | const struct btf_type *t; |
6929 | 0 | struct btf_param *p; |
6930 | 0 | int ptr_id = 0, struct_id, tag_id, orig_fn_id; |
6931 | 0 | int i, n, arg_idx, arg_cnt, err, rec_idx; |
6932 | 0 | int *orig_ids; |
6933 | | |
6934 | | /* no .BTF.ext, no problem */ |
6935 | 0 | if (!obj->btf_ext || !prog->func_info) |
6936 | 0 | return 0; |
6937 | | |
6938 | | /* don't do any fix ups if kernel natively supports __arg_ctx */ |
6939 | 0 | if (kernel_supports(obj, FEAT_ARG_CTX_TAG)) |
6940 | 0 | return 0; |
6941 | | |
6942 | | /* some BPF program types just don't have named context structs, so |
6943 | | * this fallback mechanism doesn't work for them |
6944 | | */ |
6945 | 0 | for (i = 0; i < ARRAY_SIZE(global_ctx_map); i++) { |
6946 | 0 | if (global_ctx_map[i].prog_type != prog->type) |
6947 | 0 | continue; |
6948 | 0 | ctx_name = global_ctx_map[i].ctx_name; |
6949 | 0 | break; |
6950 | 0 | } |
6951 | 0 | if (!ctx_name) |
6952 | 0 | return 0; |
6953 | | |
6954 | | /* remember original func BTF IDs to detect if we already cloned them */ |
6955 | 0 | orig_ids = calloc(prog->func_info_cnt, sizeof(*orig_ids)); |
6956 | 0 | if (!orig_ids) |
6957 | 0 | return -ENOMEM; |
6958 | 0 | for (i = 0; i < prog->func_info_cnt; i++) { |
6959 | 0 | func_rec = prog->func_info + prog->func_info_rec_size * i; |
6960 | 0 | orig_ids[i] = func_rec->type_id; |
6961 | 0 | } |
6962 | | |
6963 | | /* go through each DECL_TAG with "arg:ctx" and see if it points to one |
6964 | | * of our subprogs; if yes and subprog is global and needs adjustment, |
6965 | | * clone and adjust FUNC -> FUNC_PROTO combo |
6966 | | */ |
6967 | 0 | for (i = 1, n = btf__type_cnt(btf); i < n; i++) { |
6968 | | /* only DECL_TAG with "arg:ctx" value are interesting */ |
6969 | 0 | t = btf__type_by_id(btf, i); |
6970 | 0 | if (!btf_is_decl_tag(t)) |
6971 | 0 | continue; |
6972 | 0 | if (strcmp(btf__str_by_offset(btf, t->name_off), ctx_tag) != 0) |
6973 | 0 | continue; |
6974 | | |
6975 | | /* only global funcs need adjustment, if at all */ |
6976 | 0 | orig_fn_id = t->type; |
6977 | 0 | fn_t = btf_type_by_id(btf, orig_fn_id); |
6978 | 0 | if (!btf_is_func(fn_t) || btf_func_linkage(fn_t) != BTF_FUNC_GLOBAL) |
6979 | 0 | continue; |
6980 | | |
6981 | | /* sanity check FUNC -> FUNC_PROTO chain, just in case */ |
6982 | 0 | fn_proto_t = btf_type_by_id(btf, fn_t->type); |
6983 | 0 | if (!fn_proto_t || !btf_is_func_proto(fn_proto_t)) |
6984 | 0 | continue; |
6985 | | |
6986 | | /* find corresponding func_info record */ |
6987 | 0 | func_rec = NULL; |
6988 | 0 | for (rec_idx = 0; rec_idx < prog->func_info_cnt; rec_idx++) { |
6989 | 0 | if (orig_ids[rec_idx] == t->type) { |
6990 | 0 | func_rec = prog->func_info + prog->func_info_rec_size * rec_idx; |
6991 | 0 | break; |
6992 | 0 | } |
6993 | 0 | } |
6994 | | /* current main program doesn't call into this subprog */ |
6995 | 0 | if (!func_rec) |
6996 | 0 | continue; |
6997 | | |
6998 | | /* some more sanity checking of DECL_TAG */ |
6999 | 0 | arg_cnt = btf_vlen(fn_proto_t); |
7000 | 0 | arg_idx = btf_decl_tag(t)->component_idx; |
7001 | 0 | if (arg_idx < 0 || arg_idx >= arg_cnt) |
7002 | 0 | continue; |
7003 | | |
7004 | | /* check if we should fix up argument type */ |
7005 | 0 | p = &btf_params(fn_proto_t)[arg_idx]; |
7006 | 0 | fn_name = btf__str_by_offset(btf, fn_t->name_off) ?: "<anon>"; |
7007 | 0 | if (!need_func_arg_type_fixup(btf, prog, fn_name, arg_idx, p->type, ctx_name)) |
7008 | 0 | continue; |
7009 | | |
7010 | | /* clone fn/fn_proto, unless we already did it for another arg */ |
7011 | 0 | if (func_rec->type_id == orig_fn_id) { |
7012 | 0 | int fn_id; |
7013 | |
7014 | 0 | fn_id = clone_func_btf_info(btf, orig_fn_id, prog); |
7015 | 0 | if (fn_id < 0) { |
7016 | 0 | err = fn_id; |
7017 | 0 | goto err_out; |
7018 | 0 | } |
7019 | | |
7020 | | /* point func_info record to a cloned FUNC type */ |
7021 | 0 | func_rec->type_id = fn_id; |
7022 | 0 | } |
7023 | | |
7024 | | /* create PTR -> STRUCT type chain to mark PTR_TO_CTX argument; |
7025 | | * we do it just once per main BPF program, as all global |
7026 | | * funcs share the same program type, so we need only one
7027 | | * PTR -> STRUCT type chain
7028 | | */ |
7029 | 0 | if (ptr_id == 0) { |
7030 | 0 | struct_id = btf__add_struct(btf, ctx_name, 0); |
7031 | 0 | ptr_id = btf__add_ptr(btf, struct_id); |
7032 | 0 | if (ptr_id < 0 || struct_id < 0) { |
7033 | 0 | err = -EINVAL; |
7034 | 0 | goto err_out; |
7035 | 0 | } |
7036 | 0 | } |
7037 | | |
7038 | | /* for completeness, clone DECL_TAG and point it to cloned param */ |
7039 | 0 | tag_id = btf__add_decl_tag(btf, ctx_tag, func_rec->type_id, arg_idx); |
7040 | 0 | if (tag_id < 0) { |
7041 | 0 | err = -EINVAL; |
7042 | 0 | goto err_out; |
7043 | 0 | } |
7044 | | |
7045 | | /* all the BTF manipulations invalidated pointers, refetch them */ |
7046 | 0 | fn_t = btf_type_by_id(btf, func_rec->type_id); |
7047 | 0 | fn_proto_t = btf_type_by_id(btf, fn_t->type); |
7048 | | |
7049 | | /* fix up type ID pointed to by param */ |
7050 | 0 | p = &btf_params(fn_proto_t)[arg_idx]; |
7051 | 0 | p->type = ptr_id; |
7052 | 0 | } |
7053 | | |
7054 | 0 | free(orig_ids); |
7055 | 0 | return 0; |
7056 | 0 | err_out: |
7057 | 0 | free(orig_ids); |
7058 | 0 | return err; |
7059 | 0 | } |
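 | | /* BPF-side sketch of what the fixup above enables on older kernels,
 | |  * assuming the __arg_ctx macro from bpf_helpers.h, which expands to
 | |  * __attribute__((btf_decl_tag("arg:ctx"))), i.e. the ctx_tag matched
 | |  * above (names are illustrative):
 | |  *
 | |  *	__noinline int handle_pkt(void *ctx __arg_ctx)
 | |  *	{
 | |  *		return 0;
 | |  *	}
 | |  *
 | |  *	SEC("xdp")
 | |  *	int prog(struct xdp_md *ctx)
 | |  *	{
 | |  *		return handle_pkt(ctx);
 | |  *	}
 | |  */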
7060 | | |
7061 | | static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) |
7062 | 0 | { |
7063 | 0 | struct bpf_program *prog; |
7064 | 0 | size_t i, j; |
7065 | 0 | int err; |
7066 | |
|
7067 | 0 | if (obj->btf_ext) { |
7068 | 0 | err = bpf_object__relocate_core(obj, targ_btf_path); |
7069 | 0 | if (err) { |
7070 | 0 | pr_warn("failed to perform CO-RE relocations: %s\n", |
7071 | 0 | errstr(err)); |
7072 | 0 | return err; |
7073 | 0 | } |
7074 | 0 | bpf_object__sort_relos(obj); |
7075 | 0 | } |
7076 | | |
7077 | | /* Before relocating calls, pre-process relocations and mark
7078 | | * the few ld_imm64 instructions that point to subprogs.
7079 | | * Otherwise bpf_object__reloc_code() later would have to consider
7080 | | * all ld_imm64 insns as relocation candidates. That would
7081 | | * reduce relocation speed, since the number of find_prog_insn_relo()
7082 | | * calls would increase and most of them would fail to find a relo.
7083 | | */ |
7084 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
7085 | 0 | prog = &obj->programs[i]; |
7086 | 0 | for (j = 0; j < prog->nr_reloc; j++) { |
7087 | 0 | struct reloc_desc *relo = &prog->reloc_desc[j]; |
7088 | 0 | struct bpf_insn *insn = &prog->insns[relo->insn_idx]; |
7089 | | |
7090 | | /* mark the insn, so it's recognized by insn_is_pseudo_func() */ |
7091 | 0 | if (relo->type == RELO_SUBPROG_ADDR) |
7092 | 0 | insn[0].src_reg = BPF_PSEUDO_FUNC; |
7093 | 0 | } |
7094 | 0 | } |
7095 | | |
7096 | | /* relocate subprogram calls and append used subprograms to main |
7097 | | * programs; each copy of subprogram code needs to be relocated |
7098 | | * differently for each main program, because its code location might |
7099 | | * have changed. |
7100 | | * Append subprog relos to main programs to allow data relos to be |
7101 | | * processed after text is completely relocated. |
7102 | | */ |
7103 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
7104 | 0 | prog = &obj->programs[i]; |
7105 | | /* sub-program's sub-calls are relocated within the context of |
7106 | | * its main program only |
7107 | | */ |
7108 | 0 | if (prog_is_subprog(obj, prog)) |
7109 | 0 | continue; |
7110 | 0 | if (!prog->autoload) |
7111 | 0 | continue; |
7112 | | |
7113 | 0 | err = bpf_object__relocate_calls(obj, prog); |
7114 | 0 | if (err) { |
7115 | 0 | pr_warn("prog '%s': failed to relocate calls: %s\n", |
7116 | 0 | prog->name, errstr(err)); |
7117 | 0 | return err; |
7118 | 0 | } |
7119 | | |
7120 | 0 | err = bpf_prog_assign_exc_cb(obj, prog); |
7121 | 0 | if (err) |
7122 | 0 | return err; |
7123 | | /* Now, also append exception callback if it has not been done already. */ |
7124 | 0 | if (prog->exception_cb_idx >= 0) { |
7125 | 0 | struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx]; |
7126 | | |
7127 | | /* Calling the exception callback directly is disallowed; the
7128 | | * verifier will reject it later. If it was processed already,
7129 | | * we can skip this step; otherwise, for all other valid cases,
7130 | | * we have to append the exception callback now.
7131 | | */ |
7132 | 0 | if (subprog->sub_insn_off == 0) { |
7133 | 0 | err = bpf_object__append_subprog_code(obj, prog, subprog); |
7134 | 0 | if (err) |
7135 | 0 | return err; |
7136 | 0 | err = bpf_object__reloc_code(obj, prog, subprog); |
7137 | 0 | if (err) |
7138 | 0 | return err; |
7139 | 0 | } |
7140 | 0 | } |
7141 | 0 | } |
7142 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
7143 | 0 | prog = &obj->programs[i]; |
7144 | 0 | if (prog_is_subprog(obj, prog)) |
7145 | 0 | continue; |
7146 | 0 | if (!prog->autoload) |
7147 | 0 | continue; |
7148 | | |
7149 | | /* Process data relos for main programs */ |
7150 | 0 | err = bpf_object__relocate_data(obj, prog); |
7151 | 0 | if (err) { |
7152 | 0 | pr_warn("prog '%s': failed to relocate data references: %s\n", |
7153 | 0 | prog->name, errstr(err)); |
7154 | 0 | return err; |
7155 | 0 | } |
7156 | | |
7157 | | /* Fix up .BTF.ext information, if necessary */ |
7158 | 0 | err = bpf_program_fixup_func_info(obj, prog); |
7159 | 0 | if (err) { |
7160 | 0 | pr_warn("prog '%s': failed to perform .BTF.ext fix ups: %s\n", |
7161 | 0 | prog->name, errstr(err)); |
7162 | 0 | return err; |
7163 | 0 | } |
7164 | 0 | } |
7165 | | |
7166 | 0 | return 0; |
7167 | 0 | } |
7168 | | |
7169 | | static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, |
7170 | | Elf64_Shdr *shdr, Elf_Data *data); |
7171 | | |
7172 | | static int bpf_object__collect_map_relos(struct bpf_object *obj, |
7173 | | Elf64_Shdr *shdr, Elf_Data *data) |
7174 | 28 | { |
7175 | 28 | const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *); |
7176 | 28 | int i, j, nrels, new_sz; |
7177 | 28 | const struct btf_var_secinfo *vi = NULL; |
7178 | 28 | const struct btf_type *sec, *var, *def; |
7179 | 28 | struct bpf_map *map = NULL, *targ_map = NULL; |
7180 | 28 | struct bpf_program *targ_prog = NULL; |
7181 | 28 | bool is_prog_array, is_map_in_map; |
7182 | 28 | const struct btf_member *member; |
7183 | 28 | const char *name, *mname, *type; |
7184 | 28 | unsigned int moff; |
7185 | 28 | Elf64_Sym *sym; |
7186 | 28 | Elf64_Rel *rel; |
7187 | 28 | void *tmp; |
7188 | | |
7189 | 28 | if (!obj->efile.btf_maps_sec_btf_id || !obj->btf) |
7190 | 0 | return -EINVAL; |
7191 | 28 | sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id); |
7192 | 28 | if (!sec) |
7193 | 0 | return -EINVAL; |
7194 | | |
7195 | 28 | nrels = shdr->sh_size / shdr->sh_entsize; |
7196 | 28 | for (i = 0; i < nrels; i++) { |
7197 | 20 | rel = elf_rel_by_idx(data, i); |
7198 | 20 | if (!rel) { |
7199 | 0 | pr_warn(".maps relo #%d: failed to get ELF relo\n", i); |
7200 | 0 | return -LIBBPF_ERRNO__FORMAT; |
7201 | 0 | } |
7202 | | |
7203 | 20 | sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); |
7204 | 20 | if (!sym) { |
7205 | 14 | pr_warn(".maps relo #%d: symbol %zx not found\n", |
7206 | 14 | i, (size_t)ELF64_R_SYM(rel->r_info)); |
7207 | 14 | return -LIBBPF_ERRNO__FORMAT; |
7208 | 14 | } |
7209 | 6 | name = elf_sym_str(obj, sym->st_name) ?: "<?>"; |
7210 | | |
7211 | 6 | pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n", |
7212 | 4 | i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value, |
7213 | 4 | (size_t)rel->r_offset, sym->st_name, name); |
7214 | | |
7215 | 19 | for (j = 0; j < obj->nr_maps; j++) { |
7216 | 15 | map = &obj->maps[j]; |
7217 | 15 | if (map->sec_idx != obj->efile.btf_maps_shndx) |
7218 | 15 | continue; |
7219 | | |
7220 | 0 | vi = btf_var_secinfos(sec) + map->btf_var_idx; |
7221 | 0 | if (vi->offset <= rel->r_offset && |
7222 | 0 | rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size) |
7223 | 0 | break; |
7224 | 0 | } |
7225 | 6 | if (j == obj->nr_maps) { |
7226 | 6 | pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n", |
7227 | 6 | i, name, (size_t)rel->r_offset); |
7228 | 6 | return -EINVAL; |
7229 | 6 | } |
7230 | | |
7231 | 18.4E | is_map_in_map = bpf_map_type__is_map_in_map(map->def.type); |
7232 | 18.4E | is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY; |
7233 | 18.4E | type = is_map_in_map ? "map" : "prog"; |
7234 | 18.4E | if (is_map_in_map) { |
7235 | 0 | if (sym->st_shndx != obj->efile.btf_maps_shndx) { |
7236 | 0 | pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", |
7237 | 0 | i, name); |
7238 | 0 | return -LIBBPF_ERRNO__RELOC; |
7239 | 0 | } |
7240 | 0 | if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS && |
7241 | 0 | map->def.key_size != sizeof(int)) { |
7242 | 0 | pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n", |
7243 | 0 | i, map->name, sizeof(int)); |
7244 | 0 | return -EINVAL; |
7245 | 0 | } |
7246 | 0 | targ_map = bpf_object__find_map_by_name(obj, name); |
7247 | 0 | if (!targ_map) { |
7248 | 0 | pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n", |
7249 | 0 | i, name); |
7250 | 0 | return -ESRCH; |
7251 | 0 | } |
7252 | 18.4E | } else if (is_prog_array) { |
7253 | 0 | targ_prog = bpf_object__find_program_by_name(obj, name); |
7254 | 0 | if (!targ_prog) { |
7255 | 0 | pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n", |
7256 | 0 | i, name); |
7257 | 0 | return -ESRCH; |
7258 | 0 | } |
7259 | 0 | if (targ_prog->sec_idx != sym->st_shndx || |
7260 | 0 | targ_prog->sec_insn_off * 8 != sym->st_value || |
7261 | 0 | prog_is_subprog(obj, targ_prog)) { |
7262 | 0 | pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n", |
7263 | 0 | i, name); |
7264 | 0 | return -LIBBPF_ERRNO__RELOC; |
7265 | 0 | } |
7266 | 18.4E | } else { |
7267 | 18.4E | return -EINVAL; |
7268 | 18.4E | } |
7269 | | |
7270 | 0 | var = btf__type_by_id(obj->btf, vi->type); |
7271 | 0 | def = skip_mods_and_typedefs(obj->btf, var->type, NULL); |
7272 | 0 | if (btf_vlen(def) == 0) |
7273 | 0 | return -EINVAL; |
7274 | 0 | member = btf_members(def) + btf_vlen(def) - 1; |
7275 | 0 | mname = btf__name_by_offset(obj->btf, member->name_off); |
7276 | 0 | if (strcmp(mname, "values")) |
7277 | 0 | return -EINVAL; |
7278 | | |
7279 | 0 | moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8; |
7280 | 0 | if (rel->r_offset - vi->offset < moff) |
7281 | 0 | return -EINVAL; |
7282 | | |
7283 | 0 | moff = rel->r_offset - vi->offset - moff; |
7284 | | /* here we use the BPF pointer size, which is always 64-bit, as we
7285 | | * are parsing an ELF that was built for the BPF target
7286 | | */ |
7287 | 0 | if (moff % bpf_ptr_sz) |
7288 | 0 | return -EINVAL; |
7289 | 0 | moff /= bpf_ptr_sz; |
7290 | 0 | if (moff >= map->init_slots_sz) { |
7291 | 0 | new_sz = moff + 1; |
7292 | 0 | tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz); |
7293 | 0 | if (!tmp) |
7294 | 0 | return -ENOMEM; |
7295 | 0 | map->init_slots = tmp; |
7296 | 0 | memset(map->init_slots + map->init_slots_sz, 0, |
7297 | 0 | (new_sz - map->init_slots_sz) * host_ptr_sz); |
7298 | 0 | map->init_slots_sz = new_sz; |
7299 | 0 | } |
7300 | 0 | map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog; |
7301 | |
7302 | 0 | pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n", |
7303 | 0 | i, map->name, moff, type, name); |
7304 | 0 | } |
7305 | | |
7306 | 8 | return 0; |
7307 | 28 | } |
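
Note: for context, a minimal sketch (not part of this file) of the BPF-side declaration that the .maps relocation collector above parses. The map names are illustrative; the __uint/__type/__array macros come from <bpf/bpf_helpers.h>. Each &inner_a reference in the "values" initializer becomes an ELF relocation against the .maps section, which the loop above records in map->init_slots:

    struct inner_map {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, int);
    } inner_a SEC(".maps");

    struct {
        __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
        __uint(max_entries, 4);
        __type(key, int);
        __array(values, struct inner_map);
    } outer SEC(".maps") = {
        .values = { [0] = &inner_a }, /* resolved into init_slots[0] */
    };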
7308 | | |
7309 | | static int bpf_object__collect_relos(struct bpf_object *obj) |
7310 | 2.21k | { |
7311 | 2.21k | int i, err; |
7312 | | |
7313 | 17.9k | for (i = 0; i < obj->efile.sec_cnt; i++) { |
7314 | 16.1k | struct elf_sec_desc *sec_desc = &obj->efile.secs[i]; |
7315 | 16.1k | Elf64_Shdr *shdr; |
7316 | 16.1k | Elf_Data *data; |
7317 | 16.1k | int idx; |
7318 | | |
7319 | 16.1k | if (sec_desc->sec_type != SEC_RELO) |
7320 | 15.1k | continue; |
7321 | | |
7322 | 1.02k | shdr = sec_desc->shdr; |
7323 | 1.02k | data = sec_desc->data; |
7324 | 1.02k | idx = shdr->sh_info; |
7325 | | |
7326 | 1.02k | if (shdr->sh_type != SHT_REL || idx < 0 || idx >= obj->efile.sec_cnt) { |
7327 | 0 | pr_warn("internal error at %d\n", __LINE__); |
7328 | 0 | return -LIBBPF_ERRNO__INTERNAL; |
7329 | 0 | } |
7330 | | |
7331 | 1.02k | if (obj->efile.secs[idx].sec_type == SEC_ST_OPS) |
7332 | 8 | err = bpf_object__collect_st_ops_relos(obj, shdr, data); |
7333 | 1.01k | else if (idx == obj->efile.btf_maps_shndx) |
7334 | 28 | err = bpf_object__collect_map_relos(obj, shdr, data); |
7335 | 991 | else |
7336 | 991 | err = bpf_object__collect_prog_relos(obj, shdr, data); |
7337 | 1.02k | if (err) |
7338 | 494 | return err; |
7339 | 1.02k | } |
7340 | | |
7341 | 1.72k | bpf_object__sort_relos(obj); |
7342 | 1.72k | return 0; |
7343 | 2.21k | } |
7344 | | |
7345 | | static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id) |
7346 | 0 | { |
7347 | 0 | if (BPF_CLASS(insn->code) == BPF_JMP && |
7348 | 0 | BPF_OP(insn->code) == BPF_CALL && |
7349 | 0 | BPF_SRC(insn->code) == BPF_K && |
7350 | 0 | insn->src_reg == 0 && |
7351 | 0 | insn->dst_reg == 0) { |
7352 | 0 | *func_id = insn->imm; |
7353 | 0 | return true; |
7354 | 0 | } |
7355 | 0 | return false; |
7356 | 0 | } |
7357 | | |
7358 | | static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog) |
7359 | 0 | { |
7360 | 0 | struct bpf_insn *insn = prog->insns; |
7361 | 0 | enum bpf_func_id func_id; |
7362 | 0 | int i; |
7363 | |
7364 | 0 | if (obj->gen_loader) |
7365 | 0 | return 0; |
7366 | | |
7367 | 0 | for (i = 0; i < prog->insns_cnt; i++, insn++) { |
7368 | 0 | if (!insn_is_helper_call(insn, &func_id)) |
7369 | 0 | continue; |
7370 | | |
7371 | | /* on kernels that don't yet support |
7372 | | * bpf_probe_read_{kernel,user}[_str] helpers, fall back |
7373 | | * to bpf_probe_read() which works well for old kernels |
7374 | | */ |
7375 | 0 | switch (func_id) { |
7376 | 0 | case BPF_FUNC_probe_read_kernel: |
7377 | 0 | case BPF_FUNC_probe_read_user: |
7378 | 0 | if (!kernel_supports(obj, FEAT_PROBE_READ_KERN)) |
7379 | 0 | insn->imm = BPF_FUNC_probe_read; |
7380 | 0 | break; |
7381 | 0 | case BPF_FUNC_probe_read_kernel_str: |
7382 | 0 | case BPF_FUNC_probe_read_user_str: |
7383 | 0 | if (!kernel_supports(obj, FEAT_PROBE_READ_KERN)) |
7384 | 0 | insn->imm = BPF_FUNC_probe_read_str; |
7385 | 0 | break; |
7386 | 0 | default: |
7387 | 0 | break; |
7388 | 0 | } |
7389 | 0 | } |
7390 | 0 | return 0; |
7391 | 0 | } |
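
Note: to make the matched pattern concrete, a sketch of a raw instruction that insn_is_helper_call() above would recognize; the helper chosen is illustrative. A non-zero src_reg (BPF_PSEUDO_CALL) would instead denote a bpf-to-bpf subprogram call, which the check deliberately excludes:

    #include <linux/bpf.h>
    #include <linux/filter.h>

    /* opcode 0x85: BPF_CALL within the BPF_JMP class, BPF_K source */
    static const struct bpf_insn helper_call = {
        .code    = BPF_JMP | BPF_CALL,
        .dst_reg = 0,
        .src_reg = 0, /* 0 = helper call; BPF_PSEUDO_CALL = subprog call */
        .off     = 0,
        .imm     = BPF_FUNC_probe_read_kernel, /* func_id the sanitizer may rewrite */
    };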
7392 | | |
7393 | | static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name, |
7394 | | int *btf_obj_fd, int *btf_type_id); |
7395 | | |
7396 | | /* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */ |
7397 | | static int libbpf_prepare_prog_load(struct bpf_program *prog, |
7398 | | struct bpf_prog_load_opts *opts, long cookie) |
7399 | 0 | { |
7400 | 0 | enum sec_def_flags def = cookie; |
7401 | | |
7402 | | /* old kernels might not support specifying expected_attach_type */ |
7403 | 0 | if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE)) |
7404 | 0 | opts->expected_attach_type = 0; |
7405 | |
7406 | 0 | if (def & SEC_SLEEPABLE) |
7407 | 0 | opts->prog_flags |= BPF_F_SLEEPABLE; |
7408 | |
7409 | 0 | if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS)) |
7410 | 0 | opts->prog_flags |= BPF_F_XDP_HAS_FRAGS; |
7411 | | |
7412 | | /* special check for usdt to use uprobe_multi link */ |
7413 | 0 | if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK)) { |
7414 | | /* for BPF_TRACE_UPROBE_MULTI, the user might want to query expected_attach_type
7415 | | * from prog, while the kernel sees the expected_attach_type set in opts, so we
7416 | | * update both.
7417 | | */ |
7418 | 0 | prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI; |
7419 | 0 | opts->expected_attach_type = BPF_TRACE_UPROBE_MULTI; |
7420 | 0 | } |
7421 | |
7422 | 0 | if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) { |
7423 | 0 | int btf_obj_fd = 0, btf_type_id = 0, err; |
7424 | 0 | const char *attach_name; |
7425 | |
7426 | 0 | attach_name = strchr(prog->sec_name, '/'); |
7427 | 0 | if (!attach_name) { |
7428 | | /* if BPF program is annotated with just SEC("fentry") |
7429 | | * (or similar) without declaratively specifying |
7430 | | * target, then it is expected that target will be |
7431 | | * specified with bpf_program__set_attach_target() at |
7432 | | * runtime before BPF object load step. If not, then |
7433 | | * there is nothing to load into the kernel, as the BPF
7434 | | * verifier won't be able to validate BPF program
7435 | | * correctness anyway.
7436 | | */ |
7437 | 0 | pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n", |
7438 | 0 | prog->name); |
7439 | 0 | return -EINVAL; |
7440 | 0 | } |
7441 | 0 | attach_name++; /* skip over / */ |
7442 | |
7443 | 0 | err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id); |
7444 | 0 | if (err) |
7445 | 0 | return err; |
7446 | | |
7447 | | /* cache resolved BTF FD and BTF type ID in the prog */ |
7448 | 0 | prog->attach_btf_obj_fd = btf_obj_fd; |
7449 | 0 | prog->attach_btf_id = btf_type_id; |
7450 | | |
7451 | | /* by now, though, libbpf's common logic no longer uses
7452 | | * prog->attach_btf_obj_fd/prog->attach_btf_id, because this
7453 | | * callback is called after opts were populated by libbpf, so
7454 | | * this callback has to update opts explicitly here
7455 | | */ |
7456 | 0 | opts->attach_btf_obj_fd = btf_obj_fd; |
7457 | 0 | opts->attach_btf_id = btf_type_id; |
7458 | 0 | } |
7459 | 0 | return 0; |
7460 | 0 | } |
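
Note: a caller-side sketch of the runtime-target case described in the comment above; the object path, program name, and target function are illustrative, and error checks are elided:

    struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
    struct bpf_program *prog = bpf_object__find_program_by_name(obj, "handle_entry");
    int err;

    /* attach_prog_fd == 0: resolve the name against kernel (or module) BTF */
    err = bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
    if (!err)
        err = bpf_object__load(obj); /* attach_btf_id is already set, so the
                                      * resolution above is skipped */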
7461 | | |
7462 | | static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz); |
7463 | | |
7464 | | static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog, |
7465 | | struct bpf_insn *insns, int insns_cnt, |
7466 | | const char *license, __u32 kern_version, int *prog_fd) |
7467 | 0 | { |
7468 | 0 | LIBBPF_OPTS(bpf_prog_load_opts, load_attr); |
7469 | 0 | const char *prog_name = NULL; |
7470 | 0 | size_t log_buf_size = 0; |
7471 | 0 | char *log_buf = NULL, *tmp; |
7472 | 0 | bool own_log_buf = true; |
7473 | 0 | __u32 log_level = prog->log_level; |
7474 | 0 | int ret, err; |
7475 | | |
7476 | | /* Be more helpful by rejecting programs that can't be validated early |
7477 | | * with a more meaningful and actionable error message.
7478 | | */ |
7479 | 0 | switch (prog->type) { |
7480 | 0 | case BPF_PROG_TYPE_UNSPEC: |
7481 | | /* |
7482 | | * The program type must be set. Most likely we couldn't find a proper |
7483 | | * section definition at load time, and thus we didn't infer the type. |
7484 | | */ |
7485 | 0 | pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n", |
7486 | 0 | prog->name, prog->sec_name); |
7487 | 0 | return -EINVAL; |
7488 | 0 | case BPF_PROG_TYPE_STRUCT_OPS: |
7489 | 0 | if (prog->attach_btf_id == 0) { |
7490 | 0 | pr_warn("prog '%s': SEC(\"struct_ops\") program isn't referenced anywhere, did you forget to use it?\n", |
7491 | 0 | prog->name); |
7492 | 0 | return -EINVAL; |
7493 | 0 | } |
7494 | 0 | break; |
7495 | 0 | default: |
7496 | 0 | break; |
7497 | 0 | } |
7498 | | |
7499 | 0 | if (!insns || !insns_cnt) |
7500 | 0 | return -EINVAL; |
7501 | | |
7502 | 0 | if (kernel_supports(obj, FEAT_PROG_NAME)) |
7503 | 0 | prog_name = prog->name; |
7504 | 0 | load_attr.attach_prog_fd = prog->attach_prog_fd; |
7505 | 0 | load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd; |
7506 | 0 | load_attr.attach_btf_id = prog->attach_btf_id; |
7507 | 0 | load_attr.kern_version = kern_version; |
7508 | 0 | load_attr.prog_ifindex = prog->prog_ifindex; |
7509 | 0 | load_attr.expected_attach_type = prog->expected_attach_type; |
7510 | | |
7511 | | /* specify func_info/line_info only if kernel supports them */ |
7512 | 0 | if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) { |
7513 | 0 | load_attr.prog_btf_fd = btf__fd(obj->btf); |
7514 | 0 | load_attr.func_info = prog->func_info; |
7515 | 0 | load_attr.func_info_rec_size = prog->func_info_rec_size; |
7516 | 0 | load_attr.func_info_cnt = prog->func_info_cnt; |
7517 | 0 | load_attr.line_info = prog->line_info; |
7518 | 0 | load_attr.line_info_rec_size = prog->line_info_rec_size; |
7519 | 0 | load_attr.line_info_cnt = prog->line_info_cnt; |
7520 | 0 | } |
7521 | 0 | load_attr.log_level = log_level; |
7522 | 0 | load_attr.prog_flags = prog->prog_flags; |
7523 | 0 | load_attr.fd_array = obj->fd_array; |
7524 | |
7525 | 0 | load_attr.token_fd = obj->token_fd; |
7526 | 0 | if (obj->token_fd) |
7527 | 0 | load_attr.prog_flags |= BPF_F_TOKEN_FD; |
7528 | | |
7529 | | /* adjust load_attr if sec_def provides custom preload callback */ |
7530 | 0 | if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) { |
7531 | 0 | err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie); |
7532 | 0 | if (err < 0) { |
7533 | 0 | pr_warn("prog '%s': failed to prepare load attributes: %s\n", |
7534 | 0 | prog->name, errstr(err)); |
7535 | 0 | return err; |
7536 | 0 | } |
7537 | 0 | insns = prog->insns; |
7538 | 0 | insns_cnt = prog->insns_cnt; |
7539 | 0 | } |
7540 | | |
7541 | 0 | if (obj->gen_loader) { |
7542 | 0 | bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name, |
7543 | 0 | license, insns, insns_cnt, &load_attr, |
7544 | 0 | prog - obj->programs); |
7545 | 0 | *prog_fd = -1; |
7546 | 0 | return 0; |
7547 | 0 | } |
7548 | | |
7549 | 0 | retry_load: |
7550 | | /* if log_level is zero, we don't request logs initially even if |
7551 | | * custom log_buf is specified; if the program load fails, then we'll |
7552 | | * bump log_level to 1 and use either custom log_buf or we'll allocate |
7553 | | * our own and retry the load to get details on what failed |
7554 | | */ |
7555 | 0 | if (log_level) { |
7556 | 0 | if (prog->log_buf) { |
7557 | 0 | log_buf = prog->log_buf; |
7558 | 0 | log_buf_size = prog->log_size; |
7559 | 0 | own_log_buf = false; |
7560 | 0 | } else if (obj->log_buf) { |
7561 | 0 | log_buf = obj->log_buf; |
7562 | 0 | log_buf_size = obj->log_size; |
7563 | 0 | own_log_buf = false; |
7564 | 0 | } else { |
7565 | 0 | log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2); |
7566 | 0 | tmp = realloc(log_buf, log_buf_size); |
7567 | 0 | if (!tmp) { |
7568 | 0 | ret = -ENOMEM; |
7569 | 0 | goto out; |
7570 | 0 | } |
7571 | 0 | log_buf = tmp; |
7572 | 0 | log_buf[0] = '\0'; |
7573 | 0 | own_log_buf = true; |
7574 | 0 | } |
7575 | 0 | } |
7576 | | |
7577 | 0 | load_attr.log_buf = log_buf; |
7578 | 0 | load_attr.log_size = log_buf_size; |
7579 | 0 | load_attr.log_level = log_level; |
7580 | |
7581 | 0 | ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr); |
7582 | 0 | if (ret >= 0) { |
7583 | 0 | if (log_level && own_log_buf) { |
7584 | 0 | pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n", |
7585 | 0 | prog->name, log_buf); |
7586 | 0 | } |
7587 | |
7588 | 0 | if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) { |
7589 | 0 | struct bpf_map *map; |
7590 | 0 | int i; |
7591 | |
7592 | 0 | for (i = 0; i < obj->nr_maps; i++) { |
7593 | 0 | map = &prog->obj->maps[i]; |
7594 | 0 | if (map->libbpf_type != LIBBPF_MAP_RODATA) |
7595 | 0 | continue; |
7596 | | |
7597 | 0 | if (bpf_prog_bind_map(ret, map->fd, NULL)) { |
7598 | 0 | pr_warn("prog '%s': failed to bind map '%s': %s\n", |
7599 | 0 | prog->name, map->real_name, errstr(errno)); |
7600 | | /* Don't fail hard if we can't bind .rodata. */
7601 | 0 | } |
7602 | 0 | } |
7603 | 0 | } |
7604 | |
7605 | 0 | *prog_fd = ret; |
7606 | 0 | ret = 0; |
7607 | 0 | goto out; |
7608 | 0 | } |
7609 | | |
7610 | 0 | if (log_level == 0) { |
7611 | 0 | log_level = 1; |
7612 | 0 | goto retry_load; |
7613 | 0 | } |
7614 | | /* On ENOSPC, increase log buffer size and retry, unless custom |
7615 | | * log_buf is specified. |
7616 | | * Be careful to not overflow u32, though. Kernel's log buf size limit |
7617 | | * isn't part of UAPI so it can always be bumped to full 4GB. So don't |
7618 | | * multiply by 2 unless we are sure we'll fit within 32 bits. |
7619 | | * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2). |
7620 | | */ |
7621 | 0 | if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2) |
7622 | 0 | goto retry_load; |
7623 | | |
7624 | 0 | ret = -errno; |
7625 | | |
7626 | | /* post-process verifier log to improve error descriptions */ |
7627 | 0 | fixup_verifier_log(prog, log_buf, log_buf_size); |
7628 | |
7629 | 0 | pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, errstr(errno)); |
7630 | 0 | pr_perm_msg(ret); |
7631 | |
7632 | 0 | if (own_log_buf && log_buf && log_buf[0] != '\0') { |
7633 | 0 | pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n", |
7634 | 0 | prog->name, log_buf); |
7635 | 0 | } |
7636 | |
7637 | 0 | out: |
7638 | 0 | if (own_log_buf) |
7639 | 0 | free(log_buf); |
7640 | 0 | return ret; |
7641 | 0 | } |
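
Note: a sketch of supplying a caller-owned verifier log buffer so that the retry logic above keeps own_log_buf false and never takes the ENOSPC auto-grow path; the buffer size is illustrative:

    static char vlog[1 << 20]; /* 1 MiB, caller-owned */

    bpf_program__set_log_buf(prog, vlog, sizeof(vlog));
    bpf_program__set_log_level(prog, 1); /* request the log even on success */

    if (bpf_object__load(obj))
        fprintf(stderr, "verifier log:\n%s\n", vlog);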
7642 | | |
7643 | | static char *find_prev_line(char *buf, char *cur) |
7644 | 0 | { |
7645 | 0 | char *p; |
7646 | |
7647 | 0 | if (cur == buf) /* end of a log buf */ |
7648 | 0 | return NULL; |
7649 | | |
7650 | 0 | p = cur - 1; |
7651 | 0 | while (p - 1 >= buf && *(p - 1) != '\n') |
7652 | 0 | p--; |
7653 | |
7654 | 0 | return p; |
7655 | 0 | } |
7656 | | |
7657 | | static void patch_log(char *buf, size_t buf_sz, size_t log_sz, |
7658 | | char *orig, size_t orig_sz, const char *patch) |
7659 | 0 | { |
7660 | | /* size of the remaining log content to the right from the to-be-replaced part */ |
7661 | 0 | size_t rem_sz = (buf + log_sz) - (orig + orig_sz); |
7662 | 0 | size_t patch_sz = strlen(patch); |
7663 | |
7664 | 0 | if (patch_sz != orig_sz) { |
7665 | | /* If patch line(s) are longer than original piece of verifier log, |
7666 | | * shift log contents by (patch_sz - orig_sz) bytes to the right |
7667 | | * starting from after to-be-replaced part of the log. |
7668 | | * |
7669 | | * If patch line(s) are shorter than original piece of verifier log, |
7670 | | * shift log contents by (orig_sz - patch_sz) bytes to the left |
7671 | | * starting from after to-be-replaced part of the log.
7672 | | * |
7673 | | * We need to be careful about not overflowing available |
7674 | | * buf_sz capacity. If that's the case, we'll truncate the end |
7675 | | * of the original log, as necessary. |
7676 | | */ |
7677 | 0 | if (patch_sz > orig_sz) { |
7678 | 0 | if (orig + patch_sz >= buf + buf_sz) { |
7679 | | /* patch is big enough to cover remaining space completely */ |
7680 | 0 | patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1; |
7681 | 0 | rem_sz = 0; |
7682 | 0 | } else if (patch_sz - orig_sz > buf_sz - log_sz) { |
7683 | | /* patch causes part of remaining log to be truncated */ |
7684 | 0 | rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz); |
7685 | 0 | } |
7686 | 0 | } |
7687 | | /* shift remaining log to the right by calculated amount */ |
7688 | 0 | memmove(orig + patch_sz, orig + orig_sz, rem_sz); |
7689 | 0 | } |
7690 | |
7691 | 0 | memcpy(orig, patch, patch_sz); |
7692 | 0 | } |
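
Note: a worked example of the shift arithmetic, with assumed sizes: for buf_sz = 16 and log_sz = 12, replacing a 4-byte piece at offset 4 with a 7-byte patch gives rem_sz = 4 and a growth of 3 bytes; since buf_sz - log_sz = 4 can absorb the growth, nothing is truncated, the memmove() shifts bytes 8..11 to offsets 11..14, and the memcpy() writes the patch into offsets 4..10.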
7693 | | |
7694 | | static void fixup_log_failed_core_relo(struct bpf_program *prog, |
7695 | | char *buf, size_t buf_sz, size_t log_sz, |
7696 | | char *line1, char *line2, char *line3) |
7697 | 0 | { |
7698 | | /* Expected log for failed and not properly guarded CO-RE relocation: |
7699 | | * line1 -> 123: (85) call unknown#195896080 |
7700 | | * line2 -> invalid func unknown#195896080 |
7701 | | * line3 -> <anything else or end of buffer> |
7702 | | * |
7703 | | * "123" is the index of the instruction that was poisoned. We extract |
7704 | | * the instruction index to find the corresponding CO-RE relocation and
7705 | | * replace this part of the log with more relevant information about
7706 | | * the failed relocation.
7707 | | */ |
7708 | 0 | const struct bpf_core_relo *relo; |
7709 | 0 | struct bpf_core_spec spec; |
7710 | 0 | char patch[512], spec_buf[256]; |
7711 | 0 | int insn_idx, err, spec_len; |
7712 | |
7713 | 0 | if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1) |
7714 | 0 | return; |
7715 | | |
7716 | 0 | relo = find_relo_core(prog, insn_idx); |
7717 | 0 | if (!relo) |
7718 | 0 | return; |
7719 | | |
7720 | 0 | err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec); |
7721 | 0 | if (err) |
7722 | 0 | return; |
7723 | | |
7724 | 0 | spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec); |
7725 | 0 | snprintf(patch, sizeof(patch), |
7726 | 0 | "%d: <invalid CO-RE relocation>\n" |
7727 | 0 | "failed to resolve CO-RE relocation %s%s\n", |
7728 | 0 | insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : ""); |
7729 | |
7730 | 0 | patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); |
7731 | 0 | } |
7732 | | |
7733 | | static void fixup_log_missing_map_load(struct bpf_program *prog, |
7734 | | char *buf, size_t buf_sz, size_t log_sz, |
7735 | | char *line1, char *line2, char *line3) |
7736 | 0 | { |
7737 | | /* Expected log for failed and not properly guarded map reference: |
7738 | | * line1 -> 123: (85) call unknown#2001000345 |
7739 | | * line2 -> invalid func unknown#2001000345 |
7740 | | * line3 -> <anything else or end of buffer> |
7741 | | * |
7742 | | * "123" is the index of the instruction that was poisoned. |
7743 | | * "345" in "2001000345" is a map index in obj->maps to fetch map name. |
7744 | | */ |
7745 | 0 | struct bpf_object *obj = prog->obj; |
7746 | 0 | const struct bpf_map *map; |
7747 | 0 | int insn_idx, map_idx; |
7748 | 0 | char patch[128]; |
7749 | |
7750 | 0 | if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2) |
7751 | 0 | return; |
7752 | | |
7753 | 0 | map_idx -= POISON_LDIMM64_MAP_BASE; |
7754 | 0 | if (map_idx < 0 || map_idx >= obj->nr_maps) |
7755 | 0 | return; |
7756 | 0 | map = &obj->maps[map_idx]; |
7757 | |
7758 | 0 | snprintf(patch, sizeof(patch), |
7759 | 0 | "%d: <invalid BPF map reference>\n" |
7760 | 0 | "BPF map '%s' is referenced but wasn't created\n", |
7761 | 0 | insn_idx, map->name); |
7762 | |
7763 | 0 | patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); |
7764 | 0 | } |
7765 | | |
7766 | | static void fixup_log_missing_kfunc_call(struct bpf_program *prog, |
7767 | | char *buf, size_t buf_sz, size_t log_sz, |
7768 | | char *line1, char *line2, char *line3) |
7769 | 0 | { |
7770 | | /* Expected log for failed and not properly guarded kfunc call: |
7771 | | * line1 -> 123: (85) call unknown#2002000345 |
7772 | | * line2 -> invalid func unknown#2002000345 |
7773 | | * line3 -> <anything else or end of buffer> |
7774 | | * |
7775 | | * "123" is the index of the instruction that was poisoned. |
7776 | | * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name. |
7777 | | */ |
7778 | 0 | struct bpf_object *obj = prog->obj; |
7779 | 0 | const struct extern_desc *ext; |
7780 | 0 | int insn_idx, ext_idx; |
7781 | 0 | char patch[128]; |
7782 | |
7783 | 0 | if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2) |
7784 | 0 | return; |
7785 | | |
7786 | 0 | ext_idx -= POISON_CALL_KFUNC_BASE; |
7787 | 0 | if (ext_idx < 0 || ext_idx >= obj->nr_extern) |
7788 | 0 | return; |
7789 | 0 | ext = &obj->externs[ext_idx]; |
7790 | |
7791 | 0 | snprintf(patch, sizeof(patch), |
7792 | 0 | "%d: <invalid kfunc call>\n" |
7793 | 0 | "kfunc '%s' is referenced but wasn't resolved\n", |
7794 | 0 | insn_idx, ext->name); |
7795 | |
7796 | 0 | patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); |
7797 | 0 | } |
7798 | | |
7799 | | static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz) |
7800 | 0 | { |
7801 | | /* look for familiar error patterns in last N lines of the log */ |
7802 | 0 | const size_t max_last_line_cnt = 10; |
7803 | 0 | char *prev_line, *cur_line, *next_line; |
7804 | 0 | size_t log_sz; |
7805 | 0 | int i; |
7806 | |
7807 | 0 | if (!buf) |
7808 | 0 | return; |
7809 | | |
7810 | 0 | log_sz = strlen(buf) + 1; |
7811 | 0 | next_line = buf + log_sz - 1; |
7812 | |
7813 | 0 | for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) { |
7814 | 0 | cur_line = find_prev_line(buf, next_line); |
7815 | 0 | if (!cur_line) |
7816 | 0 | return; |
7817 | | |
7818 | 0 | if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) { |
7819 | 0 | prev_line = find_prev_line(buf, cur_line); |
7820 | 0 | if (!prev_line) |
7821 | 0 | continue; |
7822 | | |
7823 | | /* failed CO-RE relocation case */ |
7824 | 0 | fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz, |
7825 | 0 | prev_line, cur_line, next_line); |
7826 | 0 | return; |
7827 | 0 | } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) { |
7828 | 0 | prev_line = find_prev_line(buf, cur_line); |
7829 | 0 | if (!prev_line) |
7830 | 0 | continue; |
7831 | | |
7832 | | /* reference to uncreated BPF map */ |
7833 | 0 | fixup_log_missing_map_load(prog, buf, buf_sz, log_sz, |
7834 | 0 | prev_line, cur_line, next_line); |
7835 | 0 | return; |
7836 | 0 | } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) { |
7837 | 0 | prev_line = find_prev_line(buf, cur_line); |
7838 | 0 | if (!prev_line) |
7839 | 0 | continue; |
7840 | | |
7841 | | /* reference to unresolved kfunc */ |
7842 | 0 | fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz, |
7843 | 0 | prev_line, cur_line, next_line); |
7844 | 0 | return; |
7845 | 0 | } |
7846 | 0 | } |
7847 | 0 | } |
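
Note: the net effect of these fixups on a verifier log, using the failed CO-RE relocation pattern from the comments above; the text after the "failed to resolve" prefix is whatever bpf_core_format_spec() produced, abbreviated here as <spec>:

    before:  123: (85) call unknown#195896080
             invalid func unknown#195896080

    after:   123: <invalid CO-RE relocation>
             failed to resolve CO-RE relocation <spec>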
7848 | | |
7849 | | static int bpf_program_record_relos(struct bpf_program *prog) |
7850 | 0 | { |
7851 | 0 | struct bpf_object *obj = prog->obj; |
7852 | 0 | int i; |
7853 | |
7854 | 0 | for (i = 0; i < prog->nr_reloc; i++) { |
7855 | 0 | struct reloc_desc *relo = &prog->reloc_desc[i]; |
7856 | 0 | struct extern_desc *ext = &obj->externs[relo->ext_idx]; |
7857 | 0 | int kind; |
7858 | |
7859 | 0 | switch (relo->type) { |
7860 | 0 | case RELO_EXTERN_LD64: |
7861 | 0 | if (ext->type != EXT_KSYM) |
7862 | 0 | continue; |
7863 | 0 | kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ? |
7864 | 0 | BTF_KIND_VAR : BTF_KIND_FUNC; |
7865 | 0 | bpf_gen__record_extern(obj->gen_loader, ext->name, |
7866 | 0 | ext->is_weak, !ext->ksym.type_id, |
7867 | 0 | true, kind, relo->insn_idx); |
7868 | 0 | break; |
7869 | 0 | case RELO_EXTERN_CALL: |
7870 | 0 | bpf_gen__record_extern(obj->gen_loader, ext->name, |
7871 | 0 | ext->is_weak, false, false, BTF_KIND_FUNC, |
7872 | 0 | relo->insn_idx); |
7873 | 0 | break; |
7874 | 0 | case RELO_CORE: { |
7875 | 0 | struct bpf_core_relo cr = { |
7876 | 0 | .insn_off = relo->insn_idx * 8, |
7877 | 0 | .type_id = relo->core_relo->type_id, |
7878 | 0 | .access_str_off = relo->core_relo->access_str_off, |
7879 | 0 | .kind = relo->core_relo->kind, |
7880 | 0 | }; |
7881 | |
7882 | 0 | bpf_gen__record_relo_core(obj->gen_loader, &cr); |
7883 | 0 | break; |
7884 | 0 | } |
7885 | 0 | default: |
7886 | 0 | continue; |
7887 | 0 | } |
7888 | 0 | } |
7889 | 0 | return 0; |
7890 | 0 | } |
7891 | | |
7892 | | static int |
7893 | | bpf_object__load_progs(struct bpf_object *obj, int log_level) |
7894 | 0 | { |
7895 | 0 | struct bpf_program *prog; |
7896 | 0 | size_t i; |
7897 | 0 | int err; |
7898 | |
7899 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
7900 | 0 | prog = &obj->programs[i]; |
7901 | 0 | if (prog_is_subprog(obj, prog)) |
7902 | 0 | continue; |
7903 | 0 | if (!prog->autoload) { |
7904 | 0 | pr_debug("prog '%s': skipped loading\n", prog->name); |
7905 | 0 | continue; |
7906 | 0 | } |
7907 | 0 | prog->log_level |= log_level; |
7908 | |
7909 | 0 | if (obj->gen_loader) |
7910 | 0 | bpf_program_record_relos(prog); |
7911 | |
7912 | 0 | err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt, |
7913 | 0 | obj->license, obj->kern_version, &prog->fd); |
7914 | 0 | if (err) { |
7915 | 0 | pr_warn("prog '%s': failed to load: %s\n", prog->name, errstr(err)); |
7916 | 0 | return err; |
7917 | 0 | } |
7918 | 0 | } |
7919 | | |
7920 | 0 | bpf_object__free_relocs(obj); |
7921 | 0 | return 0; |
7922 | 0 | } |
7923 | | |
7924 | | static int bpf_object_prepare_progs(struct bpf_object *obj) |
7925 | 0 | { |
7926 | 0 | struct bpf_program *prog; |
7927 | 0 | size_t i; |
7928 | 0 | int err; |
7929 | |
7930 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
7931 | 0 | prog = &obj->programs[i]; |
7932 | 0 | err = bpf_object__sanitize_prog(obj, prog); |
7933 | 0 | if (err) |
7934 | 0 | return err; |
7935 | 0 | } |
7936 | 0 | return 0; |
7937 | 0 | } |
7938 | | |
7939 | | static const struct bpf_sec_def *find_sec_def(const char *sec_name); |
7940 | | |
7941 | | static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts) |
7942 | 2.21k | { |
7943 | 2.21k | struct bpf_program *prog; |
7944 | 2.21k | int err; |
7945 | | |
7946 | 5.40k | bpf_object__for_each_program(prog, obj) { |
7947 | 5.40k | prog->sec_def = find_sec_def(prog->sec_name); |
7948 | 5.40k | if (!prog->sec_def) { |
7949 | | /* couldn't guess, but user might manually specify */ |
7950 | 4.37k | pr_debug("prog '%s': unrecognized ELF section name '%s'\n", |
7951 | 4.37k | prog->name, prog->sec_name); |
7952 | 4.37k | continue; |
7953 | 4.37k | } |
7954 | | |
7955 | 1.03k | prog->type = prog->sec_def->prog_type; |
7956 | 1.03k | prog->expected_attach_type = prog->sec_def->expected_attach_type; |
7957 | | |
7958 | | /* sec_def can have custom callback which should be called |
7959 | | * after bpf_program is initialized to adjust its properties |
7960 | | */ |
7961 | 1.03k | if (prog->sec_def->prog_setup_fn) { |
7962 | 0 | err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie); |
7963 | 0 | if (err < 0) { |
7964 | 0 | pr_warn("prog '%s': failed to initialize: %s\n", |
7965 | 0 | prog->name, errstr(err)); |
7966 | 0 | return err; |
7967 | 0 | } |
7968 | 0 | } |
7969 | 1.03k | } |
7970 | | |
7971 | 2.21k | return 0; |
7972 | 2.21k | } |
7973 | | |
7974 | | static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz, |
7975 | | const char *obj_name, |
7976 | | const struct bpf_object_open_opts *opts) |
7977 | 11.5k | { |
7978 | 11.5k | const char *kconfig, *btf_tmp_path, *token_path; |
7979 | 11.5k | struct bpf_object *obj; |
7980 | 11.5k | int err; |
7981 | 11.5k | char *log_buf; |
7982 | 11.5k | size_t log_size; |
7983 | 11.5k | __u32 log_level; |
7984 | | |
7985 | 11.5k | if (obj_buf && !obj_name) |
7986 | 0 | return ERR_PTR(-EINVAL); |
7987 | | |
7988 | 11.5k | if (elf_version(EV_CURRENT) == EV_NONE) { |
7989 | 0 | pr_warn("failed to init libelf for %s\n", |
7990 | 0 | path ? : "(mem buf)"); |
7991 | 0 | return ERR_PTR(-LIBBPF_ERRNO__LIBELF); |
7992 | 0 | } |
7993 | | |
7994 | 11.5k | if (!OPTS_VALID(opts, bpf_object_open_opts)) |
7995 | 0 | return ERR_PTR(-EINVAL); |
7996 | | |
7997 | 11.5k | obj_name = OPTS_GET(opts, object_name, NULL) ?: obj_name; |
7998 | 11.5k | if (obj_buf) { |
7999 | 11.5k | path = obj_name; |
8000 | 11.5k | pr_debug("loading object '%s' from buffer\n", obj_name); |
8001 | 18.4E | } else { |
8002 | 18.4E | pr_debug("loading object from %s\n", path); |
8003 | 18.4E | } |
8004 | |
8005 | 0 | log_buf = OPTS_GET(opts, kernel_log_buf, NULL); |
8006 | 0 | log_size = OPTS_GET(opts, kernel_log_size, 0); |
8007 | 0 | log_level = OPTS_GET(opts, kernel_log_level, 0); |
8008 | 0 | if (log_size > UINT_MAX) |
8009 | 0 | return ERR_PTR(-EINVAL); |
8010 | 0 | if (log_size && !log_buf) |
8011 | 0 | return ERR_PTR(-EINVAL); |
8012 | | |
8013 | 0 | token_path = OPTS_GET(opts, bpf_token_path, NULL); |
8014 | | /* if user didn't specify bpf_token_path explicitly, check if |
8015 | | * LIBBPF_BPF_TOKEN_PATH envvar was set and treat it as bpf_token_path |
8016 | | * option |
8017 | | */ |
8018 | 0 | if (!token_path) |
8019 | 11.5k | token_path = getenv("LIBBPF_BPF_TOKEN_PATH"); |
8020 | 0 | if (token_path && strlen(token_path) >= PATH_MAX) |
8021 | 0 | return ERR_PTR(-ENAMETOOLONG); |
8022 | | |
8023 | 0 | obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name); |
8024 | 0 | if (IS_ERR(obj)) |
8025 | 0 | return obj; |
8026 | | |
8027 | 0 | obj->log_buf = log_buf; |
8028 | 0 | obj->log_size = log_size; |
8029 | 0 | obj->log_level = log_level; |
8030 | |
8031 | 0 | if (token_path) { |
8032 | 0 | obj->token_path = strdup(token_path); |
8033 | 0 | if (!obj->token_path) { |
8034 | 0 | err = -ENOMEM; |
8035 | 0 | goto out; |
8036 | 0 | } |
8037 | 0 | } |
8038 | | |
8039 | 0 | btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL); |
8040 | 0 | if (btf_tmp_path) { |
8041 | 0 | if (strlen(btf_tmp_path) >= PATH_MAX) { |
8042 | 0 | err = -ENAMETOOLONG; |
8043 | 0 | goto out; |
8044 | 0 | } |
8045 | 0 | obj->btf_custom_path = strdup(btf_tmp_path); |
8046 | 0 | if (!obj->btf_custom_path) { |
8047 | 0 | err = -ENOMEM; |
8048 | 0 | goto out; |
8049 | 0 | } |
8050 | 0 | } |
8051 | | |
8052 | 0 | kconfig = OPTS_GET(opts, kconfig, NULL); |
8053 | 0 | if (kconfig) { |
8054 | 0 | obj->kconfig = strdup(kconfig); |
8055 | 0 | if (!obj->kconfig) { |
8056 | 0 | err = -ENOMEM; |
8057 | 0 | goto out; |
8058 | 0 | } |
8059 | 0 | } |
8060 | | |
8061 | 0 | err = bpf_object__elf_init(obj); |
8062 | 18.4E | err = err ? : bpf_object__elf_collect(obj); |
8063 | 18.4E | err = err ? : bpf_object__collect_externs(obj); |
8064 | 18.4E | err = err ? : bpf_object_fixup_btf(obj); |
8065 | 18.4E | err = err ? : bpf_object__init_maps(obj, opts); |
8066 | 18.4E | err = err ? : bpf_object_init_progs(obj, opts); |
8067 | 18.4E | err = err ? : bpf_object__collect_relos(obj); |
8068 | 18.4E | if (err) |
8069 | 9.85k | goto out; |
8070 | | |
8071 | 18.4E | bpf_object__elf_finish(obj); |
8072 | | |
8073 | 18.4E | return obj; |
8074 | 9.85k | out: |
8075 | 9.85k | bpf_object__close(obj); |
8076 | 9.85k | return ERR_PTR(err); |
8077 | 18.4E | } |
8078 | | |
8079 | | struct bpf_object * |
8080 | | bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) |
8081 | 0 | { |
8082 | 0 | if (!path) |
8083 | 0 | return libbpf_err_ptr(-EINVAL); |
8084 | | |
8085 | 0 | return libbpf_ptr(bpf_object_open(path, NULL, 0, NULL, opts)); |
8086 | 0 | } |
8087 | | |
8088 | | struct bpf_object *bpf_object__open(const char *path) |
8089 | 0 | { |
8090 | 0 | return bpf_object__open_file(path, NULL); |
8091 | 0 | } |
8092 | | |
8093 | | struct bpf_object * |
8094 | | bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, |
8095 | | const struct bpf_object_open_opts *opts) |
8096 | 11.5k | { |
8097 | 11.5k | char tmp_name[64]; |
8098 | | |
8099 | 11.5k | if (!obj_buf || obj_buf_sz == 0) |
8100 | 0 | return libbpf_err_ptr(-EINVAL); |
8101 | | |
8102 | | /* create a (quite useless) default "name" for this memory buffer object */ |
8103 | 11.5k | snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz); |
8104 | | |
8105 | 11.5k | return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, opts)); |
8106 | 11.5k | } |
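
Note: a usage sketch for the memory-buffer path, assuming elf_buf/elf_sz hold a BPF ELF image (e.g. embedded into the binary at build time); object_name overrides the autogenerated "%lx-%zx" default above:

    LIBBPF_OPTS(bpf_object_open_opts, opts,
        .object_name = "embedded_obj",
    );
    struct bpf_object *obj = bpf_object__open_mem(elf_buf, elf_sz, &opts);

    if (!obj) /* NULL with errno set, per libbpf_ptr() */
        fprintf(stderr, "open_mem failed: %d\n", -errno);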
8107 | | |
8108 | | static int bpf_object_unload(struct bpf_object *obj) |
8109 | 11.5k | { |
8110 | 11.5k | size_t i; |
8111 | | |
8112 | 11.5k | if (!obj) |
8113 | 0 | return libbpf_err(-EINVAL); |
8114 | | |
8115 | 14.4k | for (i = 0; i < obj->nr_maps; i++) { |
8116 | 2.84k | zclose(obj->maps[i].fd); |
8117 | 2.84k | if (obj->maps[i].st_ops) |
8118 | 60 | zfree(&obj->maps[i].st_ops->kern_vdata); |
8119 | 2.84k | } |
8120 | | |
8121 | 20.8k | for (i = 0; i < obj->nr_programs; i++) |
8122 | 9.26k | bpf_program__unload(&obj->programs[i]); |
8123 | | |
8124 | 11.5k | return 0; |
8125 | 11.5k | } |
8126 | | |
8127 | | static int bpf_object__sanitize_maps(struct bpf_object *obj) |
8128 | 0 | { |
8129 | 0 | struct bpf_map *m; |
8130 | |
8131 | 0 | bpf_object__for_each_map(m, obj) { |
8132 | 0 | if (!bpf_map__is_internal(m)) |
8133 | 0 | continue; |
8134 | 0 | if (!kernel_supports(obj, FEAT_ARRAY_MMAP)) |
8135 | 0 | m->def.map_flags &= ~BPF_F_MMAPABLE; |
8136 | 0 | } |
8137 | |
8138 | 0 | return 0; |
8139 | 0 | } |
8140 | | |
8141 | | typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type, |
8142 | | const char *sym_name, void *ctx); |
8143 | | |
8144 | | static int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx) |
8145 | 0 | { |
8146 | 0 | char sym_type, sym_name[500]; |
8147 | 0 | unsigned long long sym_addr; |
8148 | 0 | int ret, err = 0; |
8149 | 0 | FILE *f; |
8150 | |
8151 | 0 | f = fopen("/proc/kallsyms", "re"); |
8152 | 0 | if (!f) { |
8153 | 0 | err = -errno; |
8154 | 0 | pr_warn("failed to open /proc/kallsyms: %s\n", errstr(err)); |
8155 | 0 | return err; |
8156 | 0 | } |
8157 | | |
8158 | 0 | while (true) { |
8159 | 0 | ret = fscanf(f, "%llx %c %499s%*[^\n]\n", |
8160 | 0 | &sym_addr, &sym_type, sym_name); |
8161 | 0 | if (ret == EOF && feof(f)) |
8162 | 0 | break; |
8163 | 0 | if (ret != 3) { |
8164 | 0 | pr_warn("failed to read kallsyms entry: %d\n", ret); |
8165 | 0 | err = -EINVAL; |
8166 | 0 | break; |
8167 | 0 | } |
8168 | | |
8169 | 0 | err = cb(sym_addr, sym_type, sym_name, ctx); |
8170 | 0 | if (err) |
8171 | 0 | break; |
8172 | 0 | } |
8173 | |
8174 | 0 | fclose(f); |
8175 | 0 | return err; |
8176 | 0 | } |
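
Note: libbpf_kallsyms_parse() is internal to this file, so the following is only a sketch of the kallsyms_cb_t contract it enforces; the struct and function names are illustrative. Any non-zero return stops the parse loop and is propagated to the caller:

    struct sym_query {
        const char *name;
        unsigned long long addr;
    };

    static int find_sym_cb(unsigned long long sym_addr, char sym_type,
                           const char *sym_name, void *ctx)
    {
        struct sym_query *q = ctx;

        if (strcmp(sym_name, q->name) == 0) {
            q->addr = sym_addr;
            return 1; /* stop iteration */
        }
        return 0; /* keep going */
    }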
8177 | | |
8178 | | static int kallsyms_cb(unsigned long long sym_addr, char sym_type, |
8179 | | const char *sym_name, void *ctx) |
8180 | 0 | { |
8181 | 0 | struct bpf_object *obj = ctx; |
8182 | 0 | const struct btf_type *t; |
8183 | 0 | struct extern_desc *ext; |
8184 | 0 | char *res; |
8185 | |
8186 | 0 | res = strstr(sym_name, ".llvm."); |
8187 | 0 | if (sym_type == 'd' && res) |
8188 | 0 | ext = find_extern_by_name_with_len(obj, sym_name, res - sym_name); |
8189 | 0 | else |
8190 | 0 | ext = find_extern_by_name(obj, sym_name); |
8191 | 0 | if (!ext || ext->type != EXT_KSYM) |
8192 | 0 | return 0; |
8193 | | |
8194 | 0 | t = btf__type_by_id(obj->btf, ext->btf_id); |
8195 | 0 | if (!btf_is_var(t)) |
8196 | 0 | return 0; |
8197 | | |
8198 | 0 | if (ext->is_set && ext->ksym.addr != sym_addr) { |
8199 | 0 | pr_warn("extern (ksym) '%s': resolution is ambiguous: 0x%llx or 0x%llx\n", |
8200 | 0 | sym_name, ext->ksym.addr, sym_addr); |
8201 | 0 | return -EINVAL; |
8202 | 0 | } |
8203 | 0 | if (!ext->is_set) { |
8204 | 0 | ext->is_set = true; |
8205 | 0 | ext->ksym.addr = sym_addr; |
8206 | 0 | pr_debug("extern (ksym) '%s': set to 0x%llx\n", sym_name, sym_addr); |
8207 | 0 | } |
8208 | 0 | return 0; |
8209 | 0 | } |
8210 | | |
8211 | | static int bpf_object__read_kallsyms_file(struct bpf_object *obj) |
8212 | 0 | { |
8213 | 0 | return libbpf_kallsyms_parse(kallsyms_cb, obj); |
8214 | 0 | } |
8215 | | |
8216 | | static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, |
8217 | | __u16 kind, struct btf **res_btf, |
8218 | | struct module_btf **res_mod_btf) |
8219 | 0 | { |
8220 | 0 | struct module_btf *mod_btf; |
8221 | 0 | struct btf *btf; |
8222 | 0 | int i, id, err; |
8223 | |
8224 | 0 | btf = obj->btf_vmlinux; |
8225 | 0 | mod_btf = NULL; |
8226 | 0 | id = btf__find_by_name_kind(btf, ksym_name, kind); |
8227 | |
8228 | 0 | if (id == -ENOENT) { |
8229 | 0 | err = load_module_btfs(obj); |
8230 | 0 | if (err) |
8231 | 0 | return err; |
8232 | | |
8233 | 0 | for (i = 0; i < obj->btf_module_cnt; i++) { |
8234 | | /* we assume module_btf's BTF FD is always >0 */ |
8235 | 0 | mod_btf = &obj->btf_modules[i]; |
8236 | 0 | btf = mod_btf->btf; |
8237 | 0 | id = btf__find_by_name_kind_own(btf, ksym_name, kind); |
8238 | 0 | if (id != -ENOENT) |
8239 | 0 | break; |
8240 | 0 | } |
8241 | 0 | } |
8242 | 0 | if (id <= 0) |
8243 | 0 | return -ESRCH; |
8244 | | |
8245 | 0 | *res_btf = btf; |
8246 | 0 | *res_mod_btf = mod_btf; |
8247 | 0 | return id; |
8248 | 0 | } |
8249 | | |
8250 | | static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj, |
8251 | | struct extern_desc *ext) |
8252 | 0 | { |
8253 | 0 | const struct btf_type *targ_var, *targ_type; |
8254 | 0 | __u32 targ_type_id, local_type_id; |
8255 | 0 | struct module_btf *mod_btf = NULL; |
8256 | 0 | const char *targ_var_name; |
8257 | 0 | struct btf *btf = NULL; |
8258 | 0 | int id, err; |
8259 | |
8260 | 0 | id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf); |
8261 | 0 | if (id < 0) { |
8262 | 0 | if (id == -ESRCH && ext->is_weak) |
8263 | 0 | return 0; |
8264 | 0 | pr_warn("extern (var ksym) '%s': not found in kernel BTF\n", |
8265 | 0 | ext->name); |
8266 | 0 | return id; |
8267 | 0 | } |
8268 | | |
8269 | | /* find local type_id */ |
8270 | 0 | local_type_id = ext->ksym.type_id; |
8271 | | |
8272 | | /* find target type_id */ |
8273 | 0 | targ_var = btf__type_by_id(btf, id); |
8274 | 0 | targ_var_name = btf__name_by_offset(btf, targ_var->name_off); |
8275 | 0 | targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id); |
8276 | |
8277 | 0 | err = bpf_core_types_are_compat(obj->btf, local_type_id, |
8278 | 0 | btf, targ_type_id); |
8279 | 0 | if (err <= 0) { |
8280 | 0 | const struct btf_type *local_type; |
8281 | 0 | const char *targ_name, *local_name; |
8282 | |
8283 | 0 | local_type = btf__type_by_id(obj->btf, local_type_id); |
8284 | 0 | local_name = btf__name_by_offset(obj->btf, local_type->name_off); |
8285 | 0 | targ_name = btf__name_by_offset(btf, targ_type->name_off); |
8286 | |
8287 | 0 | pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n", |
8288 | 0 | ext->name, local_type_id, |
8289 | 0 | btf_kind_str(local_type), local_name, targ_type_id, |
8290 | 0 | btf_kind_str(targ_type), targ_name); |
8291 | 0 | return -EINVAL; |
8292 | 0 | } |
8293 | | |
8294 | 0 | ext->is_set = true; |
8295 | 0 | ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0; |
8296 | 0 | ext->ksym.kernel_btf_id = id; |
8297 | 0 | pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n", |
8298 | 0 | ext->name, id, btf_kind_str(targ_var), targ_var_name); |
8299 | |
8300 | 0 | return 0; |
8301 | 0 | } |
8302 | | |
8303 | | static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, |
8304 | | struct extern_desc *ext) |
8305 | 0 | { |
8306 | 0 | int local_func_proto_id, kfunc_proto_id, kfunc_id; |
8307 | 0 | struct module_btf *mod_btf = NULL; |
8308 | 0 | const struct btf_type *kern_func; |
8309 | 0 | struct btf *kern_btf = NULL; |
8310 | 0 | int ret; |
8311 | |
8312 | 0 | local_func_proto_id = ext->ksym.type_id; |
8313 | |
8314 | 0 | kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf, |
8315 | 0 | &mod_btf); |
8316 | 0 | if (kfunc_id < 0) { |
8317 | 0 | if (kfunc_id == -ESRCH && ext->is_weak) |
8318 | 0 | return 0; |
8319 | 0 | pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n", |
8320 | 0 | ext->name); |
8321 | 0 | return kfunc_id; |
8322 | 0 | } |
8323 | | |
8324 | 0 | kern_func = btf__type_by_id(kern_btf, kfunc_id); |
8325 | 0 | kfunc_proto_id = kern_func->type; |
8326 | |
8327 | 0 | ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id, |
8328 | 0 | kern_btf, kfunc_proto_id); |
8329 | 0 | if (ret <= 0) { |
8330 | 0 | if (ext->is_weak) |
8331 | 0 | return 0; |
8332 | | |
8333 | 0 | pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n", |
8334 | 0 | ext->name, local_func_proto_id, |
8335 | 0 | mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id); |
8336 | 0 | return -EINVAL; |
8337 | 0 | } |
8338 | | |
8339 | | /* set index for module BTF fd in fd_array, if unset */ |
8340 | 0 | if (mod_btf && !mod_btf->fd_array_idx) { |
8341 | | /* insn->off is s16 */ |
8342 | 0 | if (obj->fd_array_cnt == INT16_MAX) { |
8343 | 0 | pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n", |
8344 | 0 | ext->name, mod_btf->fd_array_idx); |
8345 | 0 | return -E2BIG; |
8346 | 0 | } |
8347 | | /* Cannot use index 0 for module BTF fd */ |
8348 | 0 | if (!obj->fd_array_cnt) |
8349 | 0 | obj->fd_array_cnt = 1; |
8350 | |
8351 | 0 | ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int), |
8352 | 0 | obj->fd_array_cnt + 1); |
8353 | 0 | if (ret) |
8354 | 0 | return ret; |
8355 | 0 | mod_btf->fd_array_idx = obj->fd_array_cnt; |
8356 | | /* we assume module BTF FD is always >0 */ |
8357 | 0 | obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd; |
8358 | 0 | } |
8359 | | |
8360 | 0 | ext->is_set = true; |
8361 | 0 | ext->ksym.kernel_btf_id = kfunc_id; |
8362 | 0 | ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0; |
8363 | | /* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data() |
8364 | | * populates FD into ld_imm64 insn when it's used to point to kfunc. |
8365 | | * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call. |
8366 | | * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64. |
8367 | | */ |
8368 | 0 | ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0; |
8369 | 0 | pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n", |
8370 | 0 | ext->name, mod_btf ? mod_btf->name : "vmlinux", kfunc_id); |
8371 | |
8372 | 0 | return 0; |
8373 | 0 | } |
8374 | | |
8375 | | static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj) |
8376 | 0 | { |
8377 | 0 | const struct btf_type *t; |
8378 | 0 | struct extern_desc *ext; |
8379 | 0 | int i, err; |
8380 | |
8381 | 0 | for (i = 0; i < obj->nr_extern; i++) { |
8382 | 0 | ext = &obj->externs[i]; |
8383 | 0 | if (ext->type != EXT_KSYM || !ext->ksym.type_id) |
8384 | 0 | continue; |
8385 | | |
8386 | 0 | if (obj->gen_loader) { |
8387 | 0 | ext->is_set = true; |
8388 | 0 | ext->ksym.kernel_btf_obj_fd = 0; |
8389 | 0 | ext->ksym.kernel_btf_id = 0; |
8390 | 0 | continue; |
8391 | 0 | } |
8392 | 0 | t = btf__type_by_id(obj->btf, ext->btf_id); |
8393 | 0 | if (btf_is_var(t)) |
8394 | 0 | err = bpf_object__resolve_ksym_var_btf_id(obj, ext); |
8395 | 0 | else |
8396 | 0 | err = bpf_object__resolve_ksym_func_btf_id(obj, ext); |
8397 | 0 | if (err) |
8398 | 0 | return err; |
8399 | 0 | } |
8400 | 0 | return 0; |
8401 | 0 | } |
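
Note: BPF-side declarations (not part of this file) that produce the EXT_KSYM externs resolved above; __ksym and __weak come from <bpf/bpf_helpers.h>, struct rq is assumed to come from vmlinux.h, and the symbol choices are illustrative:

    /* typed variable ksym: resolved against vmlinux/module BTF by
     * bpf_object__resolve_ksym_var_btf_id() */
    extern const struct rq runqueues __ksym;

    /* kfunc ksym: resolved by bpf_object__resolve_ksym_func_btf_id();
     * __weak tolerates a kernel that doesn't provide it */
    extern void bpf_rcu_read_lock(void) __ksym __weak;

    /* untyped ksym: no BTF type id, so its address is filled from
     * /proc/kallsyms instead (see kallsyms_cb() above) */
    extern const void bpf_fentry_test1 __ksym;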
8402 | | |
8403 | | static int bpf_object__resolve_externs(struct bpf_object *obj, |
8404 | | const char *extra_kconfig) |
8405 | 0 | { |
8406 | 0 | bool need_config = false, need_kallsyms = false; |
8407 | 0 | bool need_vmlinux_btf = false; |
8408 | 0 | struct extern_desc *ext; |
8409 | 0 | void *kcfg_data = NULL; |
8410 | 0 | int err, i; |
8411 | |
8412 | 0 | if (obj->nr_extern == 0) |
8413 | 0 | return 0; |
8414 | | |
8415 | 0 | if (obj->kconfig_map_idx >= 0) |
8416 | 0 | kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped; |
8417 | |
8418 | 0 | for (i = 0; i < obj->nr_extern; i++) { |
8419 | 0 | ext = &obj->externs[i]; |
8420 | |
8421 | 0 | if (ext->type == EXT_KSYM) { |
8422 | 0 | if (ext->ksym.type_id) |
8423 | 0 | need_vmlinux_btf = true; |
8424 | 0 | else |
8425 | 0 | need_kallsyms = true; |
8426 | 0 | continue; |
8427 | 0 | } else if (ext->type == EXT_KCFG) { |
8428 | 0 | void *ext_ptr = kcfg_data + ext->kcfg.data_off; |
8429 | 0 | __u64 value = 0; |
8430 | | |
8431 | | /* Kconfig externs need actual /proc/config.gz */ |
8432 | 0 | if (str_has_pfx(ext->name, "CONFIG_")) { |
8433 | 0 | need_config = true; |
8434 | 0 | continue; |
8435 | 0 | } |
8436 | | |
8437 | | /* Virtual kcfg externs are handled specially by libbpf */
8438 | 0 | if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) { |
8439 | 0 | value = get_kernel_version(); |
8440 | 0 | if (!value) { |
8441 | 0 | pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name); |
8442 | 0 | return -EINVAL; |
8443 | 0 | } |
8444 | 0 | } else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) { |
8445 | 0 | value = kernel_supports(obj, FEAT_BPF_COOKIE); |
8446 | 0 | } else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) { |
8447 | 0 | value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER); |
8448 | 0 | } else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) { |
8449 | | /* Currently libbpf supports only CONFIG_ and LINUX_ prefixed |
8450 | | * __kconfig externs, where the LINUX_ ones are virtual and filled in
8451 | | * specially by libbpf (their values don't come from Kconfig).
8452 | | * If a LINUX_xxx variable is not recognized by libbpf but is marked
8453 | | * __weak, it defaults to zero, just like the CONFIG_xxx
8454 | | * externs. |
8455 | | */ |
8456 | 0 | pr_warn("extern (kcfg) '%s': unrecognized virtual extern\n", ext->name); |
8457 | 0 | return -EINVAL; |
8458 | 0 | } |
8459 | | |
8460 | 0 | err = set_kcfg_value_num(ext, ext_ptr, value); |
8461 | 0 | if (err) |
8462 | 0 | return err; |
8463 | 0 | pr_debug("extern (kcfg) '%s': set to 0x%llx\n", |
8464 | 0 | ext->name, (long long)value); |
8465 | 0 | } else { |
8466 | 0 | pr_warn("extern '%s': unrecognized extern kind\n", ext->name); |
8467 | 0 | return -EINVAL; |
8468 | 0 | } |
8469 | 0 | } |
8470 | 0 | if (need_config && extra_kconfig) { |
8471 | 0 | err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data); |
8472 | 0 | if (err) |
8473 | 0 | return -EINVAL; |
8474 | 0 | need_config = false; |
8475 | 0 | for (i = 0; i < obj->nr_extern; i++) { |
8476 | 0 | ext = &obj->externs[i]; |
8477 | 0 | if (ext->type == EXT_KCFG && !ext->is_set) { |
8478 | 0 | need_config = true; |
8479 | 0 | break; |
8480 | 0 | } |
8481 | 0 | } |
8482 | 0 | } |
8483 | 0 | if (need_config) { |
8484 | 0 | err = bpf_object__read_kconfig_file(obj, kcfg_data); |
8485 | 0 | if (err) |
8486 | 0 | return -EINVAL; |
8487 | 0 | } |
8488 | 0 | if (need_kallsyms) { |
8489 | 0 | err = bpf_object__read_kallsyms_file(obj); |
8490 | 0 | if (err) |
8491 | 0 | return -EINVAL; |
8492 | 0 | } |
8493 | 0 | if (need_vmlinux_btf) { |
8494 | 0 | err = bpf_object__resolve_ksyms_btf_id(obj); |
8495 | 0 | if (err) |
8496 | 0 | return -EINVAL; |
8497 | 0 | } |
8498 | 0 | for (i = 0; i < obj->nr_extern; i++) { |
8499 | 0 | ext = &obj->externs[i]; |
8500 | |
8501 | 0 | if (!ext->is_set && !ext->is_weak) { |
8502 | 0 | pr_warn("extern '%s' (strong): not resolved\n", ext->name); |
8503 | 0 | return -ESRCH; |
8504 | 0 | } else if (!ext->is_set) { |
8505 | 0 | pr_debug("extern '%s' (weak): not resolved, defaulting to zero\n", |
8506 | 0 | ext->name); |
8507 | 0 | } |
8508 | 0 | } |
8509 | | |
8510 | 0 | return 0; |
8511 | 0 | } |
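
Note: a sketch of the BPF-side extern declarations this resolver fills in; __kconfig, __weak, and SEC() come from <bpf/bpf_helpers.h>, KERNEL_VERSION() is assumed to be available from recent bpf_helpers.h (or an equivalent macro), and the section/threshold are illustrative:

    extern unsigned int LINUX_KERNEL_VERSION __kconfig; /* virtual, always set */
    extern int CONFIG_HZ __kconfig __weak;              /* from Kconfig; 0 if absent */

    SEC("tracepoint/syscalls/sys_enter_nanosleep")
    int probe(void *ctx)
    {
        if (LINUX_KERNEL_VERSION < KERNEL_VERSION(5, 15, 0))
            return 0; /* version-gated logic */
        return 0;
    }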
8512 | | |
8513 | | static void bpf_map_prepare_vdata(const struct bpf_map *map) |
8514 | 0 | { |
8515 | 0 | const struct btf_type *type; |
8516 | 0 | struct bpf_struct_ops *st_ops; |
8517 | 0 | __u32 i; |
8518 | |
8519 | 0 | st_ops = map->st_ops; |
8520 | 0 | type = btf__type_by_id(map->obj->btf, st_ops->type_id); |
8521 | 0 | for (i = 0; i < btf_vlen(type); i++) { |
8522 | 0 | struct bpf_program *prog = st_ops->progs[i]; |
8523 | 0 | void *kern_data; |
8524 | 0 | int prog_fd; |
8525 | |
8526 | 0 | if (!prog) |
8527 | 0 | continue; |
8528 | | |
8529 | 0 | prog_fd = bpf_program__fd(prog); |
8530 | 0 | kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i]; |
8531 | 0 | *(unsigned long *)kern_data = prog_fd; |
8532 | 0 | } |
8533 | 0 | } |
8534 | | |
8535 | | static int bpf_object_prepare_struct_ops(struct bpf_object *obj) |
8536 | 0 | { |
8537 | 0 | struct bpf_map *map; |
8538 | 0 | int i; |
8539 | |
8540 | 0 | for (i = 0; i < obj->nr_maps; i++) { |
8541 | 0 | map = &obj->maps[i]; |
8542 | |
8543 | 0 | if (!bpf_map__is_struct_ops(map)) |
8544 | 0 | continue; |
8545 | | |
8546 | 0 | if (!map->autocreate) |
8547 | 0 | continue; |
8548 | | |
8549 | 0 | bpf_map_prepare_vdata(map); |
8550 | 0 | } |
8551 | |
8552 | 0 | return 0; |
8553 | 0 | } |
8554 | | |
8555 | | static void bpf_object_unpin(struct bpf_object *obj) |
8556 | 0 | { |
8557 | 0 | int i; |
8558 | | |
8559 | | /* unpin any maps that were auto-pinned during load */ |
8560 | 0 | for (i = 0; i < obj->nr_maps; i++) |
8561 | 0 | if (obj->maps[i].pinned && !obj->maps[i].reused) |
8562 | 0 | bpf_map__unpin(&obj->maps[i], NULL); |
8563 | 0 | } |
8564 | | |
8565 | | static void bpf_object_post_load_cleanup(struct bpf_object *obj) |
8566 | 11.5k | { |
8567 | 11.5k | int i; |
8568 | | |
8569 | | /* clean up fd_array */ |
8570 | 11.5k | zfree(&obj->fd_array); |
8571 | | |
8572 | | /* clean up module BTFs */ |
8573 | 11.5k | for (i = 0; i < obj->btf_module_cnt; i++) { |
8574 | 0 | close(obj->btf_modules[i].fd); |
8575 | 0 | btf__free(obj->btf_modules[i].btf); |
8576 | 0 | free(obj->btf_modules[i].name); |
8577 | 0 | } |
8578 | 11.5k | obj->btf_module_cnt = 0; |
8579 | 11.5k | zfree(&obj->btf_modules); |
8580 | | |
8581 | | /* clean up vmlinux BTF */ |
8582 | 11.5k | btf__free(obj->btf_vmlinux); |
8583 | 11.5k | obj->btf_vmlinux = NULL; |
8584 | 11.5k | } |
8585 | | |
8586 | | static int bpf_object_prepare(struct bpf_object *obj, const char *target_btf_path) |
8587 | 0 | { |
8588 | 0 | int err; |
8589 | |
8590 | 0 | if (obj->state >= OBJ_PREPARED) { |
8591 | 0 | pr_warn("object '%s': prepare loading can't be attempted twice\n", obj->name); |
8592 | 0 | return -EINVAL; |
8593 | 0 | } |
8594 | | |
8595 | 0 | err = bpf_object_prepare_token(obj); |
8596 | 0 | err = err ? : bpf_object__probe_loading(obj); |
8597 | 0 | err = err ? : bpf_object__load_vmlinux_btf(obj, false); |
8598 | 0 | err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); |
8599 | 0 | err = err ? : bpf_object__sanitize_maps(obj); |
8600 | 0 | err = err ? : bpf_object__init_kern_struct_ops_maps(obj); |
8601 | 0 | err = err ? : bpf_object_adjust_struct_ops_autoload(obj); |
8602 | 0 | err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path); |
8603 | 0 | err = err ? : bpf_object__sanitize_and_load_btf(obj); |
8604 | 0 | err = err ? : bpf_object__create_maps(obj); |
8605 | 0 | err = err ? : bpf_object_prepare_progs(obj); |
8606 | |
8607 | 0 | if (err) { |
8608 | 0 | bpf_object_unpin(obj); |
8609 | 0 | bpf_object_unload(obj); |
8610 | 0 | obj->state = OBJ_LOADED; |
8611 | 0 | return err; |
8612 | 0 | } |
8613 | | |
8614 | 0 | obj->state = OBJ_PREPARED; |
8615 | 0 | return 0; |
8616 | 0 | } |
8617 | | |
8618 | | static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path) |
8619 | 0 | { |
8620 | 0 | int err; |
8621 | |
8622 | 0 | if (!obj) |
8623 | 0 | return libbpf_err(-EINVAL); |
8624 | | |
8625 | 0 | if (obj->state >= OBJ_LOADED) { |
8626 | 0 | pr_warn("object '%s': load can't be attempted twice\n", obj->name); |
8627 | 0 | return libbpf_err(-EINVAL); |
8628 | 0 | } |
8629 | | |
8630 | | /* Disallow loading programs of non-native endianness into the kernel,
8631 | | * but permit cross-endian creation of a "light skeleton".
8632 | | */ |
8633 | 0 | if (obj->gen_loader) { |
8634 | 0 | bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps); |
8635 | 0 | } else if (!is_native_endianness(obj)) { |
8636 | 0 | pr_warn("object '%s': loading non-native endianness is unsupported\n", obj->name); |
8637 | 0 | return libbpf_err(-LIBBPF_ERRNO__ENDIAN); |
8638 | 0 | } |
8639 | | |
8640 | 0 | if (obj->state < OBJ_PREPARED) { |
8641 | 0 | err = bpf_object_prepare(obj, target_btf_path); |
8642 | 0 | if (err) |
8643 | 0 | return libbpf_err(err); |
8644 | 0 | } |
8645 | 0 | err = bpf_object__load_progs(obj, extra_log_level); |
8646 | 0 | err = err ? : bpf_object_init_prog_arrays(obj); |
8647 | 0 | err = err ? : bpf_object_prepare_struct_ops(obj); |
8648 | |
8649 | 0 | if (obj->gen_loader) { |
8650 | | /* reset FDs */ |
8651 | 0 | if (obj->btf) |
8652 | 0 | btf__set_fd(obj->btf, -1); |
8653 | 0 | if (!err) |
8654 | 0 | err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps); |
8655 | 0 | } |
8656 | |
8657 | 0 | bpf_object_post_load_cleanup(obj); |
8658 | 0 | obj->state = OBJ_LOADED; /* doesn't matter if successfully or not */ |
8659 | |
8660 | 0 | if (err) { |
8661 | 0 | bpf_object_unpin(obj); |
8662 | 0 | bpf_object_unload(obj); |
8663 | 0 | pr_warn("failed to load object '%s'\n", obj->path); |
8664 | 0 | return libbpf_err(err); |
8665 | 0 | } |
8666 | | |
8667 | 0 | return 0; |
8668 | 0 | } |
8669 | | |
8670 | | int bpf_object__prepare(struct bpf_object *obj) |
8671 | 0 | { |
8672 | 0 | return libbpf_err(bpf_object_prepare(obj, NULL)); |
8673 | 0 | } |
8674 | | |
8675 | | int bpf_object__load(struct bpf_object *obj) |
8676 | 0 | { |
8677 | 0 | return bpf_object_load(obj, 0, NULL); |
8678 | 0 | } |
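
Note: a caller-side sketch of the state machine above; bpf_object__prepare() is optional, since bpf_object__load() runs the prepare phase implicitly when the object is still in OBJ_OPENED state. The object path is illustrative:

    static int open_and_load(struct bpf_object **res)
    {
        struct bpf_object *obj;
        int err;

        obj = bpf_object__open_file("prog.bpf.o", NULL);
        if (!obj)
            return -errno;

        err = bpf_object__prepare(obj);     /* OBJ_OPENED   -> OBJ_PREPARED */
        err = err ?: bpf_object__load(obj); /* OBJ_PREPARED -> OBJ_LOADED */
        if (err) {
            bpf_object__close(obj);
            return err;
        }
        *res = obj;
        return 0;
    }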
8679 | | |
8680 | | static int make_parent_dir(const char *path) |
8681 | 0 | { |
8682 | 0 | char *dname, *dir; |
8683 | 0 | int err = 0; |
8684 | |
8685 | 0 | dname = strdup(path); |
8686 | 0 | if (dname == NULL) |
8687 | 0 | return -ENOMEM; |
8688 | | |
8689 | 0 | dir = dirname(dname); |
8690 | 0 | if (mkdir(dir, 0700) && errno != EEXIST) |
8691 | 0 | err = -errno; |
8692 | |
8693 | 0 | free(dname); |
8694 | 0 | if (err) { |
8695 | 0 | pr_warn("failed to mkdir %s: %s\n", path, errstr(err)); |
8696 | 0 | } |
8697 | 0 | return err; |
8698 | 0 | } |
8699 | | |
8700 | | static int check_path(const char *path) |
8701 | 0 | { |
8702 | 0 | struct statfs st_fs; |
8703 | 0 | char *dname, *dir; |
8704 | 0 | int err = 0; |
8705 | |
8706 | 0 | if (path == NULL) |
8707 | 0 | return -EINVAL; |
8708 | | |
8709 | 0 | dname = strdup(path); |
8710 | 0 | if (dname == NULL) |
8711 | 0 | return -ENOMEM; |
8712 | | |
8713 | 0 | dir = dirname(dname); |
8714 | 0 | if (statfs(dir, &st_fs)) { |
8715 | 0 | pr_warn("failed to statfs %s: %s\n", dir, errstr(errno)); |
8716 | 0 | err = -errno; |
8717 | 0 | } |
8718 | 0 | free(dname); |
8719 | |
8720 | 0 | if (!err && st_fs.f_type != BPF_FS_MAGIC) { |
8721 | 0 | pr_warn("specified path %s is not on BPF FS\n", path); |
8722 | 0 | err = -EINVAL; |
8723 | 0 | } |
8724 | |
8725 | 0 | return err; |
8726 | 0 | } |
8727 | | |
8728 | | int bpf_program__pin(struct bpf_program *prog, const char *path) |
8729 | 0 | { |
8730 | 0 | int err; |
8731 | |
8732 | 0 | if (prog->fd < 0) { |
8733 | 0 | pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name); |
8734 | 0 | return libbpf_err(-EINVAL); |
8735 | 0 | } |
8736 | | |
8737 | 0 | err = make_parent_dir(path); |
8738 | 0 | if (err) |
8739 | 0 | return libbpf_err(err); |
8740 | | |
8741 | 0 | err = check_path(path); |
8742 | 0 | if (err) |
8743 | 0 | return libbpf_err(err); |
8744 | | |
8745 | 0 | if (bpf_obj_pin(prog->fd, path)) { |
8746 | 0 | err = -errno; |
8747 | 0 | pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, errstr(err)); |
8748 | 0 | return libbpf_err(err); |
8749 | 0 | } |
8750 | | |
8751 | 0 | pr_debug("prog '%s': pinned at '%s'\n", prog->name, path); |
8752 | 0 | return 0; |
8753 | 0 | } |
8754 | | |
8755 | | int bpf_program__unpin(struct bpf_program *prog, const char *path) |
8756 | 0 | { |
8757 | 0 | int err; |
8758 | |
8759 | 0 | if (prog->fd < 0) { |
8760 | 0 | pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name); |
8761 | 0 | return libbpf_err(-EINVAL); |
8762 | 0 | } |
8763 | | |
8764 | 0 | err = check_path(path); |
8765 | 0 | if (err) |
8766 | 0 | return libbpf_err(err); |
8767 | | |
8768 | 0 | err = unlink(path); |
8769 | 0 | if (err) |
8770 | 0 | return libbpf_err(-errno); |
8771 | | |
8772 | 0 | pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path); |
8773 | 0 | return 0; |
8774 | 0 | } |
8775 | | |
8776 | | int bpf_map__pin(struct bpf_map *map, const char *path) |
8777 | 0 | { |
8778 | 0 | int err; |
8779 | |
8780 | 0 | if (map == NULL) { |
8781 | 0 | pr_warn("invalid map pointer\n"); |
8782 | 0 | return libbpf_err(-EINVAL); |
8783 | 0 | } |
8784 | | |
8785 | 0 | if (map->fd < 0) { |
8786 | 0 | pr_warn("map '%s': can't pin BPF map without FD (was it created?)\n", map->name); |
8787 | 0 | return libbpf_err(-EINVAL); |
8788 | 0 | } |
8789 | | |
8790 | 0 | if (map->pin_path) { |
8791 | 0 | if (path && strcmp(path, map->pin_path)) { |
8792 | 0 | pr_warn("map '%s' already has pin path '%s' different from '%s'\n", |
8793 | 0 | bpf_map__name(map), map->pin_path, path); |
8794 | 0 | return libbpf_err(-EINVAL); |
8795 | 0 | } else if (map->pinned) { |
8796 | 0 | pr_debug("map '%s' already pinned at '%s'; not re-pinning\n", |
8797 | 0 | bpf_map__name(map), map->pin_path); |
8798 | 0 | return 0; |
8799 | 0 | } |
8800 | 0 | } else { |
8801 | 0 | if (!path) { |
8802 | 0 | pr_warn("missing a path to pin map '%s' at\n", |
8803 | 0 | bpf_map__name(map)); |
8804 | 0 | return libbpf_err(-EINVAL); |
8805 | 0 | } else if (map->pinned) { |
8806 | 0 | pr_warn("map '%s' already pinned\n", bpf_map__name(map)); |
8807 | 0 | return libbpf_err(-EEXIST); |
8808 | 0 | } |
8809 | | |
8810 | 0 | map->pin_path = strdup(path); |
8811 | 0 | if (!map->pin_path) { |
8812 | 0 | err = -errno; |
8813 | 0 | goto out_err; |
8814 | 0 | } |
8815 | 0 | } |
8816 | | |
8817 | 0 | err = make_parent_dir(map->pin_path); |
8818 | 0 | if (err) |
8819 | 0 | return libbpf_err(err); |
8820 | | |
8821 | 0 | err = check_path(map->pin_path); |
8822 | 0 | if (err) |
8823 | 0 | return libbpf_err(err); |
8824 | | |
8825 | 0 | if (bpf_obj_pin(map->fd, map->pin_path)) { |
8826 | 0 | err = -errno; |
8827 | 0 | goto out_err; |
8828 | 0 | } |
8829 | | |
8830 | 0 | map->pinned = true; |
8831 | 0 | pr_debug("pinned map '%s'\n", map->pin_path); |
8832 | |
8833 | 0 | return 0; |
8834 | | |
8835 | 0 | out_err: |
8836 | 0 | pr_warn("failed to pin map: %s\n", errstr(err)); |
8837 | 0 | return libbpf_err(err); |
8838 | 0 | } |
8839 | | |
8840 | | int bpf_map__unpin(struct bpf_map *map, const char *path) |
8841 | 0 | { |
8842 | 0 | int err; |
8843 | |
8844 | 0 | if (map == NULL) { |
8845 | 0 | pr_warn("invalid map pointer\n"); |
8846 | 0 | return libbpf_err(-EINVAL); |
8847 | 0 | } |
8848 | | |
8849 | 0 | if (map->pin_path) { |
8850 | 0 | if (path && strcmp(path, map->pin_path)) { |
8851 | 0 | pr_warn("map '%s' already has pin path '%s' different from '%s'\n", |
8852 | 0 | bpf_map__name(map), map->pin_path, path); |
8853 | 0 | return libbpf_err(-EINVAL); |
8854 | 0 | } |
8855 | 0 | path = map->pin_path; |
8856 | 0 | } else if (!path) { |
8857 | 0 | pr_warn("no path to unpin map '%s' from\n", |
8858 | 0 | bpf_map__name(map)); |
8859 | 0 | return libbpf_err(-EINVAL); |
8860 | 0 | } |
8861 | | |
8862 | 0 | err = check_path(path); |
8863 | 0 | if (err) |
8864 | 0 | return libbpf_err(err); |
8865 | | |
8866 | 0 | err = unlink(path); |
8867 | 0 | if (err != 0) |
8868 | 0 | return libbpf_err(-errno); |
8869 | | |
8870 | 0 | map->pinned = false; |
8871 | 0 | pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path); |
8872 | |
8873 | 0 | return 0; |
8874 | 0 | } |
8875 | | |
8876 | | int bpf_map__set_pin_path(struct bpf_map *map, const char *path) |
8877 | 1 | { |
8878 | 1 | char *new = NULL; |
8879 | | |
8880 | 1 | if (path) { |
8881 | 1 | new = strdup(path); |
8882 | 1 | if (!new) |
8883 | 0 | return libbpf_err(-errno); |
8884 | 1 | } |
8885 | | |
8886 | 1 | free(map->pin_path); |
8887 | 1 | map->pin_path = new; |
8888 | 1 | return 0; |
8889 | 1 | } |
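 | |
 | | /* Illustrative sketch (map name and path are hypothetical): setting a
 | |  * pin path before load makes libbpf pin the map at load time, or reuse
 | |  * an already-pinned map with a compatible definition:
 | |  *
 | |  *	struct bpf_map *map;
 | |  *
 | |  *	map = bpf_object__find_map_by_name(obj, "my_counters");
 | |  *	if (!map || bpf_map__set_pin_path(map, "/sys/fs/bpf/my_counters"))
 | |  *		... handle error ...
 | |  *	... bpf_object__load(obj) then pins or reuses the map ...
 | |  */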
8890 | | |
8891 | | __alias(bpf_map__pin_path) |
8892 | | const char *bpf_map__get_pin_path(const struct bpf_map *map); |
8893 | | |
8894 | | const char *bpf_map__pin_path(const struct bpf_map *map) |
8895 | 0 | { |
8896 | 0 | return map->pin_path; |
8897 | 0 | } |
8898 | | |
8899 | | bool bpf_map__is_pinned(const struct bpf_map *map) |
8900 | 0 | { |
8901 | 0 | return map->pinned; |
8902 | 0 | } |
8903 | | |
8904 | | static void sanitize_pin_path(char *s) |
8905 | 0 | { |
8906 | | /* bpffs disallows periods in path names */ |
8907 | 0 | while (*s) { |
8908 | 0 | if (*s == '.') |
8909 | 0 | *s = '_'; |
8910 | 0 | s++; |
8911 | 0 | } |
8912 | 0 | } |
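 | |
 | | /* e.g. an internal map named ".data.settings" would end up pinned
 | |  * under a name like "_data_settings" (illustrative; only '.' is
 | |  * rewritten, the rest of the path is kept as-is).
 | |  */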
8913 | | |
8914 | | int bpf_object__pin_maps(struct bpf_object *obj, const char *path) |
8915 | 0 | { |
8916 | 0 | struct bpf_map *map; |
8917 | 0 | int err; |
8918 | |
8919 | 0 | if (!obj) |
8920 | 0 | return libbpf_err(-ENOENT); |
8921 | | |
8922 | 0 | if (obj->state < OBJ_PREPARED) { |
8923 | 0 | pr_warn("object not yet loaded; load it first\n"); |
8924 | 0 | return libbpf_err(-ENOENT); |
8925 | 0 | } |
8926 | | |
8927 | 0 | bpf_object__for_each_map(map, obj) { |
8928 | 0 | char *pin_path = NULL; |
8929 | 0 | char buf[PATH_MAX]; |
8930 | |
8931 | 0 | if (!map->autocreate) |
8932 | 0 | continue; |
8933 | | |
8934 | 0 | if (path) { |
8935 | 0 | err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map)); |
8936 | 0 | if (err) |
8937 | 0 | goto err_unpin_maps; |
8938 | 0 | sanitize_pin_path(buf); |
8939 | 0 | pin_path = buf; |
8940 | 0 | } else if (!map->pin_path) { |
8941 | 0 | continue; |
8942 | 0 | } |
8943 | | |
8944 | 0 | err = bpf_map__pin(map, pin_path); |
8945 | 0 | if (err) |
8946 | 0 | goto err_unpin_maps; |
8947 | 0 | } |
8948 | | |
8949 | 0 | return 0; |
8950 | | |
8951 | 0 | err_unpin_maps: |
8952 | 0 | while ((map = bpf_object__prev_map(obj, map))) { |
8953 | 0 | if (!map->pin_path) |
8954 | 0 | continue; |
8955 | | |
8956 | 0 | bpf_map__unpin(map, NULL); |
8957 | 0 | } |
8958 | |
8959 | 0 | return libbpf_err(err); |
8960 | 0 | } |
8961 | | |
8962 | | int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) |
8963 | 0 | { |
8964 | 0 | struct bpf_map *map; |
8965 | 0 | int err; |
8966 | |
8967 | 0 | if (!obj) |
8968 | 0 | return libbpf_err(-ENOENT); |
8969 | | |
8970 | 0 | bpf_object__for_each_map(map, obj) { |
8971 | 0 | char *pin_path = NULL; |
8972 | 0 | char buf[PATH_MAX]; |
8973 | |
8974 | 0 | if (path) { |
8975 | 0 | err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map)); |
8976 | 0 | if (err) |
8977 | 0 | return libbpf_err(err); |
8978 | 0 | sanitize_pin_path(buf); |
8979 | 0 | pin_path = buf; |
8980 | 0 | } else if (!map->pin_path) { |
8981 | 0 | continue; |
8982 | 0 | } |
8983 | | |
8984 | 0 | err = bpf_map__unpin(map, pin_path); |
8985 | 0 | if (err) |
8986 | 0 | return libbpf_err(err); |
8987 | 0 | } |
8988 | | |
8989 | 0 | return 0; |
8990 | 0 | } |
8991 | | |
8992 | | int bpf_object__pin_programs(struct bpf_object *obj, const char *path) |
8993 | 0 | { |
8994 | 0 | struct bpf_program *prog; |
8995 | 0 | char buf[PATH_MAX]; |
8996 | 0 | int err; |
8997 | |
8998 | 0 | if (!obj) |
8999 | 0 | return libbpf_err(-ENOENT); |
9000 | | |
9001 | 0 | if (obj->state < OBJ_LOADED) { |
9002 | 0 | pr_warn("object not yet loaded; load it first\n"); |
9003 | 0 | return libbpf_err(-ENOENT); |
9004 | 0 | } |
9005 | | |
9006 | 0 | bpf_object__for_each_program(prog, obj) { |
9007 | 0 | err = pathname_concat(buf, sizeof(buf), path, prog->name); |
9008 | 0 | if (err) |
9009 | 0 | goto err_unpin_programs; |
9010 | | |
9011 | 0 | err = bpf_program__pin(prog, buf); |
9012 | 0 | if (err) |
9013 | 0 | goto err_unpin_programs; |
9014 | 0 | } |
9015 | | |
9016 | 0 | return 0; |
9017 | | |
9018 | 0 | err_unpin_programs: |
9019 | 0 | while ((prog = bpf_object__prev_program(obj, prog))) { |
9020 | 0 | if (pathname_concat(buf, sizeof(buf), path, prog->name)) |
9021 | 0 | continue; |
9022 | | |
9023 | 0 | bpf_program__unpin(prog, buf); |
9024 | 0 | } |
9025 | |
9026 | 0 | return libbpf_err(err); |
9027 | 0 | } |
9028 | | |
9029 | | int bpf_object__unpin_programs(struct bpf_object *obj, const char *path) |
9030 | 0 | { |
9031 | 0 | struct bpf_program *prog; |
9032 | 0 | int err; |
9033 | |
9034 | 0 | if (!obj) |
9035 | 0 | return libbpf_err(-ENOENT); |
9036 | | |
9037 | 0 | bpf_object__for_each_program(prog, obj) { |
9038 | 0 | char buf[PATH_MAX]; |
9039 | |
9040 | 0 | err = pathname_concat(buf, sizeof(buf), path, prog->name); |
9041 | 0 | if (err) |
9042 | 0 | return libbpf_err(err); |
9043 | | |
9044 | 0 | err = bpf_program__unpin(prog, buf); |
9045 | 0 | if (err) |
9046 | 0 | return libbpf_err(err); |
9047 | 0 | } |
9048 | | |
9049 | 0 | return 0; |
9050 | 0 | } |
9051 | | |
9052 | | int bpf_object__pin(struct bpf_object *obj, const char *path) |
9053 | 0 | { |
9054 | 0 | int err; |
9055 | |
9056 | 0 | err = bpf_object__pin_maps(obj, path); |
9057 | 0 | if (err) |
9058 | 0 | return libbpf_err(err); |
9059 | | |
9060 | 0 | err = bpf_object__pin_programs(obj, path); |
9061 | 0 | if (err) { |
9062 | 0 | bpf_object__unpin_maps(obj, path); |
9063 | 0 | return libbpf_err(err); |
9064 | 0 | } |
9065 | | |
9066 | 0 | return 0; |
9067 | 0 | } |
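 | |
 | | /* Illustrative sketch (the path is hypothetical): pin all maps and
 | |  * programs under one bpffs directory; maps are rolled back
 | |  * automatically if pinning the programs fails:
 | |  *
 | |  *	if (bpf_object__pin(obj, "/sys/fs/bpf/myapp"))
 | |  *		... handle error ...
 | |  *	...
 | |  *	bpf_object__unpin(obj, "/sys/fs/bpf/myapp");
 | |  */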
9068 | | |
9069 | | int bpf_object__unpin(struct bpf_object *obj, const char *path) |
9070 | 0 | { |
9071 | 0 | int err; |
9072 | |
9073 | 0 | err = bpf_object__unpin_programs(obj, path); |
9074 | 0 | if (err) |
9075 | 0 | return libbpf_err(err); |
9076 | | |
9077 | 0 | err = bpf_object__unpin_maps(obj, path); |
9078 | 0 | if (err) |
9079 | 0 | return libbpf_err(err); |
9080 | | |
9081 | 0 | return 0; |
9082 | 0 | } |
9083 | | |
9084 | | static void bpf_map__destroy(struct bpf_map *map) |
9085 | 2.84k | { |
9086 | 2.84k | if (map->inner_map) { |
9087 | 0 | bpf_map__destroy(map->inner_map); |
9088 | 0 | zfree(&map->inner_map); |
9089 | 0 | } |
9090 | | |
9091 | 2.84k | zfree(&map->init_slots); |
9092 | 2.84k | map->init_slots_sz = 0; |
9093 | | |
9094 | 2.84k | if (map->mmaped && map->mmaped != map->obj->arena_data) |
9095 | 1.80k | munmap(map->mmaped, bpf_map_mmap_sz(map)); |
9096 | 2.84k | map->mmaped = NULL; |
9097 | | |
9098 | 2.84k | if (map->st_ops) { |
9099 | 60 | zfree(&map->st_ops->data); |
9100 | 60 | zfree(&map->st_ops->progs); |
9101 | 60 | zfree(&map->st_ops->kern_func_off); |
9102 | 60 | zfree(&map->st_ops); |
9103 | 60 | } |
9104 | | |
9105 | 2.84k | zfree(&map->name); |
9106 | 2.84k | zfree(&map->real_name); |
9107 | 2.84k | zfree(&map->pin_path); |
9108 | | |
9109 | 2.84k | if (map->fd >= 0) |
9110 | 0 | zclose(map->fd); |
9111 | 2.84k | } |
9112 | | |
9113 | | void bpf_object__close(struct bpf_object *obj) |
9114 | 11.5k | { |
9115 | 11.5k | size_t i; |
9116 | | |
9117 | 11.5k | if (IS_ERR_OR_NULL(obj)) |
9118 | 0 | return; |
9119 | | |
9120 | | /* |
9121 | | * if user called bpf_object__prepare() without ever getting to |
9122 | | * bpf_object__load(), we need to clean up stuff that is normally |
9123 | | * cleaned up at the end of the loading step
9124 | | */ |
9125 | 11.5k | bpf_object_post_load_cleanup(obj); |
9126 | | |
9127 | 11.5k | usdt_manager_free(obj->usdt_man); |
9128 | 11.5k | obj->usdt_man = NULL; |
9129 | | |
9130 | 11.5k | bpf_gen__free(obj->gen_loader); |
9131 | 11.5k | bpf_object__elf_finish(obj); |
9132 | 11.5k | bpf_object_unload(obj); |
9133 | 11.5k | btf__free(obj->btf); |
9134 | 11.5k | btf__free(obj->btf_vmlinux); |
9135 | 11.5k | btf_ext__free(obj->btf_ext); |
9136 | | |
9137 | 14.4k | for (i = 0; i < obj->nr_maps; i++) |
9138 | 2.84k | bpf_map__destroy(&obj->maps[i]); |
9139 | | |
9140 | 11.5k | zfree(&obj->btf_custom_path); |
9141 | 11.5k | zfree(&obj->kconfig); |
9142 | | |
9143 | 15.1k | for (i = 0; i < obj->nr_extern; i++) { |
9144 | 3.56k | zfree(&obj->externs[i].name); |
9145 | 3.56k | zfree(&obj->externs[i].essent_name); |
9146 | 3.56k | } |
9147 | | |
9148 | 11.5k | zfree(&obj->externs); |
9149 | 11.5k | obj->nr_extern = 0; |
9150 | | |
9151 | 11.5k | zfree(&obj->maps); |
9152 | 11.5k | obj->nr_maps = 0; |
9153 | | |
9154 | 11.5k | if (obj->programs && obj->nr_programs) { |
9155 | 9.91k | for (i = 0; i < obj->nr_programs; i++) |
9156 | 9.26k | bpf_program__exit(&obj->programs[i]); |
9157 | 648 | } |
9158 | 11.5k | zfree(&obj->programs); |
9159 | | |
9160 | 11.5k | zfree(&obj->feat_cache); |
9161 | 11.5k | zfree(&obj->token_path); |
9162 | 11.5k | if (obj->token_fd > 0) |
9163 | 0 | close(obj->token_fd); |
9164 | | |
9165 | 11.5k | zfree(&obj->arena_data); |
9166 | | |
9167 | 11.5k | free(obj); |
9168 | 11.5k | } |
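 | |
 | | /* Typical lifecycle around this teardown (illustrative sketch; the
 | |  * object file name is hypothetical):
 | |  *
 | |  *	struct bpf_object *obj = bpf_object__open("prog.bpf.o");
 | |  *
 | |  *	if (!obj)
 | |  *		... handle error (errno is set) ...
 | |  *	if (bpf_object__load(obj))
 | |  *		... handle error, then bpf_object__close(obj) ...
 | |  *	... attach programs, use maps ...
 | |  *	bpf_object__close(obj);
 | |  */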
9169 | | |
9170 | | const char *bpf_object__name(const struct bpf_object *obj) |
9171 | 0 | { |
9172 | 0 | return obj ? obj->name : libbpf_err_ptr(-EINVAL); |
9173 | 0 | } |
9174 | | |
9175 | | unsigned int bpf_object__kversion(const struct bpf_object *obj) |
9176 | 0 | { |
9177 | 0 | return obj ? obj->kern_version : 0; |
9178 | 0 | } |
9179 | | |
9180 | | int bpf_object__token_fd(const struct bpf_object *obj) |
9181 | 0 | { |
9182 | 0 | return obj->token_fd ?: -1; |
9183 | 0 | } |
9184 | | |
9185 | | struct btf *bpf_object__btf(const struct bpf_object *obj) |
9186 | 0 | { |
9187 | 0 | return obj ? obj->btf : NULL; |
9188 | 0 | } |
9189 | | |
9190 | | int bpf_object__btf_fd(const struct bpf_object *obj) |
9191 | 0 | { |
9192 | 0 | return obj->btf ? btf__fd(obj->btf) : -1; |
9193 | 0 | } |
9194 | | |
9195 | | int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version) |
9196 | 0 | { |
9197 | 0 | if (obj->state >= OBJ_LOADED) |
9198 | 0 | return libbpf_err(-EINVAL); |
9199 | | |
9200 | 0 | obj->kern_version = kern_version; |
9201 | |
9202 | 0 | return 0; |
9203 | 0 | } |
9204 | | |
9205 | | int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts) |
9206 | 0 | { |
9207 | 0 | struct bpf_gen *gen; |
9208 | |
9209 | 0 | if (!opts) |
9210 | 0 | return libbpf_err(-EFAULT); |
9211 | 0 | if (!OPTS_VALID(opts, gen_loader_opts)) |
9212 | 0 | return libbpf_err(-EINVAL); |
9213 | 0 | gen = calloc(sizeof(*gen), 1); |
9214 | 0 | if (!gen) |
9215 | 0 | return libbpf_err(-ENOMEM); |
9216 | 0 | gen->opts = opts; |
9217 | 0 | gen->swapped_endian = !is_native_endianness(obj); |
9218 | 0 | obj->gen_loader = gen; |
9219 | 0 | return 0; |
9220 | 0 | } |
9221 | | |
9222 | | static struct bpf_program * |
9223 | | __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj, |
9224 | | bool forward) |
9225 | 9.75k | { |
9226 | 9.75k | size_t nr_programs = obj->nr_programs; |
9227 | 9.75k | ssize_t idx; |
9228 | | |
9229 | 9.75k | if (!nr_programs) |
9230 | 1.66k | return NULL; |
9231 | | |
9232 | 8.08k | if (!p) |
9233 | | /* Iter from the beginning */ |
9234 | 545 | return forward ? &obj->programs[0] : |
9235 | 545 | &obj->programs[nr_programs - 1]; |
9236 | | |
9237 | 7.53k | if (p->obj != obj) { |
9238 | 0 | pr_warn("error: program handler doesn't match object\n"); |
9239 | 0 | return errno = EINVAL, NULL; |
9240 | 0 | } |
9241 | | |
9242 | 7.53k | idx = (p - obj->programs) + (forward ? 1 : -1); |
9243 | 7.53k | if (idx >= obj->nr_programs || idx < 0) |
9244 | 545 | return NULL; |
9245 | 6.99k | return &obj->programs[idx]; |
9246 | 7.53k | } |
9247 | | |
9248 | | struct bpf_program * |
9249 | | bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev) |
9250 | 7.62k | { |
9251 | 7.62k | struct bpf_program *prog = prev; |
9252 | | |
9253 | 9.75k | do { |
9254 | 9.75k | prog = __bpf_program__iter(prog, obj, true); |
9255 | 9.75k | } while (prog && prog_is_subprog(obj, prog)); |
9256 | | |
9257 | 7.62k | return prog; |
9258 | 7.62k | } |
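 | |
 | | /* Illustrative iteration sketch: the bpf_object__for_each_program()
 | |  * macro is built on this function, so subprograms are transparently
 | |  * skipped there as well:
 | |  *
 | |  *	struct bpf_program *pos;
 | |  *
 | |  *	bpf_object__for_each_program(pos, obj)
 | |  *		printf("prog: %s\n", bpf_program__name(pos));
 | |  */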
9259 | | |
9260 | | struct bpf_program * |
9261 | | bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next) |
9262 | 0 | { |
9263 | 0 | struct bpf_program *prog = next; |
9264 | |
9265 | 0 | do { |
9266 | 0 | prog = __bpf_program__iter(prog, obj, false); |
9267 | 0 | } while (prog && prog_is_subprog(obj, prog)); |
9268 | |
9269 | 0 | return prog; |
9270 | 0 | } |
9271 | | |
9272 | | void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex) |
9273 | 0 | { |
9274 | 0 | prog->prog_ifindex = ifindex; |
9275 | 0 | } |
9276 | | |
9277 | | const char *bpf_program__name(const struct bpf_program *prog) |
9278 | 0 | { |
9279 | 0 | return prog->name; |
9280 | 0 | } |
9281 | | |
9282 | | const char *bpf_program__section_name(const struct bpf_program *prog) |
9283 | 0 | { |
9284 | 0 | return prog->sec_name; |
9285 | 0 | } |
9286 | | |
9287 | | bool bpf_program__autoload(const struct bpf_program *prog) |
9288 | 0 | { |
9289 | 0 | return prog->autoload; |
9290 | 0 | } |
9291 | | |
9292 | | int bpf_program__set_autoload(struct bpf_program *prog, bool autoload) |
9293 | 0 | { |
9294 | 0 | if (prog->obj->state >= OBJ_LOADED) |
9295 | 0 | return libbpf_err(-EINVAL); |
9296 | | |
9297 | 0 | prog->autoload = autoload; |
9298 | 0 | return 0; |
9299 | 0 | } |
9300 | | |
9301 | | bool bpf_program__autoattach(const struct bpf_program *prog) |
9302 | 0 | { |
9303 | 0 | return prog->autoattach; |
9304 | 0 | } |
9305 | | |
9306 | | void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach) |
9307 | 0 | { |
9308 | 0 | prog->autoattach = autoattach; |
9309 | 0 | } |
9310 | | |
9311 | | const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog) |
9312 | 0 | { |
9313 | 0 | return prog->insns; |
9314 | 0 | } |
9315 | | |
9316 | | size_t bpf_program__insn_cnt(const struct bpf_program *prog) |
9317 | 0 | { |
9318 | 0 | return prog->insns_cnt; |
9319 | 0 | } |
9320 | | |
9321 | | int bpf_program__set_insns(struct bpf_program *prog, |
9322 | | struct bpf_insn *new_insns, size_t new_insn_cnt) |
9323 | 0 | { |
9324 | 0 | struct bpf_insn *insns; |
9325 | |
9326 | 0 | if (prog->obj->state >= OBJ_LOADED) |
9327 | 0 | return libbpf_err(-EBUSY); |
9328 | | |
9329 | 0 | insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns)); |
9330 | | /* NULL is a valid return from reallocarray if the new count is zero */ |
9331 | 0 | if (!insns && new_insn_cnt) { |
9332 | 0 | pr_warn("prog '%s': failed to realloc prog code\n", prog->name); |
9333 | 0 | return libbpf_err(-ENOMEM); |
9334 | 0 | } |
9335 | 0 | memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns)); |
9336 | |
9337 | 0 | prog->insns = insns; |
9338 | 0 | prog->insns_cnt = new_insn_cnt; |
9339 | 0 | return 0; |
9340 | 0 | } |
9341 | | |
9342 | | int bpf_program__fd(const struct bpf_program *prog) |
9343 | 0 | { |
9344 | 0 | if (!prog) |
9345 | 0 | return libbpf_err(-EINVAL); |
9346 | | |
9347 | 0 | if (prog->fd < 0) |
9348 | 0 | return libbpf_err(-ENOENT); |
9349 | | |
9350 | 0 | return prog->fd; |
9351 | 0 | } |
9352 | | |
9353 | | __alias(bpf_program__type) |
9354 | | enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog); |
9355 | | |
9356 | | enum bpf_prog_type bpf_program__type(const struct bpf_program *prog) |
9357 | 0 | { |
9358 | 0 | return prog->type; |
9359 | 0 | } |
9360 | | |
9361 | | static size_t custom_sec_def_cnt; |
9362 | | static struct bpf_sec_def *custom_sec_defs; |
9363 | | static struct bpf_sec_def custom_fallback_def; |
9364 | | static bool has_custom_fallback_def; |
9365 | | static int last_custom_sec_def_handler_id; |
9366 | | |
9367 | | int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type) |
9368 | 0 | { |
9369 | 0 | if (prog->obj->state >= OBJ_LOADED) |
9370 | 0 | return libbpf_err(-EBUSY); |
9371 | | |
9372 | | /* if type is not changed, do nothing */ |
9373 | 0 | if (prog->type == type) |
9374 | 0 | return 0; |
9375 | | |
9376 | 0 | prog->type = type; |
9377 | | |
9378 | | /* If a program type was changed, we need to reset associated SEC() |
9379 | | * handler, as it will be invalid now. The only exception is a generic |
9380 | | * fallback handler, which by definition is program type-agnostic and |
9381 | | * is a catch-all custom handler, optionally set by the application, |
9382 | | * so should be able to handle any type of BPF program. |
9383 | | */ |
9384 | 0 | if (prog->sec_def != &custom_fallback_def) |
9385 | 0 | prog->sec_def = NULL; |
9386 | 0 | return 0; |
9387 | 0 | } |
9388 | | |
9389 | | __alias(bpf_program__expected_attach_type) |
9390 | | enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog); |
9391 | | |
9392 | | enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog) |
9393 | 0 | { |
9394 | 0 | return prog->expected_attach_type; |
9395 | 0 | } |
9396 | | |
9397 | | int bpf_program__set_expected_attach_type(struct bpf_program *prog, |
9398 | | enum bpf_attach_type type) |
9399 | 0 | { |
9400 | 0 | if (prog->obj->state >= OBJ_LOADED) |
9401 | 0 | return libbpf_err(-EBUSY); |
9402 | | |
9403 | 0 | prog->expected_attach_type = type; |
9404 | 0 | return 0; |
9405 | 0 | } |
9406 | | |
9407 | | __u32 bpf_program__flags(const struct bpf_program *prog) |
9408 | 0 | { |
9409 | 0 | return prog->prog_flags; |
9410 | 0 | } |
9411 | | |
9412 | | int bpf_program__set_flags(struct bpf_program *prog, __u32 flags) |
9413 | 0 | { |
9414 | 0 | if (prog->obj->state >= OBJ_LOADED) |
9415 | 0 | return libbpf_err(-EBUSY); |
9416 | | |
9417 | 0 | prog->prog_flags = flags; |
9418 | 0 | return 0; |
9419 | 0 | } |
9420 | | |
9421 | | __u32 bpf_program__log_level(const struct bpf_program *prog) |
9422 | 0 | { |
9423 | 0 | return prog->log_level; |
9424 | 0 | } |
9425 | | |
9426 | | int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level) |
9427 | 0 | { |
9428 | 0 | if (prog->obj->state >= OBJ_LOADED) |
9429 | 0 | return libbpf_err(-EBUSY); |
9430 | | |
9431 | 0 | prog->log_level = log_level; |
9432 | 0 | return 0; |
9433 | 0 | } |
9434 | | |
9435 | | const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size) |
9436 | 0 | { |
9437 | 0 | *log_size = prog->log_size; |
9438 | 0 | return prog->log_buf; |
9439 | 0 | } |
9440 | | |
9441 | | int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size) |
9442 | 0 | { |
9443 | 0 | if (log_size && !log_buf) |
9444 | 0 | return libbpf_err(-EINVAL); |
9445 | 0 | if (prog->log_size > UINT_MAX) |
9446 | 0 | return libbpf_err(-EINVAL); |
9447 | 0 | if (prog->obj->state >= OBJ_LOADED) |
9448 | 0 | return libbpf_err(-EBUSY); |
9449 | | |
9450 | 0 | prog->log_buf = log_buf; |
9451 | 0 | prog->log_size = log_size; |
9452 | 0 | return 0; |
9453 | 0 | } |
9454 | | |
9455 | | struct bpf_func_info *bpf_program__func_info(const struct bpf_program *prog) |
9456 | 0 | { |
9457 | 0 | if (prog->func_info_rec_size != sizeof(struct bpf_func_info)) |
9458 | 0 | return libbpf_err_ptr(-EOPNOTSUPP); |
9459 | 0 | return prog->func_info; |
9460 | 0 | } |
9461 | | |
9462 | | __u32 bpf_program__func_info_cnt(const struct bpf_program *prog) |
9463 | 0 | { |
9464 | 0 | return prog->func_info_cnt; |
9465 | 0 | } |
9466 | | |
9467 | | struct bpf_line_info *bpf_program__line_info(const struct bpf_program *prog) |
9468 | 0 | { |
9469 | 0 | if (prog->line_info_rec_size != sizeof(struct bpf_line_info)) |
9470 | 0 | return libbpf_err_ptr(-EOPNOTSUPP); |
9471 | 0 | return prog->line_info; |
9472 | 0 | } |
9473 | | |
9474 | | __u32 bpf_program__line_info_cnt(const struct bpf_program *prog) |
9475 | 0 | { |
9476 | 0 | return prog->line_info_cnt; |
9477 | 0 | } |
9478 | | |
9479 | | #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \ |
9480 | | .sec = (char *)sec_pfx, \ |
9481 | | .prog_type = BPF_PROG_TYPE_##ptype, \ |
9482 | | .expected_attach_type = atype, \ |
9483 | | .cookie = (long)(flags), \ |
9484 | | .prog_prepare_load_fn = libbpf_prepare_prog_load, \ |
9485 | | __VA_ARGS__ \ |
9486 | | } |
9487 | | |
9488 | | static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9489 | | static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9490 | | static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9491 | | static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9492 | | static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9493 | | static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9494 | | static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9495 | | static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9496 | | static int attach_kprobe_session(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9497 | | static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9498 | | static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9499 | | static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9500 | | |
9501 | | static const struct bpf_sec_def section_defs[] = { |
9502 | | SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE), |
9503 | | SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE), |
9504 | | SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE), |
9505 | | SEC_DEF("kprobe+", KPROBE, 0, SEC_NONE, attach_kprobe), |
9506 | | SEC_DEF("uprobe+", KPROBE, 0, SEC_NONE, attach_uprobe), |
9507 | | SEC_DEF("uprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe), |
9508 | | SEC_DEF("kretprobe+", KPROBE, 0, SEC_NONE, attach_kprobe), |
9509 | | SEC_DEF("uretprobe+", KPROBE, 0, SEC_NONE, attach_uprobe), |
9510 | | SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe), |
9511 | | SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), |
9512 | | SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), |
9513 | | SEC_DEF("kprobe.session+", KPROBE, BPF_TRACE_KPROBE_SESSION, SEC_NONE, attach_kprobe_session), |
9514 | | SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi), |
9515 | | SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi), |
9516 | | SEC_DEF("uprobe.session+", KPROBE, BPF_TRACE_UPROBE_SESSION, SEC_NONE, attach_uprobe_multi), |
9517 | | SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), |
9518 | | SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), |
9519 | | SEC_DEF("uprobe.session.s+", KPROBE, BPF_TRACE_UPROBE_SESSION, SEC_SLEEPABLE, attach_uprobe_multi), |
9520 | | SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), |
9521 | | SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), |
9522 | | SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt), |
9523 | | SEC_DEF("usdt.s+", KPROBE, 0, SEC_USDT | SEC_SLEEPABLE, attach_usdt), |
9524 | | SEC_DEF("tc/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), /* alias for tcx */ |
9525 | | SEC_DEF("tc/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), /* alias for tcx */ |
9526 | | SEC_DEF("tcx/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), |
9527 | | SEC_DEF("tcx/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), |
9528 | | SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */ |
9529 | | SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */ |
9530 | | SEC_DEF("action", SCHED_ACT, 0, SEC_NONE), /* deprecated / legacy, use tcx */ |
9531 | | SEC_DEF("netkit/primary", SCHED_CLS, BPF_NETKIT_PRIMARY, SEC_NONE), |
9532 | | SEC_DEF("netkit/peer", SCHED_CLS, BPF_NETKIT_PEER, SEC_NONE), |
9533 | | SEC_DEF("tracepoint+", TRACEPOINT, 0, SEC_NONE, attach_tp), |
9534 | | SEC_DEF("tp+", TRACEPOINT, 0, SEC_NONE, attach_tp), |
9535 | | SEC_DEF("raw_tracepoint+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp), |
9536 | | SEC_DEF("raw_tp+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp), |
9537 | | SEC_DEF("raw_tracepoint.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp), |
9538 | | SEC_DEF("raw_tp.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp), |
9539 | | SEC_DEF("tp_btf+", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace), |
9540 | | SEC_DEF("fentry+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace), |
9541 | | SEC_DEF("fmod_ret+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace), |
9542 | | SEC_DEF("fexit+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace), |
9543 | | SEC_DEF("fentry.s+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), |
9544 | | SEC_DEF("fmod_ret.s+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), |
9545 | | SEC_DEF("fexit.s+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), |
9546 | | SEC_DEF("freplace+", EXT, 0, SEC_ATTACH_BTF, attach_trace), |
9547 | | SEC_DEF("lsm+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm), |
9548 | | SEC_DEF("lsm.s+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm), |
9549 | | SEC_DEF("lsm_cgroup+", LSM, BPF_LSM_CGROUP, SEC_ATTACH_BTF), |
9550 | | SEC_DEF("iter+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter), |
9551 | | SEC_DEF("iter.s+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter), |
9552 | | SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE), |
9553 | | SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS), |
9554 | | SEC_DEF("xdp/devmap", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE), |
9555 | | SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS), |
9556 | | SEC_DEF("xdp/cpumap", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE), |
9557 | | SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS), |
9558 | | SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT), |
9559 | | SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE), |
9560 | | SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE), |
9561 | | SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE), |
9562 | | SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE), |
9563 | | SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE), |
9564 | | SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT), |
9565 | | SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT), |
9566 | | SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT), |
9567 | | SEC_DEF("sk_skb/verdict", SK_SKB, BPF_SK_SKB_VERDICT, SEC_ATTACHABLE_OPT), |
9568 | | SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE), |
9569 | | SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT), |
9570 | | SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT), |
9571 | | SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT), |
9572 | | SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT), |
9573 | | SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT), |
9574 | | SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE), |
9575 | | SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE), |
9576 | | SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE), |
9577 | | SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT), |
9578 | | SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE), |
9579 | | SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE), |
9580 | | SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE), |
9581 | | SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE), |
9582 | | SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE), |
9583 | | SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE), |
9584 | | SEC_DEF("cgroup/connect_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_CONNECT, SEC_ATTACHABLE), |
9585 | | SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE), |
9586 | | SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE), |
9587 | | SEC_DEF("cgroup/sendmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_SENDMSG, SEC_ATTACHABLE), |
9588 | | SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE), |
9589 | | SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE), |
9590 | | SEC_DEF("cgroup/recvmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_RECVMSG, SEC_ATTACHABLE), |
9591 | | SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE), |
9592 | | SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE), |
9593 | | SEC_DEF("cgroup/getpeername_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETPEERNAME, SEC_ATTACHABLE), |
9594 | | SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE), |
9595 | | SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE), |
9596 | | SEC_DEF("cgroup/getsockname_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETSOCKNAME, SEC_ATTACHABLE), |
9597 | | SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE), |
9598 | | SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE), |
9599 | | SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE), |
9600 | | SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT), |
9601 | | SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE), |
9602 | | SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE), |
9603 | | SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE), |
9604 | | SEC_DEF("netfilter", NETFILTER, BPF_NETFILTER, SEC_NONE), |
9605 | | }; |
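 | |
 | | /* Illustrative mapping (assumed BPF-side source): a program declared as
 | |  *
 | |  *	SEC("kprobe/do_unlinkat")
 | |  *	int handle_unlink(struct pt_regs *ctx) { ... }
 | |  *
 | |  * matches the "kprobe+" entry above, giving it prog type
 | |  * BPF_PROG_TYPE_KPROBE and attach_kprobe() as its auto-attach handler.
 | |  */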
9606 | | |
9607 | | int libbpf_register_prog_handler(const char *sec, |
9608 | | enum bpf_prog_type prog_type, |
9609 | | enum bpf_attach_type exp_attach_type, |
9610 | | const struct libbpf_prog_handler_opts *opts) |
9611 | 0 | { |
9612 | 0 | struct bpf_sec_def *sec_def; |
9613 | |
9614 | 0 | if (!OPTS_VALID(opts, libbpf_prog_handler_opts)) |
9615 | 0 | return libbpf_err(-EINVAL); |
9616 | | |
9617 | 0 | if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */ |
9618 | 0 | return libbpf_err(-E2BIG); |
9619 | | |
9620 | 0 | if (sec) { |
9621 | 0 | sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1, |
9622 | 0 | sizeof(*sec_def)); |
9623 | 0 | if (!sec_def) |
9624 | 0 | return libbpf_err(-ENOMEM); |
9625 | | |
9626 | 0 | custom_sec_defs = sec_def; |
9627 | 0 | sec_def = &custom_sec_defs[custom_sec_def_cnt]; |
9628 | 0 | } else { |
9629 | 0 | if (has_custom_fallback_def) |
9630 | 0 | return libbpf_err(-EBUSY); |
9631 | | |
9632 | 0 | sec_def = &custom_fallback_def; |
9633 | 0 | } |
9634 | | |
9635 | 0 | sec_def->sec = sec ? strdup(sec) : NULL; |
9636 | 0 | if (sec && !sec_def->sec) |
9637 | 0 | return libbpf_err(-ENOMEM); |
9638 | | |
9639 | 0 | sec_def->prog_type = prog_type; |
9640 | 0 | sec_def->expected_attach_type = exp_attach_type; |
9641 | 0 | sec_def->cookie = OPTS_GET(opts, cookie, 0); |
9642 | |
9643 | 0 | sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL); |
9644 | 0 | sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL); |
9645 | 0 | sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL); |
9646 | |
9647 | 0 | sec_def->handler_id = ++last_custom_sec_def_handler_id; |
9648 | |
9649 | 0 | if (sec) |
9650 | 0 | custom_sec_def_cnt++; |
9651 | 0 | else |
9652 | 0 | has_custom_fallback_def = true; |
9653 | |
9654 | 0 | return sec_def->handler_id; |
9655 | 0 | } |
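 | |
 | | /* Illustrative usage sketch (the section and callback names are
 | |  * hypothetical):
 | |  *
 | |  *	LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
 | |  *		    .prog_attach_fn = my_attach_fn);
 | |  *	int id = libbpf_register_prog_handler("myprog+",
 | |  *					      BPF_PROG_TYPE_KPROBE, 0, &opts);
 | |  *
 | |  * A negative return is an error; a positive ID can later be passed to
 | |  * libbpf_unregister_prog_handler().
 | |  */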
9656 | | |
9657 | | int libbpf_unregister_prog_handler(int handler_id) |
9658 | 0 | { |
9659 | 0 | struct bpf_sec_def *sec_defs; |
9660 | 0 | int i; |
9661 | |
9662 | 0 | if (handler_id <= 0) |
9663 | 0 | return libbpf_err(-EINVAL); |
9664 | | |
9665 | 0 | if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) { |
9666 | 0 | memset(&custom_fallback_def, 0, sizeof(custom_fallback_def)); |
9667 | 0 | has_custom_fallback_def = false; |
9668 | 0 | return 0; |
9669 | 0 | } |
9670 | | |
9671 | 0 | for (i = 0; i < custom_sec_def_cnt; i++) { |
9672 | 0 | if (custom_sec_defs[i].handler_id == handler_id) |
9673 | 0 | break; |
9674 | 0 | } |
9675 | |
9676 | 0 | if (i == custom_sec_def_cnt) |
9677 | 0 | return libbpf_err(-ENOENT); |
9678 | | |
9679 | 0 | free(custom_sec_defs[i].sec); |
9680 | 0 | for (i = i + 1; i < custom_sec_def_cnt; i++) |
9681 | 0 | custom_sec_defs[i - 1] = custom_sec_defs[i]; |
9682 | 0 | custom_sec_def_cnt--; |
9683 | | |
9684 | | /* try to shrink the array, but it's ok if we couldn't */ |
9685 | 0 | sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs)); |
9686 | | /* if new count is zero, reallocarray can return a valid NULL result; |
9687 | | * in this case the previous pointer will be freed, so we *have to* |
9688 | | * reassign old pointer to the new value (even if it's NULL) |
9689 | | */ |
9690 | 0 | if (sec_defs || custom_sec_def_cnt == 0) |
9691 | 0 | custom_sec_defs = sec_defs; |
9692 | |
9693 | 0 | return 0; |
9694 | 0 | } |
9695 | | |
9696 | | static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name) |
9697 | 517k | { |
9698 | 517k | size_t len = strlen(sec_def->sec); |
9699 | | |
9700 | | /* "type/" always has to have proper SEC("type/extras") form */ |
9701 | 517k | if (sec_def->sec[len - 1] == '/') { |
9702 | 0 | if (str_has_pfx(sec_name, sec_def->sec)) |
9703 | 0 | return true; |
9704 | 0 | return false; |
9705 | 0 | } |
9706 | | |
9707 | | /* "type+" means it can be either exact SEC("type") or |
9708 | | * well-formed SEC("type/extras") with proper '/' separator |
9709 | | */ |
9710 | 517k | if (sec_def->sec[len - 1] == '+') { |
9711 | 210k | len--; |
9712 | | /* not even a prefix */ |
9713 | 210k | if (strncmp(sec_name, sec_def->sec, len) != 0) |
9714 | 208k | return false; |
9715 | | /* exact match or has '/' separator */ |
9716 | 1.77k | if (sec_name[len] == '\0' || sec_name[len] == '/') |
9717 | 836 | return true; |
9718 | 943 | return false; |
9719 | 1.77k | } |
9720 | | |
9721 | 306k | return strcmp(sec_name, sec_def->sec) == 0; |
9722 | 517k | } |
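 | |
 | | /* Matching examples for the rules above (illustrative): "kprobe+"
 | |  * matches "kprobe" and "kprobe/do_sys_open" but not "kprobes";
 | |  * "xdp" (no suffix) requires an exact string match.
 | |  */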
9723 | | |
9724 | | static const struct bpf_sec_def *find_sec_def(const char *sec_name) |
9725 | 5.40k | { |
9726 | 5.40k | const struct bpf_sec_def *sec_def; |
9727 | 5.40k | int i, n; |
9728 | | |
9729 | 5.40k | n = custom_sec_def_cnt; |
9730 | 5.40k | for (i = 0; i < n; i++) { |
9731 | 0 | sec_def = &custom_sec_defs[i]; |
9732 | 0 | if (sec_def_matches(sec_def, sec_name)) |
9733 | 0 | return sec_def; |
9734 | 0 | } |
9735 | | |
9736 | 5.40k | n = ARRAY_SIZE(section_defs); |
9737 | 521k | for (i = 0; i < n; i++) { |
9738 | 517k | sec_def = &section_defs[i];
9739 | 517k | if (sec_def_matches(sec_def, sec_name)) |
9740 | 1.03k | return sec_def; |
9741 | 517k | } |
9742 | | |
9743 | 4.37k | if (has_custom_fallback_def) |
9744 | 0 | return &custom_fallback_def; |
9745 | | |
9746 | 4.37k | return NULL; |
9747 | 4.37k | } |
9748 | | |
9749 | 0 | #define MAX_TYPE_NAME_SIZE 32 |
9750 | | |
9751 | | static char *libbpf_get_type_names(bool attach_type) |
9752 | 0 | { |
9753 | 0 | int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE; |
9754 | 0 | char *buf; |
9755 | |
9756 | 0 | buf = malloc(len); |
9757 | 0 | if (!buf) |
9758 | 0 | return NULL; |
9759 | | |
9760 | 0 | buf[0] = '\0'; |
9761 | | /* Forge string buf with all available names */ |
9762 | 0 | for (i = 0; i < ARRAY_SIZE(section_defs); i++) { |
9763 | 0 | const struct bpf_sec_def *sec_def = &section_defs[i];
9764 | |
9765 | 0 | if (attach_type) { |
9766 | 0 | if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load) |
9767 | 0 | continue; |
9768 | | |
9769 | 0 | if (!(sec_def->cookie & SEC_ATTACHABLE)) |
9770 | 0 | continue; |
9771 | 0 | } |
9772 | | |
9773 | 0 | if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) { |
9774 | 0 | free(buf); |
9775 | 0 | return NULL; |
9776 | 0 | } |
9777 | 0 | strcat(buf, " "); |
9778 | 0 | strcat(buf, section_defs[i].sec); |
9779 | 0 | } |
9780 | | |
9781 | 0 | return buf; |
9782 | 0 | } |
9783 | | |
9784 | | int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, |
9785 | | enum bpf_attach_type *expected_attach_type) |
9786 | 0 | { |
9787 | 0 | const struct bpf_sec_def *sec_def; |
9788 | 0 | char *type_names; |
9789 | |
9790 | 0 | if (!name) |
9791 | 0 | return libbpf_err(-EINVAL); |
9792 | | |
9793 | 0 | sec_def = find_sec_def(name); |
9794 | 0 | if (sec_def) { |
9795 | 0 | *prog_type = sec_def->prog_type; |
9796 | 0 | *expected_attach_type = sec_def->expected_attach_type; |
9797 | 0 | return 0; |
9798 | 0 | } |
9799 | | |
9800 | 0 | pr_debug("failed to guess program type from ELF section '%s'\n", name); |
9801 | 0 | type_names = libbpf_get_type_names(false); |
9802 | 0 | if (type_names != NULL) { |
9803 | 0 | pr_debug("supported section(type) names are:%s\n", type_names); |
9804 | 0 | free(type_names); |
9805 | 0 | } |
9806 | |
9807 | 0 | return libbpf_err(-ESRCH); |
9808 | 0 | } |
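 | |
 | | /* Illustrative sketch:
 | |  *
 | |  *	enum bpf_prog_type type;
 | |  *	enum bpf_attach_type attach;
 | |  *
 | |  *	if (!libbpf_prog_type_by_name("xdp", &type, &attach))
 | |  *		... type is BPF_PROG_TYPE_XDP, attach is BPF_XDP ...
 | |  */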
9809 | | |
9810 | | const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t) |
9811 | 0 | { |
9812 | 0 | if (t < 0 || t >= ARRAY_SIZE(attach_type_name)) |
9813 | 0 | return NULL; |
9814 | | |
9815 | 0 | return attach_type_name[t]; |
9816 | 0 | } |
9817 | | |
9818 | | const char *libbpf_bpf_link_type_str(enum bpf_link_type t) |
9819 | 0 | { |
9820 | 0 | if (t < 0 || t >= ARRAY_SIZE(link_type_name)) |
9821 | 0 | return NULL; |
9822 | | |
9823 | 0 | return link_type_name[t]; |
9824 | 0 | } |
9825 | | |
9826 | | const char *libbpf_bpf_map_type_str(enum bpf_map_type t) |
9827 | 0 | { |
9828 | 0 | if (t < 0 || t >= ARRAY_SIZE(map_type_name)) |
9829 | 0 | return NULL; |
9830 | | |
9831 | 0 | return map_type_name[t]; |
9832 | 0 | } |
9833 | | |
9834 | | const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t) |
9835 | 0 | { |
9836 | 0 | if (t < 0 || t >= ARRAY_SIZE(prog_type_name)) |
9837 | 0 | return NULL; |
9838 | | |
9839 | 0 | return prog_type_name[t]; |
9840 | 0 | } |
9841 | | |
9842 | | static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj, |
9843 | | int sec_idx, |
9844 | | size_t offset) |
9845 | 6 | { |
9846 | 6 | struct bpf_map *map; |
9847 | 6 | size_t i; |
9848 | | |
9849 | 21 | for (i = 0; i < obj->nr_maps; i++) { |
9850 | 15 | map = &obj->maps[i]; |
9851 | 15 | if (!bpf_map__is_struct_ops(map)) |
9852 | 15 | continue; |
9853 | 0 | if (map->sec_idx == sec_idx && |
9854 | 0 | map->sec_offset <= offset && |
9855 | 0 | offset - map->sec_offset < map->def.value_size) |
9856 | 0 | return map; |
9857 | 0 | } |
9858 | | |
9859 | 6 | return NULL; |
9860 | 6 | } |
9861 | | |
9862 | | /* Collect the relocations from ELF, populate st_ops->progs[], and
9863 | | * update st_ops->data for the shadow type.
9864 | | */ |
9865 | | static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, |
9866 | | Elf64_Shdr *shdr, Elf_Data *data) |
9867 | 8 | { |
9868 | 8 | const struct btf_type *type; |
9869 | 8 | const struct btf_member *member; |
9870 | 8 | struct bpf_struct_ops *st_ops; |
9871 | 8 | struct bpf_program *prog; |
9872 | 8 | unsigned int shdr_idx; |
9873 | 8 | const struct btf *btf; |
9874 | 8 | struct bpf_map *map; |
9875 | 8 | unsigned int moff, insn_idx; |
9876 | 8 | const char *name; |
9877 | 8 | __u32 member_idx; |
9878 | 8 | Elf64_Sym *sym; |
9879 | 8 | Elf64_Rel *rel; |
9880 | 8 | int i, nrels; |
9881 | | |
9882 | 8 | btf = obj->btf; |
9883 | 8 | nrels = shdr->sh_size / shdr->sh_entsize; |
9884 | 18.4E | for (i = 0; i < nrels; i++) { |
9885 | 7 | rel = elf_rel_by_idx(data, i); |
9886 | 7 | if (!rel) { |
9887 | 0 | pr_warn("struct_ops reloc: failed to get %d reloc\n", i); |
9888 | 0 | return -LIBBPF_ERRNO__FORMAT; |
9889 | 0 | } |
9890 | | |
9891 | 7 | sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); |
9892 | 7 | if (!sym) { |
9893 | 1 | pr_warn("struct_ops reloc: symbol %zx not found\n", |
9894 | 1 | (size_t)ELF64_R_SYM(rel->r_info)); |
9895 | 1 | return -LIBBPF_ERRNO__FORMAT; |
9896 | 1 | } |
9897 | | |
9898 | 6 | name = elf_sym_str(obj, sym->st_name) ?: "<?>"; |
9899 | 6 | map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset); |
9900 | 6 | if (!map) { |
9901 | 6 | pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n", |
9902 | 6 | (size_t)rel->r_offset); |
9903 | 6 | return -EINVAL; |
9904 | 6 | } |
9905 | | |
9906 | 18.4E | moff = rel->r_offset - map->sec_offset; |
9907 | 18.4E | shdr_idx = sym->st_shndx; |
9908 | 18.4E | st_ops = map->st_ops; |
9909 | 18.4E | pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n", |
9910 | 18.4E | map->name, |
9911 | 18.4E | (long long)(rel->r_info >> 32), |
9912 | 18.4E | (long long)sym->st_value, |
9913 | 18.4E | shdr_idx, (size_t)rel->r_offset, |
9914 | 18.4E | map->sec_offset, sym->st_name, name); |
9915 | | |
9916 | 18.4E | if (shdr_idx >= SHN_LORESERVE) { |
9917 | 0 | pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n", |
9918 | 0 | map->name, (size_t)rel->r_offset, shdr_idx); |
9919 | 0 | return -LIBBPF_ERRNO__RELOC; |
9920 | 0 | } |
9921 | 18.4E | if (sym->st_value % BPF_INSN_SZ) { |
9922 | 0 | pr_warn("struct_ops reloc %s: invalid target program offset %llu\n", |
9923 | 0 | map->name, (unsigned long long)sym->st_value); |
9924 | 0 | return -LIBBPF_ERRNO__FORMAT; |
9925 | 0 | } |
9926 | 18.4E | insn_idx = sym->st_value / BPF_INSN_SZ; |
9927 | | |
9928 | 18.4E | type = btf__type_by_id(btf, st_ops->type_id); |
9929 | 18.4E | member = find_member_by_offset(type, moff * 8); |
9930 | 18.4E | if (!member) { |
9931 | 0 | pr_warn("struct_ops reloc %s: cannot find member at moff %u\n", |
9932 | 0 | map->name, moff); |
9933 | 0 | return -EINVAL; |
9934 | 0 | } |
9935 | 18.4E | member_idx = member - btf_members(type); |
9936 | 18.4E | name = btf__name_by_offset(btf, member->name_off); |
9937 | | |
9938 | 18.4E | if (!resolve_func_ptr(btf, member->type, NULL)) { |
9939 | 0 | pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n", |
9940 | 0 | map->name, name); |
9941 | 0 | return -EINVAL; |
9942 | 0 | } |
9943 | | |
9944 | 18.4E | prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx); |
9945 | 18.4E | if (!prog) { |
9946 | 0 | pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n", |
9947 | 0 | map->name, shdr_idx, name); |
9948 | 0 | return -EINVAL; |
9949 | 0 | } |
9950 | | |
9951 | | /* prevent the use of BPF prog with invalid type */ |
9952 | 18.4E | if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) { |
9953 | 0 | pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n", |
9954 | 0 | map->name, prog->name); |
9955 | 0 | return -EINVAL; |
9956 | 0 | } |
9957 | | |
9958 | 18.4E | st_ops->progs[member_idx] = prog; |
9959 | | |
9960 | | /* st_ops->data will be exposed to users, being returned by |
9961 | | * bpf_map__initial_value() as a pointer to the shadow |
9962 | | * type. All function pointers in the original struct type |
9963 | | * should be converted to a pointer to struct bpf_program |
9964 | | * in the shadow type. |
9965 | | */ |
9966 | 18.4E | *((struct bpf_program **)(st_ops->data + moff)) = prog; |
9967 | 18.4E | } |
9968 | | |
9969 | 18.4E | return 0; |
9970 | 8 | } |
9971 | | |
9972 | 0 | #define BTF_TRACE_PREFIX "btf_trace_" |
9973 | 0 | #define BTF_LSM_PREFIX "bpf_lsm_" |
9974 | 0 | #define BTF_ITER_PREFIX "bpf_iter_" |
9975 | | #define BTF_MAX_NAME_SIZE 128 |
9976 | | |
9977 | | void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type, |
9978 | | const char **prefix, int *kind) |
9979 | 0 | { |
9980 | 0 | switch (attach_type) { |
9981 | 0 | case BPF_TRACE_RAW_TP: |
9982 | 0 | *prefix = BTF_TRACE_PREFIX; |
9983 | 0 | *kind = BTF_KIND_TYPEDEF; |
9984 | 0 | break; |
9985 | 0 | case BPF_LSM_MAC: |
9986 | 0 | case BPF_LSM_CGROUP: |
9987 | 0 | *prefix = BTF_LSM_PREFIX; |
9988 | 0 | *kind = BTF_KIND_FUNC; |
9989 | 0 | break; |
9990 | 0 | case BPF_TRACE_ITER: |
9991 | 0 | *prefix = BTF_ITER_PREFIX; |
9992 | 0 | *kind = BTF_KIND_FUNC; |
9993 | 0 | break; |
9994 | 0 | default: |
9995 | 0 | *prefix = ""; |
9996 | 0 | *kind = BTF_KIND_FUNC; |
9997 | 0 | } |
9998 | 0 | } |
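 | |
 | | /* Example of the mapping above: for BPF_TRACE_RAW_TP and attach name
 | |  * "sched_switch", the lookup target is the BTF_KIND_TYPEDEF named
 | |  * "btf_trace_sched_switch" in vmlinux BTF.
 | |  */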
9999 | | |
10000 | | static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, |
10001 | | const char *name, __u32 kind) |
10002 | 0 | { |
10003 | 0 | char btf_type_name[BTF_MAX_NAME_SIZE]; |
10004 | 0 | int ret; |
10005 | |
10006 | 0 | ret = snprintf(btf_type_name, sizeof(btf_type_name), |
10007 | 0 | "%s%s", prefix, name); |
10008 | | /* snprintf returns the number of characters written excluding the |
10009 | | * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it |
10010 | | * indicates truncation. |
10011 | | */ |
10012 | 0 | if (ret < 0 || ret >= sizeof(btf_type_name)) |
10013 | 0 | return -ENAMETOOLONG; |
10014 | 0 | return btf__find_by_name_kind(btf, btf_type_name, kind); |
10015 | 0 | } |
10016 | | |
10017 | | static inline int find_attach_btf_id(struct btf *btf, const char *name, |
10018 | | enum bpf_attach_type attach_type) |
10019 | 0 | { |
10020 | 0 | const char *prefix; |
10021 | 0 | int kind; |
10022 | |
10023 | 0 | btf_get_kernel_prefix_kind(attach_type, &prefix, &kind); |
10024 | 0 | return find_btf_by_prefix_kind(btf, prefix, name, kind); |
10025 | 0 | } |
10026 | | |
10027 | | int libbpf_find_vmlinux_btf_id(const char *name, |
10028 | | enum bpf_attach_type attach_type) |
10029 | 0 | { |
10030 | 0 | struct btf *btf; |
10031 | 0 | int err; |
10032 | |
10033 | 0 | btf = btf__load_vmlinux_btf(); |
10034 | 0 | err = libbpf_get_error(btf); |
10035 | 0 | if (err) { |
10036 | 0 | pr_warn("vmlinux BTF is not found\n"); |
10037 | 0 | return libbpf_err(err); |
10038 | 0 | } |
10039 | | |
10040 | 0 | err = find_attach_btf_id(btf, name, attach_type); |
10041 | 0 | if (err <= 0) |
10042 | 0 | pr_warn("%s is not found in vmlinux BTF\n", name); |
10043 | |
10044 | 0 | btf__free(btf); |
10045 | 0 | return libbpf_err(err); |
10046 | 0 | } |
10047 | | |
10048 | | static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd, int token_fd) |
10049 | 0 | { |
10050 | 0 | struct bpf_prog_info info; |
10051 | 0 | __u32 info_len = sizeof(info); |
10052 | 0 | struct btf *btf; |
10053 | 0 | int err; |
10054 | |
10055 | 0 | memset(&info, 0, info_len); |
10056 | 0 | err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len); |
10057 | 0 | if (err) { |
10058 | 0 | pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %s\n", |
10059 | 0 | attach_prog_fd, errstr(err)); |
10060 | 0 | return err; |
10061 | 0 | } |
10062 | | |
10063 | 0 | err = -EINVAL; |
10064 | 0 | if (!info.btf_id) { |
10065 | 0 | pr_warn("The target program doesn't have BTF\n"); |
10066 | 0 | goto out; |
10067 | 0 | } |
10068 | 0 | btf = btf_load_from_kernel(info.btf_id, NULL, token_fd); |
10069 | 0 | err = libbpf_get_error(btf); |
10070 | 0 | if (err) { |
10071 | 0 | pr_warn("Failed to get BTF %d of the program: %s\n", info.btf_id, errstr(err)); |
10072 | 0 | goto out; |
10073 | 0 | } |
10074 | 0 | err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); |
10075 | 0 | btf__free(btf); |
10076 | 0 | if (err <= 0) { |
10077 | 0 | pr_warn("%s is not found in prog's BTF\n", name); |
10078 | 0 | goto out; |
10079 | 0 | } |
10080 | 0 | out: |
10081 | 0 | return err; |
10082 | 0 | } |
10083 | | |
10084 | | static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name, |
10085 | | enum bpf_attach_type attach_type, |
10086 | | int *btf_obj_fd, int *btf_type_id) |
10087 | 0 | { |
10088 | 0 | int ret, i, mod_len; |
10089 | 0 | const char *fn_name, *mod_name = NULL; |
10090 | |
10091 | 0 | fn_name = strchr(attach_name, ':'); |
10092 | 0 | if (fn_name) { |
10093 | 0 | mod_name = attach_name; |
10094 | 0 | mod_len = fn_name - mod_name; |
10095 | 0 | fn_name++; |
10096 | 0 | } |
10097 | |
10098 | 0 | if (!mod_name || strncmp(mod_name, "vmlinux", mod_len) == 0) { |
10099 | 0 | ret = find_attach_btf_id(obj->btf_vmlinux, |
10100 | 0 | mod_name ? fn_name : attach_name, |
10101 | 0 | attach_type); |
10102 | 0 | if (ret > 0) { |
10103 | 0 | *btf_obj_fd = 0; /* vmlinux BTF */ |
10104 | 0 | *btf_type_id = ret; |
10105 | 0 | return 0; |
10106 | 0 | } |
10107 | 0 | if (ret != -ENOENT) |
10108 | 0 | return ret; |
10109 | 0 | } |
10110 | | |
10111 | 0 | ret = load_module_btfs(obj); |
10112 | 0 | if (ret) |
10113 | 0 | return ret; |
10114 | | |
10115 | 0 | for (i = 0; i < obj->btf_module_cnt; i++) { |
10116 | 0 | const struct module_btf *mod = &obj->btf_modules[i]; |
10117 | |
10118 | 0 | if (mod_name && strncmp(mod->name, mod_name, mod_len) != 0) |
10119 | 0 | continue; |
10120 | | |
10121 | 0 | ret = find_attach_btf_id(mod->btf, |
10122 | 0 | mod_name ? fn_name : attach_name, |
10123 | 0 | attach_type); |
10124 | 0 | if (ret > 0) { |
10125 | 0 | *btf_obj_fd = mod->fd; |
10126 | 0 | *btf_type_id = ret; |
10127 | 0 | return 0; |
10128 | 0 | } |
10129 | 0 | if (ret == -ENOENT) |
10130 | 0 | continue; |
10131 | | |
10132 | 0 | return ret; |
10133 | 0 | } |
10134 | | |
10135 | 0 | return -ESRCH; |
10136 | 0 | } |
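 | |
 | | /* The attach name may carry a module prefix, e.g. (illustrative)
 | |  * "nf_nat:nf_nat_manip_pkt" searches only the nf_nat module's BTF,
 | |  * while a bare function name is looked up in vmlinux BTF first and
 | |  * then in every loaded module's BTF.
 | |  */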
10137 | | |
10138 | | static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name, |
10139 | | int *btf_obj_fd, int *btf_type_id) |
10140 | 0 | { |
10141 | 0 | enum bpf_attach_type attach_type = prog->expected_attach_type; |
10142 | 0 | __u32 attach_prog_fd = prog->attach_prog_fd; |
10143 | 0 | int err = 0; |
10144 | | |
10145 | | /* BPF program's BTF ID */ |
10146 | 0 | if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) { |
10147 | 0 | if (!attach_prog_fd) { |
10148 | 0 | pr_warn("prog '%s': attach program FD is not set\n", prog->name); |
10149 | 0 | return -EINVAL; |
10150 | 0 | } |
10151 | 0 | err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd, prog->obj->token_fd); |
10152 | 0 | if (err < 0) { |
10153 | 0 | pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %s\n", |
10154 | 0 | prog->name, attach_prog_fd, attach_name, errstr(err)); |
10155 | 0 | return err; |
10156 | 0 | } |
10157 | 0 | *btf_obj_fd = 0; |
10158 | 0 | *btf_type_id = err; |
10159 | 0 | return 0; |
10160 | 0 | } |
10161 | | |
10162 | | /* kernel/module BTF ID */ |
10163 | 0 | if (prog->obj->gen_loader) { |
10164 | 0 | bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type); |
10165 | 0 | *btf_obj_fd = 0; |
10166 | 0 | *btf_type_id = 1; |
10167 | 0 | } else { |
10168 | 0 | err = find_kernel_btf_id(prog->obj, attach_name, |
10169 | 0 | attach_type, btf_obj_fd, |
10170 | 0 | btf_type_id); |
10171 | 0 | } |
10172 | 0 | if (err) { |
10173 | 0 | pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %s\n", |
10174 | 0 | prog->name, attach_name, errstr(err)); |
10175 | 0 | return err; |
10176 | 0 | } |
10177 | 0 | return 0; |
10178 | 0 | } |
10179 | | |
10180 | | int libbpf_attach_type_by_name(const char *name, |
10181 | | enum bpf_attach_type *attach_type) |
10182 | 0 | { |
10183 | 0 | char *type_names; |
10184 | 0 | const struct bpf_sec_def *sec_def; |
10185 | |
10186 | 0 | if (!name) |
10187 | 0 | return libbpf_err(-EINVAL); |
10188 | | |
10189 | 0 | sec_def = find_sec_def(name); |
10190 | 0 | if (!sec_def) { |
10191 | 0 | pr_debug("failed to guess attach type based on ELF section name '%s'\n", name); |
10192 | 0 | type_names = libbpf_get_type_names(true); |
10193 | 0 | if (type_names != NULL) { |
10194 | 0 | pr_debug("attachable section(type) names are:%s\n", type_names); |
10195 | 0 | free(type_names); |
10196 | 0 | } |
10197 | |
10198 | 0 | return libbpf_err(-EINVAL); |
10199 | 0 | } |
10200 | | |
10201 | 0 | if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load) |
10202 | 0 | return libbpf_err(-EINVAL); |
10203 | 0 | if (!(sec_def->cookie & SEC_ATTACHABLE)) |
10204 | 0 | return libbpf_err(-EINVAL); |
10205 | | |
10206 | 0 | *attach_type = sec_def->expected_attach_type; |
10207 | 0 | return 0; |
10208 | 0 | } |
10209 | | |
10210 | | int bpf_map__fd(const struct bpf_map *map) |
10211 | 0 | { |
10212 | 0 | if (!map) |
10213 | 0 | return libbpf_err(-EINVAL); |
10214 | 0 | if (!map_is_created(map)) |
10215 | 0 | return -1; |
10216 | 0 | return map->fd; |
10217 | 0 | } |
10218 | | |
10219 | | static bool map_uses_real_name(const struct bpf_map *map) |
10220 | 1 | { |
10221 | | /* Since libbpf started supporting custom .data.* and .rodata.* maps, |
10222 | | * their user-visible name differs from the kernel-visible name. Users |
10223 | | * see such a map's corresponding ELF section name as the map name. |
10224 | | * This check distinguishes .data/.rodata from .data.* and .rodata.* |
10225 | | * maps to know which name has to be returned to the user. |
10226 | | */ |
10227 | 1 | if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0) |
10228 | 0 | return true; |
10229 | 1 | if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0) |
10230 | 0 | return true; |
10231 | 1 | return false; |
10232 | 1 | } |
10233 | | |
10234 | | const char *bpf_map__name(const struct bpf_map *map) |
10235 | 1 | { |
10236 | 1 | if (!map) |
10237 | 0 | return NULL; |
10238 | | |
10239 | 1 | if (map_uses_real_name(map)) |
10240 | 0 | return map->real_name; |
10241 | | |
10242 | 1 | return map->name; |
10243 | 1 | } |
10244 | | |
10245 | | enum bpf_map_type bpf_map__type(const struct bpf_map *map) |
10246 | 0 | { |
10247 | 0 | return map->def.type; |
10248 | 0 | } |
10249 | | |
10250 | | int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type) |
10251 | 0 | { |
10252 | 0 | if (map_is_created(map)) |
10253 | 0 | return libbpf_err(-EBUSY); |
10254 | 0 | map->def.type = type; |
10255 | 0 | return 0; |
10256 | 0 | } |
10257 | | |
10258 | | __u32 bpf_map__map_flags(const struct bpf_map *map) |
10259 | 0 | { |
10260 | 0 | return map->def.map_flags; |
10261 | 0 | } |
10262 | | |
10263 | | int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags) |
10264 | 0 | { |
10265 | 0 | if (map_is_created(map)) |
10266 | 0 | return libbpf_err(-EBUSY); |
10267 | 0 | map->def.map_flags = flags; |
10268 | 0 | return 0; |
10269 | 0 | } |
10270 | | |
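A minimal sketch of the setter pattern shared by the bpf_map__set_*() functions
above: they only work between bpf_object__open*() and bpf_object__load(), and
fail with -EBUSY once the map exists in the kernel. The object path
"prog.bpf.o" and map name "my_hash" are hypothetical.

    #include <errno.h>
    #include <bpf/libbpf.h>

    static int open_and_tune(void)
    {
        struct bpf_object *obj;
        struct bpf_map *map;
        int err;

        obj = bpf_object__open_file("prog.bpf.o", NULL);
        if (!obj)
            return -errno;

        map = bpf_object__find_map_by_name(obj, "my_hash");
        if (!map) {
            err = -errno;
            goto out;
        }

        /* allowed here: the map is not created yet */
        err = bpf_map__set_map_flags(map, BPF_F_NO_PREALLOC);
        if (err)
            goto out;

        err = bpf_object__load(obj);
        /* after load, the same setter would return -EBUSY */
    out:
        bpf_object__close(obj);
        return err;
    }
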
10271 | | __u64 bpf_map__map_extra(const struct bpf_map *map) |
10272 | 0 | { |
10273 | 0 | return map->map_extra; |
10274 | 0 | } |
10275 | | |
10276 | | int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra) |
10277 | 0 | { |
10278 | 0 | if (map_is_created(map)) |
10279 | 0 | return libbpf_err(-EBUSY); |
10280 | 0 | map->map_extra = map_extra; |
10281 | 0 | return 0; |
10282 | 0 | } |
10283 | | |
10284 | | __u32 bpf_map__numa_node(const struct bpf_map *map) |
10285 | 0 | { |
10286 | 0 | return map->numa_node; |
10287 | 0 | } |
10288 | | |
10289 | | int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node) |
10290 | 0 | { |
10291 | 0 | if (map_is_created(map)) |
10292 | 0 | return libbpf_err(-EBUSY); |
10293 | 0 | map->numa_node = numa_node; |
10294 | 0 | return 0; |
10295 | 0 | } |
10296 | | |
10297 | | __u32 bpf_map__key_size(const struct bpf_map *map) |
10298 | 0 | { |
10299 | 0 | return map->def.key_size; |
10300 | 0 | } |
10301 | | |
10302 | | int bpf_map__set_key_size(struct bpf_map *map, __u32 size) |
10303 | 0 | { |
10304 | 0 | if (map_is_created(map)) |
10305 | 0 | return libbpf_err(-EBUSY); |
10306 | 0 | map->def.key_size = size; |
10307 | 0 | return 0; |
10308 | 0 | } |
10309 | | |
10310 | | __u32 bpf_map__value_size(const struct bpf_map *map) |
10311 | 0 | { |
10312 | 0 | return map->def.value_size; |
10313 | 0 | } |
10314 | | |
10315 | | static int map_btf_datasec_resize(struct bpf_map *map, __u32 size) |
10316 | 0 | { |
10317 | 0 | struct btf *btf; |
10318 | 0 | struct btf_type *datasec_type, *var_type; |
10319 | 0 | struct btf_var_secinfo *var; |
10320 | 0 | const struct btf_type *array_type; |
10321 | 0 | const struct btf_array *array; |
10322 | 0 | int vlen, element_sz, new_array_id; |
10323 | 0 | __u32 nr_elements; |
10324 | | |
10325 | | /* check btf existence */ |
10326 | 0 | btf = bpf_object__btf(map->obj); |
10327 | 0 | if (!btf) |
10328 | 0 | return -ENOENT; |
10329 | | |
10330 | | /* verify map is datasec */ |
10331 | 0 | datasec_type = btf_type_by_id(btf, bpf_map__btf_value_type_id(map)); |
10332 | 0 | if (!btf_is_datasec(datasec_type)) { |
10333 | 0 | pr_warn("map '%s': cannot be resized, map value type is not a datasec\n", |
10334 | 0 | bpf_map__name(map)); |
10335 | 0 | return -EINVAL; |
10336 | 0 | } |
10337 | | |
10338 | | /* verify datasec has at least one var */ |
10339 | 0 | vlen = btf_vlen(datasec_type); |
10340 | 0 | if (vlen == 0) { |
10341 | 0 | pr_warn("map '%s': cannot be resized, map value datasec is empty\n", |
10342 | 0 | bpf_map__name(map)); |
10343 | 0 | return -EINVAL; |
10344 | 0 | } |
10345 | | |
10346 | | /* verify last var in the datasec is an array */ |
10347 | 0 | var = &btf_var_secinfos(datasec_type)[vlen - 1]; |
10348 | 0 | var_type = btf_type_by_id(btf, var->type); |
10349 | 0 | array_type = skip_mods_and_typedefs(btf, var_type->type, NULL); |
10350 | 0 | if (!btf_is_array(array_type)) { |
10351 | 0 | pr_warn("map '%s': cannot be resized, last var must be an array\n", |
10352 | 0 | bpf_map__name(map)); |
10353 | 0 | return -EINVAL; |
10354 | 0 | } |
10355 | | |
10356 | | /* verify request size aligns with array */ |
10357 | 0 | array = btf_array(array_type); |
10358 | 0 | element_sz = btf__resolve_size(btf, array->type); |
10359 | 0 | if (element_sz <= 0 || (size - var->offset) % element_sz != 0) { |
10360 | 0 | pr_warn("map '%s': cannot be resized, element size (%d) doesn't align with new total size (%u)\n", |
10361 | 0 | bpf_map__name(map), element_sz, size); |
10362 | 0 | return -EINVAL; |
10363 | 0 | } |
10364 | | |
10365 | | /* create a new array based on the existing array, but with new length */ |
10366 | 0 | nr_elements = (size - var->offset) / element_sz; |
10367 | 0 | new_array_id = btf__add_array(btf, array->index_type, array->type, nr_elements); |
10368 | 0 | if (new_array_id < 0) |
10369 | 0 | return new_array_id; |
10370 | | |
10371 | | /* adding a new btf type invalidates existing pointers to btf objects, |
10372 | | * so refresh pointers before proceeding |
10373 | | */ |
10374 | 0 | datasec_type = btf_type_by_id(btf, map->btf_value_type_id); |
10375 | 0 | var = &btf_var_secinfos(datasec_type)[vlen - 1]; |
10376 | 0 | var_type = btf_type_by_id(btf, var->type); |
10377 | | |
10378 | | /* finally update btf info */ |
10379 | 0 | datasec_type->size = size; |
10380 | 0 | var->size = size - var->offset; |
10381 | 0 | var_type->type = new_array_id; |
10382 | |
10383 | 0 | return 0; |
10384 | 0 | } |
10385 | | |
10386 | | int bpf_map__set_value_size(struct bpf_map *map, __u32 size) |
10387 | 0 | { |
10388 | 0 | if (map_is_created(map)) |
10389 | 0 | return libbpf_err(-EBUSY); |
10390 | | |
10391 | 0 | if (map->mmaped) { |
10392 | 0 | size_t mmap_old_sz, mmap_new_sz; |
10393 | 0 | int err; |
10394 | |
10395 | 0 | if (map->def.type != BPF_MAP_TYPE_ARRAY) |
10396 | 0 | return libbpf_err(-EOPNOTSUPP); |
10397 | | |
10398 | 0 | mmap_old_sz = bpf_map_mmap_sz(map); |
10399 | 0 | mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries); |
10400 | 0 | err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz); |
10401 | 0 | if (err) { |
10402 | 0 | pr_warn("map '%s': failed to resize memory-mapped region: %s\n", |
10403 | 0 | bpf_map__name(map), errstr(err)); |
10404 | 0 | return libbpf_err(err); |
10405 | 0 | } |
10406 | 0 | err = map_btf_datasec_resize(map, size); |
10407 | 0 | if (err && err != -ENOENT) { |
10408 | 0 | pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %s\n", |
10409 | 0 | bpf_map__name(map), errstr(err)); |
10410 | 0 | map->btf_value_type_id = 0; |
10411 | 0 | map->btf_key_type_id = 0; |
10412 | 0 | } |
10413 | 0 | } |
10414 | | |
10415 | 0 | map->def.value_size = size; |
10416 | 0 | return 0; |
10417 | 0 | } |
10418 | | |
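A minimal sketch, assuming a custom global-data section ".data.my_buf" whose
last variable is an array: per bpf_map__set_value_size() above, resizing a
memory-mapped ARRAY map before load grows the mmap region and, when the BTF
allows it, rewrites the datasec to match the new size.

    #include <errno.h>
    #include <bpf/libbpf.h>

    static int grow_buf_map(struct bpf_object *obj, __u32 new_size)
    {
        struct bpf_map *map;

        map = bpf_object__find_map_by_name(obj, ".data.my_buf");
        if (!map)
            return -ENOENT;

        /* must be called after open but before bpf_object__load() */
        return bpf_map__set_value_size(map, new_size);
    }
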
10419 | | __u32 bpf_map__btf_key_type_id(const struct bpf_map *map) |
10420 | 0 | { |
10421 | 0 | return map ? map->btf_key_type_id : 0; |
10422 | 0 | } |
10423 | | |
10424 | | __u32 bpf_map__btf_value_type_id(const struct bpf_map *map) |
10425 | 0 | { |
10426 | 0 | return map ? map->btf_value_type_id : 0; |
10427 | 0 | } |
10428 | | |
10429 | | int bpf_map__set_initial_value(struct bpf_map *map, |
10430 | | const void *data, size_t size) |
10431 | 0 | { |
10432 | 0 | size_t actual_sz; |
10433 | |
10434 | 0 | if (map_is_created(map)) |
10435 | 0 | return libbpf_err(-EBUSY); |
10436 | | |
10437 | 0 | if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG) |
10438 | 0 | return libbpf_err(-EINVAL); |
10439 | | |
10440 | 0 | if (map->def.type == BPF_MAP_TYPE_ARENA) |
10441 | 0 | actual_sz = map->obj->arena_data_sz; |
10442 | 0 | else |
10443 | 0 | actual_sz = map->def.value_size; |
10444 | 0 | if (size != actual_sz) |
10445 | 0 | return libbpf_err(-EINVAL); |
10446 | | |
10447 | 0 | memcpy(map->mmaped, data, size); |
10448 | 0 | return 0; |
10449 | 0 | } |
10450 | | |
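A minimal sketch for bpf_map__set_initial_value() above, assuming the BPF
object declares exactly one global struct in .rodata; the struct layout is
hypothetical and must mirror the BPF side, since a size mismatch yields
-EINVAL.

    #include <errno.h>
    #include <bpf/libbpf.h>

    struct my_cfg { int verbose; int sample_rate; }; /* mirrors BPF side */

    static int seed_rodata(struct bpf_object *obj)
    {
        struct bpf_map *map = bpf_object__find_map_by_name(obj, ".rodata");
        struct my_cfg cfg = { .verbose = 1, .sample_rate = 100 };

        if (!map)
            return -ENOENT;

        /* size must equal the map's value size (the whole datasec) */
        return bpf_map__set_initial_value(map, &cfg, sizeof(cfg));
    }
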
10451 | | void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize) |
10452 | 0 | { |
10453 | 0 | if (bpf_map__is_struct_ops(map)) { |
10454 | 0 | if (psize) |
10455 | 0 | *psize = map->def.value_size; |
10456 | 0 | return map->st_ops->data; |
10457 | 0 | } |
10458 | | |
10459 | 0 | if (!map->mmaped) |
10460 | 0 | return NULL; |
10461 | | |
10462 | 0 | if (map->def.type == BPF_MAP_TYPE_ARENA) |
10463 | 0 | *psize = map->obj->arena_data_sz; |
10464 | 0 | else |
10465 | 0 | *psize = map->def.value_size; |
10466 | |
10467 | 0 | return map->mmaped; |
10468 | 0 | } |
10469 | | |
10470 | | bool bpf_map__is_internal(const struct bpf_map *map) |
10471 | 513 | { |
10472 | 513 | return map->libbpf_type != LIBBPF_MAP_UNSPEC; |
10473 | 513 | } |
10474 | | |
10475 | | __u32 bpf_map__ifindex(const struct bpf_map *map) |
10476 | 0 | { |
10477 | 0 | return map->map_ifindex; |
10478 | 0 | } |
10479 | | |
10480 | | int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) |
10481 | 0 | { |
10482 | 0 | if (map_is_created(map)) |
10483 | 0 | return libbpf_err(-EBUSY); |
10484 | 0 | map->map_ifindex = ifindex; |
10485 | 0 | return 0; |
10486 | 0 | } |
10487 | | |
10488 | | int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) |
10489 | 0 | { |
10490 | 0 | if (!bpf_map_type__is_map_in_map(map->def.type)) { |
10491 | 0 | pr_warn("error: unsupported map type\n"); |
10492 | 0 | return libbpf_err(-EINVAL); |
10493 | 0 | } |
10494 | 0 | if (map->inner_map_fd != -1) { |
10495 | 0 | pr_warn("error: inner_map_fd already specified\n"); |
10496 | 0 | return libbpf_err(-EINVAL); |
10497 | 0 | } |
10498 | 0 | if (map->inner_map) { |
10499 | 0 | bpf_map__destroy(map->inner_map); |
10500 | 0 | zfree(&map->inner_map); |
10501 | 0 | } |
10502 | 0 | map->inner_map_fd = fd; |
10503 | 0 | return 0; |
10504 | 0 | } |
10505 | | |
10506 | | static struct bpf_map * |
10507 | | __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i) |
10508 | 0 | { |
10509 | 0 | ssize_t idx; |
10510 | 0 | struct bpf_map *s, *e; |
10511 | |
10512 | 0 | if (!obj || !obj->maps) |
10513 | 0 | return errno = EINVAL, NULL; |
10514 | | |
10515 | 0 | s = obj->maps; |
10516 | 0 | e = obj->maps + obj->nr_maps; |
10517 | |
10518 | 0 | if ((m < s) || (m >= e)) { |
10519 | 0 | pr_warn("error in %s: map handler doesn't belong to object\n", |
10520 | 0 | __func__); |
10521 | 0 | return errno = EINVAL, NULL; |
10522 | 0 | } |
10523 | | |
10524 | 0 | idx = (m - obj->maps) + i; |
10525 | 0 | if (idx >= obj->nr_maps || idx < 0) |
10526 | 0 | return NULL; |
10527 | 0 | return &obj->maps[idx]; |
10528 | 0 | } |
10529 | | |
10530 | | struct bpf_map * |
10531 | | bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev) |
10532 | 0 | { |
10533 | 0 | if (prev == NULL && obj != NULL) |
10534 | 0 | return obj->maps; |
10535 | | |
10536 | 0 | return __bpf_map__iter(prev, obj, 1); |
10537 | 0 | } |
10538 | | |
10539 | | struct bpf_map * |
10540 | | bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next) |
10541 | 0 | { |
10542 | 0 | if (next == NULL && obj != NULL) { |
10543 | 0 | if (!obj->nr_maps) |
10544 | 0 | return NULL; |
10545 | 0 | return obj->maps + obj->nr_maps - 1; |
10546 | 0 | } |
10547 | | |
10548 | 0 | return __bpf_map__iter(next, obj, -1); |
10549 | 0 | } |
10550 | | |
10551 | | struct bpf_map * |
10552 | | bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name) |
10553 | 0 | { |
10554 | 0 | struct bpf_map *pos; |
10555 | |
10556 | 0 | bpf_object__for_each_map(pos, obj) { |
10557 | | /* if it's a special internal map name (which always starts |
10558 | | * with a dot), then check if that special name matches the |
10559 | | * real map name (ELF section name) |
10560 | | */ |
10561 | 0 | if (name[0] == '.') { |
10562 | 0 | if (pos->real_name && strcmp(pos->real_name, name) == 0) |
10563 | 0 | return pos; |
10564 | 0 | continue; |
10565 | 0 | } |
10566 | | /* otherwise map name has to be an exact match */ |
10567 | 0 | if (map_uses_real_name(pos)) { |
10568 | 0 | if (strcmp(pos->real_name, name) == 0) |
10569 | 0 | return pos; |
10570 | 0 | continue; |
10571 | 0 | } |
10572 | 0 | if (strcmp(pos->name, name) == 0) |
10573 | 0 | return pos; |
10574 | 0 | } |
10575 | 0 | return errno = ENOENT, NULL; |
10576 | 0 | } |
10577 | | |
10578 | | int |
10579 | | bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name) |
10580 | 0 | { |
10581 | 0 | return bpf_map__fd(bpf_object__find_map_by_name(obj, name)); |
10582 | 0 | } |
10583 | | |
10584 | | static int validate_map_op(const struct bpf_map *map, size_t key_sz, |
10585 | | size_t value_sz, bool check_value_sz) |
10586 | 0 | { |
10587 | 0 | if (!map_is_created(map)) /* map is not yet created */ |
10588 | 0 | return -ENOENT; |
10589 | | |
10590 | 0 | if (map->def.key_size != key_sz) { |
10591 | 0 | pr_warn("map '%s': unexpected key size %zu provided, expected %u\n", |
10592 | 0 | map->name, key_sz, map->def.key_size); |
10593 | 0 | return -EINVAL; |
10594 | 0 | } |
10595 | | |
10596 | 0 | if (map->fd < 0) { |
10597 | 0 | pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name); |
10598 | 0 | return -EINVAL; |
10599 | 0 | } |
10600 | | |
10601 | 0 | if (!check_value_sz) |
10602 | 0 | return 0; |
10603 | | |
10604 | 0 | switch (map->def.type) { |
10605 | 0 | case BPF_MAP_TYPE_PERCPU_ARRAY: |
10606 | 0 | case BPF_MAP_TYPE_PERCPU_HASH: |
10607 | 0 | case BPF_MAP_TYPE_LRU_PERCPU_HASH: |
10608 | 0 | case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: { |
10609 | 0 | int num_cpu = libbpf_num_possible_cpus(); |
10610 | 0 | size_t elem_sz = roundup(map->def.value_size, 8); |
10611 | |
10612 | 0 | if (value_sz != num_cpu * elem_sz) { |
10613 | 0 | pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n", |
10614 | 0 | map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz); |
10615 | 0 | return -EINVAL; |
10616 | 0 | } |
10617 | 0 | break; |
10618 | 0 | } |
10619 | 0 | default: |
10620 | 0 | if (map->def.value_size != value_sz) { |
10621 | 0 | pr_warn("map '%s': unexpected value size %zu provided, expected %u\n", |
10622 | 0 | map->name, value_sz, map->def.value_size); |
10623 | 0 | return -EINVAL; |
10624 | 0 | } |
10625 | 0 | break; |
10626 | 0 | } |
10627 | 0 | return 0; |
10628 | 0 | } |
10629 | | |
10630 | | int bpf_map__lookup_elem(const struct bpf_map *map, |
10631 | | const void *key, size_t key_sz, |
10632 | | void *value, size_t value_sz, __u64 flags) |
10633 | 0 | { |
10634 | 0 | int err; |
10635 | |
10636 | 0 | err = validate_map_op(map, key_sz, value_sz, true); |
10637 | 0 | if (err) |
10638 | 0 | return libbpf_err(err); |
10639 | | |
10640 | 0 | return bpf_map_lookup_elem_flags(map->fd, key, value, flags); |
10641 | 0 | } |
10642 | | |
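A minimal sketch of the per-CPU sizing rule enforced by validate_map_op()
above: the value buffer must hold libbpf_num_possible_cpus() elements, each
rounded up to 8 bytes. It assumes a hypothetical BPF_MAP_TYPE_PERCPU_ARRAY
with __u64 values (already 8-byte sized).

    #include <errno.h>
    #include <stdlib.h>
    #include <bpf/libbpf.h>

    static long sum_percpu_slot(const struct bpf_map *map, __u32 key)
    {
        int i, err, ncpu = libbpf_num_possible_cpus();
        __u64 *vals;
        long sum = 0;

        if (ncpu < 0)
            return ncpu;

        vals = calloc(ncpu, sizeof(*vals));
        if (!vals)
            return -ENOMEM;

        err = bpf_map__lookup_elem(map, &key, sizeof(key),
                                   vals, ncpu * sizeof(*vals), 0);
        for (i = 0; !err && i < ncpu; i++)
            sum += vals[i];

        free(vals);
        return err ? err : sum;
    }
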
10643 | | int bpf_map__update_elem(const struct bpf_map *map, |
10644 | | const void *key, size_t key_sz, |
10645 | | const void *value, size_t value_sz, __u64 flags) |
10646 | 0 | { |
10647 | 0 | int err; |
10648 | |
10649 | 0 | err = validate_map_op(map, key_sz, value_sz, true); |
10650 | 0 | if (err) |
10651 | 0 | return libbpf_err(err); |
10652 | | |
10653 | 0 | return bpf_map_update_elem(map->fd, key, value, flags); |
10654 | 0 | } |
10655 | | |
10656 | | int bpf_map__delete_elem(const struct bpf_map *map, |
10657 | | const void *key, size_t key_sz, __u64 flags) |
10658 | 0 | { |
10659 | 0 | int err; |
10660 | |
10661 | 0 | err = validate_map_op(map, key_sz, 0, false /* check_value_sz */); |
10662 | 0 | if (err) |
10663 | 0 | return libbpf_err(err); |
10664 | | |
10665 | 0 | return bpf_map_delete_elem_flags(map->fd, key, flags); |
10666 | 0 | } |
10667 | | |
10668 | | int bpf_map__lookup_and_delete_elem(const struct bpf_map *map, |
10669 | | const void *key, size_t key_sz, |
10670 | | void *value, size_t value_sz, __u64 flags) |
10671 | 0 | { |
10672 | 0 | int err; |
10673 | |
10674 | 0 | err = validate_map_op(map, key_sz, value_sz, true); |
10675 | 0 | if (err) |
10676 | 0 | return libbpf_err(err); |
10677 | | |
10678 | 0 | return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags); |
10679 | 0 | } |
10680 | | |
10681 | | int bpf_map__get_next_key(const struct bpf_map *map, |
10682 | | const void *cur_key, void *next_key, size_t key_sz) |
10683 | 0 | { |
10684 | 0 | int err; |
10685 | |
10686 | 0 | err = validate_map_op(map, key_sz, 0, false /* check_value_sz */); |
10687 | 0 | if (err) |
10688 | 0 | return libbpf_err(err); |
10689 | | |
10690 | 0 | return bpf_map_get_next_key(map->fd, cur_key, next_key); |
10691 | 0 | } |
10692 | | |
10693 | | long libbpf_get_error(const void *ptr) |
10694 | 16.5k | { |
10695 | 16.5k | if (!IS_ERR_OR_NULL(ptr)) |
10696 | 5.44k | return 0; |
10697 | | |
10698 | 11.1k | if (IS_ERR(ptr)) |
10699 | 0 | errno = -PTR_ERR(ptr); |
10700 | | |
10701 | | /* If ptr == NULL, then errno should already be set by the failing |
10702 | | * API, because libbpf never returns NULL on success and now always |
10703 | | * sets errno on error. So there is no extra errno handling for the |
10704 | | * ptr == NULL case. |
10705 | | */ |
10706 | 11.1k | return -errno; |
10707 | 16.5k | } |
10708 | | |
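A minimal sketch of the error convention that libbpf_get_error() describes
above: in libbpf 1.x, pointer-returning APIs return NULL with errno set, so a
plain NULL check plus errno is equivalent, and libbpf_get_error() mainly
remains for pre-1.0 callers.

    #include <errno.h>
    #include <stdio.h>
    #include <bpf/libbpf.h>

    static struct bpf_object *open_or_report(const char *path)
    {
        struct bpf_object *obj = bpf_object__open_file(path, NULL);

        if (!obj) /* same condition as libbpf_get_error(obj) != 0 */
            fprintf(stderr, "failed to open %s: %d\n", path, -errno);
        return obj;
    }
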
10709 | | /* Replace link's underlying BPF program with the new one */ |
10710 | | int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog) |
10711 | 0 | { |
10712 | 0 | int ret; |
10713 | 0 | int prog_fd = bpf_program__fd(prog); |
10714 | |
10715 | 0 | if (prog_fd < 0) { |
10716 | 0 | pr_warn("prog '%s': can't use BPF program without FD (was it loaded?)\n", |
10717 | 0 | prog->name); |
10718 | 0 | return libbpf_err(-EINVAL); |
10719 | 0 | } |
10720 | | |
10721 | 0 | ret = bpf_link_update(bpf_link__fd(link), prog_fd, NULL); |
10722 | 0 | return libbpf_err_errno(ret); |
10723 | 0 | } |
10724 | | |
10725 | | /* Release "ownership" of the underlying BPF resource (typically, a BPF |
10726 | | * program attached to some BPF hook, e.g., tracepoint, kprobe, etc). A |
10727 | | * disconnected link, when destructed through a bpf_link__destroy() call, |
10728 | | * won't attempt to detach/unregister that BPF resource. This is useful in |
10729 | | * situations where, say, an attached BPF program has to outlive the |
10730 | | * userspace program that attached it. Depending on the type of BPF |
10731 | | * program, though, additional steps (like pinning the BPF program in |
10732 | | * BPF FS) might be necessary to ensure that exit of the userspace program |
10733 | | * doesn't trigger automatic detachment and cleanup inside the kernel. |
10734 | | */ |
10735 | | void bpf_link__disconnect(struct bpf_link *link) |
10736 | 0 | { |
10737 | 0 | link->disconnected = true; |
10738 | 0 | } |
10739 | | |
10740 | | int bpf_link__destroy(struct bpf_link *link) |
10741 | 0 | { |
10742 | 0 | int err = 0; |
10743 | |
10744 | 0 | if (IS_ERR_OR_NULL(link)) |
10745 | 0 | return 0; |
10746 | | |
10747 | 0 | if (!link->disconnected && link->detach) |
10748 | 0 | err = link->detach(link); |
10749 | 0 | if (link->pin_path) |
10750 | 0 | free(link->pin_path); |
10751 | 0 | if (link->dealloc) |
10752 | 0 | link->dealloc(link); |
10753 | 0 | else |
10754 | 0 | free(link); |
10755 | |
10756 | 0 | return libbpf_err(err); |
10757 | 0 | } |
10758 | | |
10759 | | int bpf_link__fd(const struct bpf_link *link) |
10760 | 0 | { |
10761 | 0 | return link->fd; |
10762 | 0 | } |
10763 | | |
10764 | | const char *bpf_link__pin_path(const struct bpf_link *link) |
10765 | 0 | { |
10766 | 0 | return link->pin_path; |
10767 | 0 | } |
10768 | | |
10769 | | static int bpf_link__detach_fd(struct bpf_link *link) |
10770 | 0 | { |
10771 | 0 | return libbpf_err_errno(close(link->fd)); |
10772 | 0 | } |
10773 | | |
10774 | | struct bpf_link *bpf_link__open(const char *path) |
10775 | 0 | { |
10776 | 0 | struct bpf_link *link; |
10777 | 0 | int fd; |
10778 | |
10779 | 0 | fd = bpf_obj_get(path); |
10780 | 0 | if (fd < 0) { |
10781 | 0 | fd = -errno; |
10782 | 0 | pr_warn("failed to open link at %s: %d\n", path, fd); |
10783 | 0 | return libbpf_err_ptr(fd); |
10784 | 0 | } |
10785 | | |
10786 | 0 | link = calloc(1, sizeof(*link)); |
10787 | 0 | if (!link) { |
10788 | 0 | close(fd); |
10789 | 0 | return libbpf_err_ptr(-ENOMEM); |
10790 | 0 | } |
10791 | 0 | link->detach = &bpf_link__detach_fd; |
10792 | 0 | link->fd = fd; |
10793 | |
10794 | 0 | link->pin_path = strdup(path); |
10795 | 0 | if (!link->pin_path) { |
10796 | 0 | bpf_link__destroy(link); |
10797 | 0 | return libbpf_err_ptr(-ENOMEM); |
10798 | 0 | } |
10799 | | |
10800 | 0 | return link; |
10801 | 0 | } |
10802 | | |
10803 | | int bpf_link__detach(struct bpf_link *link) |
10804 | 0 | { |
10805 | 0 | return bpf_link_detach(link->fd) ? -errno : 0; |
10806 | 0 | } |
10807 | | |
10808 | | int bpf_link__pin(struct bpf_link *link, const char *path) |
10809 | 0 | { |
10810 | 0 | int err; |
10811 | |
10812 | 0 | if (link->pin_path) |
10813 | 0 | return libbpf_err(-EBUSY); |
10814 | 0 | err = make_parent_dir(path); |
10815 | 0 | if (err) |
10816 | 0 | return libbpf_err(err); |
10817 | 0 | err = check_path(path); |
10818 | 0 | if (err) |
10819 | 0 | return libbpf_err(err); |
10820 | | |
10821 | 0 | link->pin_path = strdup(path); |
10822 | 0 | if (!link->pin_path) |
10823 | 0 | return libbpf_err(-ENOMEM); |
10824 | | |
10825 | 0 | if (bpf_obj_pin(link->fd, link->pin_path)) { |
10826 | 0 | err = -errno; |
10827 | 0 | zfree(&link->pin_path); |
10828 | 0 | return libbpf_err(err); |
10829 | 0 | } |
10830 | | |
10831 | 0 | pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path); |
10832 | 0 | return 0; |
10833 | 0 | } |
10834 | | |
10835 | | int bpf_link__unpin(struct bpf_link *link) |
10836 | 0 | { |
10837 | 0 | int err; |
10838 | |
10839 | 0 | if (!link->pin_path) |
10840 | 0 | return libbpf_err(-EINVAL); |
10841 | | |
10842 | 0 | err = unlink(link->pin_path); |
10843 | 0 | if (err != 0) |
10844 | 0 | return -errno; |
10845 | | |
10846 | 0 | pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path); |
10847 | 0 | zfree(&link->pin_path); |
10848 | 0 | return 0; |
10849 | 0 | } |
10850 | | |
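A minimal sketch combining bpf_link__pin() and bpf_link__disconnect() from
above: pinning keeps the link alive in BPF FS after the process exits, and
disconnecting turns the final destroy into pure memory cleanup. The pin path
is hypothetical.

    #include <bpf/libbpf.h>

    static int persist_attachment(struct bpf_link *link)
    {
        int err = bpf_link__pin(link, "/sys/fs/bpf/my_link");

        if (err)
            return err;

        bpf_link__disconnect(link);     /* don't detach on destroy */
        return bpf_link__destroy(link); /* frees memory; pin keeps link */
    }
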
10851 | | struct bpf_link_perf { |
10852 | | struct bpf_link link; |
10853 | | int perf_event_fd; |
10854 | | /* legacy kprobe support: keep track of probe identifier and type */ |
10855 | | char *legacy_probe_name; |
10856 | | bool legacy_is_kprobe; |
10857 | | bool legacy_is_retprobe; |
10858 | | }; |
10859 | | |
10860 | | static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe); |
10861 | | static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe); |
10862 | | |
10863 | | static int bpf_link_perf_detach(struct bpf_link *link) |
10864 | 0 | { |
10865 | 0 | struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); |
10866 | 0 | int err = 0; |
10867 | |
10868 | 0 | if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0) |
10869 | 0 | err = -errno; |
10870 | |
10871 | 0 | if (perf_link->perf_event_fd != link->fd) |
10872 | 0 | close(perf_link->perf_event_fd); |
10873 | 0 | close(link->fd); |
10874 | | |
10875 | | /* legacy uprobe/kprobe needs to be removed after perf event fd closure */ |
10876 | 0 | if (perf_link->legacy_probe_name) { |
10877 | 0 | if (perf_link->legacy_is_kprobe) { |
10878 | 0 | err = remove_kprobe_event_legacy(perf_link->legacy_probe_name, |
10879 | 0 | perf_link->legacy_is_retprobe); |
10880 | 0 | } else { |
10881 | 0 | err = remove_uprobe_event_legacy(perf_link->legacy_probe_name, |
10882 | 0 | perf_link->legacy_is_retprobe); |
10883 | 0 | } |
10884 | 0 | } |
10885 | |
10886 | 0 | return err; |
10887 | 0 | } |
10888 | | |
10889 | | static void bpf_link_perf_dealloc(struct bpf_link *link) |
10890 | 0 | { |
10891 | 0 | struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); |
10892 | |
10893 | 0 | free(perf_link->legacy_probe_name); |
10894 | 0 | free(perf_link); |
10895 | 0 | } |
10896 | | |
10897 | | struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd, |
10898 | | const struct bpf_perf_event_opts *opts) |
10899 | 0 | { |
10900 | 0 | struct bpf_link_perf *link; |
10901 | 0 | int prog_fd, link_fd = -1, err; |
10902 | 0 | bool force_ioctl_attach; |
10903 | |
10904 | 0 | if (!OPTS_VALID(opts, bpf_perf_event_opts)) |
10905 | 0 | return libbpf_err_ptr(-EINVAL); |
10906 | | |
10907 | 0 | if (pfd < 0) { |
10908 | 0 | pr_warn("prog '%s': invalid perf event FD %d\n", |
10909 | 0 | prog->name, pfd); |
10910 | 0 | return libbpf_err_ptr(-EINVAL); |
10911 | 0 | } |
10912 | 0 | prog_fd = bpf_program__fd(prog); |
10913 | 0 | if (prog_fd < 0) { |
10914 | 0 | pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", |
10915 | 0 | prog->name); |
10916 | 0 | return libbpf_err_ptr(-EINVAL); |
10917 | 0 | } |
10918 | | |
10919 | 0 | link = calloc(1, sizeof(*link)); |
10920 | 0 | if (!link) |
10921 | 0 | return libbpf_err_ptr(-ENOMEM); |
10922 | 0 | link->link.detach = &bpf_link_perf_detach; |
10923 | 0 | link->link.dealloc = &bpf_link_perf_dealloc; |
10924 | 0 | link->perf_event_fd = pfd; |
10925 | |
10926 | 0 | force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false); |
10927 | 0 | if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) { |
10928 | 0 | DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts, |
10929 | 0 | .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0)); |
10930 | |
10931 | 0 | link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts); |
10932 | 0 | if (link_fd < 0) { |
10933 | 0 | err = -errno; |
10934 | 0 | pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %s\n", |
10935 | 0 | prog->name, pfd, errstr(err)); |
10936 | 0 | goto err_out; |
10937 | 0 | } |
10938 | 0 | link->link.fd = link_fd; |
10939 | 0 | } else { |
10940 | 0 | if (OPTS_GET(opts, bpf_cookie, 0)) { |
10941 | 0 | pr_warn("prog '%s': user context value is not supported\n", prog->name); |
10942 | 0 | err = -EOPNOTSUPP; |
10943 | 0 | goto err_out; |
10944 | 0 | } |
10945 | | |
10946 | 0 | if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) { |
10947 | 0 | err = -errno; |
10948 | 0 | pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n", |
10949 | 0 | prog->name, pfd, errstr(err)); |
10950 | 0 | if (err == -EPROTO) |
10951 | 0 | pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n", |
10952 | 0 | prog->name, pfd); |
10953 | 0 | goto err_out; |
10954 | 0 | } |
10955 | 0 | link->link.fd = pfd; |
10956 | 0 | } |
10957 | 0 | if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { |
10958 | 0 | err = -errno; |
10959 | 0 | pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n", |
10960 | 0 | prog->name, pfd, errstr(err)); |
10961 | 0 | goto err_out; |
10962 | 0 | } |
10963 | | |
10964 | 0 | return &link->link; |
10965 | 0 | err_out: |
10966 | 0 | if (link_fd >= 0) |
10967 | 0 | close(link_fd); |
10968 | 0 | free(link); |
10969 | 0 | return libbpf_err_ptr(err); |
10970 | 0 | } |
10971 | | |
10972 | | struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd) |
10973 | 0 | { |
10974 | 0 | return bpf_program__attach_perf_event_opts(prog, pfd, NULL); |
10975 | 0 | } |
10976 | | |
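A minimal sketch for bpf_program__attach_perf_event() above, assuming prog is
a loaded BPF_PROG_TYPE_PERF_EVENT program; the software CPU-clock event on
CPU 0 at 99 Hz is an arbitrary choice.

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>
    #include <bpf/libbpf.h>

    static struct bpf_link *attach_cpu_clock(const struct bpf_program *prog)
    {
        struct perf_event_attr attr;
        struct bpf_link *link;
        int pfd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;
        attr.freq = 1;
        attr.sample_freq = 99;

        pfd = syscall(SYS_perf_event_open, &attr, -1 /* pid */,
                      0 /* cpu */, -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
        if (pfd < 0)
            return NULL;

        link = bpf_program__attach_perf_event(prog, pfd);
        if (!link)
            close(pfd); /* link would have owned pfd on success */
        return link;
    }
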
10977 | | /* |
10978 | | * This function is expected to parse an integer in the range [0, 2^31-1] from |
10979 | | * the given file using the scanf format string fmt. If the actual parsed value |
10980 | | * is negative, the result might be indistinguishable from an error. |
10981 | | */ |
10982 | | static int parse_uint_from_file(const char *file, const char *fmt) |
10983 | 0 | { |
10984 | 0 | int err, ret; |
10985 | 0 | FILE *f; |
10986 | |
10987 | 0 | f = fopen(file, "re"); |
10988 | 0 | if (!f) { |
10989 | 0 | err = -errno; |
10990 | 0 | pr_debug("failed to open '%s': %s\n", file, errstr(err)); |
10991 | 0 | return err; |
10992 | 0 | } |
10993 | 0 | err = fscanf(f, fmt, &ret); |
10994 | 0 | if (err != 1) { |
10995 | 0 | err = err == EOF ? -EIO : -errno; |
10996 | 0 | pr_debug("failed to parse '%s': %s\n", file, errstr(err)); |
10997 | 0 | fclose(f); |
10998 | 0 | return err; |
10999 | 0 | } |
11000 | 0 | fclose(f); |
11001 | 0 | return ret; |
11002 | 0 | } |
11003 | | |
11004 | | static int determine_kprobe_perf_type(void) |
11005 | 0 | { |
11006 | 0 | const char *file = "/sys/bus/event_source/devices/kprobe/type"; |
11007 | |
11008 | 0 | return parse_uint_from_file(file, "%d\n"); |
11009 | 0 | } |
11010 | | |
11011 | | static int determine_uprobe_perf_type(void) |
11012 | 0 | { |
11013 | 0 | const char *file = "/sys/bus/event_source/devices/uprobe/type"; |
11014 | |
11015 | 0 | return parse_uint_from_file(file, "%d\n"); |
11016 | 0 | } |
11017 | | |
11018 | | static int determine_kprobe_retprobe_bit(void) |
11019 | 0 | { |
11020 | 0 | const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe"; |
11021 | |
11022 | 0 | return parse_uint_from_file(file, "config:%d\n"); |
11023 | 0 | } |
11024 | | |
11025 | | static int determine_uprobe_retprobe_bit(void) |
11026 | 0 | { |
11027 | 0 | const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe"; |
11028 | |
11029 | 0 | return parse_uint_from_file(file, "config:%d\n"); |
11030 | 0 | } |
11031 | | |
11032 | 0 | #define PERF_UPROBE_REF_CTR_OFFSET_BITS 32 |
11033 | 0 | #define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32 |
11034 | | |
11035 | | static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, |
11036 | | uint64_t offset, int pid, size_t ref_ctr_off) |
11037 | 0 | { |
11038 | 0 | const size_t attr_sz = sizeof(struct perf_event_attr); |
11039 | 0 | struct perf_event_attr attr; |
11040 | 0 | int type, pfd; |
11041 | |
11042 | 0 | if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS)) |
11043 | 0 | return -EINVAL; |
11044 | | |
11045 | 0 | memset(&attr, 0, attr_sz); |
11046 | |
11047 | 0 | type = uprobe ? determine_uprobe_perf_type() |
11048 | 0 | : determine_kprobe_perf_type(); |
11049 | 0 | if (type < 0) { |
11050 | 0 | pr_warn("failed to determine %s perf type: %s\n", |
11051 | 0 | uprobe ? "uprobe" : "kprobe", |
11052 | 0 | errstr(type)); |
11053 | 0 | return type; |
11054 | 0 | } |
11055 | 0 | if (retprobe) { |
11056 | 0 | int bit = uprobe ? determine_uprobe_retprobe_bit() |
11057 | 0 | : determine_kprobe_retprobe_bit(); |
11058 | |
11059 | 0 | if (bit < 0) { |
11060 | 0 | pr_warn("failed to determine %s retprobe bit: %s\n", |
11061 | 0 | uprobe ? "uprobe" : "kprobe", |
11062 | 0 | errstr(bit)); |
11063 | 0 | return bit; |
11064 | 0 | } |
11065 | 0 | attr.config |= 1 << bit; |
11066 | 0 | } |
11067 | 0 | attr.size = attr_sz; |
11068 | 0 | attr.type = type; |
11069 | 0 | attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT; |
11070 | 0 | attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */ |
11071 | 0 | attr.config2 = offset; /* kprobe_addr or probe_offset */ |
11072 | | |
11073 | | /* pid filter is meaningful only for uprobes */ |
11074 | 0 | pfd = syscall(__NR_perf_event_open, &attr, |
11075 | 0 | pid < 0 ? -1 : pid /* pid */, |
11076 | 0 | pid == -1 ? 0 : -1 /* cpu */, |
11077 | 0 | -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); |
11078 | 0 | return pfd >= 0 ? pfd : -errno; |
11079 | 0 | } |
11080 | | |
11081 | | static int append_to_file(const char *file, const char *fmt, ...) |
11082 | 0 | { |
11083 | 0 | int fd, n, err = 0; |
11084 | 0 | va_list ap; |
11085 | 0 | char buf[1024]; |
11086 | |
11087 | 0 | va_start(ap, fmt); |
11088 | 0 | n = vsnprintf(buf, sizeof(buf), fmt, ap); |
11089 | 0 | va_end(ap); |
11090 | |
11091 | 0 | if (n < 0 || n >= sizeof(buf)) |
11092 | 0 | return -EINVAL; |
11093 | | |
11094 | 0 | fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0); |
11095 | 0 | if (fd < 0) |
11096 | 0 | return -errno; |
11097 | | |
11098 | 0 | if (write(fd, buf, n) < 0) |
11099 | 0 | err = -errno; |
11100 | |
11101 | 0 | close(fd); |
11102 | 0 | return err; |
11103 | 0 | } |
11104 | | |
11105 | 0 | #define DEBUGFS "/sys/kernel/debug/tracing" |
11106 | 0 | #define TRACEFS "/sys/kernel/tracing" |
11107 | | |
11108 | | static bool use_debugfs(void) |
11109 | 0 | { |
11110 | 0 | static int has_debugfs = -1; |
11111 | |
11112 | 0 | if (has_debugfs < 0) |
11113 | 0 | has_debugfs = faccessat(AT_FDCWD, DEBUGFS, F_OK, AT_EACCESS) == 0; |
11114 | |
11115 | 0 | return has_debugfs == 1; |
11116 | 0 | } |
11117 | | |
11118 | | static const char *tracefs_path(void) |
11119 | 0 | { |
11120 | 0 | return use_debugfs() ? DEBUGFS : TRACEFS; |
11121 | 0 | } |
11122 | | |
11123 | | static const char *tracefs_kprobe_events(void) |
11124 | 0 | { |
11125 | 0 | return use_debugfs() ? DEBUGFS"/kprobe_events" : TRACEFS"/kprobe_events"; |
11126 | 0 | } |
11127 | | |
11128 | | static const char *tracefs_uprobe_events(void) |
11129 | 0 | { |
11130 | 0 | return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events"; |
11131 | 0 | } |
11132 | | |
11133 | | static const char *tracefs_available_filter_functions(void) |
11134 | 0 | { |
11135 | 0 | return use_debugfs() ? DEBUGFS"/available_filter_functions" |
11136 | 0 | : TRACEFS"/available_filter_functions"; |
11137 | 0 | } |
11138 | | |
11139 | | static const char *tracefs_available_filter_functions_addrs(void) |
11140 | 0 | { |
11141 | 0 | return use_debugfs() ? DEBUGFS"/available_filter_functions_addrs" |
11142 | 0 | : TRACEFS"/available_filter_functions_addrs"; |
11143 | 0 | } |
11144 | | |
11145 | | static void gen_probe_legacy_event_name(char *buf, size_t buf_sz, |
11146 | | const char *name, size_t offset) |
11147 | 0 | { |
11148 | 0 | static int index = 0; |
11149 | 0 | int i; |
11150 | |
11151 | 0 | snprintf(buf, buf_sz, "libbpf_%u_%d_%s_0x%zx", getpid(), |
11152 | 0 | __sync_fetch_and_add(&index, 1), name, offset); |
11153 | | |
11154 | | /* sanitize the name portion embedded in the probe name */ |
11155 | 0 | for (i = 0; buf[i]; i++) { |
11156 | 0 | if (!isalnum(buf[i])) |
11157 | 0 | buf[i] = '_'; |
11158 | 0 | } |
11159 | 0 | } |
11160 | | |
11161 | | static int add_kprobe_event_legacy(const char *probe_name, bool retprobe, |
11162 | | const char *kfunc_name, size_t offset) |
11163 | 0 | { |
11164 | 0 | return append_to_file(tracefs_kprobe_events(), "%c:%s/%s %s+0x%zx", |
11165 | 0 | retprobe ? 'r' : 'p', |
11166 | 0 | retprobe ? "kretprobes" : "kprobes", |
11167 | 0 | probe_name, kfunc_name, offset); |
11168 | 0 | } |
11169 | | |
11170 | | static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe) |
11171 | 0 | { |
11172 | 0 | return append_to_file(tracefs_kprobe_events(), "-:%s/%s", |
11173 | 0 | retprobe ? "kretprobes" : "kprobes", probe_name); |
11174 | 0 | } |
11175 | | |
11176 | | static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe) |
11177 | 0 | { |
11178 | 0 | char file[256]; |
11179 | |
11180 | 0 | snprintf(file, sizeof(file), "%s/events/%s/%s/id", |
11181 | 0 | tracefs_path(), retprobe ? "kretprobes" : "kprobes", probe_name); |
11182 | |
11183 | 0 | return parse_uint_from_file(file, "%d\n"); |
11184 | 0 | } |
11185 | | |
11186 | | static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe, |
11187 | | const char *kfunc_name, size_t offset, int pid) |
11188 | 0 | { |
11189 | 0 | const size_t attr_sz = sizeof(struct perf_event_attr); |
11190 | 0 | struct perf_event_attr attr; |
11191 | 0 | int type, pfd, err; |
11192 | |
11193 | 0 | err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset); |
11194 | 0 | if (err < 0) { |
11195 | 0 | pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n", |
11196 | 0 | kfunc_name, offset, |
11197 | 0 | errstr(err)); |
11198 | 0 | return err; |
11199 | 0 | } |
11200 | 0 | type = determine_kprobe_perf_type_legacy(probe_name, retprobe); |
11201 | 0 | if (type < 0) { |
11202 | 0 | err = type; |
11203 | 0 | pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n", |
11204 | 0 | kfunc_name, offset, |
11205 | 0 | errstr(err)); |
11206 | 0 | goto err_clean_legacy; |
11207 | 0 | } |
11208 | | |
11209 | 0 | memset(&attr, 0, attr_sz); |
11210 | 0 | attr.size = attr_sz; |
11211 | 0 | attr.config = type; |
11212 | 0 | attr.type = PERF_TYPE_TRACEPOINT; |
11213 | |
11214 | 0 | pfd = syscall(__NR_perf_event_open, &attr, |
11215 | 0 | pid < 0 ? -1 : pid, /* pid */ |
11216 | 0 | pid == -1 ? 0 : -1, /* cpu */ |
11217 | 0 | -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); |
11218 | 0 | if (pfd < 0) { |
11219 | 0 | err = -errno; |
11220 | 0 | pr_warn("legacy kprobe perf_event_open() failed: %s\n", |
11221 | 0 | errstr(err)); |
11222 | 0 | goto err_clean_legacy; |
11223 | 0 | } |
11224 | 0 | return pfd; |
11225 | | |
11226 | 0 | err_clean_legacy: |
11227 | | /* Clear the newly added legacy kprobe_event */ |
11228 | 0 | remove_kprobe_event_legacy(probe_name, retprobe); |
11229 | 0 | return err; |
11230 | 0 | } |
11231 | | |
11232 | | static const char *arch_specific_syscall_pfx(void) |
11233 | 0 | { |
11234 | 0 | #if defined(__x86_64__) |
11235 | 0 | return "x64"; |
11236 | | #elif defined(__i386__) |
11237 | | return "ia32"; |
11238 | | #elif defined(__s390x__) |
11239 | | return "s390x"; |
11240 | | #elif defined(__s390__) |
11241 | | return "s390"; |
11242 | | #elif defined(__arm__) |
11243 | | return "arm"; |
11244 | | #elif defined(__aarch64__) |
11245 | | return "arm64"; |
11246 | | #elif defined(__mips__) |
11247 | | return "mips"; |
11248 | | #elif defined(__riscv) |
11249 | | return "riscv"; |
11250 | | #elif defined(__powerpc__) |
11251 | | return "powerpc"; |
11252 | | #elif defined(__powerpc64__) |
11253 | | return "powerpc64"; |
11254 | | #else |
11255 | | return NULL; |
11256 | | #endif |
11257 | 0 | } |
11258 | | |
11259 | | int probe_kern_syscall_wrapper(int token_fd) |
11260 | 0 | { |
11261 | 0 | char syscall_name[64]; |
11262 | 0 | const char *ksys_pfx; |
11263 | |
11264 | 0 | ksys_pfx = arch_specific_syscall_pfx(); |
11265 | 0 | if (!ksys_pfx) |
11266 | 0 | return 0; |
11267 | | |
11268 | 0 | snprintf(syscall_name, sizeof(syscall_name), "__%s_sys_bpf", ksys_pfx); |
11269 | |
11270 | 0 | if (determine_kprobe_perf_type() >= 0) { |
11271 | 0 | int pfd; |
11272 | |
11273 | 0 | pfd = perf_event_open_probe(false, false, syscall_name, 0, getpid(), 0); |
11274 | 0 | if (pfd >= 0) |
11275 | 0 | close(pfd); |
11276 | |
11277 | 0 | return pfd >= 0 ? 1 : 0; |
11278 | 0 | } else { /* legacy mode */ |
11279 | 0 | char probe_name[MAX_EVENT_NAME_LEN]; |
11280 | |
|
11281 | 0 | gen_probe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0); |
11282 | 0 | if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0) |
11283 | 0 | return 0; |
11284 | | |
11285 | 0 | (void)remove_kprobe_event_legacy(probe_name, false); |
11286 | 0 | return 1; |
11287 | 0 | } |
11288 | 0 | } |
11289 | | |
11290 | | struct bpf_link * |
11291 | | bpf_program__attach_kprobe_opts(const struct bpf_program *prog, |
11292 | | const char *func_name, |
11293 | | const struct bpf_kprobe_opts *opts) |
11294 | 0 | { |
11295 | 0 | DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); |
11296 | 0 | enum probe_attach_mode attach_mode; |
11297 | 0 | char *legacy_probe = NULL; |
11298 | 0 | struct bpf_link *link; |
11299 | 0 | size_t offset; |
11300 | 0 | bool retprobe, legacy; |
11301 | 0 | int pfd, err; |
11302 | |
11303 | 0 | if (!OPTS_VALID(opts, bpf_kprobe_opts)) |
11304 | 0 | return libbpf_err_ptr(-EINVAL); |
11305 | | |
11306 | 0 | attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT); |
11307 | 0 | retprobe = OPTS_GET(opts, retprobe, false); |
11308 | 0 | offset = OPTS_GET(opts, offset, 0); |
11309 | 0 | pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); |
11310 | |
11311 | 0 | legacy = determine_kprobe_perf_type() < 0; |
11312 | 0 | switch (attach_mode) { |
11313 | 0 | case PROBE_ATTACH_MODE_LEGACY: |
11314 | 0 | legacy = true; |
11315 | 0 | pe_opts.force_ioctl_attach = true; |
11316 | 0 | break; |
11317 | 0 | case PROBE_ATTACH_MODE_PERF: |
11318 | 0 | if (legacy) |
11319 | 0 | return libbpf_err_ptr(-ENOTSUP); |
11320 | 0 | pe_opts.force_ioctl_attach = true; |
11321 | 0 | break; |
11322 | 0 | case PROBE_ATTACH_MODE_LINK: |
11323 | 0 | if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK)) |
11324 | 0 | return libbpf_err_ptr(-ENOTSUP); |
11325 | 0 | break; |
11326 | 0 | case PROBE_ATTACH_MODE_DEFAULT: |
11327 | 0 | break; |
11328 | 0 | default: |
11329 | 0 | return libbpf_err_ptr(-EINVAL); |
11330 | 0 | } |
11331 | | |
11332 | 0 | if (!legacy) { |
11333 | 0 | pfd = perf_event_open_probe(false /* uprobe */, retprobe, |
11334 | 0 | func_name, offset, |
11335 | 0 | -1 /* pid */, 0 /* ref_ctr_off */); |
11336 | 0 | } else { |
11337 | 0 | char probe_name[MAX_EVENT_NAME_LEN]; |
11338 | |
11339 | 0 | gen_probe_legacy_event_name(probe_name, sizeof(probe_name), |
11340 | 0 | func_name, offset); |
11341 | |
11342 | 0 | legacy_probe = strdup(probe_name); |
11343 | 0 | if (!legacy_probe) |
11344 | 0 | return libbpf_err_ptr(-ENOMEM); |
11345 | | |
11346 | 0 | pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name, |
11347 | 0 | offset, -1 /* pid */); |
11348 | 0 | } |
11349 | 0 | if (pfd < 0) { |
11350 | 0 | err = -errno; |
11351 | 0 | pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n", |
11352 | 0 | prog->name, retprobe ? "kretprobe" : "kprobe", |
11353 | 0 | func_name, offset, |
11354 | 0 | errstr(err)); |
11355 | 0 | goto err_out; |
11356 | 0 | } |
11357 | 0 | link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); |
11358 | 0 | err = libbpf_get_error(link); |
11359 | 0 | if (err) { |
11360 | 0 | close(pfd); |
11361 | 0 | pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n", |
11362 | 0 | prog->name, retprobe ? "kretprobe" : "kprobe", |
11363 | 0 | func_name, offset, |
11364 | 0 | errstr(err)); |
11365 | 0 | goto err_clean_legacy; |
11366 | 0 | } |
11367 | 0 | if (legacy) { |
11368 | 0 | struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); |
11369 | |
11370 | 0 | perf_link->legacy_probe_name = legacy_probe; |
11371 | 0 | perf_link->legacy_is_kprobe = true; |
11372 | 0 | perf_link->legacy_is_retprobe = retprobe; |
11373 | 0 | } |
11374 | |
11375 | 0 | return link; |
11376 | | |
11377 | 0 | err_clean_legacy: |
11378 | 0 | if (legacy) |
11379 | 0 | remove_kprobe_event_legacy(legacy_probe, retprobe); |
11380 | 0 | err_out: |
11381 | 0 | free(legacy_probe); |
11382 | 0 | return libbpf_err_ptr(err); |
11383 | 0 | } |
11384 | | |
11385 | | struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog, |
11386 | | bool retprobe, |
11387 | | const char *func_name) |
11388 | 0 | { |
11389 | 0 | DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts, |
11390 | 0 | .retprobe = retprobe, |
11391 | 0 | ); |
11392 | |
11393 | 0 | return bpf_program__attach_kprobe_opts(prog, func_name, &opts); |
11394 | 0 | } |
11395 | | |
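A minimal sketch of bpf_program__attach_kprobe_opts() above with explicit
options; the target function and offset are arbitrary examples, and note that
offsets are rejected for kretprobes by the SEC() parser further below.

    #include <bpf/libbpf.h>

    static struct bpf_link *attach_mid_func(const struct bpf_program *prog)
    {
        LIBBPF_OPTS(bpf_kprobe_opts, opts,
            .offset = 0x10, /* byte offset into the function */
            .retprobe = false,
        );

        return bpf_program__attach_kprobe_opts(prog, "tcp_v4_connect", &opts);
    }
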
11396 | | struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog, |
11397 | | const char *syscall_name, |
11398 | | const struct bpf_ksyscall_opts *opts) |
11399 | 0 | { |
11400 | 0 | LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts); |
11401 | 0 | char func_name[128]; |
11402 | |
11403 | 0 | if (!OPTS_VALID(opts, bpf_ksyscall_opts)) |
11404 | 0 | return libbpf_err_ptr(-EINVAL); |
11405 | | |
11406 | 0 | if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) { |
11407 | | /* arch_specific_syscall_pfx() should never return NULL here |
11408 | | * because it is guarded by kernel_supports(). However, since |
11409 | | * the compiler does not know that, we keep an explicit NULL |
11410 | | * fallback as well. |
11411 | | */ |
11412 | 0 | snprintf(func_name, sizeof(func_name), "__%s_sys_%s", |
11413 | 0 | arch_specific_syscall_pfx() ? : "", syscall_name); |
11414 | 0 | } else { |
11415 | 0 | snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name); |
11416 | 0 | } |
11417 | |
11418 | 0 | kprobe_opts.retprobe = OPTS_GET(opts, retprobe, false); |
11419 | 0 | kprobe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); |
11420 | |
11421 | 0 | return bpf_program__attach_kprobe_opts(prog, func_name, &kprobe_opts); |
11422 | 0 | } |
11423 | | |
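A minimal sketch for bpf_program__attach_ksyscall() above: callers pass only
the syscall name and libbpf picks the __<arch>_sys_ or __se_sys_ kernel
symbol. "openat" is an arbitrary example; prog is assumed to be a kprobe
program written for the syscall-wrapper convention.

    #include <bpf/libbpf.h>

    static struct bpf_link *attach_openat(const struct bpf_program *prog)
    {
        LIBBPF_OPTS(bpf_ksyscall_opts, opts, .retprobe = false);

        return bpf_program__attach_ksyscall(prog, "openat", &opts);
    }
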
11424 | | /* Adapted from perf/util/string.c */ |
11425 | | bool glob_match(const char *str, const char *pat) |
11426 | 0 | { |
11427 | 0 | while (*str && *pat && *pat != '*') { |
11428 | 0 | if (*pat == '?') { /* Matches any single character */ |
11429 | 0 | str++; |
11430 | 0 | pat++; |
11431 | 0 | continue; |
11432 | 0 | } |
11433 | 0 | if (*str != *pat) |
11434 | 0 | return false; |
11435 | 0 | str++; |
11436 | 0 | pat++; |
11437 | 0 | } |
11438 | | /* Check wild card */ |
11439 | 0 | if (*pat == '*') { |
11440 | 0 | while (*pat == '*') |
11441 | 0 | pat++; |
11442 | 0 | if (!*pat) /* Tail wild card matches all */ |
11443 | 0 | return true; |
11444 | 0 | while (*str) |
11445 | 0 | if (glob_match(str++, pat)) |
11446 | 0 | return true; |
11447 | 0 | } |
11448 | 0 | return !*str && !*pat; |
11449 | 0 | } |
11450 | | |
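A few worked cases for glob_match() above ('?' matches exactly one character,
'*' matches any run including the empty one, and patterns are anchored at both
ends); the assertions assume access to the function's declaration, e.g. via
libbpf_internal.h.

    #include <assert.h>

    static void glob_match_examples(void)
    {
        assert(glob_match("tcp_v4_connect", "tcp_*"));
        assert(glob_match("tcp_v4_connect", "tcp_v?_*"));
        assert(!glob_match("udp_sendmsg", "tcp_*"));
        assert(glob_match("abc", "a*c"));
        assert(!glob_match("abc", "ab")); /* anchored: trailing 'c' unmatched */
    }
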
11451 | | struct kprobe_multi_resolve { |
11452 | | const char *pattern; |
11453 | | unsigned long *addrs; |
11454 | | size_t cap; |
11455 | | size_t cnt; |
11456 | | }; |
11457 | | |
11458 | | struct avail_kallsyms_data { |
11459 | | char **syms; |
11460 | | size_t cnt; |
11461 | | struct kprobe_multi_resolve *res; |
11462 | | }; |
11463 | | |
11464 | | static int avail_func_cmp(const void *a, const void *b) |
11465 | 0 | { |
11466 | 0 | return strcmp(*(const char **)a, *(const char **)b); |
11467 | 0 | } |
11468 | | |
11469 | | static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type, |
11470 | | const char *sym_name, void *ctx) |
11471 | 0 | { |
11472 | 0 | struct avail_kallsyms_data *data = ctx; |
11473 | 0 | struct kprobe_multi_resolve *res = data->res; |
11474 | 0 | int err; |
11475 | |
11476 | 0 | if (!glob_match(sym_name, res->pattern)) |
11477 | 0 | return 0; |
11478 | | |
11479 | 0 | if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp)) { |
11480 | | /* Some kernel versions strip out the .llvm.<hash> suffix from |
11481 | | * function names reported in available_filter_functions, but |
11482 | | * don't do so for kallsyms. While this is clearly a kernel |
11483 | | * bug (fixed by [0]), we try to accommodate it in libbpf to |
11484 | | * make multi-kprobe usability a bit better: if no match is |
11485 | | * found, we strip the .llvm.<hash> suffix and try one more time. |
11486 | | * |
11487 | | * [0] fb6a421fb615 ("kallsyms: Match symbols exactly with CONFIG_LTO_CLANG") |
11488 | | */ |
11489 | 0 | char sym_trim[256], *psym_trim = sym_trim, *sym_sfx; |
11490 | |
11491 | 0 | if (!(sym_sfx = strstr(sym_name, ".llvm."))) |
11492 | 0 | return 0; |
11493 | | |
11494 | | /* psym_trim vs sym_trim dance is done to avoid pointer vs array |
11495 | | * coercion differences and get proper `const char **` pointer |
11496 | | * which avail_func_cmp() expects |
11497 | | */ |
11498 | 0 | snprintf(sym_trim, sizeof(sym_trim), "%.*s", (int)(sym_sfx - sym_name), sym_name); |
11499 | 0 | if (!bsearch(&psym_trim, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp)) |
11500 | 0 | return 0; |
11501 | 0 | } |
11502 | | |
11503 | 0 | err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1); |
11504 | 0 | if (err) |
11505 | 0 | return err; |
11506 | | |
11507 | 0 | res->addrs[res->cnt++] = (unsigned long)sym_addr; |
11508 | 0 | return 0; |
11509 | 0 | } |
11510 | | |
11511 | | static int libbpf_available_kallsyms_parse(struct kprobe_multi_resolve *res) |
11512 | 0 | { |
11513 | 0 | const char *available_functions_file = tracefs_available_filter_functions(); |
11514 | 0 | struct avail_kallsyms_data data; |
11515 | 0 | char sym_name[500]; |
11516 | 0 | FILE *f; |
11517 | 0 | int err = 0, ret, i; |
11518 | 0 | char **syms = NULL; |
11519 | 0 | size_t cap = 0, cnt = 0; |
11520 | |
11521 | 0 | f = fopen(available_functions_file, "re"); |
11522 | 0 | if (!f) { |
11523 | 0 | err = -errno; |
11524 | 0 | pr_warn("failed to open %s: %s\n", available_functions_file, errstr(err)); |
11525 | 0 | return err; |
11526 | 0 | } |
11527 | | |
11528 | 0 | while (true) { |
11529 | 0 | char *name; |
11530 | |
11531 | 0 | ret = fscanf(f, "%499s%*[^\n]\n", sym_name); |
11532 | 0 | if (ret == EOF && feof(f)) |
11533 | 0 | break; |
11534 | | |
11535 | 0 | if (ret != 1) { |
11536 | 0 | pr_warn("failed to parse available_filter_functions entry: %d\n", ret); |
11537 | 0 | err = -EINVAL; |
11538 | 0 | goto cleanup; |
11539 | 0 | } |
11540 | | |
11541 | 0 | if (!glob_match(sym_name, res->pattern)) |
11542 | 0 | continue; |
11543 | | |
11544 | 0 | err = libbpf_ensure_mem((void **)&syms, &cap, sizeof(*syms), cnt + 1); |
11545 | 0 | if (err) |
11546 | 0 | goto cleanup; |
11547 | | |
11548 | 0 | name = strdup(sym_name); |
11549 | 0 | if (!name) { |
11550 | 0 | err = -errno; |
11551 | 0 | goto cleanup; |
11552 | 0 | } |
11553 | | |
11554 | 0 | syms[cnt++] = name; |
11555 | 0 | } |
11556 | | |
11557 | | /* no entries found, bail out */ |
11558 | 0 | if (cnt == 0) { |
11559 | 0 | err = -ENOENT; |
11560 | 0 | goto cleanup; |
11561 | 0 | } |
11562 | | |
11563 | | /* sort available functions */ |
11564 | 0 | qsort(syms, cnt, sizeof(*syms), avail_func_cmp); |
11565 | |
11566 | 0 | data.syms = syms; |
11567 | 0 | data.res = res; |
11568 | 0 | data.cnt = cnt; |
11569 | 0 | libbpf_kallsyms_parse(avail_kallsyms_cb, &data); |
11570 | |
11571 | 0 | if (res->cnt == 0) |
11572 | 0 | err = -ENOENT; |
11573 | |
11574 | 0 | cleanup: |
11575 | 0 | for (i = 0; i < cnt; i++) |
11576 | 0 | free((char *)syms[i]); |
11577 | 0 | free(syms); |
11578 | |
11579 | 0 | fclose(f); |
11580 | 0 | return err; |
11581 | 0 | } |
11582 | | |
11583 | | static bool has_available_filter_functions_addrs(void) |
11584 | 0 | { |
11585 | 0 | return access(tracefs_available_filter_functions_addrs(), R_OK) != -1; |
11586 | 0 | } |
11587 | | |
11588 | | static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res) |
11589 | 0 | { |
11590 | 0 | const char *available_path = tracefs_available_filter_functions_addrs(); |
11591 | 0 | char sym_name[500]; |
11592 | 0 | FILE *f; |
11593 | 0 | int ret, err = 0; |
11594 | 0 | unsigned long long sym_addr; |
11595 | |
11596 | 0 | f = fopen(available_path, "re"); |
11597 | 0 | if (!f) { |
11598 | 0 | err = -errno; |
11599 | 0 | pr_warn("failed to open %s: %s\n", available_path, errstr(err)); |
11600 | 0 | return err; |
11601 | 0 | } |
11602 | | |
11603 | 0 | while (true) { |
11604 | 0 | ret = fscanf(f, "%llx %499s%*[^\n]\n", &sym_addr, sym_name); |
11605 | 0 | if (ret == EOF && feof(f)) |
11606 | 0 | break; |
11607 | | |
11608 | 0 | if (ret != 2) { |
11609 | 0 | pr_warn("failed to parse available_filter_functions_addrs entry: %d\n", |
11610 | 0 | ret); |
11611 | 0 | err = -EINVAL; |
11612 | 0 | goto cleanup; |
11613 | 0 | } |
11614 | | |
11615 | 0 | if (!glob_match(sym_name, res->pattern)) |
11616 | 0 | continue; |
11617 | | |
11618 | 0 | err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, |
11619 | 0 | sizeof(*res->addrs), res->cnt + 1); |
11620 | 0 | if (err) |
11621 | 0 | goto cleanup; |
11622 | | |
11623 | 0 | res->addrs[res->cnt++] = (unsigned long)sym_addr; |
11624 | 0 | } |
11625 | | |
11626 | 0 | if (res->cnt == 0) |
11627 | 0 | err = -ENOENT; |
11628 | |
11629 | 0 | cleanup: |
11630 | 0 | fclose(f); |
11631 | 0 | return err; |
11632 | 0 | } |
11633 | | |
11634 | | struct bpf_link * |
11635 | | bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, |
11636 | | const char *pattern, |
11637 | | const struct bpf_kprobe_multi_opts *opts) |
11638 | 0 | { |
11639 | 0 | LIBBPF_OPTS(bpf_link_create_opts, lopts); |
11640 | 0 | struct kprobe_multi_resolve res = { |
11641 | 0 | .pattern = pattern, |
11642 | 0 | }; |
11643 | 0 | enum bpf_attach_type attach_type; |
11644 | 0 | struct bpf_link *link = NULL; |
11645 | 0 | const unsigned long *addrs; |
11646 | 0 | int err, link_fd, prog_fd; |
11647 | 0 | bool retprobe, session, unique_match; |
11648 | 0 | const __u64 *cookies; |
11649 | 0 | const char **syms; |
11650 | 0 | size_t cnt; |
11651 | |
11652 | 0 | if (!OPTS_VALID(opts, bpf_kprobe_multi_opts)) |
11653 | 0 | return libbpf_err_ptr(-EINVAL); |
11654 | | |
11655 | 0 | prog_fd = bpf_program__fd(prog); |
11656 | 0 | if (prog_fd < 0) { |
11657 | 0 | pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", |
11658 | 0 | prog->name); |
11659 | 0 | return libbpf_err_ptr(-EINVAL); |
11660 | 0 | } |
11661 | | |
11662 | 0 | syms = OPTS_GET(opts, syms, false); |
11663 | 0 | addrs = OPTS_GET(opts, addrs, false); |
11664 | 0 | cnt = OPTS_GET(opts, cnt, false); |
11665 | 0 | cookies = OPTS_GET(opts, cookies, false); |
11666 | 0 | unique_match = OPTS_GET(opts, unique_match, false); |
11667 | |
11668 | 0 | if (!pattern && !addrs && !syms) |
11669 | 0 | return libbpf_err_ptr(-EINVAL); |
11670 | 0 | if (pattern && (addrs || syms || cookies || cnt)) |
11671 | 0 | return libbpf_err_ptr(-EINVAL); |
11672 | 0 | if (!pattern && !cnt) |
11673 | 0 | return libbpf_err_ptr(-EINVAL); |
11674 | 0 | if (!pattern && unique_match) |
11675 | 0 | return libbpf_err_ptr(-EINVAL); |
11676 | 0 | if (addrs && syms) |
11677 | 0 | return libbpf_err_ptr(-EINVAL); |
11678 | | |
11679 | 0 | if (pattern) { |
11680 | 0 | if (has_available_filter_functions_addrs()) |
11681 | 0 | err = libbpf_available_kprobes_parse(&res); |
11682 | 0 | else |
11683 | 0 | err = libbpf_available_kallsyms_parse(&res); |
11684 | 0 | if (err) |
11685 | 0 | goto error; |
11686 | | |
11687 | 0 | if (unique_match && res.cnt != 1) { |
11688 | 0 | pr_warn("prog '%s': failed to find a unique match for '%s' (%zu matches)\n", |
11689 | 0 | prog->name, pattern, res.cnt); |
11690 | 0 | err = -EINVAL; |
11691 | 0 | goto error; |
11692 | 0 | } |
11693 | | |
11694 | 0 | addrs = res.addrs; |
11695 | 0 | cnt = res.cnt; |
11696 | 0 | } |
11697 | | |
11698 | 0 | retprobe = OPTS_GET(opts, retprobe, false); |
11699 | 0 | session = OPTS_GET(opts, session, false); |
11700 | |
11701 | 0 | if (retprobe && session) |
11702 | 0 | return libbpf_err_ptr(-EINVAL); |
11703 | | |
11704 | 0 | attach_type = session ? BPF_TRACE_KPROBE_SESSION : BPF_TRACE_KPROBE_MULTI; |
11705 | |
11706 | 0 | lopts.kprobe_multi.syms = syms; |
11707 | 0 | lopts.kprobe_multi.addrs = addrs; |
11708 | 0 | lopts.kprobe_multi.cookies = cookies; |
11709 | 0 | lopts.kprobe_multi.cnt = cnt; |
11710 | 0 | lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0; |
11711 | |
11712 | 0 | link = calloc(1, sizeof(*link)); |
11713 | 0 | if (!link) { |
11714 | 0 | err = -ENOMEM; |
11715 | 0 | goto error; |
11716 | 0 | } |
11717 | 0 | link->detach = &bpf_link__detach_fd; |
11718 | |
11719 | 0 | link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts); |
11720 | 0 | if (link_fd < 0) { |
11721 | 0 | err = -errno; |
11722 | 0 | pr_warn("prog '%s': failed to attach: %s\n", |
11723 | 0 | prog->name, errstr(err)); |
11724 | 0 | goto error; |
11725 | 0 | } |
11726 | 0 | link->fd = link_fd; |
11727 | 0 | free(res.addrs); |
11728 | 0 | return link; |
11729 | | |
11730 | 0 | error: |
11731 | 0 | free(link); |
11732 | 0 | free(res.addrs); |
11733 | 0 | return libbpf_err_ptr(err); |
11734 | 0 | } |
11735 | | |
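A minimal sketch of pattern-based attachment via
bpf_program__attach_kprobe_multi_opts() above: the glob is resolved against
available_filter_functions(_addrs), and syms/addrs/cookies/cnt must stay unset
when a pattern is given. "tcp_*" is an arbitrary pattern, and the program is
assumed to come from a SEC("kprobe.multi") definition.

    #include <bpf/libbpf.h>

    static struct bpf_link *attach_tcp_entries(const struct bpf_program *prog)
    {
        LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .retprobe = false);

        return bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", &opts);
    }
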
11736 | | static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
11737 | 0 | { |
11738 | 0 | DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts); |
11739 | 0 | unsigned long offset = 0; |
11740 | 0 | const char *func_name; |
11741 | 0 | char *func; |
11742 | 0 | int n; |
11743 | |
11744 | 0 | *link = NULL; |
11745 | | |
11746 | | /* no auto-attach for SEC("kprobe") and SEC("kretprobe") */ |
11747 | 0 | if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0) |
11748 | 0 | return 0; |
11749 | | |
11750 | 0 | opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/"); |
11751 | 0 | if (opts.retprobe) |
11752 | 0 | func_name = prog->sec_name + sizeof("kretprobe/") - 1; |
11753 | 0 | else |
11754 | 0 | func_name = prog->sec_name + sizeof("kprobe/") - 1; |
11755 | |
11756 | 0 | n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset); |
11757 | 0 | if (n < 1) { |
11758 | 0 | pr_warn("kprobe name is invalid: %s\n", func_name); |
11759 | 0 | return -EINVAL; |
11760 | 0 | } |
11761 | 0 | if (opts.retprobe && offset != 0) { |
11762 | 0 | free(func); |
11763 | 0 | pr_warn("kretprobes do not support offset specification\n"); |
11764 | 0 | return -EINVAL; |
11765 | 0 | } |
11766 | | |
11767 | 0 | opts.offset = offset; |
11768 | 0 | *link = bpf_program__attach_kprobe_opts(prog, func, &opts); |
11769 | 0 | free(func); |
11770 | 0 | return libbpf_get_error(*link); |
11771 | 0 | } |
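/* Illustrative sketch, not part of libbpf.c: the explicit equivalent of the
 * SEC("kprobe/do_unlinkat+0x10") auto-attach parsed above. `prog` is an
 * assumed, already-loaded BPF program.
 */
static struct bpf_link *example_attach_kprobe(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_kprobe_opts, opts,
		.offset = 0x10,	/* probe 0x10 bytes into the function */
	);

	/* returns NULL and sets errno on failure */
	return bpf_program__attach_kprobe_opts(prog, "do_unlinkat", &opts);
}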
11772 | | |
11773 | | static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
11774 | 0 | { |
11775 | 0 | LIBBPF_OPTS(bpf_ksyscall_opts, opts); |
11776 | 0 | const char *syscall_name; |
11777 | |
11778 | 0 | *link = NULL; |
11779 | | |
11780 | | /* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */ |
11781 | 0 | if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0) |
11782 | 0 | return 0; |
11783 | | |
11784 | 0 | opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/"); |
11785 | 0 | if (opts.retprobe) |
11786 | 0 | syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1; |
11787 | 0 | else |
11788 | 0 | syscall_name = prog->sec_name + sizeof("ksyscall/") - 1; |
11789 | |
11790 | 0 | *link = bpf_program__attach_ksyscall(prog, syscall_name, &opts); |
11791 | 0 | return *link ? 0 : -errno; |
11792 | 0 | } |
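/* Illustrative note: SEC("ksyscall/<name>") takes a bare syscall name and
 * bpf_program__attach_ksyscall() resolves the arch-specific kernel symbol
 * internally, e.g.:
 *
 *   SEC("ksyscall/unlinkat")     roughly a kprobe on __x64_sys_unlinkat on x86-64
 *   SEC("kretsyscall/unlinkat")  the return-probe flavor
 */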
11793 | | |
11794 | | static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
11795 | 0 | { |
11796 | 0 | LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); |
11797 | 0 | const char *spec; |
11798 | 0 | char *pattern; |
11799 | 0 | int n; |
11800 | |
11801 | 0 | *link = NULL; |
11802 | | |
11803 | | /* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */ |
11804 | 0 | if (strcmp(prog->sec_name, "kprobe.multi") == 0 || |
11805 | 0 | strcmp(prog->sec_name, "kretprobe.multi") == 0) |
11806 | 0 | return 0; |
11807 | | |
11808 | 0 | opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/"); |
11809 | 0 | if (opts.retprobe) |
11810 | 0 | spec = prog->sec_name + sizeof("kretprobe.multi/") - 1; |
11811 | 0 | else |
11812 | 0 | spec = prog->sec_name + sizeof("kprobe.multi/") - 1; |
11813 | |
11814 | 0 | n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern); |
11815 | 0 | if (n < 1) { |
11816 | 0 | pr_warn("kprobe multi pattern is invalid: %s\n", spec); |
11817 | 0 | return -EINVAL; |
11818 | 0 | } |
11819 | | |
11820 | 0 | *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts); |
11821 | 0 | free(pattern); |
11822 | 0 | return libbpf_get_error(*link); |
11823 | 0 | } |
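/* Illustrative sketch, not part of libbpf.c: the explicit equivalent of
 * SEC("kprobe.multi/tcp_*"), attaching one program to every matching kernel
 * function through a single link. `prog` is an assumed loaded program.
 */
static struct bpf_link *example_attach_kprobe_multi(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);

	/* the pattern supports '*' and '?' wildcards, as parsed above */
	return bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", &opts);
}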
11824 | | |
11825 | | static int attach_kprobe_session(const struct bpf_program *prog, long cookie, |
11826 | | struct bpf_link **link) |
11827 | 0 | { |
11828 | 0 | LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .session = true); |
11829 | 0 | const char *spec; |
11830 | 0 | char *pattern; |
11831 | 0 | int n; |
11832 | |
11833 | 0 | *link = NULL; |
11834 | | |
11835 | | /* no auto-attach for SEC("kprobe.session") */ |
11836 | 0 | if (strcmp(prog->sec_name, "kprobe.session") == 0) |
11837 | 0 | return 0; |
11838 | | |
11839 | 0 | spec = prog->sec_name + sizeof("kprobe.session/") - 1; |
11840 | 0 | n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern); |
11841 | 0 | if (n < 1) { |
11842 | 0 | pr_warn("kprobe session pattern is invalid: %s\n", spec); |
11843 | 0 | return -EINVAL; |
11844 | 0 | } |
11845 | | |
11846 | 0 | *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts); |
11847 | 0 | free(pattern); |
11848 | 0 | return *link ? 0 : -errno; |
11849 | 0 | } |
11850 | | |
11851 | | static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
11852 | 0 | { |
11853 | 0 | char *probe_type = NULL, *binary_path = NULL, *func_name = NULL; |
11854 | 0 | LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); |
11855 | 0 | int n, ret = -EINVAL; |
11856 | |
11857 | 0 | *link = NULL; |
11858 | |
11859 | 0 | n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]", |
11860 | 0 | &probe_type, &binary_path, &func_name); |
11861 | 0 | switch (n) { |
11862 | 0 | case 1: |
11863 | | /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */ |
11864 | 0 | ret = 0; |
11865 | 0 | break; |
11866 | 0 | case 3: |
11867 | 0 | opts.session = str_has_pfx(probe_type, "uprobe.session"); |
11868 | 0 | opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi"); |
11869 | |
11870 | 0 | *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts); |
11871 | 0 | ret = libbpf_get_error(*link); |
11872 | 0 | break; |
11873 | 0 | default: |
11874 | 0 | pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name, |
11875 | 0 | prog->sec_name); |
11876 | 0 | break; |
11877 | 0 | } |
11878 | 0 | free(probe_type); |
11879 | 0 | free(binary_path); |
11880 | 0 | free(func_name); |
11881 | 0 | return ret; |
11882 | 0 | } |
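/* Illustrative section names accepted by the parser above (paths and
 * symbols are examples only):
 *
 *   SEC("uprobe.multi//usr/lib/libc.so.6:pthread_*")  // absolute path + glob
 *   SEC("uretprobe.multi/libc.so.6:malloc")           // bare name, resolved later
 *   SEC("uprobe.session//usr/bin/app:handle_*")       // entry+return session
 */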
11883 | | |
11884 | | static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe, |
11885 | | const char *binary_path, size_t offset) |
11886 | 0 | { |
11887 | 0 | return append_to_file(tracefs_uprobe_events(), "%c:%s/%s %s:0x%zx", |
11888 | 0 | retprobe ? 'r' : 'p', |
11889 | 0 | retprobe ? "uretprobes" : "uprobes", |
11890 | 0 | probe_name, binary_path, offset); |
11891 | 0 | } |
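/* For reference, with illustrative values the line appended above looks like
 *
 *   p:uprobes/my_probe /usr/lib/libc.so.6:0x9a0c0
 *
 * ('r:' and the "uretprobes" group for return probes), and
 * remove_uprobe_event_legacy() below appends "-:uprobes/my_probe" to delete
 * the event again.
 */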
11892 | | |
11893 | | static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe) |
11894 | 0 | { |
11895 | 0 | return append_to_file(tracefs_uprobe_events(), "-:%s/%s", |
11896 | 0 | retprobe ? "uretprobes" : "uprobes", probe_name); |
11897 | 0 | } |
11898 | | |
11899 | | static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe) |
11900 | 0 | { |
11901 | 0 | char file[512]; |
11902 | |
11903 | 0 | snprintf(file, sizeof(file), "%s/events/%s/%s/id", |
11904 | 0 | tracefs_path(), retprobe ? "uretprobes" : "uprobes", probe_name); |
11905 | |
11906 | 0 | return parse_uint_from_file(file, "%d\n"); |
11907 | 0 | } |
11908 | | |
11909 | | static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe, |
11910 | | const char *binary_path, size_t offset, int pid) |
11911 | 0 | { |
11912 | 0 | const size_t attr_sz = sizeof(struct perf_event_attr); |
11913 | 0 | struct perf_event_attr attr; |
11914 | 0 | int type, pfd, err; |
11915 | |
11916 | 0 | err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset); |
11917 | 0 | if (err < 0) { |
11918 | 0 | pr_warn("failed to add legacy uprobe event for %s:0x%zx: %s\n", |
11919 | 0 | binary_path, (size_t)offset, errstr(err)); |
11920 | 0 | return err; |
11921 | 0 | } |
11922 | 0 | type = determine_uprobe_perf_type_legacy(probe_name, retprobe); |
11923 | 0 | if (type < 0) { |
11924 | 0 | err = type; |
11925 | 0 | pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %s\n", |
11926 | 0 | binary_path, offset, errstr(err)); |
11927 | 0 | goto err_clean_legacy; |
11928 | 0 | } |
11929 | | |
11930 | 0 | memset(&attr, 0, attr_sz); |
11931 | 0 | attr.size = attr_sz; |
11932 | 0 | attr.config = type; |
11933 | 0 | attr.type = PERF_TYPE_TRACEPOINT; |
11934 | |
11935 | 0 | pfd = syscall(__NR_perf_event_open, &attr, |
11936 | 0 | pid < 0 ? -1 : pid, /* pid */ |
11937 | 0 | pid == -1 ? 0 : -1, /* cpu */ |
11938 | 0 | -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); |
11939 | 0 | if (pfd < 0) { |
11940 | 0 | err = -errno; |
11941 | 0 | pr_warn("legacy uprobe perf_event_open() failed: %s\n", errstr(err)); |
11942 | 0 | goto err_clean_legacy; |
11943 | 0 | } |
11944 | 0 | return pfd; |
11945 | | |
11946 | 0 | err_clean_legacy: |
11947 | | /* Clear the newly added legacy uprobe_event */ |
11948 | 0 | remove_uprobe_event_legacy(probe_name, retprobe); |
11949 | 0 | return err; |
11950 | 0 | } |
11951 | | |
11952 | | /* Find offset of function name in archive specified by path. Currently |
11953 | | * supported are .zip files that do not compress their contents, as used on |
11954 | | * Android in the form of APKs, for example. "file_name" is the name of the ELF |
11955 | | * file inside the archive. "func_name" matches symbol name or name@@LIB for |
11956 | | * library functions. |
11957 | | * |
11958 | | * An overview of the APK format is provided here:
11959 | | * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents |
11960 | | */ |
11961 | | static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name, |
11962 | | const char *func_name) |
11963 | 0 | { |
11964 | 0 | struct zip_archive *archive; |
11965 | 0 | struct zip_entry entry; |
11966 | 0 | long ret; |
11967 | 0 | Elf *elf; |
11968 | |
11969 | 0 | archive = zip_archive_open(archive_path); |
11970 | 0 | if (IS_ERR(archive)) { |
11971 | 0 | ret = PTR_ERR(archive); |
11972 | 0 | pr_warn("zip: failed to open %s: %ld\n", archive_path, ret); |
11973 | 0 | return ret; |
11974 | 0 | } |
11975 | | |
11976 | 0 | ret = zip_archive_find_entry(archive, file_name, &entry); |
11977 | 0 | if (ret) { |
11978 | 0 | pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name, |
11979 | 0 | archive_path, ret); |
11980 | 0 | goto out; |
11981 | 0 | } |
11982 | 0 | pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path, |
11983 | 0 | (unsigned long)entry.data_offset); |
11984 | |
11985 | 0 | if (entry.compression) { |
11986 | 0 | pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name, |
11987 | 0 | archive_path); |
11988 | 0 | ret = -LIBBPF_ERRNO__FORMAT; |
11989 | 0 | goto out; |
11990 | 0 | } |
11991 | | |
11992 | 0 | elf = elf_memory((void *)entry.data, entry.data_length); |
11993 | 0 | if (!elf) { |
11994 | 0 | pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path, |
11995 | 0 | elf_errmsg(-1)); |
11996 | 0 | ret = -LIBBPF_ERRNO__LIBELF; |
11997 | 0 | goto out; |
11998 | 0 | } |
11999 | | |
12000 | 0 | ret = elf_find_func_offset(elf, file_name, func_name); |
12001 | 0 | if (ret > 0) { |
12002 | 0 | pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n", |
12003 | 0 | func_name, file_name, archive_path, entry.data_offset, ret, |
12004 | 0 | ret + entry.data_offset); |
12005 | 0 | ret += entry.data_offset; |
12006 | 0 | } |
12007 | 0 | elf_end(elf); |
12008 | |
12009 | 0 | out: |
12010 | 0 | zip_archive_close(archive); |
12011 | 0 | return ret; |
12012 | 0 | } |
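/* Illustrative note: this path is reached from bpf_program__attach_uprobe_opts()
 * below when the binary path uses the "!/" archive separator, e.g.
 *
 *   bpf_program__attach_uprobe_opts(prog, -1,
 *           "/data/app/example.apk!/lib/arm64-v8a/libfoo.so", 0, &opts);
 *
 * with opts.func_name naming the symbol inside the embedded ELF file.
 */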
12013 | | |
12014 | | static const char *arch_specific_lib_paths(void) |
12015 | 0 | { |
12016 | | /* |
12017 | | * Based on https://packages.debian.org/sid/libc6. |
12018 | | * |
12019 | | * Assume that the traced program is built for the same architecture |
12020 | | * as libbpf, which should cover the vast majority of cases. |
12021 | | */ |
12022 | 0 | #if defined(__x86_64__) |
12023 | 0 | return "/lib/x86_64-linux-gnu"; |
12024 | | #elif defined(__i386__) |
12025 | | return "/lib/i386-linux-gnu"; |
12026 | | #elif defined(__s390x__) |
12027 | | return "/lib/s390x-linux-gnu"; |
12028 | | #elif defined(__s390__) |
12029 | | return "/lib/s390-linux-gnu"; |
12030 | | #elif defined(__arm__) && defined(__SOFTFP__) |
12031 | | return "/lib/arm-linux-gnueabi"; |
12032 | | #elif defined(__arm__) && !defined(__SOFTFP__) |
12033 | | return "/lib/arm-linux-gnueabihf"; |
12034 | | #elif defined(__aarch64__) |
12035 | | return "/lib/aarch64-linux-gnu"; |
12036 | | #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64 |
12037 | | return "/lib/mips64el-linux-gnuabi64"; |
12038 | | #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32 |
12039 | | return "/lib/mipsel-linux-gnu"; |
12040 | | #elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ |
12041 | | return "/lib/powerpc64le-linux-gnu"; |
12042 | | #elif defined(__sparc__) && defined(__arch64__) |
12043 | | return "/lib/sparc64-linux-gnu"; |
12044 | | #elif defined(__riscv) && __riscv_xlen == 64 |
12045 | | return "/lib/riscv64-linux-gnu"; |
12046 | | #else |
12047 | | return NULL; |
12048 | | #endif |
12049 | 0 | } |
12050 | | |
12051 | | /* Get full path to program/shared library. */ |
12052 | | static int resolve_full_path(const char *file, char *result, size_t result_sz) |
12053 | 0 | { |
12054 | 0 | const char *search_paths[3] = {}; |
12055 | 0 | int i, perm; |
12056 | |
12057 | 0 | if (str_has_sfx(file, ".so") || strstr(file, ".so.")) { |
12058 | 0 | search_paths[0] = getenv("LD_LIBRARY_PATH"); |
12059 | 0 | search_paths[1] = "/usr/lib64:/usr/lib"; |
12060 | 0 | search_paths[2] = arch_specific_lib_paths(); |
12061 | 0 | perm = R_OK; |
12062 | 0 | } else { |
12063 | 0 | search_paths[0] = getenv("PATH"); |
12064 | 0 | search_paths[1] = "/usr/bin:/usr/sbin"; |
12065 | 0 | perm = R_OK | X_OK; |
12066 | 0 | } |
12067 | |
12068 | 0 | for (i = 0; i < ARRAY_SIZE(search_paths); i++) { |
12069 | 0 | const char *s; |
12070 | |
12071 | 0 | if (!search_paths[i]) |
12072 | 0 | continue; |
12073 | 0 | for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) { |
12074 | 0 | char *next_path; |
12075 | 0 | int seg_len; |
12076 | |
12077 | 0 | if (s[0] == ':') |
12078 | 0 | s++; |
12079 | 0 | next_path = strchr(s, ':'); |
12080 | 0 | seg_len = next_path ? next_path - s : strlen(s); |
12081 | 0 | if (!seg_len) |
12082 | 0 | continue; |
12083 | 0 | snprintf(result, result_sz, "%.*s/%s", seg_len, s, file); |
12084 | | /* ensure it has required permissions */ |
12085 | 0 | if (faccessat(AT_FDCWD, result, perm, AT_EACCESS) < 0) |
12086 | 0 | continue; |
12087 | 0 | pr_debug("resolved '%s' to '%s'\n", file, result); |
12088 | 0 | return 0; |
12089 | 0 | } |
12090 | 0 | } |
12091 | 0 | return -ENOENT; |
12092 | 0 | } |
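/* Example behavior (illustrative; the actual result is system-dependent):
 *
 *   char buf[PATH_MAX];
 *   resolve_full_path("libc.so.6", buf, sizeof(buf));
 *     -> 0, buf = "/usr/lib64/libc.so.6"  (LD_LIBRARY_PATH, then
 *        /usr/lib64:/usr/lib, then the arch-specific dir above)
 *   resolve_full_path("sleep", buf, sizeof(buf));
 *     -> 0, buf = "/usr/bin/sleep"        (PATH, then /usr/bin:/usr/sbin)
 */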
12093 | | |
12094 | | struct bpf_link * |
12095 | | bpf_program__attach_uprobe_multi(const struct bpf_program *prog, |
12096 | | pid_t pid, |
12097 | | const char *path, |
12098 | | const char *func_pattern, |
12099 | | const struct bpf_uprobe_multi_opts *opts) |
12100 | 0 | { |
12101 | 0 | const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL; |
12102 | 0 | LIBBPF_OPTS(bpf_link_create_opts, lopts); |
12103 | 0 | unsigned long *resolved_offsets = NULL; |
12104 | 0 | enum bpf_attach_type attach_type; |
12105 | 0 | int err = 0, link_fd, prog_fd; |
12106 | 0 | struct bpf_link *link = NULL; |
12107 | 0 | char full_path[PATH_MAX]; |
12108 | 0 | bool retprobe, session; |
12109 | 0 | const __u64 *cookies; |
12110 | 0 | const char **syms; |
12111 | 0 | size_t cnt; |
12112 | |
12113 | 0 | if (!OPTS_VALID(opts, bpf_uprobe_multi_opts)) |
12114 | 0 | return libbpf_err_ptr(-EINVAL); |
12115 | | |
12116 | 0 | prog_fd = bpf_program__fd(prog); |
12117 | 0 | if (prog_fd < 0) { |
12118 | 0 | pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", |
12119 | 0 | prog->name); |
12120 | 0 | return libbpf_err_ptr(-EINVAL); |
12121 | 0 | } |
12122 | | |
12123 | 0 | syms = OPTS_GET(opts, syms, NULL); |
12124 | 0 | offsets = OPTS_GET(opts, offsets, NULL); |
12125 | 0 | ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL); |
12126 | 0 | cookies = OPTS_GET(opts, cookies, NULL); |
12127 | 0 | cnt = OPTS_GET(opts, cnt, 0); |
12128 | 0 | retprobe = OPTS_GET(opts, retprobe, false); |
12129 | 0 | session = OPTS_GET(opts, session, false); |
12130 | | |
12131 | | /* |
12132 | | * User can specify two mutually exclusive sets of inputs:
12133 | | * |
12134 | | * 1) use only path/func_pattern/pid arguments |
12135 | | * |
12136 | | * 2) use path/pid with allowed combinations of: |
12137 | | * syms/offsets/ref_ctr_offsets/cookies/cnt |
12138 | | * |
12139 | | * - syms and offsets are mutually exclusive |
12140 | | * - ref_ctr_offsets and cookies are optional |
12141 | | * |
12142 | | * Any other usage results in error. |
12143 | | */ |
12144 | |
12145 | 0 | if (!path) |
12146 | 0 | return libbpf_err_ptr(-EINVAL); |
12147 | 0 | if (!func_pattern && cnt == 0) |
12148 | 0 | return libbpf_err_ptr(-EINVAL); |
12149 | | |
12150 | 0 | if (func_pattern) { |
12151 | 0 | if (syms || offsets || ref_ctr_offsets || cookies || cnt) |
12152 | 0 | return libbpf_err_ptr(-EINVAL); |
12153 | 0 | } else { |
12154 | 0 | if (!!syms == !!offsets) |
12155 | 0 | return libbpf_err_ptr(-EINVAL); |
12156 | 0 | } |
12157 | | |
12158 | 0 | if (retprobe && session) |
12159 | 0 | return libbpf_err_ptr(-EINVAL); |
12160 | | |
12161 | 0 | if (func_pattern) { |
12162 | 0 | if (!strchr(path, '/')) { |
12163 | 0 | err = resolve_full_path(path, full_path, sizeof(full_path)); |
12164 | 0 | if (err) { |
12165 | 0 | pr_warn("prog '%s': failed to resolve full path for '%s': %s\n", |
12166 | 0 | prog->name, path, errstr(err)); |
12167 | 0 | return libbpf_err_ptr(err); |
12168 | 0 | } |
12169 | 0 | path = full_path; |
12170 | 0 | } |
12171 | | |
12172 | 0 | err = elf_resolve_pattern_offsets(path, func_pattern, |
12173 | 0 | &resolved_offsets, &cnt); |
12174 | 0 | if (err < 0) |
12175 | 0 | return libbpf_err_ptr(err); |
12176 | 0 | offsets = resolved_offsets; |
12177 | 0 | } else if (syms) { |
12178 | 0 | err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets, STT_FUNC); |
12179 | 0 | if (err < 0) |
12180 | 0 | return libbpf_err_ptr(err); |
12181 | 0 | offsets = resolved_offsets; |
12182 | 0 | } |
12183 | | |
12184 | 0 | attach_type = session ? BPF_TRACE_UPROBE_SESSION : BPF_TRACE_UPROBE_MULTI; |
12185 | |
12186 | 0 | lopts.uprobe_multi.path = path; |
12187 | 0 | lopts.uprobe_multi.offsets = offsets; |
12188 | 0 | lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets; |
12189 | 0 | lopts.uprobe_multi.cookies = cookies; |
12190 | 0 | lopts.uprobe_multi.cnt = cnt; |
12191 | 0 | lopts.uprobe_multi.flags = retprobe ? BPF_F_UPROBE_MULTI_RETURN : 0; |
12192 | |
12193 | 0 | if (pid == 0) |
12194 | 0 | pid = getpid(); |
12195 | 0 | if (pid > 0) |
12196 | 0 | lopts.uprobe_multi.pid = pid; |
12197 | |
12198 | 0 | link = calloc(1, sizeof(*link)); |
12199 | 0 | if (!link) { |
12200 | 0 | err = -ENOMEM; |
12201 | 0 | goto error; |
12202 | 0 | } |
12203 | 0 | link->detach = &bpf_link__detach_fd; |
12204 | |
12205 | 0 | link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts); |
12206 | 0 | if (link_fd < 0) { |
12207 | 0 | err = -errno; |
12208 | 0 | pr_warn("prog '%s': failed to attach multi-uprobe: %s\n", |
12209 | 0 | prog->name, errstr(err)); |
12210 | 0 | goto error; |
12211 | 0 | } |
12212 | 0 | link->fd = link_fd; |
12213 | 0 | free(resolved_offsets); |
12214 | 0 | return link; |
12215 | | |
12216 | 0 | error: |
12217 | 0 | free(resolved_offsets); |
12218 | 0 | free(link); |
12219 | 0 | return libbpf_err_ptr(err); |
12220 | 0 | } |
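/* Illustrative sketch, not part of libbpf.c: the two input styles accepted
 * above. `prog` is an assumed loaded program; paths and symbols are examples.
 */
static void example_uprobe_multi(const struct bpf_program *prog)
{
	/* style 1: glob pattern, offsets resolved from the ELF symtab */
	LIBBPF_OPTS(bpf_uprobe_multi_opts, popts);
	struct bpf_link *l1 = bpf_program__attach_uprobe_multi(prog, -1,
			"/usr/lib/libc.so.6", "pthread_*", &popts);

	/* style 2: explicit symbols with per-probe cookies */
	const char *syms[] = { "malloc", "free" };
	__u64 cookies[] = { 1, 2 };
	LIBBPF_OPTS(bpf_uprobe_multi_opts, sopts,
		.syms = syms,
		.cookies = cookies,
		.cnt = 2,
	);
	struct bpf_link *l2 = bpf_program__attach_uprobe_multi(prog, -1,
			"/usr/lib/libc.so.6", NULL, &sopts);

	bpf_link__destroy(l1);	/* bpf_link__destroy(NULL) is a safe no-op */
	bpf_link__destroy(l2);
}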
12221 | | |
12222 | | LIBBPF_API struct bpf_link * |
12223 | | bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, |
12224 | | const char *binary_path, size_t func_offset, |
12225 | | const struct bpf_uprobe_opts *opts) |
12226 | 0 | { |
12227 | 0 | const char *archive_path = NULL, *archive_sep = NULL; |
12228 | 0 | char *legacy_probe = NULL; |
12229 | 0 | DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); |
12230 | 0 | enum probe_attach_mode attach_mode; |
12231 | 0 | char full_path[PATH_MAX]; |
12232 | 0 | struct bpf_link *link; |
12233 | 0 | size_t ref_ctr_off; |
12234 | 0 | int pfd, err; |
12235 | 0 | bool retprobe, legacy; |
12236 | 0 | const char *func_name; |
12237 | |
12238 | 0 | if (!OPTS_VALID(opts, bpf_uprobe_opts)) |
12239 | 0 | return libbpf_err_ptr(-EINVAL); |
12240 | | |
12241 | 0 | attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT); |
12242 | 0 | retprobe = OPTS_GET(opts, retprobe, false); |
12243 | 0 | ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0); |
12244 | 0 | pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); |
12245 | |
12246 | 0 | if (!binary_path) |
12247 | 0 | return libbpf_err_ptr(-EINVAL); |
12248 | | |
12249 | | /* Check if "binary_path" refers to an archive. */ |
12250 | 0 | archive_sep = strstr(binary_path, "!/"); |
12251 | 0 | if (archive_sep) { |
12252 | 0 | full_path[0] = '\0'; |
12253 | 0 | libbpf_strlcpy(full_path, binary_path, |
12254 | 0 | min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1))); |
12255 | 0 | archive_path = full_path; |
12256 | 0 | binary_path = archive_sep + 2; |
12257 | 0 | } else if (!strchr(binary_path, '/')) { |
12258 | 0 | err = resolve_full_path(binary_path, full_path, sizeof(full_path)); |
12259 | 0 | if (err) { |
12260 | 0 | pr_warn("prog '%s': failed to resolve full path for '%s': %s\n", |
12261 | 0 | prog->name, binary_path, errstr(err)); |
12262 | 0 | return libbpf_err_ptr(err); |
12263 | 0 | } |
12264 | 0 | binary_path = full_path; |
12265 | 0 | } |
12266 | 0 | func_name = OPTS_GET(opts, func_name, NULL); |
12267 | 0 | if (func_name) { |
12268 | 0 | long sym_off; |
12269 | |
12270 | 0 | if (archive_path) { |
12271 | 0 | sym_off = elf_find_func_offset_from_archive(archive_path, binary_path, |
12272 | 0 | func_name); |
12273 | 0 | binary_path = archive_path; |
12274 | 0 | } else { |
12275 | 0 | sym_off = elf_find_func_offset_from_file(binary_path, func_name); |
12276 | 0 | } |
12277 | 0 | if (sym_off < 0) |
12278 | 0 | return libbpf_err_ptr(sym_off); |
12279 | 0 | func_offset += sym_off; |
12280 | 0 | } |
12281 | | |
12282 | 0 | legacy = determine_uprobe_perf_type() < 0; |
12283 | 0 | switch (attach_mode) { |
12284 | 0 | case PROBE_ATTACH_MODE_LEGACY: |
12285 | 0 | legacy = true; |
12286 | 0 | pe_opts.force_ioctl_attach = true; |
12287 | 0 | break; |
12288 | 0 | case PROBE_ATTACH_MODE_PERF: |
12289 | 0 | if (legacy) |
12290 | 0 | return libbpf_err_ptr(-ENOTSUP); |
12291 | 0 | pe_opts.force_ioctl_attach = true; |
12292 | 0 | break; |
12293 | 0 | case PROBE_ATTACH_MODE_LINK: |
12294 | 0 | if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK)) |
12295 | 0 | return libbpf_err_ptr(-ENOTSUP); |
12296 | 0 | break; |
12297 | 0 | case PROBE_ATTACH_MODE_DEFAULT: |
12298 | 0 | break; |
12299 | 0 | default: |
12300 | 0 | return libbpf_err_ptr(-EINVAL); |
12301 | 0 | } |
12302 | | |
12303 | 0 | if (!legacy) { |
12304 | 0 | pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path, |
12305 | 0 | func_offset, pid, ref_ctr_off); |
12306 | 0 | } else { |
12307 | 0 | char probe_name[MAX_EVENT_NAME_LEN]; |
12308 | |
12309 | 0 | if (ref_ctr_off) |
12310 | 0 | return libbpf_err_ptr(-EINVAL); |
12311 | | |
12312 | 0 | gen_probe_legacy_event_name(probe_name, sizeof(probe_name), |
12313 | 0 | strrchr(binary_path, '/') ? : binary_path, |
12314 | 0 | func_offset); |
12315 | |
12316 | 0 | legacy_probe = strdup(probe_name); |
12317 | 0 | if (!legacy_probe) |
12318 | 0 | return libbpf_err_ptr(-ENOMEM); |
12319 | | |
12320 | 0 | pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe, |
12321 | 0 | binary_path, func_offset, pid); |
12322 | 0 | } |
12323 | 0 | if (pfd < 0) { |
12324 | 0 | err = -errno; |
12325 | 0 | pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n", |
12326 | 0 | prog->name, retprobe ? "uretprobe" : "uprobe", |
12327 | 0 | binary_path, func_offset, |
12328 | 0 | errstr(err)); |
12329 | 0 | goto err_out; |
12330 | 0 | } |
12331 | | |
12332 | 0 | link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); |
12333 | 0 | err = libbpf_get_error(link); |
12334 | 0 | if (err) { |
12335 | 0 | close(pfd); |
12336 | 0 | pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n", |
12337 | 0 | prog->name, retprobe ? "uretprobe" : "uprobe", |
12338 | 0 | binary_path, func_offset, |
12339 | 0 | errstr(err)); |
12340 | 0 | goto err_clean_legacy; |
12341 | 0 | } |
12342 | 0 | if (legacy) { |
12343 | 0 | struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); |
12344 | |
12345 | 0 | perf_link->legacy_probe_name = legacy_probe; |
12346 | 0 | perf_link->legacy_is_kprobe = false; |
12347 | 0 | perf_link->legacy_is_retprobe = retprobe; |
12348 | 0 | } |
12349 | 0 | return link; |
12350 | | |
12351 | 0 | err_clean_legacy: |
12352 | 0 | if (legacy) |
12353 | 0 | remove_uprobe_event_legacy(legacy_probe, retprobe); |
12354 | 0 | err_out: |
12355 | 0 | free(legacy_probe); |
12356 | 0 | return libbpf_err_ptr(err); |
12357 | 0 | } |
12358 | | |
12359 | | /* Format of u[ret]probe section definition supporting auto-attach: |
12360 | | * u[ret]probe/binary:function[+offset] |
12361 | | * |
12362 | | * binary can be an absolute/relative path or a filename; the latter is resolved to a |
12363 | | * full binary path via bpf_program__attach_uprobe_opts. |
12364 | | * |
12365 | | * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be |
12366 | | * specified (and auto-attach is not possible) or the above format is specified for |
12367 | | * auto-attach. |
12368 | | */ |
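/* Illustrative section names in the format above (paths/symbols are examples):
 *
 *   SEC("uprobe/libc.so.6:malloc")                // entry; path resolved via lib search
 *   SEC("uprobe//usr/lib/libc.so.6:malloc+0x10")  // entry at symbol + offset
 *   SEC("uretprobe//usr/lib/libc.so.6:free")      // return probe, absolute path
 */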
12369 | | static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
12370 | 0 | { |
12371 | 0 | DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts); |
12372 | 0 | char *probe_type = NULL, *binary_path = NULL, *func_name = NULL, *func_off; |
12373 | 0 | int n, c, ret = -EINVAL; |
12374 | 0 | long offset = 0; |
12375 | |
12376 | 0 | *link = NULL; |
12377 | |
12378 | 0 | n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]", |
12379 | 0 | &probe_type, &binary_path, &func_name); |
12380 | 0 | switch (n) { |
12381 | 0 | case 1: |
12382 | | /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */ |
12383 | 0 | ret = 0; |
12384 | 0 | break; |
12385 | 0 | case 2: |
12386 | 0 | pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n", |
12387 | 0 | prog->name, prog->sec_name); |
12388 | 0 | break; |
12389 | 0 | case 3: |
12390 | | /* check if user specified `+offset`; if so, it should be the
12391 | | * last part of the string, so make sure sscanf read to EOL
12392 | | */ |
12393 | 0 | func_off = strrchr(func_name, '+'); |
12394 | 0 | if (func_off) { |
12395 | 0 | n = sscanf(func_off, "+%li%n", &offset, &c); |
12396 | 0 | if (n == 1 && *(func_off + c) == '\0') |
12397 | 0 | func_off[0] = '\0'; |
12398 | 0 | else |
12399 | 0 | offset = 0; |
12400 | 0 | } |
12401 | 0 | opts.retprobe = strcmp(probe_type, "uretprobe") == 0 || |
12402 | 0 | strcmp(probe_type, "uretprobe.s") == 0; |
12403 | 0 | if (opts.retprobe && offset != 0) { |
12404 | 0 | pr_warn("prog '%s': uretprobes do not support offset specification\n", |
12405 | 0 | prog->name); |
12406 | 0 | break; |
12407 | 0 | } |
12408 | 0 | opts.func_name = func_name; |
12409 | 0 | *link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts); |
12410 | 0 | ret = libbpf_get_error(*link); |
12411 | 0 | break; |
12412 | 0 | default: |
12413 | 0 | pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name, |
12414 | 0 | prog->sec_name); |
12415 | 0 | break; |
12416 | 0 | } |
12417 | 0 | free(probe_type); |
12418 | 0 | free(binary_path); |
12419 | 0 | free(func_name); |
12420 | |
12421 | 0 | return ret; |
12422 | 0 | } |
12423 | | |
12424 | | struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog, |
12425 | | bool retprobe, pid_t pid, |
12426 | | const char *binary_path, |
12427 | | size_t func_offset) |
12428 | 0 | { |
12429 | 0 | DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe); |
12430 | |
12431 | 0 | return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts); |
12432 | 0 | } |
12433 | | |
12434 | | struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog, |
12435 | | pid_t pid, const char *binary_path, |
12436 | | const char *usdt_provider, const char *usdt_name, |
12437 | | const struct bpf_usdt_opts *opts) |
12438 | 0 | { |
12439 | 0 | char resolved_path[512]; |
12440 | 0 | struct bpf_object *obj = prog->obj; |
12441 | 0 | struct bpf_link *link; |
12442 | 0 | __u64 usdt_cookie; |
12443 | 0 | int err; |
12444 | |
12445 | 0 | if (!OPTS_VALID(opts, bpf_uprobe_opts)) |
12446 | 0 | return libbpf_err_ptr(-EINVAL); |
12447 | | |
12448 | 0 | if (bpf_program__fd(prog) < 0) { |
12449 | 0 | pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", |
12450 | 0 | prog->name); |
12451 | 0 | return libbpf_err_ptr(-EINVAL); |
12452 | 0 | } |
12453 | | |
12454 | 0 | if (!binary_path) |
12455 | 0 | return libbpf_err_ptr(-EINVAL); |
12456 | | |
12457 | 0 | if (!strchr(binary_path, '/')) { |
12458 | 0 | err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path)); |
12459 | 0 | if (err) { |
12460 | 0 | pr_warn("prog '%s': failed to resolve full path for '%s': %s\n", |
12461 | 0 | prog->name, binary_path, errstr(err)); |
12462 | 0 | return libbpf_err_ptr(err); |
12463 | 0 | } |
12464 | 0 | binary_path = resolved_path; |
12465 | 0 | } |
12466 | | |
12467 | | /* USDT manager is instantiated lazily on first USDT attach. It will |
12468 | | * be destroyed together with BPF object in bpf_object__close(). |
12469 | | */ |
12470 | 0 | if (IS_ERR(obj->usdt_man)) |
12471 | 0 | return libbpf_ptr(obj->usdt_man); |
12472 | 0 | if (!obj->usdt_man) { |
12473 | 0 | obj->usdt_man = usdt_manager_new(obj); |
12474 | 0 | if (IS_ERR(obj->usdt_man)) |
12475 | 0 | return libbpf_ptr(obj->usdt_man); |
12476 | 0 | } |
12477 | | |
12478 | 0 | usdt_cookie = OPTS_GET(opts, usdt_cookie, 0); |
12479 | 0 | link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path, |
12480 | 0 | usdt_provider, usdt_name, usdt_cookie); |
12481 | 0 | err = libbpf_get_error(link); |
12482 | 0 | if (err) |
12483 | 0 | return libbpf_err_ptr(err); |
12484 | 0 | return link; |
12485 | 0 | } |
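/* Illustrative sketch, not part of libbpf.c: USDT attach with a cookie;
 * the binary, provider, and probe names are assumptions of the example.
 */
static struct bpf_link *example_attach_usdt(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_usdt_opts, opts, .usdt_cookie = 0xcafe);

	return bpf_program__attach_usdt(prog, -1 /* any process */,
					"/usr/sbin/mysqld", "mysql",
					"query__start", &opts);
}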
12486 | | |
12487 | | static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
12488 | 0 | { |
12489 | 0 | char *path = NULL, *provider = NULL, *name = NULL; |
12490 | 0 | const char *sec_name; |
12491 | 0 | int n, err; |
12492 | |
12493 | 0 | sec_name = bpf_program__section_name(prog); |
12494 | 0 | if (strcmp(sec_name, "usdt") == 0) { |
12495 | | /* no auto-attach for just SEC("usdt") */ |
12496 | 0 | *link = NULL; |
12497 | 0 | return 0; |
12498 | 0 | } |
12499 | | |
12500 | 0 | n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name); |
12501 | 0 | if (n != 3) { |
12502 | 0 | pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n", |
12503 | 0 | sec_name); |
12504 | 0 | err = -EINVAL; |
12505 | 0 | } else { |
12506 | 0 | *link = bpf_program__attach_usdt(prog, -1 /* any process */, path, |
12507 | 0 | provider, name, NULL); |
12508 | 0 | err = libbpf_get_error(*link); |
12509 | 0 | } |
12510 | 0 | free(path); |
12511 | 0 | free(provider); |
12512 | 0 | free(name); |
12513 | 0 | return err; |
12514 | 0 | } |
12515 | | |
12516 | | static int determine_tracepoint_id(const char *tp_category, |
12517 | | const char *tp_name) |
12518 | 0 | { |
12519 | 0 | char file[PATH_MAX]; |
12520 | 0 | int ret; |
12521 | |
12522 | 0 | ret = snprintf(file, sizeof(file), "%s/events/%s/%s/id", |
12523 | 0 | tracefs_path(), tp_category, tp_name); |
12524 | 0 | if (ret < 0) |
12525 | 0 | return -errno; |
12526 | 0 | if (ret >= sizeof(file)) { |
12527 | 0 | pr_debug("tracepoint %s/%s path is too long\n", |
12528 | 0 | tp_category, tp_name); |
12529 | 0 | return -E2BIG; |
12530 | 0 | } |
12531 | 0 | return parse_uint_from_file(file, "%d\n"); |
12532 | 0 | } |
12533 | | |
12534 | | static int perf_event_open_tracepoint(const char *tp_category, |
12535 | | const char *tp_name) |
12536 | 0 | { |
12537 | 0 | const size_t attr_sz = sizeof(struct perf_event_attr); |
12538 | 0 | struct perf_event_attr attr; |
12539 | 0 | int tp_id, pfd, err; |
12540 | |
12541 | 0 | tp_id = determine_tracepoint_id(tp_category, tp_name); |
12542 | 0 | if (tp_id < 0) { |
12543 | 0 | pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n", |
12544 | 0 | tp_category, tp_name, |
12545 | 0 | errstr(tp_id)); |
12546 | 0 | return tp_id; |
12547 | 0 | } |
12548 | | |
12549 | 0 | memset(&attr, 0, attr_sz); |
12550 | 0 | attr.type = PERF_TYPE_TRACEPOINT; |
12551 | 0 | attr.size = attr_sz; |
12552 | 0 | attr.config = tp_id; |
12553 | |
12554 | 0 | pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */, |
12555 | 0 | -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); |
12556 | 0 | if (pfd < 0) { |
12557 | 0 | err = -errno; |
12558 | 0 | pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n", |
12559 | 0 | tp_category, tp_name, |
12560 | 0 | errstr(err)); |
12561 | 0 | return err; |
12562 | 0 | } |
12563 | 0 | return pfd; |
12564 | 0 | } |
12565 | | |
12566 | | struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog, |
12567 | | const char *tp_category, |
12568 | | const char *tp_name, |
12569 | | const struct bpf_tracepoint_opts *opts) |
12570 | 0 | { |
12571 | 0 | DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); |
12572 | 0 | struct bpf_link *link; |
12573 | 0 | int pfd, err; |
12574 | |
12575 | 0 | if (!OPTS_VALID(opts, bpf_tracepoint_opts)) |
12576 | 0 | return libbpf_err_ptr(-EINVAL); |
12577 | | |
12578 | 0 | pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); |
12579 | |
12580 | 0 | pfd = perf_event_open_tracepoint(tp_category, tp_name); |
12581 | 0 | if (pfd < 0) { |
12582 | 0 | pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n", |
12583 | 0 | prog->name, tp_category, tp_name, |
12584 | 0 | errstr(pfd)); |
12585 | 0 | return libbpf_err_ptr(pfd); |
12586 | 0 | } |
12587 | 0 | link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); |
12588 | 0 | err = libbpf_get_error(link); |
12589 | 0 | if (err) { |
12590 | 0 | close(pfd); |
12591 | 0 | pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n", |
12592 | 0 | prog->name, tp_category, tp_name, |
12593 | 0 | errstr(err)); |
12594 | 0 | return libbpf_err_ptr(err); |
12595 | 0 | } |
12596 | 0 | return link; |
12597 | 0 | } |
12598 | | |
12599 | | struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog, |
12600 | | const char *tp_category, |
12601 | | const char *tp_name) |
12602 | 0 | { |
12603 | 0 | return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL); |
12604 | 0 | } |
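/* Illustrative sketch: the explicit equivalent of
 * SEC("tp/syscalls/sys_enter_openat") auto-attach, which attach_tp() below
 * derives from the section name.
 */
static struct bpf_link *example_attach_tp(const struct bpf_program *prog)
{
	return bpf_program__attach_tracepoint(prog, "syscalls",
					      "sys_enter_openat");
}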
12605 | | |
12606 | | static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
12607 | 0 | { |
12608 | 0 | char *sec_name, *tp_cat, *tp_name; |
12609 | |
12610 | 0 | *link = NULL; |
12611 | | |
12612 | | /* no auto-attach for SEC("tp") or SEC("tracepoint") */ |
12613 | 0 | if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0) |
12614 | 0 | return 0; |
12615 | | |
12616 | 0 | sec_name = strdup(prog->sec_name); |
12617 | 0 | if (!sec_name) |
12618 | 0 | return -ENOMEM; |
12619 | | |
12620 | | /* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */ |
12621 | 0 | if (str_has_pfx(prog->sec_name, "tp/")) |
12622 | 0 | tp_cat = sec_name + sizeof("tp/") - 1; |
12623 | 0 | else |
12624 | 0 | tp_cat = sec_name + sizeof("tracepoint/") - 1; |
12625 | 0 | tp_name = strchr(tp_cat, '/'); |
12626 | 0 | if (!tp_name) { |
12627 | 0 | free(sec_name); |
12628 | 0 | return -EINVAL; |
12629 | 0 | } |
12630 | 0 | *tp_name = '\0'; |
12631 | 0 | tp_name++; |
12632 | |
12633 | 0 | *link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name); |
12634 | 0 | free(sec_name); |
12635 | 0 | return libbpf_get_error(*link); |
12636 | 0 | } |
12637 | | |
12638 | | struct bpf_link * |
12639 | | bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog, |
12640 | | const char *tp_name, |
12641 | | struct bpf_raw_tracepoint_opts *opts) |
12642 | 0 | { |
12643 | 0 | LIBBPF_OPTS(bpf_raw_tp_opts, raw_opts); |
12644 | 0 | struct bpf_link *link; |
12645 | 0 | int prog_fd, pfd; |
12646 | |
12647 | 0 | if (!OPTS_VALID(opts, bpf_raw_tracepoint_opts)) |
12648 | 0 | return libbpf_err_ptr(-EINVAL); |
12649 | | |
12650 | 0 | prog_fd = bpf_program__fd(prog); |
12651 | 0 | if (prog_fd < 0) { |
12652 | 0 | pr_warn("prog '%s': can't attach before loaded\n", prog->name); |
12653 | 0 | return libbpf_err_ptr(-EINVAL); |
12654 | 0 | } |
12655 | | |
12656 | 0 | link = calloc(1, sizeof(*link)); |
12657 | 0 | if (!link) |
12658 | 0 | return libbpf_err_ptr(-ENOMEM); |
12659 | 0 | link->detach = &bpf_link__detach_fd; |
12660 | |
12661 | 0 | raw_opts.tp_name = tp_name; |
12662 | 0 | raw_opts.cookie = OPTS_GET(opts, cookie, 0); |
12663 | 0 | pfd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_opts); |
12664 | 0 | if (pfd < 0) { |
12665 | 0 | pfd = -errno; |
12666 | 0 | free(link); |
12667 | 0 | pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n", |
12668 | 0 | prog->name, tp_name, errstr(pfd)); |
12669 | 0 | return libbpf_err_ptr(pfd); |
12670 | 0 | } |
12671 | 0 | link->fd = pfd; |
12672 | 0 | return link; |
12673 | 0 | } |
12674 | | |
12675 | | struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog, |
12676 | | const char *tp_name) |
12677 | 0 | { |
12678 | 0 | return bpf_program__attach_raw_tracepoint_opts(prog, tp_name, NULL); |
12679 | 0 | } |
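/* Illustrative sketch: raw tracepoints take only an event name; there is no
 * category component, unlike the perf-based tracepoints above.
 */
static struct bpf_link *example_attach_raw_tp(const struct bpf_program *prog)
{
	/* explicit equivalent of SEC("raw_tp/sched_switch") auto-attach */
	return bpf_program__attach_raw_tracepoint(prog, "sched_switch");
}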
12680 | | |
12681 | | static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
12682 | 0 | { |
12683 | 0 | static const char *const prefixes[] = { |
12684 | 0 | "raw_tp", |
12685 | 0 | "raw_tracepoint", |
12686 | 0 | "raw_tp.w", |
12687 | 0 | "raw_tracepoint.w", |
12688 | 0 | }; |
12689 | 0 | size_t i; |
12690 | 0 | const char *tp_name = NULL; |
12691 | |
12692 | 0 | *link = NULL; |
12693 | |
12694 | 0 | for (i = 0; i < ARRAY_SIZE(prefixes); i++) { |
12695 | 0 | size_t pfx_len; |
12696 | |
12697 | 0 | if (!str_has_pfx(prog->sec_name, prefixes[i])) |
12698 | 0 | continue; |
12699 | | |
12700 | 0 | pfx_len = strlen(prefixes[i]); |
12701 | | /* no auto-attach case, e.g., bare SEC("raw_tp") with no tracepoint name */
12702 | 0 | if (prog->sec_name[pfx_len] == '\0') |
12703 | 0 | return 0; |
12704 | | |
12705 | 0 | if (prog->sec_name[pfx_len] != '/') |
12706 | 0 | continue; |
12707 | | |
12708 | 0 | tp_name = prog->sec_name + pfx_len + 1; |
12709 | 0 | break; |
12710 | 0 | } |
12711 | | |
12712 | 0 | if (!tp_name) { |
12713 | 0 | pr_warn("prog '%s': invalid section name '%s'\n", |
12714 | 0 | prog->name, prog->sec_name); |
12715 | 0 | return -EINVAL; |
12716 | 0 | } |
12717 | | |
12718 | 0 | *link = bpf_program__attach_raw_tracepoint(prog, tp_name); |
12719 | 0 | return libbpf_get_error(*link); |
12720 | 0 | } |
12721 | | |
12722 | | /* Common logic for all BPF program types that attach to a btf_id */ |
12723 | | static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog, |
12724 | | const struct bpf_trace_opts *opts) |
12725 | 0 | { |
12726 | 0 | LIBBPF_OPTS(bpf_link_create_opts, link_opts); |
12727 | 0 | struct bpf_link *link; |
12728 | 0 | int prog_fd, pfd; |
12729 | |
12730 | 0 | if (!OPTS_VALID(opts, bpf_trace_opts)) |
12731 | 0 | return libbpf_err_ptr(-EINVAL); |
12732 | | |
12733 | 0 | prog_fd = bpf_program__fd(prog); |
12734 | 0 | if (prog_fd < 0) { |
12735 | 0 | pr_warn("prog '%s': can't attach before loaded\n", prog->name); |
12736 | 0 | return libbpf_err_ptr(-EINVAL); |
12737 | 0 | } |
12738 | | |
12739 | 0 | link = calloc(1, sizeof(*link)); |
12740 | 0 | if (!link) |
12741 | 0 | return libbpf_err_ptr(-ENOMEM); |
12742 | 0 | link->detach = &bpf_link__detach_fd; |
12743 | | |
12744 | | /* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */ |
12745 | 0 | link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0); |
12746 | 0 | pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts); |
12747 | 0 | if (pfd < 0) { |
12748 | 0 | pfd = -errno; |
12749 | 0 | free(link); |
12750 | 0 | pr_warn("prog '%s': failed to attach: %s\n", |
12751 | 0 | prog->name, errstr(pfd)); |
12752 | 0 | return libbpf_err_ptr(pfd); |
12753 | 0 | } |
12754 | 0 | link->fd = pfd; |
12755 | 0 | return link; |
12756 | 0 | } |
12757 | | |
12758 | | struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog) |
12759 | 0 | { |
12760 | 0 | return bpf_program__attach_btf_id(prog, NULL); |
12761 | 0 | } |
12762 | | |
12763 | | struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog, |
12764 | | const struct bpf_trace_opts *opts) |
12765 | 0 | { |
12766 | 0 | return bpf_program__attach_btf_id(prog, opts); |
12767 | 0 | } |
12768 | | |
12769 | | struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog) |
12770 | 0 | { |
12771 | 0 | return bpf_program__attach_btf_id(prog, NULL); |
12772 | 0 | } |
12773 | | |
12774 | | static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
12775 | 0 | { |
12776 | 0 | *link = bpf_program__attach_trace(prog); |
12777 | 0 | return libbpf_get_error(*link); |
12778 | 0 | } |
12779 | | |
12780 | | static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
12781 | 0 | { |
12782 | 0 | *link = bpf_program__attach_lsm(prog); |
12783 | 0 | return libbpf_get_error(*link); |
12784 | 0 | } |
12785 | | |
12786 | | static struct bpf_link * |
12787 | | bpf_program_attach_fd(const struct bpf_program *prog, |
12788 | | int target_fd, const char *target_name, |
12789 | | const struct bpf_link_create_opts *opts) |
12790 | 0 | { |
12791 | 0 | enum bpf_attach_type attach_type; |
12792 | 0 | struct bpf_link *link; |
12793 | 0 | int prog_fd, link_fd; |
12794 | |
12795 | 0 | prog_fd = bpf_program__fd(prog); |
12796 | 0 | if (prog_fd < 0) { |
12797 | 0 | pr_warn("prog '%s': can't attach before loaded\n", prog->name); |
12798 | 0 | return libbpf_err_ptr(-EINVAL); |
12799 | 0 | } |
12800 | | |
12801 | 0 | link = calloc(1, sizeof(*link)); |
12802 | 0 | if (!link) |
12803 | 0 | return libbpf_err_ptr(-ENOMEM); |
12804 | 0 | link->detach = &bpf_link__detach_fd; |
12805 | |
12806 | 0 | attach_type = bpf_program__expected_attach_type(prog); |
12807 | 0 | link_fd = bpf_link_create(prog_fd, target_fd, attach_type, opts); |
12808 | 0 | if (link_fd < 0) { |
12809 | 0 | link_fd = -errno; |
12810 | 0 | free(link); |
12811 | 0 | pr_warn("prog '%s': failed to attach to %s: %s\n", |
12812 | 0 | prog->name, target_name, |
12813 | 0 | errstr(link_fd)); |
12814 | 0 | return libbpf_err_ptr(link_fd); |
12815 | 0 | } |
12816 | 0 | link->fd = link_fd; |
12817 | 0 | return link; |
12818 | 0 | } |
12819 | | |
12820 | | struct bpf_link * |
12821 | | bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd) |
12822 | 0 | { |
12823 | 0 | return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", NULL); |
12824 | 0 | } |
12825 | | |
12826 | | struct bpf_link * |
12827 | | bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd) |
12828 | 0 | { |
12829 | 0 | return bpf_program_attach_fd(prog, netns_fd, "netns", NULL); |
12830 | 0 | } |
12831 | | |
12832 | | struct bpf_link * |
12833 | | bpf_program__attach_sockmap(const struct bpf_program *prog, int map_fd) |
12834 | 0 | { |
12835 | 0 | return bpf_program_attach_fd(prog, map_fd, "sockmap", NULL); |
12836 | 0 | } |
12837 | | |
12838 | | struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex) |
12839 | 0 | { |
12840 | | /* target_fd/target_ifindex use the same field in LINK_CREATE */ |
12841 | 0 | return bpf_program_attach_fd(prog, ifindex, "xdp", NULL); |
12842 | 0 | } |
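/* Illustrative sketch: XDP attaches by interface index; translating a name
 * to an index (e.g. if_nametoindex() from <net/if.h>) is up to the caller.
 */
static struct bpf_link *example_attach_xdp(const struct bpf_program *prog)
{
	return bpf_program__attach_xdp(prog, 2 /* e.g. if_nametoindex("eth0") */);
}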
12843 | | |
12844 | | struct bpf_link * |
12845 | | bpf_program__attach_cgroup_opts(const struct bpf_program *prog, int cgroup_fd, |
12846 | | const struct bpf_cgroup_opts *opts) |
12847 | 0 | { |
12848 | 0 | LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); |
12849 | 0 | __u32 relative_id; |
12850 | 0 | int relative_fd; |
12851 | |
12852 | 0 | if (!OPTS_VALID(opts, bpf_cgroup_opts)) |
12853 | 0 | return libbpf_err_ptr(-EINVAL); |
12854 | | |
12855 | 0 | relative_id = OPTS_GET(opts, relative_id, 0); |
12856 | 0 | relative_fd = OPTS_GET(opts, relative_fd, 0); |
12857 | |
12858 | 0 | if (relative_fd && relative_id) { |
12859 | 0 | pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n", |
12860 | 0 | prog->name); |
12861 | 0 | return libbpf_err_ptr(-EINVAL); |
12862 | 0 | } |
12863 | | |
12864 | 0 | link_create_opts.cgroup.expected_revision = OPTS_GET(opts, expected_revision, 0); |
12865 | 0 | link_create_opts.cgroup.relative_fd = relative_fd; |
12866 | 0 | link_create_opts.cgroup.relative_id = relative_id; |
12867 | 0 | link_create_opts.flags = OPTS_GET(opts, flags, 0); |
12868 | |
12869 | 0 | return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", &link_create_opts); |
12870 | 0 | } |
12871 | | |
12872 | | struct bpf_link * |
12873 | | bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex, |
12874 | | const struct bpf_tcx_opts *opts) |
12875 | 0 | { |
12876 | 0 | LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); |
12877 | 0 | __u32 relative_id; |
12878 | 0 | int relative_fd; |
12879 | |
12880 | 0 | if (!OPTS_VALID(opts, bpf_tcx_opts)) |
12881 | 0 | return libbpf_err_ptr(-EINVAL); |
12882 | | |
12883 | 0 | relative_id = OPTS_GET(opts, relative_id, 0); |
12884 | 0 | relative_fd = OPTS_GET(opts, relative_fd, 0); |
12885 | | |
12886 | | /* validate we don't have unexpected combinations of non-zero fields */ |
12887 | 0 | if (!ifindex) { |
12888 | 0 | pr_warn("prog '%s': target netdevice ifindex cannot be zero\n", |
12889 | 0 | prog->name); |
12890 | 0 | return libbpf_err_ptr(-EINVAL); |
12891 | 0 | } |
12892 | 0 | if (relative_fd && relative_id) { |
12893 | 0 | pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n", |
12894 | 0 | prog->name); |
12895 | 0 | return libbpf_err_ptr(-EINVAL); |
12896 | 0 | } |
12897 | | |
12898 | 0 | link_create_opts.tcx.expected_revision = OPTS_GET(opts, expected_revision, 0); |
12899 | 0 | link_create_opts.tcx.relative_fd = relative_fd; |
12900 | 0 | link_create_opts.tcx.relative_id = relative_id; |
12901 | 0 | link_create_opts.flags = OPTS_GET(opts, flags, 0); |
12902 | | |
12903 | | /* target_fd/target_ifindex use the same field in LINK_CREATE */ |
12904 | 0 | return bpf_program_attach_fd(prog, ifindex, "tcx", &link_create_opts); |
12905 | 0 | } |
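/* Illustrative sketch: tcx attach ordered relative to an existing program;
 * BPF_F_BEFORE and the anchor fd are assumptions of the example.
 */
static struct bpf_link *example_attach_tcx(const struct bpf_program *prog,
					   int ifindex, int anchor_prog_fd)
{
	LIBBPF_OPTS(bpf_tcx_opts, opts,
		.flags = BPF_F_BEFORE,		/* insert before the anchor */
		.relative_fd = anchor_prog_fd,
	);

	return bpf_program__attach_tcx(prog, ifindex, &opts);
}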
12906 | | |
12907 | | struct bpf_link * |
12908 | | bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex, |
12909 | | const struct bpf_netkit_opts *opts) |
12910 | 0 | { |
12911 | 0 | LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); |
12912 | 0 | __u32 relative_id; |
12913 | 0 | int relative_fd; |
12914 | |
12915 | 0 | if (!OPTS_VALID(opts, bpf_netkit_opts)) |
12916 | 0 | return libbpf_err_ptr(-EINVAL); |
12917 | | |
12918 | 0 | relative_id = OPTS_GET(opts, relative_id, 0); |
12919 | 0 | relative_fd = OPTS_GET(opts, relative_fd, 0); |
12920 | | |
12921 | | /* validate we don't have unexpected combinations of non-zero fields */ |
12922 | 0 | if (!ifindex) { |
12923 | 0 | pr_warn("prog '%s': target netdevice ifindex cannot be zero\n", |
12924 | 0 | prog->name); |
12925 | 0 | return libbpf_err_ptr(-EINVAL); |
12926 | 0 | } |
12927 | 0 | if (relative_fd && relative_id) { |
12928 | 0 | pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n", |
12929 | 0 | prog->name); |
12930 | 0 | return libbpf_err_ptr(-EINVAL); |
12931 | 0 | } |
12932 | | |
12933 | 0 | link_create_opts.netkit.expected_revision = OPTS_GET(opts, expected_revision, 0); |
12934 | 0 | link_create_opts.netkit.relative_fd = relative_fd; |
12935 | 0 | link_create_opts.netkit.relative_id = relative_id; |
12936 | 0 | link_create_opts.flags = OPTS_GET(opts, flags, 0); |
12937 | |
12938 | 0 | return bpf_program_attach_fd(prog, ifindex, "netkit", &link_create_opts); |
12939 | 0 | } |
12940 | | |
12941 | | struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog, |
12942 | | int target_fd, |
12943 | | const char *attach_func_name) |
12944 | 0 | { |
12945 | 0 | int btf_id; |
12946 | |
12947 | 0 | if (!!target_fd != !!attach_func_name) { |
12948 | 0 | pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n", |
12949 | 0 | prog->name); |
12950 | 0 | return libbpf_err_ptr(-EINVAL); |
12951 | 0 | } |
12952 | | |
12953 | 0 | if (prog->type != BPF_PROG_TYPE_EXT) { |
12954 | 0 | pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n", |
12955 | 0 | prog->name); |
12956 | 0 | return libbpf_err_ptr(-EINVAL); |
12957 | 0 | } |
12958 | | |
12959 | 0 | if (target_fd) { |
12960 | 0 | LIBBPF_OPTS(bpf_link_create_opts, target_opts); |
12961 | |
12962 | 0 | btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd, prog->obj->token_fd); |
12963 | 0 | if (btf_id < 0) |
12964 | 0 | return libbpf_err_ptr(btf_id); |
12965 | | |
12966 | 0 | target_opts.target_btf_id = btf_id; |
12967 | |
12968 | 0 | return bpf_program_attach_fd(prog, target_fd, "freplace", |
12969 | 0 | &target_opts); |
12970 | 0 | } else { |
12971 | | /* no target, so use raw_tracepoint_open for compatibility |
12972 | | * with old kernels |
12973 | | */ |
12974 | 0 | return bpf_program__attach_trace(prog); |
12975 | 0 | } |
12976 | 0 | } |
12977 | | |
12978 | | struct bpf_link * |
12979 | | bpf_program__attach_iter(const struct bpf_program *prog, |
12980 | | const struct bpf_iter_attach_opts *opts) |
12981 | 0 | { |
12982 | 0 | DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); |
12983 | 0 | struct bpf_link *link; |
12984 | 0 | int prog_fd, link_fd; |
12985 | 0 | __u32 target_fd = 0; |
12986 | |
12987 | 0 | if (!OPTS_VALID(opts, bpf_iter_attach_opts)) |
12988 | 0 | return libbpf_err_ptr(-EINVAL); |
12989 | | |
12990 | 0 | link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0); |
12991 | 0 | link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0); |
12992 | |
12993 | 0 | prog_fd = bpf_program__fd(prog); |
12994 | 0 | if (prog_fd < 0) { |
12995 | 0 | pr_warn("prog '%s': can't attach before loaded\n", prog->name); |
12996 | 0 | return libbpf_err_ptr(-EINVAL); |
12997 | 0 | } |
12998 | | |
12999 | 0 | link = calloc(1, sizeof(*link)); |
13000 | 0 | if (!link) |
13001 | 0 | return libbpf_err_ptr(-ENOMEM); |
13002 | 0 | link->detach = &bpf_link__detach_fd; |
13003 | |
13004 | 0 | link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER, |
13005 | 0 | &link_create_opts); |
13006 | 0 | if (link_fd < 0) { |
13007 | 0 | link_fd = -errno; |
13008 | 0 | free(link); |
13009 | 0 | pr_warn("prog '%s': failed to attach to iterator: %s\n", |
13010 | 0 | prog->name, errstr(link_fd)); |
13011 | 0 | return libbpf_err_ptr(link_fd); |
13012 | 0 | } |
13013 | 0 | link->fd = link_fd; |
13014 | 0 | return link; |
13015 | 0 | } |
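/* Illustrative sketch, not part of libbpf.c: creating an iterator instance
 * from the link and reading its seq_file output.
 */
static int example_read_iter(const struct bpf_program *prog)
{
	struct bpf_link *link = bpf_program__attach_iter(prog, NULL);
	char buf[4096];
	int iter_fd;

	if (!link)
		return -errno;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd >= 0) {
		while (read(iter_fd, buf, sizeof(buf)) > 0)
			; /* each read returns formatted iterator output */
		close(iter_fd);
	}
	bpf_link__destroy(link);
	return iter_fd < 0 ? iter_fd : 0;
}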
13016 | | |
13017 | | static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
13018 | 0 | { |
13019 | 0 | *link = bpf_program__attach_iter(prog, NULL); |
13020 | 0 | return libbpf_get_error(*link); |
13021 | 0 | } |
13022 | | |
13023 | | struct bpf_link *bpf_program__attach_netfilter(const struct bpf_program *prog, |
13024 | | const struct bpf_netfilter_opts *opts) |
13025 | 0 | { |
13026 | 0 | LIBBPF_OPTS(bpf_link_create_opts, lopts); |
13027 | 0 | struct bpf_link *link; |
13028 | 0 | int prog_fd, link_fd; |
13029 | |
13030 | 0 | if (!OPTS_VALID(opts, bpf_netfilter_opts)) |
13031 | 0 | return libbpf_err_ptr(-EINVAL); |
13032 | | |
13033 | 0 | prog_fd = bpf_program__fd(prog); |
13034 | 0 | if (prog_fd < 0) { |
13035 | 0 | pr_warn("prog '%s': can't attach before loaded\n", prog->name); |
13036 | 0 | return libbpf_err_ptr(-EINVAL); |
13037 | 0 | } |
13038 | | |
13039 | 0 | link = calloc(1, sizeof(*link)); |
13040 | 0 | if (!link) |
13041 | 0 | return libbpf_err_ptr(-ENOMEM); |
13042 | | |
13043 | 0 | link->detach = &bpf_link__detach_fd; |
13044 | |
13045 | 0 | lopts.netfilter.pf = OPTS_GET(opts, pf, 0); |
13046 | 0 | lopts.netfilter.hooknum = OPTS_GET(opts, hooknum, 0); |
13047 | 0 | lopts.netfilter.priority = OPTS_GET(opts, priority, 0); |
13048 | 0 | lopts.netfilter.flags = OPTS_GET(opts, flags, 0); |
13049 | |
13050 | 0 | link_fd = bpf_link_create(prog_fd, 0, BPF_NETFILTER, &lopts); |
13051 | 0 | if (link_fd < 0) { |
13052 | 0 | link_fd = -errno; |
13053 | 0 | free(link); |
13054 | 0 | pr_warn("prog '%s': failed to attach to netfilter: %s\n", |
13055 | 0 | prog->name, errstr(link_fd)); |
13056 | 0 | return libbpf_err_ptr(link_fd); |
13057 | 0 | } |
13058 | 0 | link->fd = link_fd; |
13059 | |
13060 | 0 | return link; |
13061 | 0 | } |
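/* Illustrative sketch: netfilter attach at the IPv4 LOCAL_IN hook; the
 * numeric values stand in for NFPROTO_IPV4 and NF_INET_LOCAL_IN from
 * <linux/netfilter.h>, which this file does not include.
 */
static struct bpf_link *example_attach_nf(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_netfilter_opts, opts,
		.pf = 2,	/* NFPROTO_IPV4 */
		.hooknum = 1,	/* NF_INET_LOCAL_IN */
		.priority = -128,
	);

	return bpf_program__attach_netfilter(prog, &opts);
}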
13062 | | |
13063 | | struct bpf_link *bpf_program__attach(const struct bpf_program *prog) |
13064 | 0 | { |
13065 | 0 | struct bpf_link *link = NULL; |
13066 | 0 | int err; |
13067 | |
13068 | 0 | if (!prog->sec_def || !prog->sec_def->prog_attach_fn) |
13069 | 0 | return libbpf_err_ptr(-EOPNOTSUPP); |
13070 | | |
13071 | 0 | if (bpf_program__fd(prog) < 0) { |
13072 | 0 | pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", |
13073 | 0 | prog->name); |
13074 | 0 | return libbpf_err_ptr(-EINVAL); |
13075 | 0 | } |
13076 | | |
13077 | 0 | err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link); |
13078 | 0 | if (err) |
13079 | 0 | return libbpf_err_ptr(err); |
13080 | | |
13081 | | /* When calling bpf_program__attach() explicitly, auto-attach support |
13082 | | * is expected to work, so NULL returned link is considered an error. |
13083 | | * This is different for skeleton's attach, see comment in |
13084 | | * bpf_object__attach_skeleton(). |
13085 | | */ |
13086 | 0 | if (!link) |
13087 | 0 | return libbpf_err_ptr(-EOPNOTSUPP); |
13088 | | |
13089 | 0 | return link; |
13090 | 0 | } |
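/* Illustrative sketch: the generic entry point above dispatches on the
 * program's SEC() definition, so callers need not know the program type.
 */
static int example_generic_attach(const struct bpf_program *prog)
{
	struct bpf_link *link = bpf_program__attach(prog);

	if (!link)	/* errno carries the error set via libbpf_err_ptr() */
		return -errno;
	bpf_link__destroy(link);
	return 0;
}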
13091 | | |
13092 | | struct bpf_link_struct_ops { |
13093 | | struct bpf_link link; |
13094 | | int map_fd; |
13095 | | }; |
13096 | | |
13097 | | static int bpf_link__detach_struct_ops(struct bpf_link *link) |
13098 | 0 | { |
13099 | 0 | struct bpf_link_struct_ops *st_link; |
13100 | 0 | __u32 zero = 0; |
13101 | |
13102 | 0 | st_link = container_of(link, struct bpf_link_struct_ops, link); |
13103 | |
13104 | 0 | if (st_link->map_fd < 0) |
13105 | | /* w/o a real link */ |
13106 | 0 | return bpf_map_delete_elem(link->fd, &zero); |
13107 | | |
13108 | 0 | return close(link->fd); |
13109 | 0 | } |
13110 | | |
13111 | | struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map) |
13112 | 0 | { |
13113 | 0 | struct bpf_link_struct_ops *link; |
13114 | 0 | __u32 zero = 0; |
13115 | 0 | int err, fd; |
13116 | |
13117 | 0 | if (!bpf_map__is_struct_ops(map)) { |
13118 | 0 | pr_warn("map '%s': can't attach non-struct_ops map\n", map->name); |
13119 | 0 | return libbpf_err_ptr(-EINVAL); |
13120 | 0 | } |
13121 | | |
13122 | 0 | if (map->fd < 0) { |
13123 | 0 | pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name); |
13124 | 0 | return libbpf_err_ptr(-EINVAL); |
13125 | 0 | } |
13126 | | |
13127 | 0 | link = calloc(1, sizeof(*link)); |
13128 | 0 | if (!link) |
13129 | 0 | return libbpf_err_ptr(-EINVAL); |
13130 | | |
13131 | | /* kern_vdata should be prepared during the loading phase. */ |
13132 | 0 | err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0); |
13133 | | /* It can be EBUSY if the map has been used to create or |
13134 | | * update a link before. We don't allow updating the value of |
13135 | | * a struct_ops once it is set. That ensures that the value
13136 | | * never changes, so it is safe to skip EBUSY.
13137 | | */ |
13138 | 0 | if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) { |
13139 | 0 | free(link); |
13140 | 0 | return libbpf_err_ptr(err); |
13141 | 0 | } |
13142 | | |
13143 | 0 | link->link.detach = bpf_link__detach_struct_ops; |
13144 | |
13145 | 0 | if (!(map->def.map_flags & BPF_F_LINK)) { |
13146 | | /* w/o a real link */ |
13147 | 0 | link->link.fd = map->fd; |
13148 | 0 | link->map_fd = -1; |
13149 | 0 | return &link->link; |
13150 | 0 | } |
13151 | | |
13152 | 0 | fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL); |
13153 | 0 | if (fd < 0) { |
13154 | 0 | free(link); |
13155 | 0 | return libbpf_err_ptr(fd); |
13156 | 0 | } |
13157 | | |
13158 | 0 | link->link.fd = fd; |
13159 | 0 | link->map_fd = map->fd; |
13160 | |
13161 | 0 | return &link->link; |
13162 | 0 | } |
13163 | | |
13164 | | /* |
13165 | | * Swap the backing struct_ops map of a link with a new struct_ops map.
13166 | | */ |
13167 | | int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map) |
13168 | 0 | { |
13169 | 0 | struct bpf_link_struct_ops *st_ops_link; |
13170 | 0 | __u32 zero = 0; |
13171 | 0 | int err; |
13172 | |
13173 | 0 | if (!bpf_map__is_struct_ops(map)) |
13174 | 0 | return libbpf_err(-EINVAL); |
13175 | | |
13176 | 0 | if (map->fd < 0) { |
13177 | 0 | pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name); |
13178 | 0 | return libbpf_err(-EINVAL); |
13179 | 0 | } |
13180 | | |
13181 | 0 | st_ops_link = container_of(link, struct bpf_link_struct_ops, link); |
13182 | | /* Ensure the type of a link is correct */ |
13183 | 0 | if (st_ops_link->map_fd < 0) |
13184 | 0 | return libbpf_err(-EINVAL); |
13185 | | |
13186 | 0 | err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0); |
13187 | | /* It can be EBUSY if the map has been used to create or |
13188 | | * update a link before. We don't allow updating the value of |
13189 | | * a struct_ops once it is set. That ensures that the value
13190 | | * never changes, so it is safe to skip EBUSY.
13191 | | */ |
13192 | 0 | if (err && err != -EBUSY) |
13193 | 0 | return err; |
13194 | | |
13195 | 0 | err = bpf_link_update(link->fd, map->fd, NULL); |
13196 | 0 | if (err < 0) |
13197 | 0 | return err; |
13198 | | |
13199 | 0 | st_ops_link->map_fd = map->fd; |
13200 | |
13201 | 0 | return 0; |
13202 | 0 | } |
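 | |
 | | /* A sketch of atomically replacing the struct_ops map behind a link;
 | |  * this only works for links backed by a real kernel link, i.e. maps
 | |  * created with BPF_F_LINK. "old_map" and "new_map" are placeholders,
 | |  * and new_map must already be loaded:
 | |  *
 | |  *	struct bpf_link *link = bpf_map__attach_struct_ops(old_map);
 | |  *	...
 | |  *	err = bpf_link__update_map(link, new_map);
 | |  */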
13203 | | |
13204 | | typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr, |
13205 | | void *private_data); |
13206 | | |
13207 | | static enum bpf_perf_event_ret |
13208 | | perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size, |
13209 | | void **copy_mem, size_t *copy_size, |
13210 | | bpf_perf_event_print_t fn, void *private_data) |
13211 | 0 | { |
13212 | 0 | struct perf_event_mmap_page *header = mmap_mem; |
13213 | 0 | __u64 data_head = ring_buffer_read_head(header); |
13214 | 0 | __u64 data_tail = header->data_tail; |
13215 | 0 | void *base = ((__u8 *)header) + page_size; |
13216 | 0 | int ret = LIBBPF_PERF_EVENT_CONT; |
13217 | 0 | struct perf_event_header *ehdr; |
13218 | 0 | size_t ehdr_size; |
13219 | |
13220 | 0 | while (data_head != data_tail) { |
13221 | 0 | ehdr = base + (data_tail & (mmap_size - 1)); |
13222 | 0 | ehdr_size = ehdr->size; |
13223 | |
13224 | 0 | if (((void *)ehdr) + ehdr_size > base + mmap_size) { |
13225 | 0 | void *copy_start = ehdr; |
13226 | 0 | size_t len_first = base + mmap_size - copy_start; |
13227 | 0 | size_t len_secnd = ehdr_size - len_first; |
13228 | |
13229 | 0 | if (*copy_size < ehdr_size) { |
13230 | 0 | free(*copy_mem); |
13231 | 0 | *copy_mem = malloc(ehdr_size); |
13232 | 0 | if (!*copy_mem) { |
13233 | 0 | *copy_size = 0; |
13234 | 0 | ret = LIBBPF_PERF_EVENT_ERROR; |
13235 | 0 | break; |
13236 | 0 | } |
13237 | 0 | *copy_size = ehdr_size; |
13238 | 0 | } |
13239 | | |
13240 | 0 | memcpy(*copy_mem, copy_start, len_first); |
13241 | 0 | memcpy(*copy_mem + len_first, base, len_secnd); |
13242 | 0 | ehdr = *copy_mem; |
13243 | 0 | } |
13244 | | |
13245 | 0 | ret = fn(ehdr, private_data); |
13246 | 0 | data_tail += ehdr_size; |
13247 | 0 | if (ret != LIBBPF_PERF_EVENT_CONT) |
13248 | 0 | break; |
13249 | 0 | } |
13250 | |
13251 | 0 | ring_buffer_write_tail(header, data_tail); |
13252 | 0 | return libbpf_err(ret); |
13253 | 0 | } |
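 | |
 | | /* Note on the index math above: data_tail grows monotonically and is
 | |  * reduced to a ring offset with data_tail & (mmap_size - 1), which
 | |  * requires mmap_size to be a power of two. For example, with
 | |  * mmap_size = 32768 (8 pages of 4096 bytes), a cumulative tail of
 | |  * 40000 maps to offset 40000 & 32767 = 7232. A record that crosses
 | |  * the end of the ring is stitched back together in *copy_mem from
 | |  * its two segments.
 | |  */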
13254 | | |
13255 | | struct perf_buffer; |
13256 | | |
13257 | | struct perf_buffer_params { |
13258 | | struct perf_event_attr *attr; |
13259 | | /* if event_cb is specified, it takes precedence */
13260 | | perf_buffer_event_fn event_cb; |
13261 | | /* sample_cb and lost_cb are higher-level common-case callbacks */ |
13262 | | perf_buffer_sample_fn sample_cb; |
13263 | | perf_buffer_lost_fn lost_cb; |
13264 | | void *ctx; |
13265 | | int cpu_cnt; |
13266 | | int *cpus; |
13267 | | int *map_keys; |
13268 | | }; |
13269 | | |
13270 | | struct perf_cpu_buf { |
13271 | | struct perf_buffer *pb; |
13272 | | void *base; /* mmap()'ed memory */ |
13273 | | void *buf; /* for reconstructing segmented data */ |
13274 | | size_t buf_size; |
13275 | | int fd; |
13276 | | int cpu; |
13277 | | int map_key; |
13278 | | }; |
13279 | | |
13280 | | struct perf_buffer { |
13281 | | perf_buffer_event_fn event_cb; |
13282 | | perf_buffer_sample_fn sample_cb; |
13283 | | perf_buffer_lost_fn lost_cb; |
13284 | | void *ctx; /* passed into callbacks */ |
13285 | | |
13286 | | size_t page_size; |
13287 | | size_t mmap_size; |
13288 | | struct perf_cpu_buf **cpu_bufs; |
13289 | | struct epoll_event *events; |
13290 | | int cpu_cnt; /* number of allocated CPU buffers */ |
13291 | | int epoll_fd; /* epoll instance FD */
13292 | | int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */ |
13293 | | }; |
13294 | | |
13295 | | static void perf_buffer__free_cpu_buf(struct perf_buffer *pb, |
13296 | | struct perf_cpu_buf *cpu_buf) |
13297 | 0 | { |
13298 | 0 | if (!cpu_buf) |
13299 | 0 | return; |
13300 | 0 | if (cpu_buf->base && |
13301 | 0 | munmap(cpu_buf->base, pb->mmap_size + pb->page_size)) |
13302 | 0 | pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu); |
13303 | 0 | if (cpu_buf->fd >= 0) { |
13304 | 0 | ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0); |
13305 | 0 | close(cpu_buf->fd); |
13306 | 0 | } |
13307 | 0 | free(cpu_buf->buf); |
13308 | 0 | free(cpu_buf); |
13309 | 0 | } |
13310 | | |
13311 | | void perf_buffer__free(struct perf_buffer *pb) |
13312 | 0 | { |
13313 | 0 | int i; |
13314 | |
13315 | 0 | if (IS_ERR_OR_NULL(pb)) |
13316 | 0 | return; |
13317 | 0 | if (pb->cpu_bufs) { |
13318 | 0 | for (i = 0; i < pb->cpu_cnt; i++) { |
13319 | 0 | struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; |
13320 | |
13321 | 0 | if (!cpu_buf) |
13322 | 0 | continue; |
13323 | | |
13324 | 0 | bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key); |
13325 | 0 | perf_buffer__free_cpu_buf(pb, cpu_buf); |
13326 | 0 | } |
13327 | 0 | free(pb->cpu_bufs); |
13328 | 0 | } |
13329 | 0 | if (pb->epoll_fd >= 0) |
13330 | 0 | close(pb->epoll_fd); |
13331 | 0 | free(pb->events); |
13332 | 0 | free(pb); |
13333 | 0 | } |
13334 | | |
13335 | | static struct perf_cpu_buf * |
13336 | | perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr, |
13337 | | int cpu, int map_key) |
13338 | 0 | { |
13339 | 0 | struct perf_cpu_buf *cpu_buf; |
13340 | 0 | int err; |
13341 | |
13342 | 0 | cpu_buf = calloc(1, sizeof(*cpu_buf)); |
13343 | 0 | if (!cpu_buf) |
13344 | 0 | return ERR_PTR(-ENOMEM); |
13345 | | |
13346 | 0 | cpu_buf->pb = pb; |
13347 | 0 | cpu_buf->cpu = cpu; |
13348 | 0 | cpu_buf->map_key = map_key; |
13349 | |
13350 | 0 | cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu, |
13351 | 0 | -1, PERF_FLAG_FD_CLOEXEC); |
13352 | 0 | if (cpu_buf->fd < 0) { |
13353 | 0 | err = -errno; |
13354 | 0 | pr_warn("failed to open perf buffer event on cpu #%d: %s\n", |
13355 | 0 | cpu, errstr(err)); |
13356 | 0 | goto error; |
13357 | 0 | } |
13358 | | |
13359 | 0 | cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size, |
13360 | 0 | PROT_READ | PROT_WRITE, MAP_SHARED, |
13361 | 0 | cpu_buf->fd, 0); |
13362 | 0 | if (cpu_buf->base == MAP_FAILED) { |
13363 | 0 | cpu_buf->base = NULL; |
13364 | 0 | err = -errno; |
13365 | 0 | pr_warn("failed to mmap perf buffer on cpu #%d: %s\n", |
13366 | 0 | cpu, errstr(err)); |
13367 | 0 | goto error; |
13368 | 0 | } |
13369 | | |
13370 | 0 | if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) { |
13371 | 0 | err = -errno; |
13372 | 0 | pr_warn("failed to enable perf buffer event on cpu #%d: %s\n", |
13373 | 0 | cpu, errstr(err)); |
13374 | 0 | goto error; |
13375 | 0 | } |
13376 | | |
13377 | 0 | return cpu_buf; |
13378 | | |
13379 | 0 | error: |
13380 | 0 | perf_buffer__free_cpu_buf(pb, cpu_buf); |
13381 | 0 | return (struct perf_cpu_buf *)ERR_PTR(err); |
13382 | 0 | } |
13383 | | |
13384 | | static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, |
13385 | | struct perf_buffer_params *p); |
13386 | | |
13387 | | struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt, |
13388 | | perf_buffer_sample_fn sample_cb, |
13389 | | perf_buffer_lost_fn lost_cb, |
13390 | | void *ctx, |
13391 | | const struct perf_buffer_opts *opts) |
13392 | 0 | { |
13393 | 0 | const size_t attr_sz = sizeof(struct perf_event_attr); |
13394 | 0 | struct perf_buffer_params p = {}; |
13395 | 0 | struct perf_event_attr attr; |
13396 | 0 | __u32 sample_period; |
13397 | |
13398 | 0 | if (!OPTS_VALID(opts, perf_buffer_opts)) |
13399 | 0 | return libbpf_err_ptr(-EINVAL); |
13400 | | |
13401 | 0 | sample_period = OPTS_GET(opts, sample_period, 1); |
13402 | 0 | if (!sample_period) |
13403 | 0 | sample_period = 1; |
13404 | |
13405 | 0 | memset(&attr, 0, attr_sz); |
13406 | 0 | attr.size = attr_sz; |
13407 | 0 | attr.config = PERF_COUNT_SW_BPF_OUTPUT; |
13408 | 0 | attr.type = PERF_TYPE_SOFTWARE; |
13409 | 0 | attr.sample_type = PERF_SAMPLE_RAW; |
13410 | 0 | attr.wakeup_events = sample_period; |
13411 | |
13412 | 0 | p.attr = &attr; |
13413 | 0 | p.sample_cb = sample_cb; |
13414 | 0 | p.lost_cb = lost_cb; |
13415 | 0 | p.ctx = ctx; |
13416 | |
13417 | 0 | return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p)); |
13418 | 0 | } |
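 | |
 | | /* A minimal usage sketch; handle_sample() and the map name "events"
 | |  * are illustrative assumptions:
 | |  *
 | |  *	static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
 | |  *	{
 | |  *		// process one raw sample payload
 | |  *	}
 | |  *
 | |  *	int map_fd = bpf_map__fd(bpf_object__find_map_by_name(obj, "events"));
 | |  *	// 8 pages per CPU; page_cnt must be a power of two
 | |  *	struct perf_buffer *pb = perf_buffer__new(map_fd, 8, handle_sample,
 | |  *						   NULL, NULL, NULL);
 | |  *	if (!pb)
 | |  *		return -errno;
 | |  */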
13419 | | |
13420 | | struct perf_buffer *perf_buffer__new_raw(int map_fd, size_t page_cnt, |
13421 | | struct perf_event_attr *attr, |
13422 | | perf_buffer_event_fn event_cb, void *ctx, |
13423 | | const struct perf_buffer_raw_opts *opts) |
13424 | 0 | { |
13425 | 0 | struct perf_buffer_params p = {}; |
13426 | |
13427 | 0 | if (!attr) |
13428 | 0 | return libbpf_err_ptr(-EINVAL); |
13429 | | |
13430 | 0 | if (!OPTS_VALID(opts, perf_buffer_raw_opts)) |
13431 | 0 | return libbpf_err_ptr(-EINVAL); |
13432 | | |
13433 | 0 | p.attr = attr; |
13434 | 0 | p.event_cb = event_cb; |
13435 | 0 | p.ctx = ctx; |
13436 | 0 | p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0); |
13437 | 0 | p.cpus = OPTS_GET(opts, cpus, NULL); |
13438 | 0 | p.map_keys = OPTS_GET(opts, map_keys, NULL); |
13439 | |
13440 | 0 | return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p)); |
13441 | 0 | } |
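 | |
 | | /* Sketch of the raw variant with a caller-provided perf_event_attr;
 | |  * handle_event() is a placeholder perf_buffer_event_fn that returns
 | |  * LIBBPF_PERF_EVENT_CONT:
 | |  *
 | |  *	struct perf_event_attr attr = {
 | |  *		.size = sizeof(attr),
 | |  *		.type = PERF_TYPE_SOFTWARE,
 | |  *		.config = PERF_COUNT_SW_BPF_OUTPUT,
 | |  *		.sample_type = PERF_SAMPLE_RAW,
 | |  *		.wakeup_events = 1,
 | |  *	};
 | |  *	struct perf_buffer *pb = perf_buffer__new_raw(map_fd, 8, &attr,
 | |  *						      handle_event, NULL, NULL);
 | |  */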
13442 | | |
13443 | | static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, |
13444 | | struct perf_buffer_params *p) |
13445 | 0 | { |
13446 | 0 | const char *online_cpus_file = "/sys/devices/system/cpu/online"; |
13447 | 0 | struct bpf_map_info map; |
13448 | 0 | struct perf_buffer *pb; |
13449 | 0 | bool *online = NULL; |
13450 | 0 | __u32 map_info_len; |
13451 | 0 | int err, i, j, n; |
13452 | |
13453 | 0 | if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) { |
13454 | 0 | pr_warn("page count should be power of two, but is %zu\n", |
13455 | 0 | page_cnt); |
13456 | 0 | return ERR_PTR(-EINVAL); |
13457 | 0 | } |
13458 | | |
13459 | | /* best-effort sanity checks */ |
13460 | 0 | memset(&map, 0, sizeof(map)); |
13461 | 0 | map_info_len = sizeof(map); |
13462 | 0 | err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len); |
13463 | 0 | if (err) { |
13464 | 0 | err = -errno; |
13465 | | /* if BPF_OBJ_GET_INFO_BY_FD is supported, it will return
13466 | | * -EBADFD, -EFAULT, or -E2BIG on a real error
13467 | | */
13468 | 0 | if (err != -EINVAL) { |
13469 | 0 | pr_warn("failed to get map info for map FD %d: %s\n", |
13470 | 0 | map_fd, errstr(err)); |
13471 | 0 | return ERR_PTR(err); |
13472 | 0 | } |
13473 | 0 | pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n", |
13474 | 0 | map_fd); |
13475 | 0 | } else { |
13476 | 0 | if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { |
13477 | 0 | pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", |
13478 | 0 | map.name); |
13479 | 0 | return ERR_PTR(-EINVAL); |
13480 | 0 | } |
13481 | 0 | } |
13482 | | |
13483 | 0 | pb = calloc(1, sizeof(*pb)); |
13484 | 0 | if (!pb) |
13485 | 0 | return ERR_PTR(-ENOMEM); |
13486 | | |
13487 | 0 | pb->event_cb = p->event_cb; |
13488 | 0 | pb->sample_cb = p->sample_cb; |
13489 | 0 | pb->lost_cb = p->lost_cb; |
13490 | 0 | pb->ctx = p->ctx; |
13491 | |
13492 | 0 | pb->page_size = getpagesize(); |
13493 | 0 | pb->mmap_size = pb->page_size * page_cnt; |
13494 | 0 | pb->map_fd = map_fd; |
13495 | |
13496 | 0 | pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); |
13497 | 0 | if (pb->epoll_fd < 0) { |
13498 | 0 | err = -errno; |
13499 | 0 | pr_warn("failed to create epoll instance: %s\n", |
13500 | 0 | errstr(err)); |
13501 | 0 | goto error; |
13502 | 0 | } |
13503 | | |
13504 | 0 | if (p->cpu_cnt > 0) { |
13505 | 0 | pb->cpu_cnt = p->cpu_cnt; |
13506 | 0 | } else { |
13507 | 0 | pb->cpu_cnt = libbpf_num_possible_cpus(); |
13508 | 0 | if (pb->cpu_cnt < 0) { |
13509 | 0 | err = pb->cpu_cnt; |
13510 | 0 | goto error; |
13511 | 0 | } |
13512 | 0 | if (map.max_entries && map.max_entries < pb->cpu_cnt) |
13513 | 0 | pb->cpu_cnt = map.max_entries; |
13514 | 0 | } |
13515 | | |
13516 | 0 | pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events)); |
13517 | 0 | if (!pb->events) { |
13518 | 0 | err = -ENOMEM; |
13519 | 0 | pr_warn("failed to allocate events: out of memory\n"); |
13520 | 0 | goto error; |
13521 | 0 | } |
13522 | 0 | pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs)); |
13523 | 0 | if (!pb->cpu_bufs) { |
13524 | 0 | err = -ENOMEM; |
13525 | 0 | pr_warn("failed to allocate buffers: out of memory\n"); |
13526 | 0 | goto error; |
13527 | 0 | } |
13528 | | |
13529 | 0 | err = parse_cpu_mask_file(online_cpus_file, &online, &n); |
13530 | 0 | if (err) { |
13531 | 0 | pr_warn("failed to get online CPU mask: %s\n", errstr(err)); |
13532 | 0 | goto error; |
13533 | 0 | } |
13534 | | |
13535 | 0 | for (i = 0, j = 0; i < pb->cpu_cnt; i++) { |
13536 | 0 | struct perf_cpu_buf *cpu_buf; |
13537 | 0 | int cpu, map_key; |
13538 | |
13539 | 0 | cpu = p->cpu_cnt > 0 ? p->cpus[i] : i; |
13540 | 0 | map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i; |
13541 | | |
13542 | | /* if the user didn't explicitly request particular CPUs to be
13543 | | * attached to, skip offline/not-present CPUs
13544 | | */
13545 | 0 | if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu])) |
13546 | 0 | continue; |
13547 | | |
13548 | 0 | cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key); |
13549 | 0 | if (IS_ERR(cpu_buf)) { |
13550 | 0 | err = PTR_ERR(cpu_buf); |
13551 | 0 | goto error; |
13552 | 0 | } |
13553 | | |
13554 | 0 | pb->cpu_bufs[j] = cpu_buf; |
13555 | |
13556 | 0 | err = bpf_map_update_elem(pb->map_fd, &map_key, |
13557 | 0 | &cpu_buf->fd, 0); |
13558 | 0 | if (err) { |
13559 | 0 | err = -errno; |
13560 | 0 | pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n", |
13561 | 0 | cpu, map_key, cpu_buf->fd, |
13562 | 0 | errstr(err)); |
13563 | 0 | goto error; |
13564 | 0 | } |
13565 | | |
13566 | 0 | pb->events[j].events = EPOLLIN; |
13567 | 0 | pb->events[j].data.ptr = cpu_buf; |
13568 | 0 | if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd, |
13569 | 0 | &pb->events[j]) < 0) { |
13570 | 0 | err = -errno; |
13571 | 0 | pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n", |
13572 | 0 | cpu, cpu_buf->fd, |
13573 | 0 | errstr(err)); |
13574 | 0 | goto error; |
13575 | 0 | } |
13576 | 0 | j++; |
13577 | 0 | } |
13578 | 0 | pb->cpu_cnt = j; |
13579 | 0 | free(online); |
13580 | |
13581 | 0 | return pb; |
13582 | | |
13583 | 0 | error: |
13584 | 0 | free(online); |
13585 | 0 | if (pb) |
13586 | 0 | perf_buffer__free(pb); |
13587 | 0 | return ERR_PTR(err); |
13588 | 0 | } |
13589 | | |
13590 | | struct perf_sample_raw { |
13591 | | struct perf_event_header header; |
13592 | | uint32_t size; |
13593 | | char data[]; |
13594 | | }; |
13595 | | |
13596 | | struct perf_sample_lost { |
13597 | | struct perf_event_header header; |
13598 | | uint64_t id; |
13599 | | uint64_t lost; |
13600 | | uint64_t sample_id; |
13601 | | }; |
13602 | | |
13603 | | static enum bpf_perf_event_ret |
13604 | | perf_buffer__process_record(struct perf_event_header *e, void *ctx) |
13605 | 0 | { |
13606 | 0 | struct perf_cpu_buf *cpu_buf = ctx; |
13607 | 0 | struct perf_buffer *pb = cpu_buf->pb; |
13608 | 0 | void *data = e; |
13609 | | |
13610 | | /* user wants full control over parsing perf event */ |
13611 | 0 | if (pb->event_cb) |
13612 | 0 | return pb->event_cb(pb->ctx, cpu_buf->cpu, e); |
13613 | | |
13614 | 0 | switch (e->type) { |
13615 | 0 | case PERF_RECORD_SAMPLE: { |
13616 | 0 | struct perf_sample_raw *s = data; |
13617 | |
13618 | 0 | if (pb->sample_cb) |
13619 | 0 | pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size); |
13620 | 0 | break; |
13621 | 0 | } |
13622 | 0 | case PERF_RECORD_LOST: { |
13623 | 0 | struct perf_sample_lost *s = data; |
13624 | |
13625 | 0 | if (pb->lost_cb) |
13626 | 0 | pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost); |
13627 | 0 | break; |
13628 | 0 | } |
13629 | 0 | default: |
13630 | 0 | pr_warn("unknown perf sample type %d\n", e->type); |
13631 | 0 | return LIBBPF_PERF_EVENT_ERROR; |
13632 | 0 | } |
13633 | 0 | return LIBBPF_PERF_EVENT_CONT; |
13634 | 0 | } |
13635 | | |
13636 | | static int perf_buffer__process_records(struct perf_buffer *pb, |
13637 | | struct perf_cpu_buf *cpu_buf) |
13638 | 0 | { |
13639 | 0 | enum bpf_perf_event_ret ret; |
13640 | |
13641 | 0 | ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size, |
13642 | 0 | pb->page_size, &cpu_buf->buf, |
13643 | 0 | &cpu_buf->buf_size, |
13644 | 0 | perf_buffer__process_record, cpu_buf); |
13645 | 0 | if (ret != LIBBPF_PERF_EVENT_CONT) |
13646 | 0 | return ret; |
13647 | 0 | return 0; |
13648 | 0 | } |
13649 | | |
13650 | | int perf_buffer__epoll_fd(const struct perf_buffer *pb) |
13651 | 0 | { |
13652 | 0 | return pb->epoll_fd; |
13653 | 0 | } |
13654 | | |
13655 | | int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms) |
13656 | 0 | { |
13657 | 0 | int i, cnt, err; |
13658 | |
13659 | 0 | cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms); |
13660 | 0 | if (cnt < 0) |
13661 | 0 | return -errno; |
13662 | | |
13663 | 0 | for (i = 0; i < cnt; i++) { |
13664 | 0 | struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr; |
13665 | |
13666 | 0 | err = perf_buffer__process_records(pb, cpu_buf); |
13667 | 0 | if (err) { |
13668 | 0 | pr_warn("error while processing records: %s\n", errstr(err)); |
13669 | 0 | return libbpf_err(err); |
13670 | 0 | } |
13671 | 0 | } |
13672 | 0 | return cnt; |
13673 | 0 | } |
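 | |
 | | /* Typical polling loop (sketch; the exiting flag is an assumption):
 | |  *
 | |  *	while (!exiting) {
 | |  *		// returns number of ready CPU buffers, 0 on timeout,
 | |  *		// or -errno (-EINTR on signal) on error
 | |  *		int n = perf_buffer__poll(pb, 100);
 | |  *		if (n < 0 && n != -EINTR)
 | |  *			break;
 | |  *	}
 | |  *	perf_buffer__free(pb);
 | |  */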
13674 | | |
13675 | | /* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer |
13676 | | * manager. |
13677 | | */ |
13678 | | size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb) |
13679 | 0 | { |
13680 | 0 | return pb->cpu_cnt; |
13681 | 0 | } |
13682 | | |
13683 | | /* |
13684 | | * Return perf_event FD of a ring buffer in *buf_idx* slot of |
13685 | | * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using |
13686 | | * select()/poll()/epoll() Linux syscalls. |
13687 | | */ |
13688 | | int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx) |
13689 | 0 | { |
13690 | 0 | struct perf_cpu_buf *cpu_buf; |
13691 | |
13692 | 0 | if (buf_idx >= pb->cpu_cnt) |
13693 | 0 | return libbpf_err(-EINVAL); |
13694 | | |
13695 | 0 | cpu_buf = pb->cpu_bufs[buf_idx]; |
13696 | 0 | if (!cpu_buf) |
13697 | 0 | return libbpf_err(-ENOENT); |
13698 | | |
13699 | 0 | return cpu_buf->fd; |
13700 | 0 | } |
13701 | | |
13702 | | int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size) |
13703 | 0 | { |
13704 | 0 | struct perf_cpu_buf *cpu_buf; |
13705 | |
13706 | 0 | if (buf_idx >= pb->cpu_cnt) |
13707 | 0 | return libbpf_err(-EINVAL); |
13708 | | |
13709 | 0 | cpu_buf = pb->cpu_bufs[buf_idx]; |
13710 | 0 | if (!cpu_buf) |
13711 | 0 | return libbpf_err(-ENOENT); |
13712 | | |
13713 | 0 | *buf = cpu_buf->base; |
13714 | 0 | *buf_size = pb->mmap_size; |
13715 | 0 | return 0; |
13716 | 0 | } |
13717 | | |
13718 | | /* |
13719 | | * Consume data from perf ring buffer corresponding to slot *buf_idx* in |
13720 | | * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to |
13721 | | * consume, do nothing and return success. |
13722 | | * Returns: |
13723 | | * - 0 on success; |
13724 | | * - <0 on failure. |
13725 | | */ |
13726 | | int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx) |
13727 | 0 | { |
13728 | 0 | struct perf_cpu_buf *cpu_buf; |
13729 | |
13730 | 0 | if (buf_idx >= pb->cpu_cnt) |
13731 | 0 | return libbpf_err(-EINVAL); |
13732 | | |
13733 | 0 | cpu_buf = pb->cpu_bufs[buf_idx]; |
13734 | 0 | if (!cpu_buf) |
13735 | 0 | return libbpf_err(-ENOENT); |
13736 | | |
13737 | 0 | return perf_buffer__process_records(pb, cpu_buf); |
13738 | 0 | } |
13739 | | |
13740 | | int perf_buffer__consume(struct perf_buffer *pb) |
13741 | 0 | { |
13742 | 0 | int i, err; |
13743 | |
13744 | 0 | for (i = 0; i < pb->cpu_cnt; i++) { |
13745 | 0 | struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; |
13746 | |
13747 | 0 | if (!cpu_buf) |
13748 | 0 | continue; |
13749 | | |
13750 | 0 | err = perf_buffer__process_records(pb, cpu_buf); |
13751 | 0 | if (err) { |
13752 | 0 | pr_warn("perf_buffer: failed to process records in buffer #%d: %s\n", |
13753 | 0 | i, errstr(err)); |
13754 | 0 | return libbpf_err(err); |
13755 | 0 | } |
13756 | 0 | } |
13757 | 0 | return 0; |
13758 | 0 | } |
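 | |
 | | /* Unlike perf_buffer__poll(), perf_buffer__consume() never waits: it
 | |  * only drains records already pending in each CPU buffer. A common
 | |  * pattern (sketch) is a final drain before teardown:
 | |  *
 | |  *	perf_buffer__consume(pb);
 | |  *	perf_buffer__free(pb);
 | |  */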
13759 | | |
13760 | | int bpf_program__set_attach_target(struct bpf_program *prog, |
13761 | | int attach_prog_fd, |
13762 | | const char *attach_func_name) |
13763 | 0 | { |
13764 | 0 | int btf_obj_fd = 0, btf_id = 0, err; |
13765 | |
13766 | 0 | if (!prog || attach_prog_fd < 0) |
13767 | 0 | return libbpf_err(-EINVAL); |
13768 | | |
13769 | 0 | if (prog->obj->state >= OBJ_LOADED) |
13770 | 0 | return libbpf_err(-EINVAL); |
13771 | | |
13772 | 0 | if (attach_prog_fd && !attach_func_name) { |
13773 | | /* remember attach_prog_fd and let bpf_program__load() find |
13774 | | * BTF ID during the program load |
13775 | | */ |
13776 | 0 | prog->attach_prog_fd = attach_prog_fd; |
13777 | 0 | return 0; |
13778 | 0 | } |
13779 | | |
13780 | 0 | if (attach_prog_fd) { |
13781 | 0 | btf_id = libbpf_find_prog_btf_id(attach_func_name, |
13782 | 0 | attach_prog_fd, prog->obj->token_fd); |
13783 | 0 | if (btf_id < 0) |
13784 | 0 | return libbpf_err(btf_id); |
13785 | 0 | } else { |
13786 | 0 | if (!attach_func_name) |
13787 | 0 | return libbpf_err(-EINVAL); |
13788 | | |
13789 | | /* load btf_vmlinux, if not yet */ |
13790 | 0 | err = bpf_object__load_vmlinux_btf(prog->obj, true); |
13791 | 0 | if (err) |
13792 | 0 | return libbpf_err(err); |
13793 | 0 | err = find_kernel_btf_id(prog->obj, attach_func_name, |
13794 | 0 | prog->expected_attach_type, |
13795 | 0 | &btf_obj_fd, &btf_id); |
13796 | 0 | if (err) |
13797 | 0 | return libbpf_err(err); |
13798 | 0 | } |
13799 | | |
13800 | 0 | prog->attach_btf_id = btf_id; |
13801 | 0 | prog->attach_btf_obj_fd = btf_obj_fd; |
13802 | 0 | prog->attach_prog_fd = attach_prog_fd; |
13803 | 0 | return 0; |
13804 | 0 | } |
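 | |
 | | /* Sketch of retargeting an fentry program; prog/obj and the kernel
 | |  * function name are illustrative. This must be called after open but
 | |  * before load; with attach_prog_fd == 0, attach_func_name is resolved
 | |  * against vmlinux BTF right away:
 | |  *
 | |  *	err = bpf_program__set_attach_target(prog, 0, "tcp_v6_connect");
 | |  *	if (err)
 | |  *		return err;
 | |  *	err = bpf_object__load(obj);
 | |  */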
13805 | | |
13806 | | int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz) |
13807 | 0 | { |
13808 | 0 | int err = 0, n, len, start, end = -1; |
13809 | 0 | bool *tmp; |
13810 | |
13811 | 0 | *mask = NULL; |
13812 | 0 | *mask_sz = 0; |
13813 | | |
13814 | | /* Each substring separated by ',' has the format \d+-\d+ or \d+ */
13815 | 0 | while (*s) { |
13816 | 0 | if (*s == ',' || *s == '\n') { |
13817 | 0 | s++; |
13818 | 0 | continue; |
13819 | 0 | } |
13820 | 0 | n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len); |
13821 | 0 | if (n <= 0 || n > 2) { |
13822 | 0 | pr_warn("Failed to get CPU range %s: %d\n", s, n); |
13823 | 0 | err = -EINVAL; |
13824 | 0 | goto cleanup; |
13825 | 0 | } else if (n == 1) { |
13826 | 0 | end = start; |
13827 | 0 | } |
13828 | 0 | if (start < 0 || start > end) { |
13829 | 0 | pr_warn("Invalid CPU range [%d,%d] in %s\n", |
13830 | 0 | start, end, s); |
13831 | 0 | err = -EINVAL; |
13832 | 0 | goto cleanup; |
13833 | 0 | } |
13834 | 0 | tmp = realloc(*mask, end + 1); |
13835 | 0 | if (!tmp) { |
13836 | 0 | err = -ENOMEM; |
13837 | 0 | goto cleanup; |
13838 | 0 | } |
13839 | 0 | *mask = tmp; |
13840 | 0 | memset(tmp + *mask_sz, 0, start - *mask_sz); |
13841 | 0 | memset(tmp + start, 1, end - start + 1); |
13842 | 0 | *mask_sz = end + 1; |
13843 | 0 | s += len; |
13844 | 0 | } |
13845 | 0 | if (!*mask_sz) { |
13846 | 0 | pr_warn("Empty CPU range\n"); |
13847 | 0 | return -EINVAL; |
13848 | 0 | } |
13849 | 0 | return 0; |
13850 | 0 | cleanup: |
13851 | 0 | free(*mask); |
13852 | 0 | *mask = NULL; |
13853 | 0 | return err; |
13854 | 0 | } |
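 | |
 | | /* Example for this (internal) helper: parsing "0-2,5" yields
 | |  * mask_sz == 6 and mask == {1, 1, 1, 0, 0, 1}, i.e. one bool per CPU
 | |  * index up to the highest CPU mentioned.
 | |  */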
13855 | | |
13856 | | int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz) |
13857 | 0 | { |
13858 | 0 | int fd, err = 0, len; |
13859 | 0 | char buf[128]; |
13860 | |
13861 | 0 | fd = open(fcpu, O_RDONLY | O_CLOEXEC); |
13862 | 0 | if (fd < 0) { |
13863 | 0 | err = -errno; |
13864 | 0 | pr_warn("Failed to open cpu mask file %s: %s\n", fcpu, errstr(err)); |
13865 | 0 | return err; |
13866 | 0 | } |
13867 | 0 | len = read(fd, buf, sizeof(buf)); |
13868 | 0 | close(fd); |
13869 | 0 | if (len <= 0) { |
13870 | 0 | err = len ? -errno : -EINVAL; |
13871 | 0 | pr_warn("Failed to read cpu mask from %s: %s\n", fcpu, errstr(err)); |
13872 | 0 | return err; |
13873 | 0 | } |
13874 | 0 | if (len >= sizeof(buf)) { |
13875 | 0 | pr_warn("CPU mask is too big in file %s\n", fcpu); |
13876 | 0 | return -E2BIG; |
13877 | 0 | } |
13878 | 0 | buf[len] = '\0'; |
13879 | |
13880 | 0 | return parse_cpu_mask_str(buf, mask, mask_sz); |
13881 | 0 | } |
13882 | | |
13883 | | int libbpf_num_possible_cpus(void) |
13884 | 0 | { |
13885 | 0 | static const char *fcpu = "/sys/devices/system/cpu/possible"; |
13886 | 0 | static int cpus; |
13887 | 0 | int err, n, i, tmp_cpus; |
13888 | 0 | bool *mask; |
13889 | |
13890 | 0 | tmp_cpus = READ_ONCE(cpus); |
13891 | 0 | if (tmp_cpus > 0) |
13892 | 0 | return tmp_cpus; |
13893 | | |
13894 | 0 | err = parse_cpu_mask_file(fcpu, &mask, &n); |
13895 | 0 | if (err) |
13896 | 0 | return libbpf_err(err); |
13897 | | |
13898 | 0 | tmp_cpus = 0; |
13899 | 0 | for (i = 0; i < n; i++) { |
13900 | 0 | if (mask[i]) |
13901 | 0 | tmp_cpus++; |
13902 | 0 | } |
13903 | 0 | free(mask); |
13904 | |
13905 | 0 | WRITE_ONCE(cpus, tmp_cpus); |
13906 | 0 | return tmp_cpus; |
13907 | 0 | } |
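 | |
 | | /* A common use is sizing per-CPU value arrays for lookups in
 | |  * BPF_MAP_TYPE_PERCPU_* maps (sketch; the value type is assumed to
 | |  * be __u64):
 | |  *
 | |  *	int ncpus = libbpf_num_possible_cpus();
 | |  *	if (ncpus < 0)
 | |  *		return ncpus;
 | |  *	__u64 values[ncpus];
 | |  *	err = bpf_map_lookup_elem(percpu_map_fd, &key, values);
 | |  */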
13908 | | |
13909 | | static int populate_skeleton_maps(const struct bpf_object *obj, |
13910 | | struct bpf_map_skeleton *maps, |
13911 | | size_t map_cnt, size_t map_skel_sz) |
13912 | 0 | { |
13913 | 0 | int i; |
13914 | |
13915 | 0 | for (i = 0; i < map_cnt; i++) { |
13916 | 0 | struct bpf_map_skeleton *map_skel = (void *)maps + i * map_skel_sz; |
13917 | 0 | struct bpf_map **map = map_skel->map; |
13918 | 0 | const char *name = map_skel->name; |
13919 | 0 | void **mmaped = map_skel->mmaped; |
13920 | |
13921 | 0 | *map = bpf_object__find_map_by_name(obj, name); |
13922 | 0 | if (!*map) { |
13923 | 0 | pr_warn("failed to find skeleton map '%s'\n", name); |
13924 | 0 | return -ESRCH; |
13925 | 0 | } |
13926 | | |
13927 | | /* externs shouldn't be pre-initialized from user code */
13928 | 0 | if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG) |
13929 | 0 | *mmaped = (*map)->mmaped; |
13930 | 0 | } |
13931 | 0 | return 0; |
13932 | 0 | } |
13933 | | |
13934 | | static int populate_skeleton_progs(const struct bpf_object *obj, |
13935 | | struct bpf_prog_skeleton *progs, |
13936 | | size_t prog_cnt, size_t prog_skel_sz) |
13937 | 0 | { |
13938 | 0 | int i; |
13939 | |
13940 | 0 | for (i = 0; i < prog_cnt; i++) { |
13941 | 0 | struct bpf_prog_skeleton *prog_skel = (void *)progs + i * prog_skel_sz; |
13942 | 0 | struct bpf_program **prog = prog_skel->prog; |
13943 | 0 | const char *name = prog_skel->name; |
13944 | |
13945 | 0 | *prog = bpf_object__find_program_by_name(obj, name); |
13946 | 0 | if (!*prog) { |
13947 | 0 | pr_warn("failed to find skeleton program '%s'\n", name); |
13948 | 0 | return -ESRCH; |
13949 | 0 | } |
13950 | 0 | } |
13951 | 0 | return 0; |
13952 | 0 | } |
13953 | | |
13954 | | int bpf_object__open_skeleton(struct bpf_object_skeleton *s, |
13955 | | const struct bpf_object_open_opts *opts) |
13956 | 0 | { |
13957 | 0 | struct bpf_object *obj; |
13958 | 0 | int err; |
13959 | |
13960 | 0 | obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts); |
13961 | 0 | if (IS_ERR(obj)) { |
13962 | 0 | err = PTR_ERR(obj); |
13963 | 0 | pr_warn("failed to initialize skeleton BPF object '%s': %s\n", |
13964 | 0 | s->name, errstr(err)); |
13965 | 0 | return libbpf_err(err); |
13966 | 0 | } |
13967 | | |
13968 | 0 | *s->obj = obj; |
13969 | 0 | err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz); |
13970 | 0 | if (err) { |
13971 | 0 | pr_warn("failed to populate skeleton maps for '%s': %s\n", s->name, errstr(err)); |
13972 | 0 | return libbpf_err(err); |
13973 | 0 | } |
13974 | | |
13975 | 0 | err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz); |
13976 | 0 | if (err) { |
13977 | 0 | pr_warn("failed to populate skeleton progs for '%s': %s\n", s->name, errstr(err)); |
13978 | 0 | return libbpf_err(err); |
13979 | 0 | } |
13980 | | |
13981 | 0 | return 0; |
13982 | 0 | } |
13983 | | |
13984 | | int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s) |
13985 | 0 | { |
13986 | 0 | int err, len, var_idx, i; |
13987 | 0 | const char *var_name; |
13988 | 0 | const struct bpf_map *map; |
13989 | 0 | struct btf *btf; |
13990 | 0 | __u32 map_type_id; |
13991 | 0 | const struct btf_type *map_type, *var_type; |
13992 | 0 | const struct bpf_var_skeleton *var_skel; |
13993 | 0 | struct btf_var_secinfo *var; |
13994 | |
13995 | 0 | if (!s->obj) |
13996 | 0 | return libbpf_err(-EINVAL); |
13997 | | |
13998 | 0 | btf = bpf_object__btf(s->obj); |
13999 | 0 | if (!btf) { |
14000 | 0 | pr_warn("subskeletons require BTF at runtime (object %s)\n", |
14001 | 0 | bpf_object__name(s->obj)); |
14002 | 0 | return libbpf_err(-EINVAL);
14003 | 0 | } |
14004 | | |
14005 | 0 | err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz); |
14006 | 0 | if (err) { |
14007 | 0 | pr_warn("failed to populate subskeleton maps: %s\n", errstr(err)); |
14008 | 0 | return libbpf_err(err); |
14009 | 0 | } |
14010 | | |
14011 | 0 | err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz); |
14012 | 0 | if (err) { |
14013 | 0 | pr_warn("failed to populate subskeleton progs: %s\n", errstr(err));
14014 | 0 | return libbpf_err(err); |
14015 | 0 | } |
14016 | | |
14017 | 0 | for (var_idx = 0; var_idx < s->var_cnt; var_idx++) { |
14018 | 0 | var_skel = (void *)s->vars + var_idx * s->var_skel_sz; |
14019 | 0 | map = *var_skel->map; |
14020 | 0 | map_type_id = bpf_map__btf_value_type_id(map); |
14021 | 0 | map_type = btf__type_by_id(btf, map_type_id); |
14022 | |
14023 | 0 | if (!btf_is_datasec(map_type)) { |
14024 | 0 | pr_warn("type for map '%1$s' is not a datasec: %2$s\n", |
14025 | 0 | bpf_map__name(map), |
14026 | 0 | __btf_kind_str(btf_kind(map_type))); |
14027 | 0 | return libbpf_err(-EINVAL); |
14028 | 0 | } |
14029 | | |
14030 | 0 | len = btf_vlen(map_type); |
14031 | 0 | var = btf_var_secinfos(map_type); |
14032 | 0 | for (i = 0; i < len; i++, var++) { |
14033 | 0 | var_type = btf__type_by_id(btf, var->type); |
14034 | 0 | var_name = btf__name_by_offset(btf, var_type->name_off); |
14035 | 0 | if (strcmp(var_name, var_skel->name) == 0) { |
14036 | 0 | *var_skel->addr = map->mmaped + var->offset; |
14037 | 0 | break; |
14038 | 0 | } |
14039 | 0 | } |
14040 | 0 | } |
14041 | 0 | return 0; |
14042 | 0 | } |
14043 | | |
14044 | | void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s) |
14045 | 0 | { |
14046 | 0 | if (!s) |
14047 | 0 | return; |
14048 | 0 | free(s->maps); |
14049 | 0 | free(s->progs); |
14050 | 0 | free(s->vars); |
14051 | 0 | free(s); |
14052 | 0 | } |
14053 | | |
14054 | | int bpf_object__load_skeleton(struct bpf_object_skeleton *s) |
14055 | 0 | { |
14056 | 0 | int i, err; |
14057 | |
14058 | 0 | err = bpf_object__load(*s->obj); |
14059 | 0 | if (err) { |
14060 | 0 | pr_warn("failed to load BPF skeleton '%s': %s\n", s->name, errstr(err)); |
14061 | 0 | return libbpf_err(err); |
14062 | 0 | } |
14063 | | |
14064 | 0 | for (i = 0; i < s->map_cnt; i++) { |
14065 | 0 | struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; |
14066 | 0 | struct bpf_map *map = *map_skel->map; |
14067 | |
14068 | 0 | if (!map_skel->mmaped) |
14069 | 0 | continue; |
14070 | | |
14071 | 0 | *map_skel->mmaped = map->mmaped; |
14072 | 0 | } |
14073 | |
14074 | 0 | return 0; |
14075 | 0 | } |
14076 | | |
14077 | | int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) |
14078 | 0 | { |
14079 | 0 | int i, err; |
14080 | |
14081 | 0 | for (i = 0; i < s->prog_cnt; i++) { |
14082 | 0 | struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz; |
14083 | 0 | struct bpf_program *prog = *prog_skel->prog; |
14084 | 0 | struct bpf_link **link = prog_skel->link; |
14085 | |
14086 | 0 | if (!prog->autoload || !prog->autoattach) |
14087 | 0 | continue; |
14088 | | |
14089 | | /* auto-attaching not supported for this program */ |
14090 | 0 | if (!prog->sec_def || !prog->sec_def->prog_attach_fn) |
14091 | 0 | continue; |
14092 | | |
14093 | | /* if user already set the link manually, don't attempt auto-attach */ |
14094 | 0 | if (*link) |
14095 | 0 | continue; |
14096 | | |
14097 | 0 | err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link); |
14098 | 0 | if (err) { |
14099 | 0 | pr_warn("prog '%s': failed to auto-attach: %s\n", |
14100 | 0 | bpf_program__name(prog), errstr(err)); |
14101 | 0 | return libbpf_err(err); |
14102 | 0 | } |
14103 | | |
14104 | | /* For some SEC() definitions auto-attach is supported only in
14105 | | * some cases (e.g., when the definition completely specifies the
14106 | | * target information), but not in others. SEC("uprobe") is one
14107 | | * such case: if the user specified the target binary and function
14108 | | * name, such a BPF program can be auto-attached; but if not, that
14109 | | * shouldn't cause the skeleton's attach to fail, and the program
14110 | | * should just be skipped.
14111 | | * attach_fn signals such a case by returning 0 (no error) and
14112 | | * setting the link to NULL.
14113 | | */
14114 | 0 | } |
14115 | | |
14116 | | |
14117 | 0 | for (i = 0; i < s->map_cnt; i++) { |
14118 | 0 | struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; |
14119 | 0 | struct bpf_map *map = *map_skel->map; |
14120 | 0 | struct bpf_link **link; |
14121 | |
14122 | 0 | if (!map->autocreate || !map->autoattach) |
14123 | 0 | continue; |
14124 | | |
14125 | | /* only struct_ops maps can be attached */ |
14126 | 0 | if (!bpf_map__is_struct_ops(map)) |
14127 | 0 | continue; |
14128 | | |
14129 | | /* skeleton was created by an earlier bpftool version; notify the user */
14130 | 0 | if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) { |
14131 | 0 | pr_warn("map '%s': BPF skeleton version is old, skipping map auto-attachment...\n", |
14132 | 0 | bpf_map__name(map)); |
14133 | 0 | continue; |
14134 | 0 | } |
14135 | | |
14136 | 0 | link = map_skel->link; |
14137 | 0 | if (!link) { |
14138 | 0 | pr_warn("map '%s': BPF map skeleton link is uninitialized\n", |
14139 | 0 | bpf_map__name(map)); |
14140 | 0 | continue; |
14141 | 0 | } |
14142 | | |
14143 | 0 | if (*link) |
14144 | 0 | continue; |
14145 | | |
14146 | 0 | *link = bpf_map__attach_struct_ops(map); |
14147 | 0 | if (!*link) { |
14148 | 0 | err = -errno; |
14149 | 0 | pr_warn("map '%s': failed to auto-attach: %s\n", |
14150 | 0 | bpf_map__name(map), errstr(err)); |
14151 | 0 | return libbpf_err(err); |
14152 | 0 | } |
14153 | 0 | } |
14154 | | |
14155 | 0 | return 0; |
14156 | 0 | } |
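 | |
 | | /* Skeletons generated by "bpftool gen skeleton" wrap these helpers;
 | |  * assuming a skeleton generated from myprog.bpf.c (the names below
 | |  * follow that convention and are otherwise assumptions), the typical
 | |  * lifecycle is:
 | |  *
 | |  *	struct myprog_bpf *skel = myprog_bpf__open();	// open_skeleton
 | |  *	err = myprog_bpf__load(skel);			// load_skeleton
 | |  *	err = err ?: myprog_bpf__attach(skel);		// attach_skeleton
 | |  *	...
 | |  *	myprog_bpf__destroy(skel);			// detach + close
 | |  */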
14157 | | |
14158 | | void bpf_object__detach_skeleton(struct bpf_object_skeleton *s) |
14159 | 0 | { |
14160 | 0 | int i; |
14161 | |
14162 | 0 | for (i = 0; i < s->prog_cnt; i++) { |
14163 | 0 | struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz; |
14164 | 0 | struct bpf_link **link = prog_skel->link; |
14165 | |
14166 | 0 | bpf_link__destroy(*link); |
14167 | 0 | *link = NULL; |
14168 | 0 | } |
14169 | |
14170 | 0 | if (s->map_skel_sz < sizeof(struct bpf_map_skeleton)) |
14171 | 0 | return; |
14172 | | |
14173 | 0 | for (i = 0; i < s->map_cnt; i++) { |
14174 | 0 | struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; |
14175 | 0 | struct bpf_link **link = map_skel->link; |
14176 | |
14177 | 0 | if (link) { |
14178 | 0 | bpf_link__destroy(*link); |
14179 | 0 | *link = NULL; |
14180 | 0 | } |
14181 | 0 | } |
14182 | 0 | } |
14183 | | |
14184 | | void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s) |
14185 | 0 | { |
14186 | 0 | if (!s) |
14187 | 0 | return; |
14188 | | |
14189 | 0 | bpf_object__detach_skeleton(s); |
14190 | 0 | if (s->obj) |
14191 | 0 | bpf_object__close(*s->obj); |
14192 | 0 | free(s->maps); |
14193 | 0 | free(s->progs); |
14194 | 0 | free(s); |
14195 | 0 | } |