Line | Count | Source |
1 | | // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) |
2 | | |
3 | | /* |
4 | | * Common eBPF ELF object loading operations. |
5 | | * |
6 | | * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org> |
7 | | * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> |
8 | | * Copyright (C) 2015 Huawei Inc. |
9 | | * Copyright (C) 2017 Nicira, Inc. |
10 | | * Copyright (C) 2019 Isovalent, Inc. |
11 | | */ |
12 | | |
13 | | #ifndef _GNU_SOURCE |
14 | | #define _GNU_SOURCE |
15 | | #endif |
16 | | #include <stdlib.h> |
17 | | #include <stdio.h> |
18 | | #include <stdarg.h> |
19 | | #include <libgen.h> |
20 | | #include <inttypes.h> |
21 | | #include <limits.h> |
22 | | #include <string.h> |
23 | | #include <unistd.h> |
24 | | #include <endian.h> |
25 | | #include <fcntl.h> |
26 | | #include <errno.h> |
27 | | #include <ctype.h> |
28 | | #include <asm/unistd.h> |
29 | | #include <linux/err.h> |
30 | | #include <linux/kernel.h> |
31 | | #include <linux/bpf.h> |
32 | | #include <linux/btf.h> |
33 | | #include <linux/filter.h> |
34 | | #include <linux/limits.h> |
35 | | #include <linux/perf_event.h> |
36 | | #include <linux/bpf_perf_event.h> |
37 | | #include <linux/ring_buffer.h> |
38 | | #include <sys/epoll.h> |
39 | | #include <sys/ioctl.h> |
40 | | #include <sys/mman.h> |
41 | | #include <sys/stat.h> |
42 | | #include <sys/types.h> |
43 | | #include <sys/vfs.h> |
44 | | #include <sys/utsname.h> |
45 | | #include <sys/resource.h> |
46 | | #include <libelf.h> |
47 | | #include <gelf.h> |
48 | | #include <zlib.h> |
49 | | |
50 | | #include "libbpf.h" |
51 | | #include "bpf.h" |
52 | | #include "btf.h" |
53 | | #include "str_error.h" |
54 | | #include "libbpf_internal.h" |
55 | | #include "hashmap.h" |
56 | | #include "bpf_gen_internal.h" |
57 | | #include "zip.h" |
58 | | |
59 | | #ifndef BPF_FS_MAGIC |
60 | 0 | #define BPF_FS_MAGIC 0xcafe4a11 |
61 | | #endif |
62 | | |
63 | 1 | #define BPF_FS_DEFAULT_PATH "/sys/fs/bpf" |
64 | | |
65 | 91.2k | #define BPF_INSN_SZ (sizeof(struct bpf_insn)) |
66 | | |
67 | | /* vfprintf() in __base_pr() uses a nonliteral format string. It may break |
68 | | * compilation if the user enables the corresponding warning. Disable it explicitly. |
69 | | */ |
70 | | #pragma GCC diagnostic ignored "-Wformat-nonliteral" |
71 | | |
72 | | #define __printf(a, b) __attribute__((format(printf, a, b))) |
73 | | |
74 | | static struct bpf_map *bpf_object__add_map(struct bpf_object *obj); |
75 | | static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog); |
76 | | static int map_set_def_max_entries(struct bpf_map *map); |
77 | | |
78 | | static const char * const attach_type_name[] = { |
79 | | [BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress", |
80 | | [BPF_CGROUP_INET_EGRESS] = "cgroup_inet_egress", |
81 | | [BPF_CGROUP_INET_SOCK_CREATE] = "cgroup_inet_sock_create", |
82 | | [BPF_CGROUP_INET_SOCK_RELEASE] = "cgroup_inet_sock_release", |
83 | | [BPF_CGROUP_SOCK_OPS] = "cgroup_sock_ops", |
84 | | [BPF_CGROUP_DEVICE] = "cgroup_device", |
85 | | [BPF_CGROUP_INET4_BIND] = "cgroup_inet4_bind", |
86 | | [BPF_CGROUP_INET6_BIND] = "cgroup_inet6_bind", |
87 | | [BPF_CGROUP_INET4_CONNECT] = "cgroup_inet4_connect", |
88 | | [BPF_CGROUP_INET6_CONNECT] = "cgroup_inet6_connect", |
89 | | [BPF_CGROUP_UNIX_CONNECT] = "cgroup_unix_connect", |
90 | | [BPF_CGROUP_INET4_POST_BIND] = "cgroup_inet4_post_bind", |
91 | | [BPF_CGROUP_INET6_POST_BIND] = "cgroup_inet6_post_bind", |
92 | | [BPF_CGROUP_INET4_GETPEERNAME] = "cgroup_inet4_getpeername", |
93 | | [BPF_CGROUP_INET6_GETPEERNAME] = "cgroup_inet6_getpeername", |
94 | | [BPF_CGROUP_UNIX_GETPEERNAME] = "cgroup_unix_getpeername", |
95 | | [BPF_CGROUP_INET4_GETSOCKNAME] = "cgroup_inet4_getsockname", |
96 | | [BPF_CGROUP_INET6_GETSOCKNAME] = "cgroup_inet6_getsockname", |
97 | | [BPF_CGROUP_UNIX_GETSOCKNAME] = "cgroup_unix_getsockname", |
98 | | [BPF_CGROUP_UDP4_SENDMSG] = "cgroup_udp4_sendmsg", |
99 | | [BPF_CGROUP_UDP6_SENDMSG] = "cgroup_udp6_sendmsg", |
100 | | [BPF_CGROUP_UNIX_SENDMSG] = "cgroup_unix_sendmsg", |
101 | | [BPF_CGROUP_SYSCTL] = "cgroup_sysctl", |
102 | | [BPF_CGROUP_UDP4_RECVMSG] = "cgroup_udp4_recvmsg", |
103 | | [BPF_CGROUP_UDP6_RECVMSG] = "cgroup_udp6_recvmsg", |
104 | | [BPF_CGROUP_UNIX_RECVMSG] = "cgroup_unix_recvmsg", |
105 | | [BPF_CGROUP_GETSOCKOPT] = "cgroup_getsockopt", |
106 | | [BPF_CGROUP_SETSOCKOPT] = "cgroup_setsockopt", |
107 | | [BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser", |
108 | | [BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict", |
109 | | [BPF_SK_SKB_VERDICT] = "sk_skb_verdict", |
110 | | [BPF_SK_MSG_VERDICT] = "sk_msg_verdict", |
111 | | [BPF_LIRC_MODE2] = "lirc_mode2", |
112 | | [BPF_FLOW_DISSECTOR] = "flow_dissector", |
113 | | [BPF_TRACE_RAW_TP] = "trace_raw_tp", |
114 | | [BPF_TRACE_FENTRY] = "trace_fentry", |
115 | | [BPF_TRACE_FEXIT] = "trace_fexit", |
116 | | [BPF_MODIFY_RETURN] = "modify_return", |
117 | | [BPF_LSM_MAC] = "lsm_mac", |
118 | | [BPF_LSM_CGROUP] = "lsm_cgroup", |
119 | | [BPF_SK_LOOKUP] = "sk_lookup", |
120 | | [BPF_TRACE_ITER] = "trace_iter", |
121 | | [BPF_XDP_DEVMAP] = "xdp_devmap", |
122 | | [BPF_XDP_CPUMAP] = "xdp_cpumap", |
123 | | [BPF_XDP] = "xdp", |
124 | | [BPF_SK_REUSEPORT_SELECT] = "sk_reuseport_select", |
125 | | [BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_reuseport_select_or_migrate", |
126 | | [BPF_PERF_EVENT] = "perf_event", |
127 | | [BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi", |
128 | | [BPF_STRUCT_OPS] = "struct_ops", |
129 | | [BPF_NETFILTER] = "netfilter", |
130 | | [BPF_TCX_INGRESS] = "tcx_ingress", |
131 | | [BPF_TCX_EGRESS] = "tcx_egress", |
132 | | [BPF_TRACE_UPROBE_MULTI] = "trace_uprobe_multi", |
133 | | [BPF_NETKIT_PRIMARY] = "netkit_primary", |
134 | | [BPF_NETKIT_PEER] = "netkit_peer", |
135 | | [BPF_TRACE_KPROBE_SESSION] = "trace_kprobe_session", |
136 | | }; |
137 | | |
138 | | static const char * const link_type_name[] = { |
139 | | [BPF_LINK_TYPE_UNSPEC] = "unspec", |
140 | | [BPF_LINK_TYPE_RAW_TRACEPOINT] = "raw_tracepoint", |
141 | | [BPF_LINK_TYPE_TRACING] = "tracing", |
142 | | [BPF_LINK_TYPE_CGROUP] = "cgroup", |
143 | | [BPF_LINK_TYPE_ITER] = "iter", |
144 | | [BPF_LINK_TYPE_NETNS] = "netns", |
145 | | [BPF_LINK_TYPE_XDP] = "xdp", |
146 | | [BPF_LINK_TYPE_PERF_EVENT] = "perf_event", |
147 | | [BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi", |
148 | | [BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops", |
149 | | [BPF_LINK_TYPE_NETFILTER] = "netfilter", |
150 | | [BPF_LINK_TYPE_TCX] = "tcx", |
151 | | [BPF_LINK_TYPE_UPROBE_MULTI] = "uprobe_multi", |
152 | | [BPF_LINK_TYPE_NETKIT] = "netkit", |
153 | | [BPF_LINK_TYPE_SOCKMAP] = "sockmap", |
154 | | }; |
155 | | |
156 | | static const char * const map_type_name[] = { |
157 | | [BPF_MAP_TYPE_UNSPEC] = "unspec", |
158 | | [BPF_MAP_TYPE_HASH] = "hash", |
159 | | [BPF_MAP_TYPE_ARRAY] = "array", |
160 | | [BPF_MAP_TYPE_PROG_ARRAY] = "prog_array", |
161 | | [BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array", |
162 | | [BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash", |
163 | | [BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array", |
164 | | [BPF_MAP_TYPE_STACK_TRACE] = "stack_trace", |
165 | | [BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array", |
166 | | [BPF_MAP_TYPE_LRU_HASH] = "lru_hash", |
167 | | [BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash", |
168 | | [BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie", |
169 | | [BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps", |
170 | | [BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps", |
171 | | [BPF_MAP_TYPE_DEVMAP] = "devmap", |
172 | | [BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash", |
173 | | [BPF_MAP_TYPE_SOCKMAP] = "sockmap", |
174 | | [BPF_MAP_TYPE_CPUMAP] = "cpumap", |
175 | | [BPF_MAP_TYPE_XSKMAP] = "xskmap", |
176 | | [BPF_MAP_TYPE_SOCKHASH] = "sockhash", |
177 | | [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage", |
178 | | [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray", |
179 | | [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage", |
180 | | [BPF_MAP_TYPE_QUEUE] = "queue", |
181 | | [BPF_MAP_TYPE_STACK] = "stack", |
182 | | [BPF_MAP_TYPE_SK_STORAGE] = "sk_storage", |
183 | | [BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops", |
184 | | [BPF_MAP_TYPE_RINGBUF] = "ringbuf", |
185 | | [BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage", |
186 | | [BPF_MAP_TYPE_TASK_STORAGE] = "task_storage", |
187 | | [BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter", |
188 | | [BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf", |
189 | | [BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage", |
190 | | [BPF_MAP_TYPE_ARENA] = "arena", |
191 | | }; |
192 | | |
193 | | static const char * const prog_type_name[] = { |
194 | | [BPF_PROG_TYPE_UNSPEC] = "unspec", |
195 | | [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter", |
196 | | [BPF_PROG_TYPE_KPROBE] = "kprobe", |
197 | | [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls", |
198 | | [BPF_PROG_TYPE_SCHED_ACT] = "sched_act", |
199 | | [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint", |
200 | | [BPF_PROG_TYPE_XDP] = "xdp", |
201 | | [BPF_PROG_TYPE_PERF_EVENT] = "perf_event", |
202 | | [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb", |
203 | | [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock", |
204 | | [BPF_PROG_TYPE_LWT_IN] = "lwt_in", |
205 | | [BPF_PROG_TYPE_LWT_OUT] = "lwt_out", |
206 | | [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit", |
207 | | [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops", |
208 | | [BPF_PROG_TYPE_SK_SKB] = "sk_skb", |
209 | | [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device", |
210 | | [BPF_PROG_TYPE_SK_MSG] = "sk_msg", |
211 | | [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint", |
212 | | [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr", |
213 | | [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local", |
214 | | [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2", |
215 | | [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport", |
216 | | [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector", |
217 | | [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl", |
218 | | [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable", |
219 | | [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt", |
220 | | [BPF_PROG_TYPE_TRACING] = "tracing", |
221 | | [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops", |
222 | | [BPF_PROG_TYPE_EXT] = "ext", |
223 | | [BPF_PROG_TYPE_LSM] = "lsm", |
224 | | [BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup", |
225 | | [BPF_PROG_TYPE_SYSCALL] = "syscall", |
226 | | [BPF_PROG_TYPE_NETFILTER] = "netfilter", |
227 | | }; |
228 | | |
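These designated-initializer tables back libbpf's public stringification helpers (libbpf_bpf_attach_type_str(), libbpf_bpf_link_type_str(), libbpf_bpf_map_type_str(), libbpf_bpf_prog_type_str()), which index into them and return NULL for out-of-range values. A minimal caller-side sketch, assuming a recent libbpf that exports these helpers:

    #include <stdio.h>
    #include <bpf/libbpf.h>

    int main(void)
    {
    	/* Each helper indexes the matching table and returns NULL
    	 * for unknown/out-of-range enum values, so guard the print. */
    	const char *prog = libbpf_bpf_prog_type_str(BPF_PROG_TYPE_XDP);
    	const char *map = libbpf_bpf_map_type_str(BPF_MAP_TYPE_RINGBUF);

    	printf("prog: %s, map: %s\n", prog ? prog : "?", map ? map : "?");
    	return 0;
    }
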
229 | | static int __base_pr(enum libbpf_print_level level, const char *format, |
230 | | va_list args) |
231 | 0 | { |
232 | 0 | const char *env_var = "LIBBPF_LOG_LEVEL"; |
233 | 0 | static enum libbpf_print_level min_level = LIBBPF_INFO; |
234 | 0 | static bool initialized; |
235 | |
236 | 0 | if (!initialized) { |
237 | 0 | char *verbosity; |
238 | |
239 | 0 | initialized = true; |
240 | 0 | verbosity = getenv(env_var); |
241 | 0 | if (verbosity) { |
242 | 0 | if (strcasecmp(verbosity, "warn") == 0) |
243 | 0 | min_level = LIBBPF_WARN; |
244 | 0 | else if (strcasecmp(verbosity, "debug") == 0) |
245 | 0 | min_level = LIBBPF_DEBUG; |
246 | 0 | else if (strcasecmp(verbosity, "info") == 0) |
247 | 0 | min_level = LIBBPF_INFO; |
248 | 0 | else |
249 | 0 | fprintf(stderr, "libbpf: unrecognized '%s' envvar value: '%s', should be one of 'warn', 'debug', or 'info'.\n", |
250 | 0 | env_var, verbosity); |
251 | 0 | } |
252 | 0 | } |
253 | | |
254 | | /* if too verbose, skip logging */ |
255 | 0 | if (level > min_level) |
256 | 0 | return 0; |
257 | | |
258 | 0 | return vfprintf(stderr, format, args); |
259 | 0 | } |
260 | | |
261 | | static libbpf_print_fn_t __libbpf_pr = __base_pr; |
262 | | |
263 | | libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn) |
264 | 11.7k | { |
265 | 11.7k | libbpf_print_fn_t old_print_fn; |
266 | | |
267 | 11.7k | old_print_fn = __atomic_exchange_n(&__libbpf_pr, fn, __ATOMIC_RELAXED); |
268 | | |
269 | 11.7k | return old_print_fn; |
270 | 11.7k | } |
271 | | |
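Typical use of libbpf_set_print(): install a filtering callback and restore the previous one when done. Note that the LIBBPF_LOG_LEVEL environment variable parsed in __base_pr() above affects only the default printer, not custom callbacks. A minimal sketch:

    #include <stdarg.h>
    #include <stdio.h>
    #include <bpf/libbpf.h>

    static int my_print(enum libbpf_print_level level, const char *fmt, va_list args)
    {
    	if (level == LIBBPF_DEBUG)
    		return 0;	/* drop debug-level chatter */
    	return vfprintf(stderr, fmt, args);
    }

    void setup_logging(void)
    {
    	libbpf_print_fn_t prev = libbpf_set_print(my_print);

    	/* ... open/load BPF objects; messages route through my_print() ... */
    	libbpf_set_print(prev);	/* restore the previous printer */
    }
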
272 | | __printf(2, 3) |
273 | | void libbpf_print(enum libbpf_print_level level, const char *format, ...) |
274 | 127k | { |
275 | 127k | va_list args; |
276 | 127k | int old_errno; |
277 | 127k | libbpf_print_fn_t print_fn; |
278 | | |
279 | 127k | print_fn = __atomic_load_n(&__libbpf_pr, __ATOMIC_RELAXED); |
280 | 127k | if (!print_fn) |
281 | 0 | return; |
282 | | |
283 | 127k | old_errno = errno; |
284 | | |
285 | 127k | va_start(args, format); |
286 | 127k | __libbpf_pr(level, format, args); |
287 | 127k | va_end(args); |
288 | | |
289 | 127k | errno = old_errno; |
290 | 127k | } |
291 | | |
292 | | static void pr_perm_msg(int err) |
293 | 0 | { |
294 | 0 | struct rlimit limit; |
295 | 0 | char buf[100]; |
296 | |
297 | 0 | if (err != -EPERM || geteuid() != 0) |
298 | 0 | return; |
299 | | |
300 | 0 | err = getrlimit(RLIMIT_MEMLOCK, &limit); |
301 | 0 | if (err) |
302 | 0 | return; |
303 | | |
304 | 0 | if (limit.rlim_cur == RLIM_INFINITY) |
305 | 0 | return; |
306 | | |
307 | 0 | if (limit.rlim_cur < 1024) |
308 | 0 | snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur); |
309 | 0 | else if (limit.rlim_cur < 1024*1024) |
310 | 0 | snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024); |
311 | 0 | else |
312 | 0 | snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024)); |
313 | |
314 | 0 | pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n", |
315 | 0 | buf); |
316 | 0 | } |
317 | | |
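The EPERM hint above is mostly relevant on pre-5.11 kernels, where BPF map and program memory is charged against RLIMIT_MEMLOCK rather than memcg. A sketch of the conventional workaround in loader code, assuming sufficient privileges:

    #include <sys/resource.h>

    static int bump_memlock_rlimit(void)
    {
    	struct rlimit rlim = {
    		.rlim_cur = RLIM_INFINITY,
    		.rlim_max = RLIM_INFINITY,
    	};

    	/* Lift the 'ulimit -l' cap that pr_perm_msg() suggests raising. */
    	return setrlimit(RLIMIT_MEMLOCK, &rlim);
    }
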
318 | | #define STRERR_BUFSIZE 128 |
319 | | |
320 | | /* Copied from tools/perf/util/util.h */ |
321 | | #ifndef zfree |
322 | 207k | # define zfree(ptr) ({ free(*ptr); *ptr = NULL; }) |
323 | | #endif |
324 | | |
325 | | #ifndef zclose |
326 | 32.8k | # define zclose(fd) ({ \ |
327 | 32.8k | int ___err = 0; \ |
328 | 32.8k | if ((fd) >= 0) \ |
329 | 32.8k | ___err = close((fd)); \ |
330 | 32.8k | fd = -1; \ |
331 | 32.8k | ___err; }) |
332 | | #endif |
333 | | |
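zfree() and zclose() are idempotent cleanup helpers: they release the resource and reset the handle, so a repeat call is harmless. An illustrative sketch, mirroring how bpf_program__exit() later in this file uses them:

    static void prog_cleanup_sketch(struct bpf_program *prog)
    {
    	zclose(prog->fd);	/* closes only if fd >= 0, then resets to -1  */
    	zfree(&prog->name);	/* free() then NULL; a second call is a no-op */
    }
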
334 | | static inline __u64 ptr_to_u64(const void *ptr) |
335 | 0 | { |
336 | 0 | return (__u64) (unsigned long) ptr; |
337 | 0 | } |
338 | | |
339 | | int libbpf_set_strict_mode(enum libbpf_strict_mode mode) |
340 | 0 | { |
341 | | /* as of v1.0 libbpf_set_strict_mode() is a no-op */ |
342 | 0 | return 0; |
343 | 0 | } |
344 | | |
345 | | __u32 libbpf_major_version(void) |
346 | 0 | { |
347 | 0 | return LIBBPF_MAJOR_VERSION; |
348 | 0 | } |
349 | | |
350 | | __u32 libbpf_minor_version(void) |
351 | 0 | { |
352 | 0 | return LIBBPF_MINOR_VERSION; |
353 | 0 | } |
354 | | |
355 | | const char *libbpf_version_string(void) |
356 | 0 | { |
357 | 0 | #define __S(X) #X |
358 | 0 | #define _S(X) __S(X) |
359 | 0 | return "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION); |
360 | 0 | #undef _S |
361 | 0 | #undef __S |
362 | 0 | } |
363 | | |
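The _S()/__S() pair above is the standard two-level stringification idiom: the inner macro expands its argument first, then # stringifies the expansion. Without the indirection, the macro name itself would be stringified. A self-contained illustration:

    #define __S(X) #X
    #define _S(X)  __S(X)
    #define VER    7

    /* _S(VER)  expands to __S(7), then to "7"  (expand, then stringify) */
    /* __S(VER) expands directly to "VER"       (stringify, no expand)   */
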
364 | | enum reloc_type { |
365 | | RELO_LD64, |
366 | | RELO_CALL, |
367 | | RELO_DATA, |
368 | | RELO_EXTERN_LD64, |
369 | | RELO_EXTERN_CALL, |
370 | | RELO_SUBPROG_ADDR, |
371 | | RELO_CORE, |
372 | | }; |
373 | | |
374 | | struct reloc_desc { |
375 | | enum reloc_type type; |
376 | | int insn_idx; |
377 | | union { |
378 | | const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */ |
379 | | struct { |
380 | | int map_idx; |
381 | | int sym_off; |
382 | | int ext_idx; |
383 | | }; |
384 | | }; |
385 | | }; |
386 | | |
387 | | /* stored as sec_def->cookie for all libbpf-supported SEC()s */ |
388 | | enum sec_def_flags { |
389 | | SEC_NONE = 0, |
390 | | /* expected_attach_type is optional if the kernel doesn't support it */ |
391 | | SEC_EXP_ATTACH_OPT = 1, |
392 | | /* legacy, only used by libbpf_get_type_names() and |
393 | | * libbpf_attach_type_by_name(), not used by libbpf itself at all. |
394 | | * This used to be associated with cgroup (and a few other) BPF programs |
395 | | * that were attachable through the BPF_PROG_ATTACH command. Pretty |
396 | | * meaningless nowadays, though. |
397 | | */ |
398 | | SEC_ATTACHABLE = 2, |
399 | | SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT, |
400 | | /* attachment target is specified through BTF ID in either kernel or |
401 | | * other BPF program's BTF object |
402 | | */ |
403 | | SEC_ATTACH_BTF = 4, |
404 | | /* BPF program type allows sleeping/blocking in kernel */ |
405 | | SEC_SLEEPABLE = 8, |
406 | | /* BPF program supports non-linear XDP buffers */ |
407 | | SEC_XDP_FRAGS = 16, |
408 | | /* Set up the proper attach type for USDT probes. */ |
409 | | SEC_USDT = 32, |
410 | | }; |
411 | | |
412 | | struct bpf_sec_def { |
413 | | char *sec; |
414 | | enum bpf_prog_type prog_type; |
415 | | enum bpf_attach_type expected_attach_type; |
416 | | long cookie; |
417 | | int handler_id; |
418 | | |
419 | | libbpf_prog_setup_fn_t prog_setup_fn; |
420 | | libbpf_prog_prepare_load_fn_t prog_prepare_load_fn; |
421 | | libbpf_prog_attach_fn_t prog_attach_fn; |
422 | | }; |
423 | | |
424 | | /* |
425 | | * bpf_prog should be a better name but it has been used in |
426 | | * linux/filter.h. |
427 | | */ |
428 | | struct bpf_program { |
429 | | char *name; |
430 | | char *sec_name; |
431 | | size_t sec_idx; |
432 | | const struct bpf_sec_def *sec_def; |
433 | | /* this program's instruction offset (in number of instructions) |
434 | | * within its containing ELF section |
435 | | */ |
436 | | size_t sec_insn_off; |
437 | | /* number of original instructions in ELF section belonging to this |
438 | | * program, not taking into account subprogram instructions possibly |
439 | | * appended later during relocation |
440 | | */ |
441 | | size_t sec_insn_cnt; |
442 | | /* Offset (in number of instructions) of the start of instructions |
443 | | * belonging to this BPF program within its containing main BPF |
444 | | * program. For the entry-point (main) BPF program, this is always |
445 | | * zero. For a sub-program, this gets reset before each main BPF |
446 | | * program is processed and relocated, and is used to determine |
447 | | * whether the sub-program was already appended to the main program, |
448 | | * and if so, at which instruction offset. |
449 | | */ |
450 | | size_t sub_insn_off; |
451 | | |
452 | | /* instructions that belong to BPF program; insns[0] is located at |
453 | | * sec_insn_off instruction within its ELF section in ELF file, so |
454 | | * when mapping ELF file instruction index to the local instruction, |
455 | | * one needs to subtract sec_insn_off; and vice versa. |
456 | | */ |
457 | | struct bpf_insn *insns; |
458 | | /* actual number of instructions in this BPF program's image; for |
459 | | * entry-point BPF programs this includes the size of main program |
460 | | * itself plus all the used sub-programs, appended at the end |
461 | | */ |
462 | | size_t insns_cnt; |
463 | | |
464 | | struct reloc_desc *reloc_desc; |
465 | | int nr_reloc; |
466 | | |
467 | | /* BPF verifier log settings */ |
468 | | char *log_buf; |
469 | | size_t log_size; |
470 | | __u32 log_level; |
471 | | |
472 | | struct bpf_object *obj; |
473 | | |
474 | | int fd; |
475 | | bool autoload; |
476 | | bool autoattach; |
477 | | bool sym_global; |
478 | | bool mark_btf_static; |
479 | | enum bpf_prog_type type; |
480 | | enum bpf_attach_type expected_attach_type; |
481 | | int exception_cb_idx; |
482 | | |
483 | | int prog_ifindex; |
484 | | __u32 attach_btf_obj_fd; |
485 | | __u32 attach_btf_id; |
486 | | __u32 attach_prog_fd; |
487 | | |
488 | | void *func_info; |
489 | | __u32 func_info_rec_size; |
490 | | __u32 func_info_cnt; |
491 | | |
492 | | void *line_info; |
493 | | __u32 line_info_rec_size; |
494 | | __u32 line_info_cnt; |
495 | | __u32 prog_flags; |
496 | | }; |
497 | | |
498 | | struct bpf_struct_ops { |
499 | | struct bpf_program **progs; |
500 | | __u32 *kern_func_off; |
501 | | /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */ |
502 | | void *data; |
503 | | /* e.g. struct bpf_struct_ops_tcp_congestion_ops in |
504 | | * btf_vmlinux's format. |
505 | | * struct bpf_struct_ops_tcp_congestion_ops { |
506 | | * [... some other kernel fields ...] |
507 | | * struct tcp_congestion_ops data; |
508 | | * } |
509 | | * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops) |
510 | | * bpf_map__init_kern_struct_ops() will populate the "kern_vdata" |
511 | | * from "data". |
512 | | */ |
513 | | void *kern_vdata; |
514 | | __u32 type_id; |
515 | | }; |
516 | | |
517 | 2.40k | #define DATA_SEC ".data" |
518 | 2.08k | #define BSS_SEC ".bss" |
519 | 1.74k | #define RODATA_SEC ".rodata" |
520 | 8.00k | #define KCONFIG_SEC ".kconfig" |
521 | 10.4k | #define KSYMS_SEC ".ksyms" |
522 | 4.12k | #define STRUCT_OPS_SEC ".struct_ops" |
523 | 3.83k | #define STRUCT_OPS_LINK_SEC ".struct_ops.link" |
524 | 806 | #define ARENA_SEC ".addr_space.1" |
525 | | |
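On the BPF-object side, these section names are emitted by the compiler for global variables, or set explicitly via SEC() from <bpf/bpf_helpers.h>. A minimal sketch of what lands where:

    #include <bpf/bpf_helpers.h>

    int zeroed_counter;		/* .bss: zero-initialized globals */
    int start_value = 42;	/* .data: initialized globals     */
    const int max_cfg = 10;	/* .rodata: read-only constants   */

    char LICENSE[] SEC("license") = "GPL";
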
526 | | enum libbpf_map_type { |
527 | | LIBBPF_MAP_UNSPEC, |
528 | | LIBBPF_MAP_DATA, |
529 | | LIBBPF_MAP_BSS, |
530 | | LIBBPF_MAP_RODATA, |
531 | | LIBBPF_MAP_KCONFIG, |
532 | | }; |
533 | | |
534 | | struct bpf_map_def { |
535 | | unsigned int type; |
536 | | unsigned int key_size; |
537 | | unsigned int value_size; |
538 | | unsigned int max_entries; |
539 | | unsigned int map_flags; |
540 | | }; |
541 | | |
542 | | struct bpf_map { |
543 | | struct bpf_object *obj; |
544 | | char *name; |
545 | | /* real_name is defined for special internal maps (.rodata*, |
546 | | * .data*, .bss, .kconfig) and preserves their original ELF section |
547 | | * name. This is important to be able to find corresponding BTF |
548 | | * DATASEC information. |
549 | | */ |
550 | | char *real_name; |
551 | | int fd; |
552 | | int sec_idx; |
553 | | size_t sec_offset; |
554 | | int map_ifindex; |
555 | | int inner_map_fd; |
556 | | struct bpf_map_def def; |
557 | | __u32 numa_node; |
558 | | __u32 btf_var_idx; |
559 | | int mod_btf_fd; |
560 | | __u32 btf_key_type_id; |
561 | | __u32 btf_value_type_id; |
562 | | __u32 btf_vmlinux_value_type_id; |
563 | | enum libbpf_map_type libbpf_type; |
564 | | void *mmaped; |
565 | | struct bpf_struct_ops *st_ops; |
566 | | struct bpf_map *inner_map; |
567 | | void **init_slots; |
568 | | int init_slots_sz; |
569 | | char *pin_path; |
570 | | bool pinned; |
571 | | bool reused; |
572 | | bool autocreate; |
573 | | bool autoattach; |
574 | | __u64 map_extra; |
575 | | }; |
576 | | |
577 | | enum extern_type { |
578 | | EXT_UNKNOWN, |
579 | | EXT_KCFG, |
580 | | EXT_KSYM, |
581 | | }; |
582 | | |
583 | | enum kcfg_type { |
584 | | KCFG_UNKNOWN, |
585 | | KCFG_CHAR, |
586 | | KCFG_BOOL, |
587 | | KCFG_INT, |
588 | | KCFG_TRISTATE, |
589 | | KCFG_CHAR_ARR, |
590 | | }; |
591 | | |
592 | | struct extern_desc { |
593 | | enum extern_type type; |
594 | | int sym_idx; |
595 | | int btf_id; |
596 | | int sec_btf_id; |
597 | | const char *name; |
598 | | char *essent_name; |
599 | | bool is_set; |
600 | | bool is_weak; |
601 | | union { |
602 | | struct { |
603 | | enum kcfg_type type; |
604 | | int sz; |
605 | | int align; |
606 | | int data_off; |
607 | | bool is_signed; |
608 | | } kcfg; |
609 | | struct { |
610 | | unsigned long long addr; |
611 | | |
612 | | /* target btf_id of the corresponding kernel var. */ |
613 | | int kernel_btf_obj_fd; |
614 | | int kernel_btf_id; |
615 | | |
616 | | /* local btf_id of the ksym extern's type. */ |
617 | | __u32 type_id; |
618 | | /* BTF fd index to be patched in for insn->off, this is |
619 | | * 0 for vmlinux BTF, index in obj->fd_array for module |
620 | | * BTF |
621 | | */ |
622 | | __s16 btf_fd_idx; |
623 | | } ksym; |
624 | | }; |
625 | | }; |
626 | | |
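These descriptors are populated from BPF-side extern declarations; the __kconfig and __ksym attributes come from <bpf/bpf_helpers.h>. In the sketch below, LINUX_KERNEL_VERSION is a libbpf-provided virtual kconfig value, and bpf_prog_active is a real kernel symbol used purely as an illustration:

    #include <bpf/bpf_helpers.h>

    extern unsigned int LINUX_KERNEL_VERSION __kconfig;	/* EXT_KCFG          */
    extern int CONFIG_HZ __kconfig __weak;		/* EXT_KCFG, is_weak */
    extern const void bpf_prog_active __ksym;		/* EXT_KSYM          */
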
627 | | struct module_btf { |
628 | | struct btf *btf; |
629 | | char *name; |
630 | | __u32 id; |
631 | | int fd; |
632 | | int fd_array_idx; |
633 | | }; |
634 | | |
635 | | enum sec_type { |
636 | | SEC_UNUSED = 0, |
637 | | SEC_RELO, |
638 | | SEC_BSS, |
639 | | SEC_DATA, |
640 | | SEC_RODATA, |
641 | | SEC_ST_OPS, |
642 | | }; |
643 | | |
644 | | struct elf_sec_desc { |
645 | | enum sec_type sec_type; |
646 | | Elf64_Shdr *shdr; |
647 | | Elf_Data *data; |
648 | | }; |
649 | | |
650 | | struct elf_state { |
651 | | int fd; |
652 | | const void *obj_buf; |
653 | | size_t obj_buf_sz; |
654 | | Elf *elf; |
655 | | Elf64_Ehdr *ehdr; |
656 | | Elf_Data *symbols; |
657 | | Elf_Data *arena_data; |
658 | | size_t shstrndx; /* section index for section name strings */ |
659 | | size_t strtabidx; |
660 | | struct elf_sec_desc *secs; |
661 | | size_t sec_cnt; |
662 | | int btf_maps_shndx; |
663 | | __u32 btf_maps_sec_btf_id; |
664 | | int text_shndx; |
665 | | int symbols_shndx; |
666 | | bool has_st_ops; |
667 | | int arena_data_shndx; |
668 | | }; |
669 | | |
670 | | struct usdt_manager; |
671 | | |
672 | | struct bpf_object { |
673 | | char name[BPF_OBJ_NAME_LEN]; |
674 | | char license[64]; |
675 | | __u32 kern_version; |
676 | | |
677 | | struct bpf_program *programs; |
678 | | size_t nr_programs; |
679 | | struct bpf_map *maps; |
680 | | size_t nr_maps; |
681 | | size_t maps_cap; |
682 | | |
683 | | char *kconfig; |
684 | | struct extern_desc *externs; |
685 | | int nr_extern; |
686 | | int kconfig_map_idx; |
687 | | |
688 | | bool loaded; |
689 | | bool has_subcalls; |
690 | | bool has_rodata; |
691 | | |
692 | | struct bpf_gen *gen_loader; |
693 | | |
694 | | /* Information used when doing ELF-related work. Only valid if efile.elf is not NULL */ |
695 | | struct elf_state efile; |
696 | | |
697 | | struct btf *btf; |
698 | | struct btf_ext *btf_ext; |
699 | | |
700 | | /* Parse and load BTF vmlinux if any of the programs in the object need |
701 | | * it at load time. |
702 | | */ |
703 | | struct btf *btf_vmlinux; |
704 | | /* Path to the custom BTF to be used for BPF CO-RE relocations as an |
705 | | * override for vmlinux BTF. |
706 | | */ |
707 | | char *btf_custom_path; |
708 | | /* vmlinux BTF override for CO-RE relocations */ |
709 | | struct btf *btf_vmlinux_override; |
710 | | /* Lazily initialized kernel module BTFs */ |
711 | | struct module_btf *btf_modules; |
712 | | bool btf_modules_loaded; |
713 | | size_t btf_module_cnt; |
714 | | size_t btf_module_cap; |
715 | | |
716 | | /* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */ |
717 | | char *log_buf; |
718 | | size_t log_size; |
719 | | __u32 log_level; |
720 | | |
721 | | int *fd_array; |
722 | | size_t fd_array_cap; |
723 | | size_t fd_array_cnt; |
724 | | |
725 | | struct usdt_manager *usdt_man; |
726 | | |
727 | | struct bpf_map *arena_map; |
728 | | void *arena_data; |
729 | | size_t arena_data_sz; |
730 | | |
731 | | struct kern_feature_cache *feat_cache; |
732 | | char *token_path; |
733 | | int token_fd; |
734 | | |
735 | | char path[]; |
736 | | }; |
737 | | |
738 | | static const char *elf_sym_str(const struct bpf_object *obj, size_t off); |
739 | | static const char *elf_sec_str(const struct bpf_object *obj, size_t off); |
740 | | static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx); |
741 | | static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name); |
742 | | static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn); |
743 | | static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn); |
744 | | static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn); |
745 | | static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx); |
746 | | static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx); |
747 | | |
748 | | void bpf_program__unload(struct bpf_program *prog) |
749 | 18.3k | { |
750 | 18.3k | if (!prog) |
751 | 0 | return; |
752 | | |
753 | 18.3k | zclose(prog->fd); |
754 | | |
755 | 18.3k | zfree(&prog->func_info); |
756 | 18.3k | zfree(&prog->line_info); |
757 | 18.3k | } |
758 | | |
759 | | static void bpf_program__exit(struct bpf_program *prog) |
760 | 9.17k | { |
761 | 9.17k | if (!prog) |
762 | 0 | return; |
763 | | |
764 | 9.17k | bpf_program__unload(prog); |
765 | 9.17k | zfree(&prog->name); |
766 | 9.17k | zfree(&prog->sec_name); |
767 | 9.17k | zfree(&prog->insns); |
768 | 9.17k | zfree(&prog->reloc_desc); |
769 | | |
770 | 9.17k | prog->nr_reloc = 0; |
771 | 9.17k | prog->insns_cnt = 0; |
772 | 9.17k | prog->sec_idx = -1; |
773 | 9.17k | } |
774 | | |
775 | | static bool insn_is_subprog_call(const struct bpf_insn *insn) |
776 | 0 | { |
777 | 0 | return BPF_CLASS(insn->code) == BPF_JMP && |
778 | 0 | BPF_OP(insn->code) == BPF_CALL && |
779 | 0 | BPF_SRC(insn->code) == BPF_K && |
780 | 0 | insn->src_reg == BPF_PSEUDO_CALL && |
781 | 0 | insn->dst_reg == 0 && |
782 | 0 | insn->off == 0; |
783 | 0 | } |
784 | | |
785 | | static bool is_call_insn(const struct bpf_insn *insn) |
786 | 3.40k | { |
787 | 3.40k | return insn->code == (BPF_JMP | BPF_CALL); |
788 | 3.40k | } |
789 | | |
790 | | static bool insn_is_pseudo_func(struct bpf_insn *insn) |
791 | 0 | { |
792 | 0 | return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC; |
793 | 0 | } |
794 | | |
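For reference, the encoding these predicates test: a subprogram call is BPF_JMP | BPF_CALL with src_reg == BPF_PSEUDO_CALL and an instruction-relative target in imm. A hypothetical instruction that insn_is_subprog_call() would accept:

    #include <linux/bpf.h>

    static const struct bpf_insn example_subprog_call = {
    	.code    = BPF_JMP | BPF_CALL,
    	.dst_reg = 0,
    	.src_reg = BPF_PSEUDO_CALL,	/* subprog call, not a helper call  */
    	.off     = 0,
    	.imm     = 2,			/* callee 2 insns ahead (made up)   */
    };
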
795 | | static int |
796 | | bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog, |
797 | | const char *name, size_t sec_idx, const char *sec_name, |
798 | | size_t sec_off, void *insn_data, size_t insn_data_sz) |
799 | 9.25k | { |
800 | 9.25k | if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) { |
801 | 75 | pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n", |
802 | 75 | sec_name, name, sec_off, insn_data_sz); |
803 | 75 | return -EINVAL; |
804 | 75 | } |
805 | | |
806 | 9.17k | memset(prog, 0, sizeof(*prog)); |
807 | 9.17k | prog->obj = obj; |
808 | | |
809 | 9.17k | prog->sec_idx = sec_idx; |
810 | 9.17k | prog->sec_insn_off = sec_off / BPF_INSN_SZ; |
811 | 9.17k | prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ; |
812 | | /* insns_cnt can later be increased by appending used subprograms */ |
813 | 9.17k | prog->insns_cnt = prog->sec_insn_cnt; |
814 | | |
815 | 9.17k | prog->type = BPF_PROG_TYPE_UNSPEC; |
816 | 9.17k | prog->fd = -1; |
817 | 9.17k | prog->exception_cb_idx = -1; |
818 | | |
819 | | /* libbpf's convention for SEC("?abc...") is that it's just like |
820 | | * SEC("abc...") but the corresponding bpf_program starts out with |
821 | | * autoload set to false. |
822 | | */ |
823 | 9.17k | if (sec_name[0] == '?') { |
824 | 548 | prog->autoload = false; |
825 | | /* from now on forget there was ? in section name */ |
826 | 548 | sec_name++; |
827 | 8.62k | } else { |
828 | 8.62k | prog->autoload = true; |
829 | 8.62k | } |
830 | | |
831 | 9.17k | prog->autoattach = true; |
832 | | |
833 | | /* inherit object's log_level */ |
834 | 9.17k | prog->log_level = obj->log_level; |
835 | | |
836 | 9.17k | prog->sec_name = strdup(sec_name); |
837 | 9.17k | if (!prog->sec_name) |
838 | 0 | goto errout; |
839 | | |
840 | 9.17k | prog->name = strdup(name); |
841 | 9.17k | if (!prog->name) |
842 | 0 | goto errout; |
843 | | |
844 | 9.17k | prog->insns = malloc(insn_data_sz); |
845 | 9.17k | if (!prog->insns) |
846 | 24 | goto errout; |
847 | 9.15k | memcpy(prog->insns, insn_data, insn_data_sz); |
848 | | |
849 | 9.15k | return 0; |
850 | 24 | errout: |
851 | 24 | pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name); |
852 | 24 | bpf_program__exit(prog); |
853 | 24 | return -ENOMEM; |
854 | 9.17k | } |
855 | | |
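Userspace counterpart of the SEC("?...") convention handled above: a program declared as, say, SEC("?xdp") opens with autoload off and can be opted back in before load. A sketch with a hypothetical program name:

    #include <errno.h>
    #include <bpf/libbpf.h>

    static int enable_optional_prog(struct bpf_object *obj)
    {
    	struct bpf_program *prog;

    	prog = bpf_object__find_program_by_name(obj, "my_optional_prog");
    	if (!prog)
    		return -ESRCH;
    	return bpf_program__set_autoload(prog, true);
    }
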
856 | | static int |
857 | | bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data, |
858 | | const char *sec_name, int sec_idx) |
859 | 1.22k | { |
860 | 1.22k | Elf_Data *symbols = obj->efile.symbols; |
861 | 1.22k | struct bpf_program *prog, *progs; |
862 | 1.22k | void *data = sec_data->d_buf; |
863 | 1.22k | size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms; |
864 | 1.22k | int nr_progs, err, i; |
865 | 1.22k | const char *name; |
866 | 1.22k | Elf64_Sym *sym; |
867 | | |
868 | 1.22k | progs = obj->programs; |
869 | 1.22k | nr_progs = obj->nr_programs; |
870 | 1.22k | nr_syms = symbols->d_size / sizeof(Elf64_Sym); |
871 | | |
872 | 184k | for (i = 0; i < nr_syms; i++) { |
873 | 183k | sym = elf_sym_by_idx(obj, i); |
874 | | |
875 | 183k | if (sym->st_shndx != sec_idx) |
876 | 172k | continue; |
877 | 11.0k | if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC) |
878 | 1.57k | continue; |
879 | | |
880 | 9.43k | prog_sz = sym->st_size; |
881 | 9.43k | sec_off = sym->st_value; |
882 | | |
883 | 9.43k | name = elf_sym_str(obj, sym->st_name); |
884 | 9.43k | if (!name) { |
885 | 60 | pr_warn("sec '%s': failed to get symbol name for offset %zu\n", |
886 | 60 | sec_name, sec_off); |
887 | 60 | return -LIBBPF_ERRNO__FORMAT; |
888 | 60 | } |
889 | | |
890 | 9.37k | if (sec_off + prog_sz > sec_sz) { |
891 | 120 | pr_warn("sec '%s': program at offset %zu crosses section boundary\n", |
892 | 120 | sec_name, sec_off); |
893 | 120 | return -LIBBPF_ERRNO__FORMAT; |
894 | 120 | } |
895 | | |
896 | 9.25k | if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) { |
897 | 1 | pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name); |
898 | 1 | return -ENOTSUP; |
899 | 1 | } |
900 | | |
901 | 9.25k | pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n", |
902 | 18.5k | sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz); |
903 | | |
904 | 9.25k | progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs)); |
905 | 9.25k | if (!progs) { |
906 | | /* |
907 | | * In this case the original obj->programs |
908 | | * is still valid, so no special treatment is needed in |
909 | | * bpf_object__close(). |
910 | | */ |
911 | 0 | pr_warn("sec '%s': failed to alloc memory for new program '%s'\n", |
912 | 0 | sec_name, name); |
913 | 0 | return -ENOMEM; |
914 | 0 | } |
915 | 9.25k | obj->programs = progs; |
916 | | |
917 | 9.25k | prog = &progs[nr_progs]; |
918 | | |
919 | 9.25k | err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name, |
920 | 9.25k | sec_off, data + sec_off, prog_sz); |
921 | 9.25k | if (err) |
922 | 99 | return err; |
923 | | |
924 | 9.15k | if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL) |
925 | 8.99k | prog->sym_global = true; |
926 | | |
927 | | /* if the function is a global/weak symbol, but has restricted |
928 | | * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC |
929 | | * as static to enable more permissive BPF verification mode |
930 | | * with more outside context available to the BPF verifier |
931 | | */ |
932 | 9.15k | if (prog->sym_global && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN |
933 | 8.99k | || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)) |
934 | 6.63k | prog->mark_btf_static = true; |
935 | | |
936 | 9.15k | nr_progs++; |
937 | 9.15k | obj->nr_programs = nr_progs; |
938 | 9.15k | } |
939 | | |
940 | 941 | return 0; |
941 | 1.22k | } |
942 | | |
943 | | static const struct btf_member * |
944 | | find_member_by_offset(const struct btf_type *t, __u32 bit_offset) |
945 | 0 | { |
946 | 0 | struct btf_member *m; |
947 | 0 | int i; |
948 | |
|
949 | 0 | for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) { |
950 | 0 | if (btf_member_bit_offset(t, i) == bit_offset) |
951 | 0 | return m; |
952 | 0 | } |
953 | | |
954 | 0 | return NULL; |
955 | 0 | } |
956 | | |
957 | | static const struct btf_member * |
958 | | find_member_by_name(const struct btf *btf, const struct btf_type *t, |
959 | | const char *name) |
960 | 0 | { |
961 | 0 | struct btf_member *m; |
962 | 0 | int i; |
963 | |
964 | 0 | for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) { |
965 | 0 | if (!strcmp(btf__name_by_offset(btf, m->name_off), name)) |
966 | 0 | return m; |
967 | 0 | } |
968 | | |
969 | 0 | return NULL; |
970 | 0 | } |
971 | | |
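The same lookup pattern is available to libbpf users through the public helpers in <bpf/btf.h>: btf_members(), btf_vlen(), btf_member_bit_offset() and btf__name_by_offset(). A sketch that dumps a struct's members:

    #include <stdio.h>
    #include <bpf/btf.h>

    static void dump_members(const struct btf *btf, const struct btf_type *t)
    {
    	const struct btf_member *m = btf_members(t);
    	int i, n = btf_vlen(t);

    	for (i = 0; i < n; i++, m++)
    		printf("%s: bit offset %u\n",
    		       btf__name_by_offset(btf, m->name_off),
    		       btf_member_bit_offset(t, i));
    }
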
972 | | static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, |
973 | | __u16 kind, struct btf **res_btf, |
974 | | struct module_btf **res_mod_btf); |
975 | | |
976 | 0 | #define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_" |
977 | | static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, |
978 | | const char *name, __u32 kind); |
979 | | |
980 | | static int |
981 | | find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw, |
982 | | struct module_btf **mod_btf, |
983 | | const struct btf_type **type, __u32 *type_id, |
984 | | const struct btf_type **vtype, __u32 *vtype_id, |
985 | | const struct btf_member **data_member) |
986 | 0 | { |
987 | 0 | const struct btf_type *kern_type, *kern_vtype; |
988 | 0 | const struct btf_member *kern_data_member; |
989 | 0 | struct btf *btf; |
990 | 0 | __s32 kern_vtype_id, kern_type_id; |
991 | 0 | char tname[256]; |
992 | 0 | __u32 i; |
993 | |
994 | 0 | snprintf(tname, sizeof(tname), "%.*s", |
995 | 0 | (int)bpf_core_essential_name_len(tname_raw), tname_raw); |
996 | |
997 | 0 | kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT, |
998 | 0 | &btf, mod_btf); |
999 | 0 | if (kern_type_id < 0) { |
1000 | 0 | pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", |
1001 | 0 | tname); |
1002 | 0 | return kern_type_id; |
1003 | 0 | } |
1004 | 0 | kern_type = btf__type_by_id(btf, kern_type_id); |
1005 | | |
1006 | | /* Find the corresponding "map_value" type that will be used |
1007 | | * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example, |
1008 | | * find "struct bpf_struct_ops_tcp_congestion_ops" from the |
1009 | | * btf_vmlinux. |
1010 | | */ |
1011 | 0 | kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX, |
1012 | 0 | tname, BTF_KIND_STRUCT); |
1013 | 0 | if (kern_vtype_id < 0) { |
1014 | 0 | pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n", |
1015 | 0 | STRUCT_OPS_VALUE_PREFIX, tname); |
1016 | 0 | return kern_vtype_id; |
1017 | 0 | } |
1018 | 0 | kern_vtype = btf__type_by_id(btf, kern_vtype_id); |
1019 | | |
1020 | | /* Find "struct tcp_congestion_ops" from |
1021 | | * struct bpf_struct_ops_tcp_congestion_ops { |
1022 | | * [ ... ] |
1023 | | * struct tcp_congestion_ops data; |
1024 | | * } |
1025 | | */ |
1026 | 0 | kern_data_member = btf_members(kern_vtype); |
1027 | 0 | for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) { |
1028 | 0 | if (kern_data_member->type == kern_type_id) |
1029 | 0 | break; |
1030 | 0 | } |
1031 | 0 | if (i == btf_vlen(kern_vtype)) { |
1032 | 0 | pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n", |
1033 | 0 | tname, STRUCT_OPS_VALUE_PREFIX, tname); |
1034 | 0 | return -EINVAL; |
1035 | 0 | } |
1036 | | |
1037 | 0 | *type = kern_type; |
1038 | 0 | *type_id = kern_type_id; |
1039 | 0 | *vtype = kern_vtype; |
1040 | 0 | *vtype_id = kern_vtype_id; |
1041 | 0 | *data_member = kern_data_member; |
1042 | |
1043 | 0 | return 0; |
1044 | 0 | } |
1045 | | |
1046 | | static bool bpf_map__is_struct_ops(const struct bpf_map *map) |
1047 | 326 | { |
1048 | 326 | return map->def.type == BPF_MAP_TYPE_STRUCT_OPS; |
1049 | 326 | } |
1050 | | |
1051 | | static bool is_valid_st_ops_program(struct bpf_object *obj, |
1052 | | const struct bpf_program *prog) |
1053 | 0 | { |
1054 | 0 | int i; |
1055 | |
1056 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
1057 | 0 | if (&obj->programs[i] == prog) |
1058 | 0 | return prog->type == BPF_PROG_TYPE_STRUCT_OPS; |
1059 | 0 | } |
1060 | | |
1061 | 0 | return false; |
1062 | 0 | } |
1063 | | |
1064 | | /* For each struct_ops program P, referenced from some struct_ops map M, |
1065 | | * enable P.autoload if at least one such M has M.autocreate set, |
1066 | | * disable P.autoload if M.autocreate is false for all such Ms. |
1067 | | * Don't change P.autoload for programs that are not referenced from any maps. |
1068 | | */ |
1069 | | static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj) |
1070 | 0 | { |
1071 | 0 | struct bpf_program *prog, *slot_prog; |
1072 | 0 | struct bpf_map *map; |
1073 | 0 | int i, j, k, vlen; |
1074 | |
1075 | 0 | for (i = 0; i < obj->nr_programs; ++i) { |
1076 | 0 | int should_load = false; |
1077 | 0 | int use_cnt = 0; |
1078 | |
1079 | 0 | prog = &obj->programs[i]; |
1080 | 0 | if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) |
1081 | 0 | continue; |
1082 | | |
1083 | 0 | for (j = 0; j < obj->nr_maps; ++j) { |
1084 | 0 | const struct btf_type *type; |
1085 | |
1086 | 0 | map = &obj->maps[j]; |
1087 | 0 | if (!bpf_map__is_struct_ops(map)) |
1088 | 0 | continue; |
1089 | | |
1090 | 0 | type = btf__type_by_id(obj->btf, map->st_ops->type_id); |
1091 | 0 | vlen = btf_vlen(type); |
1092 | 0 | for (k = 0; k < vlen; ++k) { |
1093 | 0 | slot_prog = map->st_ops->progs[k]; |
1094 | 0 | if (prog != slot_prog) |
1095 | 0 | continue; |
1096 | | |
1097 | 0 | use_cnt++; |
1098 | 0 | if (map->autocreate) |
1099 | 0 | should_load = true; |
1100 | 0 | } |
1101 | 0 | } |
1102 | 0 | if (use_cnt) |
1103 | 0 | prog->autoload = should_load; |
1104 | 0 | } |
1105 | |
1106 | 0 | return 0; |
1107 | 0 | } |
1108 | | |
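Seen from the public API, the rule above means that disabling a struct_ops map's autocreate also disables loading of programs referenced only by that map. A sketch with a hypothetical map name:

    #include <errno.h>
    #include <bpf/libbpf.h>

    static int skip_struct_ops_map(struct bpf_object *obj)
    {
    	struct bpf_map *map;

    	map = bpf_object__find_map_by_name(obj, "my_ops");
    	if (!map)
    		return -ENOENT;
    	/* Programs used solely by "my_ops" will get autoload = false. */
    	return bpf_map__set_autocreate(map, false);
    }
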
1109 | | /* Init the map's fields that depend on kern_btf */ |
1110 | | static int bpf_map__init_kern_struct_ops(struct bpf_map *map) |
1111 | 0 | { |
1112 | 0 | const struct btf_member *member, *kern_member, *kern_data_member; |
1113 | 0 | const struct btf_type *type, *kern_type, *kern_vtype; |
1114 | 0 | __u32 i, kern_type_id, kern_vtype_id, kern_data_off; |
1115 | 0 | struct bpf_object *obj = map->obj; |
1116 | 0 | const struct btf *btf = obj->btf; |
1117 | 0 | struct bpf_struct_ops *st_ops; |
1118 | 0 | const struct btf *kern_btf; |
1119 | 0 | struct module_btf *mod_btf; |
1120 | 0 | void *data, *kern_data; |
1121 | 0 | const char *tname; |
1122 | 0 | int err; |
1123 | |
1124 | 0 | st_ops = map->st_ops; |
1125 | 0 | type = btf__type_by_id(btf, st_ops->type_id); |
1126 | 0 | tname = btf__name_by_offset(btf, type->name_off); |
1127 | 0 | err = find_struct_ops_kern_types(obj, tname, &mod_btf, |
1128 | 0 | &kern_type, &kern_type_id, |
1129 | 0 | &kern_vtype, &kern_vtype_id, |
1130 | 0 | &kern_data_member); |
1131 | 0 | if (err) |
1132 | 0 | return err; |
1133 | | |
1134 | 0 | kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux; |
1135 | |
1136 | 0 | pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n", |
1137 | 0 | map->name, st_ops->type_id, kern_type_id, kern_vtype_id); |
1138 | |
1139 | 0 | map->mod_btf_fd = mod_btf ? mod_btf->fd : -1; |
1140 | 0 | map->def.value_size = kern_vtype->size; |
1141 | 0 | map->btf_vmlinux_value_type_id = kern_vtype_id; |
1142 | |
1143 | 0 | st_ops->kern_vdata = calloc(1, kern_vtype->size); |
1144 | 0 | if (!st_ops->kern_vdata) |
1145 | 0 | return -ENOMEM; |
1146 | | |
1147 | 0 | data = st_ops->data; |
1148 | 0 | kern_data_off = kern_data_member->offset / 8; |
1149 | 0 | kern_data = st_ops->kern_vdata + kern_data_off; |
1150 | |
1151 | 0 | member = btf_members(type); |
1152 | 0 | for (i = 0; i < btf_vlen(type); i++, member++) { |
1153 | 0 | const struct btf_type *mtype, *kern_mtype; |
1154 | 0 | __u32 mtype_id, kern_mtype_id; |
1155 | 0 | void *mdata, *kern_mdata; |
1156 | 0 | struct bpf_program *prog; |
1157 | 0 | __s64 msize, kern_msize; |
1158 | 0 | __u32 moff, kern_moff; |
1159 | 0 | __u32 kern_member_idx; |
1160 | 0 | const char *mname; |
1161 | |
1162 | 0 | mname = btf__name_by_offset(btf, member->name_off); |
1163 | 0 | moff = member->offset / 8; |
1164 | 0 | mdata = data + moff; |
1165 | 0 | msize = btf__resolve_size(btf, member->type); |
1166 | 0 | if (msize < 0) { |
1167 | 0 | pr_warn("struct_ops init_kern %s: failed to resolve the size of member %s\n", |
1168 | 0 | map->name, mname); |
1169 | 0 | return msize; |
1170 | 0 | } |
1171 | | |
1172 | 0 | kern_member = find_member_by_name(kern_btf, kern_type, mname); |
1173 | 0 | if (!kern_member) { |
1174 | 0 | if (!libbpf_is_mem_zeroed(mdata, msize)) { |
1175 | 0 | pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n", |
1176 | 0 | map->name, mname); |
1177 | 0 | return -ENOTSUP; |
1178 | 0 | } |
1179 | | |
1180 | 0 | if (st_ops->progs[i]) { |
1181 | | /* If we had a declaratively set struct_ops callback, we need to |
1182 | | * force its autoload to false, because it doesn't have |
1183 | | * a chance of succeeding from the POV of the current struct_ops map. |
1184 | | * If this program is still referenced somewhere else, though, |
1185 | | * then bpf_object_adjust_struct_ops_autoload() will update its |
1186 | | * autoload accordingly. |
1187 | | */ |
1188 | 0 | st_ops->progs[i]->autoload = false; |
1189 | 0 | st_ops->progs[i] = NULL; |
1190 | 0 | } |
1191 | | |
1192 | | /* Skip all-zero/NULL fields if they are not present in the kernel BTF */ |
1193 | 0 | pr_info("struct_ops %s: member %s not found in kernel, skipping it as it's set to zero\n", |
1194 | 0 | map->name, mname); |
1195 | 0 | continue; |
1196 | 0 | } |
1197 | | |
1198 | 0 | kern_member_idx = kern_member - btf_members(kern_type); |
1199 | 0 | if (btf_member_bitfield_size(type, i) || |
1200 | 0 | btf_member_bitfield_size(kern_type, kern_member_idx)) { |
1201 | 0 | pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n", |
1202 | 0 | map->name, mname); |
1203 | 0 | return -ENOTSUP; |
1204 | 0 | } |
1205 | | |
1206 | 0 | kern_moff = kern_member->offset / 8; |
1207 | 0 | kern_mdata = kern_data + kern_moff; |
1208 | |
1209 | 0 | mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id); |
1210 | 0 | kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type, |
1211 | 0 | &kern_mtype_id); |
1212 | 0 | if (BTF_INFO_KIND(mtype->info) != |
1213 | 0 | BTF_INFO_KIND(kern_mtype->info)) { |
1214 | 0 | pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n", |
1215 | 0 | map->name, mname, BTF_INFO_KIND(mtype->info), |
1216 | 0 | BTF_INFO_KIND(kern_mtype->info)); |
1217 | 0 | return -ENOTSUP; |
1218 | 0 | } |
1219 | | |
1220 | 0 | if (btf_is_ptr(mtype)) { |
1221 | 0 | prog = *(void **)mdata; |
1222 | | /* just like for the !kern_member case above, reset the declaratively |
1223 | | * set (at compile time) program's autoload to false, |
1224 | | * if the user replaced it with another program or NULL |
1225 | | */ |
1226 | 0 | if (st_ops->progs[i] && st_ops->progs[i] != prog) |
1227 | 0 | st_ops->progs[i]->autoload = false; |
1228 | | |
1229 | | /* Update the value from the shadow type */ |
1230 | 0 | st_ops->progs[i] = prog; |
1231 | 0 | if (!prog) |
1232 | 0 | continue; |
1233 | | |
1234 | 0 | if (!is_valid_st_ops_program(obj, prog)) { |
1235 | 0 | pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n", |
1236 | 0 | map->name, mname); |
1237 | 0 | return -ENOTSUP; |
1238 | 0 | } |
1239 | | |
1240 | 0 | kern_mtype = skip_mods_and_typedefs(kern_btf, |
1241 | 0 | kern_mtype->type, |
1242 | 0 | &kern_mtype_id); |
1243 | | |
1244 | | /* mtype->type must be a func_proto which was |
1245 | | * guaranteed in bpf_object__collect_st_ops_relos(), |
1246 | | * so only check kern_mtype for func_proto here. |
1247 | | */ |
1248 | 0 | if (!btf_is_func_proto(kern_mtype)) { |
1249 | 0 | pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n", |
1250 | 0 | map->name, mname); |
1251 | 0 | return -ENOTSUP; |
1252 | 0 | } |
1253 | | |
1254 | 0 | if (mod_btf) |
1255 | 0 | prog->attach_btf_obj_fd = mod_btf->fd; |
1256 | | |
1257 | | /* if we haven't yet processed this BPF program, record proper |
1258 | | * attach_btf_id and member_idx |
1259 | | */ |
1260 | 0 | if (!prog->attach_btf_id) { |
1261 | 0 | prog->attach_btf_id = kern_type_id; |
1262 | 0 | prog->expected_attach_type = kern_member_idx; |
1263 | 0 | } |
1264 | | |
1265 | | /* struct_ops BPF prog can be re-used between multiple |
1266 | | * .struct_ops & .struct_ops.link as long as it's the |
1267 | | * same struct_ops struct definition and the same |
1268 | | * function pointer field |
1269 | | */ |
1270 | 0 | if (prog->attach_btf_id != kern_type_id) { |
1271 | 0 | pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n", |
1272 | 0 | map->name, mname, prog->name, prog->sec_name, prog->type, |
1273 | 0 | prog->attach_btf_id, kern_type_id); |
1274 | 0 | return -EINVAL; |
1275 | 0 | } |
1276 | 0 | if (prog->expected_attach_type != kern_member_idx) { |
1277 | 0 | pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n", |
1278 | 0 | map->name, mname, prog->name, prog->sec_name, prog->type, |
1279 | 0 | prog->expected_attach_type, kern_member_idx); |
1280 | 0 | return -EINVAL; |
1281 | 0 | } |
1282 | | |
1283 | 0 | st_ops->kern_func_off[i] = kern_data_off + kern_moff; |
1284 | |
1285 | 0 | pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n", |
1286 | 0 | map->name, mname, prog->name, moff, |
1287 | 0 | kern_moff); |
1288 | |
1289 | 0 | continue; |
1290 | 0 | } |
1291 | | |
1292 | 0 | kern_msize = btf__resolve_size(kern_btf, kern_mtype_id); |
1293 | 0 | if (kern_msize < 0 || msize != kern_msize) { |
1294 | 0 | pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n", |
1295 | 0 | map->name, mname, (ssize_t)msize, |
1296 | 0 | (ssize_t)kern_msize); |
1297 | 0 | return -ENOTSUP; |
1298 | 0 | } |
1299 | | |
1300 | 0 | pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n", |
1301 | 0 | map->name, mname, (unsigned int)msize, |
1302 | 0 | moff, kern_moff); |
1303 | 0 | memcpy(kern_mdata, mdata, msize); |
1304 | 0 | } |
1305 | | |
1306 | 0 | return 0; |
1307 | 0 | } |
1308 | | |
1309 | | static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj) |
1310 | 0 | { |
1311 | 0 | struct bpf_map *map; |
1312 | 0 | size_t i; |
1313 | 0 | int err; |
1314 | |
1315 | 0 | for (i = 0; i < obj->nr_maps; i++) { |
1316 | 0 | map = &obj->maps[i]; |
1317 | |
1318 | 0 | if (!bpf_map__is_struct_ops(map)) |
1319 | 0 | continue; |
1320 | | |
1321 | 0 | if (!map->autocreate) |
1322 | 0 | continue; |
1323 | | |
1324 | 0 | err = bpf_map__init_kern_struct_ops(map); |
1325 | 0 | if (err) |
1326 | 0 | return err; |
1327 | 0 | } |
1328 | | |
1329 | 0 | return 0; |
1330 | 0 | } |
1331 | | |
1332 | | static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name, |
1333 | | int shndx, Elf_Data *data) |
1334 | 170 | { |
1335 | 170 | const struct btf_type *type, *datasec; |
1336 | 170 | const struct btf_var_secinfo *vsi; |
1337 | 170 | struct bpf_struct_ops *st_ops; |
1338 | 170 | const char *tname, *var_name; |
1339 | 170 | __s32 type_id, datasec_id; |
1340 | 170 | const struct btf *btf; |
1341 | 170 | struct bpf_map *map; |
1342 | 170 | __u32 i; |
1343 | | |
1344 | 170 | if (shndx == -1) |
1345 | 0 | return 0; |
1346 | | |
1347 | 170 | btf = obj->btf; |
1348 | 170 | datasec_id = btf__find_by_name_kind(btf, sec_name, |
1349 | 170 | BTF_KIND_DATASEC); |
1350 | 170 | if (datasec_id < 0) { |
1351 | 61 | pr_warn("struct_ops init: DATASEC %s not found\n", |
1352 | 61 | sec_name); |
1353 | 61 | return -EINVAL; |
1354 | 61 | } |
1355 | | |
1356 | 109 | datasec = btf__type_by_id(btf, datasec_id); |
1357 | 109 | vsi = btf_var_secinfos(datasec); |
1358 | 162 | for (i = 0; i < btf_vlen(datasec); i++, vsi++) { |
1359 | 87 | type = btf__type_by_id(obj->btf, vsi->type); |
1360 | 87 | var_name = btf__name_by_offset(obj->btf, type->name_off); |
1361 | | |
1362 | 87 | type_id = btf__resolve_type(obj->btf, vsi->type); |
1363 | 87 | if (type_id < 0) { |
1364 | 8 | pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n", |
1365 | 8 | vsi->type, sec_name); |
1366 | 8 | return -EINVAL; |
1367 | 8 | } |
1368 | | |
1369 | 79 | type = btf__type_by_id(obj->btf, type_id); |
1370 | 79 | tname = btf__name_by_offset(obj->btf, type->name_off); |
1371 | 79 | if (!tname[0]) { |
1372 | 3 | pr_warn("struct_ops init: anonymous type is not supported\n"); |
1373 | 3 | return -ENOTSUP; |
1374 | 3 | } |
1375 | 76 | if (!btf_is_struct(type)) { |
1376 | 5 | pr_warn("struct_ops init: %s is not a struct\n", tname); |
1377 | 5 | return -EINVAL; |
1378 | 5 | } |
1379 | | |
1380 | 71 | map = bpf_object__add_map(obj); |
1381 | 71 | if (IS_ERR(map)) |
1382 | 0 | return PTR_ERR(map); |
1383 | | |
1384 | 71 | map->sec_idx = shndx; |
1385 | 71 | map->sec_offset = vsi->offset; |
1386 | 71 | map->name = strdup(var_name); |
1387 | 71 | if (!map->name) |
1388 | 0 | return -ENOMEM; |
1389 | 71 | map->btf_value_type_id = type_id; |
1390 | | |
1391 | | /* Follow the same convention as for program autoload: |
1392 | | * SEC("?.struct_ops") means the map is not created by default. |
1393 | | */ |
1394 | 71 | if (sec_name[0] == '?') { |
1395 | 16 | map->autocreate = false; |
1396 | | /* from now on forget there was ? in section name */ |
1397 | 16 | sec_name++; |
1398 | 16 | } |
1399 | | |
1400 | 71 | map->def.type = BPF_MAP_TYPE_STRUCT_OPS; |
1401 | 71 | map->def.key_size = sizeof(int); |
1402 | 71 | map->def.value_size = type->size; |
1403 | 71 | map->def.max_entries = 1; |
1404 | 71 | map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0; |
1405 | 71 | map->autoattach = true; |
1406 | | |
1407 | 71 | map->st_ops = calloc(1, sizeof(*map->st_ops)); |
1408 | 71 | if (!map->st_ops) |
1409 | 0 | return -ENOMEM; |
1410 | 71 | st_ops = map->st_ops; |
1411 | 71 | st_ops->data = malloc(type->size); |
1412 | 71 | st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs)); |
1413 | 71 | st_ops->kern_func_off = malloc(btf_vlen(type) * |
1414 | 71 | sizeof(*st_ops->kern_func_off)); |
1415 | 71 | if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off) |
1416 | 0 | return -ENOMEM; |
1417 | | |
1418 | 71 | if (vsi->offset + type->size > data->d_size) { |
1419 | 18 | pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n", |
1420 | 18 | var_name, sec_name); |
1421 | 18 | return -EINVAL; |
1422 | 18 | } |
1423 | | |
1424 | 53 | memcpy(st_ops->data, |
1425 | 53 | data->d_buf + vsi->offset, |
1426 | 53 | type->size); |
1427 | 53 | st_ops->type_id = type_id; |
1428 | | |
1429 | 53 | pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n", |
1430 | 53 | tname, type_id, var_name, vsi->offset); |
1431 | 53 | } |
1432 | | |
1433 | 75 | return 0; |
1434 | 109 | } |
1435 | | |
1436 | | static int bpf_object_init_struct_ops(struct bpf_object *obj) |
1437 | 2.39k | { |
1438 | 2.39k | const char *sec_name; |
1439 | 2.39k | int sec_idx, err; |
1440 | | |
1441 | 19.0k | for (sec_idx = 0; sec_idx < obj->efile.sec_cnt; ++sec_idx) { |
1442 | 16.7k | struct elf_sec_desc *desc = &obj->efile.secs[sec_idx]; |
1443 | | |
1444 | 16.7k | if (desc->sec_type != SEC_ST_OPS) |
1445 | 16.5k | continue; |
1446 | | |
1447 | 170 | sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); |
1448 | 170 | if (!sec_name) |
1449 | 0 | return -LIBBPF_ERRNO__FORMAT; |
1450 | | |
1451 | 170 | err = init_struct_ops_maps(obj, sec_name, sec_idx, desc->data); |
1452 | 170 | if (err) |
1453 | 95 | return err; |
1454 | 170 | } |
1455 | | |
1456 | 2.30k | return 0; |
1457 | 2.39k | } |
1458 | | |
1459 | | static struct bpf_object *bpf_object__new(const char *path, |
1460 | | const void *obj_buf, |
1461 | | size_t obj_buf_sz, |
1462 | | const char *obj_name) |
1463 | 11.7k | { |
1464 | 11.7k | struct bpf_object *obj; |
1465 | 11.7k | char *end; |
1466 | | |
1467 | 11.7k | obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1); |
1468 | 11.7k | if (!obj) { |
1469 | 0 | pr_warn("alloc memory failed for %s\n", path); |
1470 | 0 | return ERR_PTR(-ENOMEM); |
1471 | 0 | } |
1472 | | |
1473 | 11.7k | strcpy(obj->path, path); |
1474 | 11.7k | if (obj_name) { |
1475 | 11.7k | libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name)); |
1476 | 11.7k | } else { |
1477 | | /* Use the GNU version of basename(), which doesn't modify its argument. */ |
1478 | 0 | libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name)); |
1479 | 0 | end = strchr(obj->name, '.'); |
1480 | 0 | if (end) |
1481 | 0 | *end = 0; |
1482 | 0 | } |
1483 | | |
1484 | 11.7k | obj->efile.fd = -1; |
1485 | | /* |
1486 | | * The caller of this function should also call |
1487 | | * bpf_object__elf_finish() after data collection to return |
1488 | | * obj_buf to the user. If not, we would have to duplicate the buffer |
1489 | | * to avoid the user freeing it before ELF processing finishes. |
1490 | | */ |
1491 | 11.7k | obj->efile.obj_buf = obj_buf; |
1492 | 11.7k | obj->efile.obj_buf_sz = obj_buf_sz; |
1493 | 11.7k | obj->efile.btf_maps_shndx = -1; |
1494 | 11.7k | obj->kconfig_map_idx = -1; |
1495 | | |
1496 | 11.7k | obj->kern_version = get_kernel_version(); |
1497 | 11.7k | obj->loaded = false; |
1498 | | |
1499 | 11.7k | return obj; |
1500 | 11.7k | } |
1501 | | |
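The naming rule implemented above, from the caller's side: opening "dir/prog.bpf.o" without options yields the object name "prog" (basename, truncated at the first '.'), while bpf_object_open_opts.object_name overrides it. A sketch:

    #include <bpf/libbpf.h>

    void open_with_custom_name(void)
    {
    	LIBBPF_OPTS(bpf_object_open_opts, opts,
    		.object_name = "custom_name");
    	struct bpf_object *obj;

    	obj = bpf_object__open_file("dir/prog.bpf.o", &opts);
    	if (obj)
    		bpf_object__close(obj);
    }
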
1502 | | static void bpf_object__elf_finish(struct bpf_object *obj) |
1503 | 15.8k | { |
1504 | 15.8k | if (!obj->efile.elf) |
1505 | 4.25k | return; |
1506 | | |
1507 | 11.6k | elf_end(obj->efile.elf); |
1508 | 11.6k | obj->efile.elf = NULL; |
1509 | 11.6k | obj->efile.symbols = NULL; |
1510 | 11.6k | obj->efile.arena_data = NULL; |
1511 | | |
1512 | 11.6k | zfree(&obj->efile.secs); |
1513 | 11.6k | obj->efile.sec_cnt = 0; |
1514 | 11.6k | zclose(obj->efile.fd); |
1515 | 11.6k | obj->efile.obj_buf = NULL; |
1516 | 11.6k | obj->efile.obj_buf_sz = 0; |
1517 | 11.6k | } |
1518 | | |
1519 | | static int bpf_object__elf_init(struct bpf_object *obj) |
1520 | 11.7k | { |
1521 | 11.7k | Elf64_Ehdr *ehdr; |
1522 | 11.7k | int err = 0; |
1523 | 11.7k | Elf *elf; |
1524 | | |
1525 | 11.7k | if (obj->efile.elf) { |
1526 | 0 | pr_warn("elf: init internal error\n"); |
1527 | 0 | return -LIBBPF_ERRNO__LIBELF; |
1528 | 0 | } |
1529 | | |
1530 | 11.7k | if (obj->efile.obj_buf_sz > 0) { |
1531 | | /* obj_buf should have been validated by bpf_object__open_mem(). */ |
1532 | 11.7k | elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz); |
1533 | 11.7k | } else { |
1534 | 0 | obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC); |
1535 | 0 | if (obj->efile.fd < 0) { |
1536 | 0 | char errmsg[STRERR_BUFSIZE], *cp; |
1537 | |
1538 | 0 | err = -errno; |
1539 | 0 | cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); |
1540 | 0 | pr_warn("elf: failed to open %s: %s\n", obj->path, cp); |
1541 | 0 | return err; |
1542 | 0 | } |
1543 | | |
1544 | 0 | elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL); |
1545 | 0 | } |
1546 | | |
1547 | 11.7k | if (!elf) { |
1548 | 93 | pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1)); |
1549 | 93 | err = -LIBBPF_ERRNO__LIBELF; |
1550 | 93 | goto errout; |
1551 | 93 | } |
1552 | | |
1553 | 11.6k | obj->efile.elf = elf; |
1554 | | |
1555 | 11.6k | if (elf_kind(elf) != ELF_K_ELF) { |
1556 | 120 | err = -LIBBPF_ERRNO__FORMAT; |
1557 | 120 | pr_warn("elf: '%s' is not a proper ELF object\n", obj->path); |
1558 | 120 | goto errout; |
1559 | 120 | } |
1560 | | |
1561 | 11.5k | if (gelf_getclass(elf) != ELFCLASS64) { |
1562 | 503 | err = -LIBBPF_ERRNO__FORMAT; |
1563 | 503 | pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path); |
1564 | 503 | goto errout; |
1565 | 503 | } |
1566 | | |
1567 | 11.0k | obj->efile.ehdr = ehdr = elf64_getehdr(elf); |
1568 | 11.0k | if (!obj->efile.ehdr) { |
1569 | 0 | pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1)); |
1570 | 0 | err = -LIBBPF_ERRNO__FORMAT; |
1571 | 0 | goto errout; |
1572 | 0 | } |
1573 | | |
1574 | 11.0k | if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) { |
1575 | 23 | pr_warn("elf: failed to get section names section index for %s: %s\n", |
1576 | 23 | obj->path, elf_errmsg(-1)); |
1577 | 23 | err = -LIBBPF_ERRNO__FORMAT; |
1578 | 23 | goto errout; |
1579 | 23 | } |
1580 | | |
1581 | | /* ELF is corrupted/truncated, avoid calling elf_strptr. */ |
1582 | 10.9k | if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) { |
1583 | 1.17k | pr_warn("elf: failed to get section names strings from %s: %s\n", |
1584 | 1.17k | obj->path, elf_errmsg(-1)); |
1585 | 1.17k | err = -LIBBPF_ERRNO__FORMAT; |
1586 | 1.17k | goto errout; |
1587 | 1.17k | } |
1588 | | |
1589 | | /* Old LLVM set e_machine to EM_NONE */ |
1590 | 9.81k | if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) { |
1591 | 455 | pr_warn("elf: %s is not a valid eBPF object file\n", obj->path); |
1592 | 455 | err = -LIBBPF_ERRNO__FORMAT; |
1593 | 455 | goto errout; |
1594 | 455 | } |
1595 | | |
1596 | 9.36k | return 0; |
1597 | 2.36k | errout: |
1598 | 2.36k | bpf_object__elf_finish(obj); |
1599 | 2.36k | return err; |
1600 | 9.81k | } |
1601 | | |
1602 | | static int bpf_object__check_endianness(struct bpf_object *obj) |
1603 | 9.36k | { |
1604 | 9.36k | #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ |
1605 | 9.36k | if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB) |
1606 | 9.33k | return 0; |
1607 | | #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ |
1608 | | if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB) |
1609 | | return 0; |
1610 | | #else |
1611 | | # error "Unrecognized __BYTE_ORDER__" |
1612 | | #endif |
1613 | 24 | pr_warn("elf: endianness mismatch in %s.\n", obj->path); |
1614 | 24 | return -LIBBPF_ERRNO__ENDIAN; |
1615 | 9.36k | } |
1616 | | |
1617 | | static int |
1618 | | bpf_object__init_license(struct bpf_object *obj, void *data, size_t size) |
1619 | 663 | { |
1620 | 663 | if (!data) { |
1621 | 1 | pr_warn("invalid license section in %s\n", obj->path); |
1622 | 1 | return -LIBBPF_ERRNO__FORMAT; |
1623 | 1 | } |
1624 | | /* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't |
1625 | | * go over allowed ELF data section buffer |
1626 | | */ |
1627 | 662 | libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license))); |
1628 | 662 | pr_debug("license of %s is %s\n", obj->path, obj->license); |
1629 | 662 | return 0; |
1630 | 663 | } |
1631 | | |
1632 | | static int |
1633 | | bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size) |
1634 | 49 | { |
1635 | 49 | __u32 kver; |
1636 | | |
1637 | 49 | if (!data || size != sizeof(kver)) { |
1638 | 13 | pr_warn("invalid kver section in %s\n", obj->path); |
1639 | 13 | return -LIBBPF_ERRNO__FORMAT; |
1640 | 13 | } |
1641 | 36 | memcpy(&kver, data, sizeof(kver)); |
1642 | 36 | obj->kern_version = kver; |
1643 | 36 | pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version); |
1644 | 36 | return 0; |
1645 | 49 | } |
1646 | | |
1647 | | static bool bpf_map_type__is_map_in_map(enum bpf_map_type type) |
1648 | 162 | { |
1649 | 162 | if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS || |
1650 | 162 | type == BPF_MAP_TYPE_HASH_OF_MAPS) |
1651 | 88 | return true; |
1652 | 74 | return false; |
1653 | 162 | } |
1654 | | |
1655 | | static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size) |
1656 | 404 | { |
1657 | 404 | Elf_Data *data; |
1658 | 404 | Elf_Scn *scn; |
1659 | | |
1660 | 404 | if (!name) |
1661 | 0 | return -EINVAL; |
1662 | | |
1663 | 404 | scn = elf_sec_by_name(obj, name); |
1664 | 404 | data = elf_sec_data(obj, scn); |
1665 | 404 | if (data) { |
1666 | 243 | *size = data->d_size; |
1667 | 243 | return 0; /* found it */ |
1668 | 243 | } |
1669 | | |
1670 | 161 | return -ENOENT; |
1671 | 404 | } |
1672 | | |
1673 | | static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name) |
1674 | 2.14k | { |
1675 | 2.14k | Elf_Data *symbols = obj->efile.symbols; |
1676 | 2.14k | const char *sname; |
1677 | 2.14k | size_t si; |
1678 | | |
1679 | 96.7k | for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) { |
1680 | 96.5k | Elf64_Sym *sym = elf_sym_by_idx(obj, si); |
1681 | | |
1682 | 96.5k | if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT) |
1683 | 87.9k | continue; |
1684 | | |
1685 | 8.61k | if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL && |
1686 | 8.61k | ELF64_ST_BIND(sym->st_info) != STB_WEAK) |
1687 | 5.71k | continue; |
1688 | | |
1689 | 2.89k | sname = elf_sym_str(obj, sym->st_name); |
1690 | 2.89k | if (!sname) { |
1691 | 28 | pr_warn("failed to get sym name string for var %s\n", name); |
1692 | 28 | return ERR_PTR(-EIO); |
1693 | 28 | } |
1694 | 2.87k | if (strcmp(name, sname) == 0) |
1695 | 1.92k | return sym; |
1696 | 2.87k | } |
1697 | | |
1698 | 200 | return ERR_PTR(-ENOENT); |
1699 | 2.14k | } |
1700 | | |
1701 | | /* Some versions of Android don't provide memfd_create() in their libc |
1702 | | * implementation, so avoid complications and just go straight to Linux |
1703 | | * syscall. |
1704 | | */ |
1705 | | static int sys_memfd_create(const char *name, unsigned flags) |
1706 | 2.92k | { |
1707 | 2.92k | return syscall(__NR_memfd_create, name, flags); |
1708 | 2.92k | } |
1709 | | |
1710 | | #ifndef MFD_CLOEXEC |
1711 | | #define MFD_CLOEXEC 0x0001U |
1712 | | #endif |
1713 | | |
1714 | | static int create_placeholder_fd(void) |
1715 | 2.92k | { |
1716 | 2.92k | int fd; |
1717 | | |
1718 | 2.92k | fd = ensure_good_fd(sys_memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC)); |
1719 | 2.92k | if (fd < 0) |
1720 | 0 | return -errno; |
1721 | 2.92k | return fd; |
1722 | 2.92k | } |
1723 | | |
1724 | | static struct bpf_map *bpf_object__add_map(struct bpf_object *obj) |
1725 | 2.92k | { |
1726 | 2.92k | struct bpf_map *map; |
1727 | 2.92k | int err; |
1728 | | |
1729 | 2.92k | err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap, |
1730 | 2.92k | sizeof(*obj->maps), obj->nr_maps + 1); |
1731 | 2.92k | if (err) |
1732 | 0 | return ERR_PTR(err); |
1733 | | |
1734 | 2.92k | map = &obj->maps[obj->nr_maps++]; |
1735 | 2.92k | map->obj = obj; |
1736 | | /* Preallocate map FD without actually creating BPF map just yet. |
1737 | | * These map FD "placeholders" will be reused later without changing |
1738 | | * FD value when map is actually created in the kernel. |
1739 | | * |
1740 | | * This is useful to be able to perform BPF program relocations |
1741 | | * without having to create BPF maps before that step. This allows us |
1742 | | * to finalize and load BTF very late in BPF object's loading phase, |
1743 | | * right before BPF maps have to be created and BPF programs have to |
1744 | | * be loaded. By having these map FD placeholders we can perform all |
1745 | | * the sanitizations, relocations, and any other adjustments before we |
1746 | | * start creating actual BPF kernel objects (BTF, maps, progs). |
1747 | | */ |
1748 | 2.92k | map->fd = create_placeholder_fd(); |
1749 | 2.92k | if (map->fd < 0) |
1750 | 0 | return ERR_PTR(map->fd); |
1751 | 2.92k | map->inner_map_fd = -1; |
1752 | 2.92k | map->autocreate = true; |
1753 | | |
1754 | 2.92k | return map; |
1755 | 2.92k | } |
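/* Illustrative sketch (not part of libbpf): the placeholder trick above
 * works because dup3() can atomically repoint an already-allocated FD
 * number at a different file, so an FD value baked into relocated
 * instructions stays valid once the real map exists. A minimal sketch,
 * assuming _GNU_SOURCE for dup3(); libbpf's internal helper differs in
 * details:
 */
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static int repoint_placeholder(int placeholder_fd, int real_map_fd)
{
	/* placeholder_fd keeps its numeric value but now refers to the map */
	if (dup3(real_map_fd, placeholder_fd, O_CLOEXEC) < 0)
		return -errno;
	close(real_map_fd); /* the temporary descriptor is no longer needed */
	return 0;
}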
1756 | | |
1757 | | static size_t array_map_mmap_sz(unsigned int value_sz, unsigned int max_entries) |
1758 | 3.28k | { |
1759 | 3.28k | const long page_sz = sysconf(_SC_PAGE_SIZE); |
1760 | 3.28k | size_t map_sz; |
1761 | | |
1762 | 3.28k | map_sz = (size_t)roundup(value_sz, 8) * max_entries; |
1763 | 3.28k | map_sz = roundup(map_sz, page_sz); |
1764 | 3.28k | return map_sz; |
1765 | 3.28k | } |
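/* Worked example (not part of libbpf), assuming a 4096-byte page:
 * value_sz = 12, max_entries = 1000 gives roundup(12, 8) * 1000 = 16000
 * bytes, which rounds up to 16384 bytes (4 pages).
 */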
1766 | | |
1767 | | static size_t bpf_map_mmap_sz(const struct bpf_map *map) |
1768 | 3.28k | { |
1769 | 3.28k | const long page_sz = sysconf(_SC_PAGE_SIZE); |
1770 | | |
1771 | 3.28k | switch (map->def.type) { |
1772 | 3.28k | case BPF_MAP_TYPE_ARRAY: |
1773 | 3.28k | return array_map_mmap_sz(map->def.value_size, map->def.max_entries); |
1774 | 0 | case BPF_MAP_TYPE_ARENA: |
1775 | 0 | return page_sz * map->def.max_entries; |
1776 | 0 | default: |
1777 | 0 | return 0; /* not supported */ |
1778 | 3.28k | } |
1779 | 3.28k | } |
1780 | | |
1781 | | static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz) |
1782 | 0 | { |
1783 | 0 | void *mmaped; |
1784 | |
1785 | 0 | if (!map->mmaped) |
1786 | 0 | return -EINVAL; |
1787 | | |
1788 | 0 | if (old_sz == new_sz) |
1789 | 0 | return 0; |
1790 | | |
1791 | 0 | mmaped = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0); |
1792 | 0 | if (mmaped == MAP_FAILED) |
1793 | 0 | return -errno; |
1794 | | |
1795 | 0 | memcpy(mmaped, map->mmaped, min(old_sz, new_sz)); |
1796 | 0 | munmap(map->mmaped, old_sz); |
1797 | 0 | map->mmaped = mmaped; |
1798 | 0 | return 0; |
1799 | 0 | } |
1800 | | |
1801 | | static char *internal_map_name(struct bpf_object *obj, const char *real_name) |
1802 | 1.69k | { |
1803 | 1.69k | char map_name[BPF_OBJ_NAME_LEN], *p; |
1804 | 1.69k | int pfx_len, sfx_len = max((size_t)7, strlen(real_name)); |
1805 | | |
1806 | | /* This is one of the more confusing parts of libbpf for various |
1807 | | * reasons, some of which are historical. The original idea for naming |
1808 | | * internal names was to include as much of BPF object name prefix as |
1809 | | * possible, so that it can be distinguished from similar internal |
1810 | | * maps of a different BPF object. |
1811 | | * As an example, let's say we have bpf_object named 'my_object_name' |
1812 | | * and internal map corresponding to '.rodata' ELF section. The final |
1813 | | * map name advertised to user and to the kernel will be |
1814 | | * 'my_objec.rodata', taking first 8 characters of object name and |
1815 | | * entire 7 characters of '.rodata'. |
1816 | | * Somewhat confusingly, if internal map ELF section name is shorter |
1817 | | * than 7 characters, e.g., '.bss', we still reserve 7 characters |
1818 | | * for the suffix, even though we only have 4 actual characters, and |
1819 | | * resulting map will be called 'my_objec.bss', not even using all 15 |
1820 | | * characters allowed by the kernel. Oh well, at least the truncated |
1821 | | * object name is somewhat consistent in this case. But if the map |
1822 | | * name is '.kconfig', we'll still have entirety of '.kconfig' added |
1823 | | * (8 chars) and thus will be left with only first 7 characters of the |
1824 | | * object name ('my_obje'). Happy guessing, user, that the final map |
1825 | | * name will be "my_obje.kconfig". |
1826 | | * Now, with libbpf starting to support arbitrarily named .rodata.* |
1827 | | * and .data.* data sections, it's possible that ELF section name is |
1828 | | * longer than allowed 15 chars, so we now need to be careful to take |
1829 | | * only up to 15 first characters of ELF name, taking no BPF object |
1830 | | * name characters at all. So '.rodata.abracadabra' will result in |
1831 | | * '.rodata.abracad' kernel and user-visible name. |
1832 | | * We need to keep this convoluted logic intact for .data, .bss and |
1833 | | * .rodata maps, but for new custom .data.custom and .rodata.custom |
1834 | | * maps we use their ELF names as is, not prepending bpf_object name |
1835 | | * in front. We still need to truncate them to 15 characters for the |
1836 | | * kernel. Full name can be recovered for such maps by using DATASEC |
1837 | | * BTF type associated with such map's value type, though. |
1838 | | */ |
1839 | 1.69k | if (sfx_len >= BPF_OBJ_NAME_LEN) |
1840 | 386 | sfx_len = BPF_OBJ_NAME_LEN - 1; |
1841 | | |
1842 | | /* if there are two or more dots in map name, it's a custom dot map */ |
1843 | 1.69k | if (strchr(real_name + 1, '.') != NULL) |
1844 | 1.15k | pfx_len = 0; |
1845 | 543 | else |
1846 | 543 | pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name)); |
1847 | | |
1848 | 1.69k | snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name, |
1849 | 1.69k | sfx_len, real_name); |
1850 | | |
1851 | | /* sanitise map name to characters allowed by kernel */ |
1852 | 21.6k | for (p = map_name; *p && p < map_name + sizeof(map_name); p++) |
1853 | 20.0k | if (!isalnum(*p) && *p != '_' && *p != '.') |
1854 | 2.44k | *p = '_'; |
1855 | | |
1856 | 1.69k | return strdup(map_name); |
1857 | 1.69k | } |
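/* Illustrative results (not part of libbpf) of the naming rules above,
 * assuming a bpf_object named "my_object_name":
 *
 *   internal_map_name(obj, ".rodata")             -> "my_objec.rodata"
 *   internal_map_name(obj, ".kconfig")            -> "my_obje.kconfig"
 *   internal_map_name(obj, ".rodata.abracadabra") -> ".rodata.abracad"
 */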
1858 | | |
1859 | | static int |
1860 | | map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map); |
1861 | | |
1862 | | /* Internal BPF map is mmap()'able only if at least one of corresponding |
1863 | | * DATASEC's VARs are to be exposed through BPF skeleton. I.e., it's a GLOBAL |
1864 | | * variable and it's not marked as __hidden (which turns it into, effectively, |
1865 | | * a STATIC variable). |
1866 | | */ |
1867 | | static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map) |
1868 | 1.69k | { |
1869 | 1.69k | const struct btf_type *t, *vt; |
1870 | 1.69k | struct btf_var_secinfo *vsi; |
1871 | 1.69k | int i, n; |
1872 | | |
1873 | 1.69k | if (!map->btf_value_type_id) |
1874 | 1.52k | return false; |
1875 | | |
1876 | 169 | t = btf__type_by_id(obj->btf, map->btf_value_type_id); |
1877 | 169 | if (!btf_is_datasec(t)) |
1878 | 25 | return false; |
1879 | | |
1880 | 144 | vsi = btf_var_secinfos(t); |
1881 | 240 | for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) { |
1882 | 199 | vt = btf__type_by_id(obj->btf, vsi->type); |
1883 | 199 | if (!btf_is_var(vt)) |
1884 | 73 | continue; |
1885 | | |
1886 | 126 | if (btf_var(vt)->linkage != BTF_VAR_STATIC) |
1887 | 103 | return true; |
1888 | 126 | } |
1889 | | |
1890 | 41 | return false; |
1891 | 144 | } |
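/* Illustrative sketch (not part of libbpf): in BPF C, visibility decides
 * whether a datasec's map becomes mmap()'able, assuming the __hidden
 * macro from bpf_helpers.h:
 *
 *   int exposed_cnt;            global linkage -> map gets BPF_F_MMAPABLE
 *   __hidden int internal_cnt;  demoted to static linkage in BTF
 *   static int private_cnt;     static linkage -> not exposed
 */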
1892 | | |
1893 | | static int |
1894 | | bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, |
1895 | | const char *real_name, int sec_idx, void *data, size_t data_sz) |
1896 | 1.69k | { |
1897 | 1.69k | struct bpf_map_def *def; |
1898 | 1.69k | struct bpf_map *map; |
1899 | 1.69k | size_t mmap_sz; |
1900 | 1.69k | int err; |
1901 | | |
1902 | 1.69k | map = bpf_object__add_map(obj); |
1903 | 1.69k | if (IS_ERR(map)) |
1904 | 0 | return PTR_ERR(map); |
1905 | | |
1906 | 1.69k | map->libbpf_type = type; |
1907 | 1.69k | map->sec_idx = sec_idx; |
1908 | 1.69k | map->sec_offset = 0; |
1909 | 1.69k | map->real_name = strdup(real_name); |
1910 | 1.69k | map->name = internal_map_name(obj, real_name); |
1911 | 1.69k | if (!map->real_name || !map->name) { |
1912 | 0 | zfree(&map->real_name); |
1913 | 0 | zfree(&map->name); |
1914 | 0 | return -ENOMEM; |
1915 | 0 | } |
1916 | | |
1917 | 1.69k | def = &map->def; |
1918 | 1.69k | def->type = BPF_MAP_TYPE_ARRAY; |
1919 | 1.69k | def->key_size = sizeof(int); |
1920 | 1.69k | def->value_size = data_sz; |
1921 | 1.69k | def->max_entries = 1; |
1922 | 1.69k | def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG |
1923 | 1.69k | ? BPF_F_RDONLY_PROG : 0; |
1924 | | |
1925 | | /* failures are fine because of maps like .rodata.str1.1 */ |
1926 | 1.69k | (void) map_fill_btf_type_info(obj, map); |
1927 | | |
1928 | 1.69k | if (map_is_mmapable(obj, map)) |
1929 | 103 | def->map_flags |= BPF_F_MMAPABLE; |
1930 | | |
1931 | 1.69k | pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n", |
1932 | 1.69k | map->name, map->sec_idx, map->sec_offset, def->map_flags); |
1933 | | |
1934 | 1.69k | mmap_sz = bpf_map_mmap_sz(map); |
1935 | 1.69k | map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, |
1936 | 1.69k | MAP_SHARED | MAP_ANONYMOUS, -1, 0); |
1937 | 1.69k | if (map->mmaped == MAP_FAILED) { |
1938 | 97 | err = -errno; |
1939 | 97 | map->mmaped = NULL; |
1940 | 97 | pr_warn("failed to alloc map '%s' content buffer: %d\n", |
1941 | 97 | map->name, err); |
1942 | 97 | zfree(&map->real_name); |
1943 | 97 | zfree(&map->name); |
1944 | 97 | return err; |
1945 | 97 | } |
1946 | | |
1947 | 1.59k | if (data) |
1948 | 976 | memcpy(map->mmaped, data, data_sz); |
1949 | | |
1950 | 1.59k | pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name); |
1951 | 1.59k | return 0; |
1952 | 1.69k | } |
1953 | | |
1954 | | static int bpf_object__init_global_data_maps(struct bpf_object *obj) |
1955 | 2.49k | { |
1956 | 2.49k | struct elf_sec_desc *sec_desc; |
1957 | 2.49k | const char *sec_name; |
1958 | 2.49k | int err = 0, sec_idx; |
1959 | | |
1960 | | /* |
1961 | | * Populate obj->maps with libbpf internal maps. |
1962 | | */ |
1963 | 17.9k | for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) { |
1964 | 15.4k | sec_desc = &obj->efile.secs[sec_idx]; |
1965 | | |
1966 | | /* Skip recognized sections with size 0. */ |
1967 | 15.4k | if (!sec_desc->data || sec_desc->data->d_size == 0) |
1968 | 12.6k | continue; |
1969 | | |
1970 | 2.83k | switch (sec_desc->sec_type) { |
1971 | 617 | case SEC_DATA: |
1972 | 617 | sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); |
1973 | 617 | err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA, |
1974 | 617 | sec_name, sec_idx, |
1975 | 617 | sec_desc->data->d_buf, |
1976 | 617 | sec_desc->data->d_size); |
1977 | 617 | break; |
1978 | 359 | case SEC_RODATA: |
1979 | 359 | obj->has_rodata = true; |
1980 | 359 | sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); |
1981 | 359 | err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA, |
1982 | 359 | sec_name, sec_idx, |
1983 | 359 | sec_desc->data->d_buf, |
1984 | 359 | sec_desc->data->d_size); |
1985 | 359 | break; |
1986 | 625 | case SEC_BSS: |
1987 | 625 | sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); |
1988 | 625 | err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS, |
1989 | 625 | sec_name, sec_idx, |
1990 | 625 | NULL, |
1991 | 625 | sec_desc->data->d_size); |
1992 | 625 | break; |
1993 | 1.23k | default: |
1994 | | /* skip */ |
1995 | 1.23k | break; |
1996 | 2.83k | } |
1997 | 2.83k | if (err) |
1998 | 25 | return err; |
1999 | 2.83k | } |
2000 | 2.47k | return 0; |
2001 | 2.49k | } |
2002 | | |
2003 | | |
2004 | | static struct extern_desc *find_extern_by_name(const struct bpf_object *obj, |
2005 | | const void *name) |
2006 | 973 | { |
2007 | 973 | int i; |
2008 | | |
2009 | 3.60k | for (i = 0; i < obj->nr_extern; i++) { |
2010 | 3.19k | if (strcmp(obj->externs[i].name, name) == 0) |
2011 | 560 | return &obj->externs[i]; |
2012 | 3.19k | } |
2013 | 413 | return NULL; |
2014 | 973 | } |
2015 | | |
2016 | | static struct extern_desc *find_extern_by_name_with_len(const struct bpf_object *obj, |
2017 | | const void *name, int len) |
2018 | 0 | { |
2019 | 0 | const char *ext_name; |
2020 | 0 | int i; |
2021 | |
2022 | 0 | for (i = 0; i < obj->nr_extern; i++) { |
2023 | 0 | ext_name = obj->externs[i].name; |
2024 | 0 | if (strlen(ext_name) == len && strncmp(ext_name, name, len) == 0) |
2025 | 0 | return &obj->externs[i]; |
2026 | 0 | } |
2027 | 0 | return NULL; |
2028 | 0 | } |
2029 | | |
2030 | | static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val, |
2031 | | char value) |
2032 | 0 | { |
2033 | 0 | switch (ext->kcfg.type) { |
2034 | 0 | case KCFG_BOOL: |
2035 | 0 | if (value == 'm') { |
2036 | 0 | pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n", |
2037 | 0 | ext->name, value); |
2038 | 0 | return -EINVAL; |
2039 | 0 | } |
2040 | 0 | *(bool *)ext_val = value == 'y' ? true : false; |
2041 | 0 | break; |
2042 | 0 | case KCFG_TRISTATE: |
2043 | 0 | if (value == 'y') |
2044 | 0 | *(enum libbpf_tristate *)ext_val = TRI_YES; |
2045 | 0 | else if (value == 'm') |
2046 | 0 | *(enum libbpf_tristate *)ext_val = TRI_MODULE; |
2047 | 0 | else /* value == 'n' */ |
2048 | 0 | *(enum libbpf_tristate *)ext_val = TRI_NO; |
2049 | 0 | break; |
2050 | 0 | case KCFG_CHAR: |
2051 | 0 | *(char *)ext_val = value; |
2052 | 0 | break; |
2053 | 0 | case KCFG_UNKNOWN: |
2054 | 0 | case KCFG_INT: |
2055 | 0 | case KCFG_CHAR_ARR: |
2056 | 0 | default: |
2057 | 0 | pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n", |
2058 | 0 | ext->name, value); |
2059 | 0 | return -EINVAL; |
2060 | 0 | } |
2061 | 0 | ext->is_set = true; |
2062 | 0 | return 0; |
2063 | 0 | } |
2064 | | |
2065 | | static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val, |
2066 | | const char *value) |
2067 | 0 | { |
2068 | 0 | size_t len; |
2069 | |
2070 | 0 | if (ext->kcfg.type != KCFG_CHAR_ARR) { |
2071 | 0 | pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n", |
2072 | 0 | ext->name, value); |
2073 | 0 | return -EINVAL; |
2074 | 0 | } |
2075 | | |
2076 | 0 | len = strlen(value); |
2077 | 0 | if (value[len - 1] != '"') { |
2078 | 0 | pr_warn("extern (kcfg) '%s': invalid string config '%s'\n", |
2079 | 0 | ext->name, value); |
2080 | 0 | return -EINVAL; |
2081 | 0 | } |
2082 | | |
2083 | | /* strip quotes */ |
2084 | 0 | len -= 2; |
2085 | 0 | if (len >= ext->kcfg.sz) { |
2086 | 0 | pr_warn("extern (kcfg) '%s': long string '%s' of (%zu bytes) truncated to %d bytes\n", |
2087 | 0 | ext->name, value, len, ext->kcfg.sz - 1); |
2088 | 0 | len = ext->kcfg.sz - 1; |
2089 | 0 | } |
2090 | 0 | memcpy(ext_val, value + 1, len); |
2091 | 0 | ext_val[len] = '\0'; |
2092 | 0 | ext->is_set = true; |
2093 | 0 | return 0; |
2094 | 0 | } |
2095 | | |
2096 | | static int parse_u64(const char *value, __u64 *res) |
2097 | 0 | { |
2098 | 0 | char *value_end; |
2099 | 0 | int err; |
2100 | |
2101 | 0 | errno = 0; |
2102 | 0 | *res = strtoull(value, &value_end, 0); |
2103 | 0 | if (errno) { |
2104 | 0 | err = -errno; |
2105 | 0 | pr_warn("failed to parse '%s' as integer: %d\n", value, err); |
2106 | 0 | return err; |
2107 | 0 | } |
2108 | 0 | if (*value_end) { |
2109 | 0 | pr_warn("failed to parse '%s' as integer completely\n", value); |
2110 | 0 | return -EINVAL; |
2111 | 0 | } |
2112 | 0 | return 0; |
2113 | 0 | } |
2114 | | |
2115 | | static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v) |
2116 | 0 | { |
2117 | 0 | int bit_sz = ext->kcfg.sz * 8; |
2118 | |
2119 | 0 | if (ext->kcfg.sz == 8) |
2120 | 0 | return true; |
2121 | | |
2122 | | * Validate that value stored in u64 fits in an integer of
2123 | | * `ext->kcfg.sz` bytes without any loss of information. If the target integer
2124 | | * is signed, we rely on the following limits of integer type of |
2125 | | * Y bits and subsequent transformation: |
2126 | | * |
2127 | | * -2^(Y-1) <= X <= 2^(Y-1) - 1 |
2128 | | * 0 <= X + 2^(Y-1) <= 2^Y - 1 |
2129 | | * 0 <= X + 2^(Y-1) < 2^Y |
2130 | | * |
2131 | | * For unsigned target integer, check that all the (64 - Y) bits are |
2132 | | * zero. |
2133 | | */ |
2134 | 0 | if (ext->kcfg.is_signed) |
2135 | 0 | return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz); |
2136 | 0 | else |
2137 | 0 | return (v >> bit_sz) == 0; |
2138 | 0 | } |
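/* Worked example (not part of libbpf): with ext->kcfg.sz == 1 the signed
 * branch tests v + 2^7 < 2^8 in wrapping u64 arithmetic:
 *
 *   v = 127  (0x7f)               -> 0x7f + 0x80 = 0xff < 0x100: in range
 *   v = 128  (0x80)               -> 0x80 + 0x80 = 0x100: rejected
 *   v = -128 (0xffffffffffffff80) -> wraps to 0x00 < 0x100: in range
 *   v = -129 (0xffffffffffffff7f) -> wraps to 0xffffffffffffffff: rejected
 */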
2139 | | |
2140 | | static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val, |
2141 | | __u64 value) |
2142 | 0 | { |
2143 | 0 | if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR && |
2144 | 0 | ext->kcfg.type != KCFG_BOOL) { |
2145 | 0 | pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n", |
2146 | 0 | ext->name, (unsigned long long)value); |
2147 | 0 | return -EINVAL; |
2148 | 0 | } |
2149 | 0 | if (ext->kcfg.type == KCFG_BOOL && value > 1) { |
2150 | 0 | pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n", |
2151 | 0 | ext->name, (unsigned long long)value); |
2152 | 0 | return -EINVAL; |
2153 | |
2154 | 0 | } |
2155 | 0 | if (!is_kcfg_value_in_range(ext, value)) { |
2156 | 0 | pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n", |
2157 | 0 | ext->name, (unsigned long long)value, ext->kcfg.sz); |
2158 | 0 | return -ERANGE; |
2159 | 0 | } |
2160 | 0 | switch (ext->kcfg.sz) { |
2161 | 0 | case 1: |
2162 | 0 | *(__u8 *)ext_val = value; |
2163 | 0 | break; |
2164 | 0 | case 2: |
2165 | 0 | *(__u16 *)ext_val = value; |
2166 | 0 | break; |
2167 | 0 | case 4: |
2168 | 0 | *(__u32 *)ext_val = value; |
2169 | 0 | break; |
2170 | 0 | case 8: |
2171 | 0 | *(__u64 *)ext_val = value; |
2172 | 0 | break; |
2173 | 0 | default: |
2174 | 0 | return -EINVAL; |
2175 | 0 | } |
2176 | 0 | ext->is_set = true; |
2177 | 0 | return 0; |
2178 | 0 | } |
2179 | | |
2180 | | static int bpf_object__process_kconfig_line(struct bpf_object *obj, |
2181 | | char *buf, void *data) |
2182 | 0 | { |
2183 | 0 | struct extern_desc *ext; |
2184 | 0 | char *sep, *value; |
2185 | 0 | int len, err = 0; |
2186 | 0 | void *ext_val; |
2187 | 0 | __u64 num; |
2188 | |
2189 | 0 | if (!str_has_pfx(buf, "CONFIG_")) |
2190 | 0 | return 0; |
2191 | | |
2192 | 0 | sep = strchr(buf, '='); |
2193 | 0 | if (!sep) { |
2194 | 0 | pr_warn("failed to parse '%s': no separator\n", buf); |
2195 | 0 | return -EINVAL; |
2196 | 0 | } |
2197 | | |
2198 | | /* Trim ending '\n' */ |
2199 | 0 | len = strlen(buf); |
2200 | 0 | if (buf[len - 1] == '\n') |
2201 | 0 | buf[len - 1] = '\0'; |
2202 | | /* Split on '=' and ensure that a value is present. */ |
2203 | 0 | *sep = '\0'; |
2204 | 0 | if (!sep[1]) { |
2205 | 0 | *sep = '='; |
2206 | 0 | pr_warn("failed to parse '%s': no value\n", buf); |
2207 | 0 | return -EINVAL; |
2208 | 0 | } |
2209 | | |
2210 | 0 | ext = find_extern_by_name(obj, buf); |
2211 | 0 | if (!ext || ext->is_set) |
2212 | 0 | return 0; |
2213 | | |
2214 | 0 | ext_val = data + ext->kcfg.data_off; |
2215 | 0 | value = sep + 1; |
2216 | |
2217 | 0 | switch (*value) { |
2218 | 0 | case 'y': case 'n': case 'm': |
2219 | 0 | err = set_kcfg_value_tri(ext, ext_val, *value); |
2220 | 0 | break; |
2221 | 0 | case '"': |
2222 | 0 | err = set_kcfg_value_str(ext, ext_val, value); |
2223 | 0 | break; |
2224 | 0 | default: |
2225 | | /* assume integer */ |
2226 | 0 | err = parse_u64(value, &num); |
2227 | 0 | if (err) { |
2228 | 0 | pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value); |
2229 | 0 | return err; |
2230 | 0 | } |
2231 | 0 | if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) { |
2232 | 0 | pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value); |
2233 | 0 | return -EINVAL; |
2234 | 0 | } |
2235 | 0 | err = set_kcfg_value_num(ext, ext_val, num); |
2236 | 0 | break; |
2237 | 0 | } |
2238 | 0 | if (err) |
2239 | 0 | return err; |
2240 | 0 | pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value); |
2241 | 0 | return 0; |
2242 | 0 | } |
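/* Illustrative sketch (not part of libbpf): Kconfig lines this parser
 * accepts, paired with the BPF-side externs (using the __kconfig macro
 * from bpf_helpers.h) they would populate; the config names are examples:
 *
 *   CONFIG_BPF_SYSCALL=y -> extern bool CONFIG_BPF_SYSCALL __kconfig;
 *   CONFIG_MODULES=m     -> extern enum libbpf_tristate CONFIG_MODULES __kconfig;
 *   CONFIG_HZ=1000       -> extern int CONFIG_HZ __kconfig;
 *   CONFIG_DEFAULT_HOSTNAME="(none)"
 *                        -> extern char CONFIG_DEFAULT_HOSTNAME[8] __kconfig;
 */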
2243 | | |
2244 | | static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data) |
2245 | 0 | { |
2246 | 0 | char buf[PATH_MAX]; |
2247 | 0 | struct utsname uts; |
2248 | 0 | int len, err = 0; |
2249 | 0 | gzFile file; |
2250 | |
2251 | 0 | uname(&uts); |
2252 | 0 | len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release); |
2253 | 0 | if (len < 0) |
2254 | 0 | return -EINVAL; |
2255 | 0 | else if (len >= PATH_MAX) |
2256 | 0 | return -ENAMETOOLONG; |
2257 | | |
2258 | | /* gzopen also accepts uncompressed files. */ |
2259 | 0 | file = gzopen(buf, "re"); |
2260 | 0 | if (!file) |
2261 | 0 | file = gzopen("/proc/config.gz", "re"); |
2262 | |
2263 | 0 | if (!file) { |
2264 | 0 | pr_warn("failed to open system Kconfig\n"); |
2265 | 0 | return -ENOENT; |
2266 | 0 | } |
2267 | | |
2268 | 0 | while (gzgets(file, buf, sizeof(buf))) { |
2269 | 0 | err = bpf_object__process_kconfig_line(obj, buf, data); |
2270 | 0 | if (err) { |
2271 | 0 | pr_warn("error parsing system Kconfig line '%s': %d\n", |
2272 | 0 | buf, err); |
2273 | 0 | goto out; |
2274 | 0 | } |
2275 | 0 | } |
2276 | | |
2277 | 0 | out: |
2278 | 0 | gzclose(file); |
2279 | 0 | return err; |
2280 | 0 | } |
2281 | | |
2282 | | static int bpf_object__read_kconfig_mem(struct bpf_object *obj, |
2283 | | const char *config, void *data) |
2284 | 0 | { |
2285 | 0 | char buf[PATH_MAX]; |
2286 | 0 | int err = 0; |
2287 | 0 | FILE *file; |
2288 | |
2289 | 0 | file = fmemopen((void *)config, strlen(config), "r"); |
2290 | 0 | if (!file) { |
2291 | 0 | err = -errno; |
2292 | 0 | pr_warn("failed to open in-memory Kconfig: %d\n", err); |
2293 | 0 | return err; |
2294 | 0 | } |
2295 | | |
2296 | 0 | while (fgets(buf, sizeof(buf), file)) { |
2297 | 0 | err = bpf_object__process_kconfig_line(obj, buf, data); |
2298 | 0 | if (err) { |
2299 | 0 | pr_warn("error parsing in-memory Kconfig line '%s': %d\n", |
2300 | 0 | buf, err); |
2301 | 0 | break; |
2302 | 0 | } |
2303 | 0 | } |
2304 | |
2305 | 0 | fclose(file); |
2306 | 0 | return err; |
2307 | 0 | } |
2308 | | |
2309 | | static int bpf_object__init_kconfig_map(struct bpf_object *obj) |
2310 | 2.47k | { |
2311 | 2.47k | struct extern_desc *last_ext = NULL, *ext; |
2312 | 2.47k | size_t map_sz; |
2313 | 2.47k | int i, err; |
2314 | | |
2315 | 3.46k | for (i = 0; i < obj->nr_extern; i++) { |
2316 | 996 | ext = &obj->externs[i]; |
2317 | 996 | if (ext->type == EXT_KCFG) |
2318 | 246 | last_ext = ext; |
2319 | 996 | } |
2320 | | |
2321 | 2.47k | if (!last_ext) |
2322 | 2.37k | return 0; |
2323 | | |
2324 | 92 | map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz; |
2325 | 92 | err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG, |
2326 | 92 | ".kconfig", obj->efile.symbols_shndx, |
2327 | 92 | NULL, map_sz); |
2328 | 92 | if (err) |
2329 | 72 | return err; |
2330 | | |
2331 | 20 | obj->kconfig_map_idx = obj->nr_maps - 1; |
2332 | | |
2333 | 20 | return 0; |
2334 | 92 | } |
2335 | | |
2336 | | const struct btf_type * |
2337 | | skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id) |
2338 | 6.23k | { |
2339 | 6.23k | const struct btf_type *t = btf__type_by_id(btf, id); |
2340 | | |
2341 | 6.23k | if (res_id) |
2342 | 2.98k | *res_id = id; |
2343 | | |
2344 | 8.06k | while (btf_is_mod(t) || btf_is_typedef(t)) { |
2345 | 1.82k | if (res_id) |
2346 | 1.04k | *res_id = t->type; |
2347 | 1.82k | t = btf__type_by_id(btf, t->type); |
2348 | 1.82k | } |
2349 | | |
2350 | 6.23k | return t; |
2351 | 6.23k | } |
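/* Illustrative sketch (not part of libbpf): for BTF describing
 * "typedef const volatile unsigned int my_cnt_t;", passing the typedef's
 * id walks the typedef and const/volatile modifiers (ordering is up to
 * the compiler) and returns the underlying BTF_KIND_INT type; *res_id,
 * if non-NULL, ends up holding that int's type id.
 */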
2352 | | |
2353 | | static const struct btf_type * |
2354 | | resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id) |
2355 | 0 | { |
2356 | 0 | const struct btf_type *t; |
2357 | |
2358 | 0 | t = skip_mods_and_typedefs(btf, id, NULL); |
2359 | 0 | if (!btf_is_ptr(t)) |
2360 | 0 | return NULL; |
2361 | | |
2362 | 0 | t = skip_mods_and_typedefs(btf, t->type, res_id); |
2363 | |
2364 | 0 | return btf_is_func_proto(t) ? t : NULL; |
2365 | 0 | } |
2366 | | |
2367 | | static const char *__btf_kind_str(__u16 kind) |
2368 | 337 | { |
2369 | 337 | switch (kind) { |
2370 | 69 | case BTF_KIND_UNKN: return "void"; |
2371 | 10 | case BTF_KIND_INT: return "int"; |
2372 | 4 | case BTF_KIND_PTR: return "ptr"; |
2373 | 8 | case BTF_KIND_ARRAY: return "array"; |
2374 | 3 | case BTF_KIND_STRUCT: return "struct"; |
2375 | 3 | case BTF_KIND_UNION: return "union"; |
2376 | 2 | case BTF_KIND_ENUM: return "enum"; |
2377 | 9 | case BTF_KIND_FWD: return "fwd"; |
2378 | 2 | case BTF_KIND_TYPEDEF: return "typedef"; |
2379 | 3 | case BTF_KIND_VOLATILE: return "volatile"; |
2380 | 2 | case BTF_KIND_CONST: return "const"; |
2381 | 4 | case BTF_KIND_RESTRICT: return "restrict"; |
2382 | 108 | case BTF_KIND_FUNC: return "func"; |
2383 | 10 | case BTF_KIND_FUNC_PROTO: return "func_proto"; |
2384 | 29 | case BTF_KIND_VAR: return "var"; |
2385 | 52 | case BTF_KIND_DATASEC: return "datasec"; |
2386 | 7 | case BTF_KIND_FLOAT: return "float"; |
2387 | 1 | case BTF_KIND_DECL_TAG: return "decl_tag"; |
2388 | 5 | case BTF_KIND_TYPE_TAG: return "type_tag"; |
2389 | 6 | case BTF_KIND_ENUM64: return "enum64"; |
2390 | 0 | default: return "unknown"; |
2391 | 337 | } |
2392 | 337 | } |
2393 | | |
2394 | | const char *btf_kind_str(const struct btf_type *t) |
2395 | 337 | { |
2396 | 337 | return __btf_kind_str(btf_kind(t)); |
2397 | 337 | } |
2398 | | |
2399 | | /* |
2400 | | * Fetch integer attribute of BTF map definition. Such attributes are |
2401 | | * represented using a pointer to an array, in which dimensionality of array |
2402 | | * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY]; |
2403 | | * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF |
2404 | | * type definition, while using only sizeof(void *) space in ELF data section. |
2405 | | */ |
2406 | | static bool get_map_field_int(const char *map_name, const struct btf *btf, |
2407 | | const struct btf_member *m, __u32 *res) |
2408 | 786 | { |
2409 | 786 | const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); |
2410 | 786 | const char *name = btf__name_by_offset(btf, m->name_off); |
2411 | 786 | const struct btf_array *arr_info; |
2412 | 786 | const struct btf_type *arr_t; |
2413 | | |
2414 | 786 | if (!btf_is_ptr(t)) { |
2415 | 20 | pr_warn("map '%s': attr '%s': expected PTR, got %s.\n", |
2416 | 20 | map_name, name, btf_kind_str(t)); |
2417 | 20 | return false; |
2418 | 20 | } |
2419 | | |
2420 | 766 | arr_t = btf__type_by_id(btf, t->type); |
2421 | 766 | if (!arr_t) { |
2422 | 0 | pr_warn("map '%s': attr '%s': type [%u] not found.\n", |
2423 | 0 | map_name, name, t->type); |
2424 | 0 | return false; |
2425 | 0 | } |
2426 | 766 | if (!btf_is_array(arr_t)) { |
2427 | 5 | pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n", |
2428 | 5 | map_name, name, btf_kind_str(arr_t)); |
2429 | 5 | return false; |
2430 | 5 | } |
2431 | 761 | arr_info = btf_array(arr_t); |
2432 | 761 | *res = arr_info->nelems; |
2433 | 761 | return true; |
2434 | 766 | } |
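/* Illustrative sketch (not part of libbpf): the pointer-to-array encoding
 * above is what bpf_helpers.h's __uint macro emits:
 *
 *   #define __uint(name, val) int (*name)[val]
 *
 * so __uint(max_entries, 64) declares "int (*max_entries)[64]" and this
 * function recovers 64 from the array type's nelems.
 */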
2435 | | |
2436 | | static bool get_map_field_long(const char *map_name, const struct btf *btf, |
2437 | | const struct btf_member *m, __u64 *res) |
2438 | 65 | { |
2439 | 65 | const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); |
2440 | 65 | const char *name = btf__name_by_offset(btf, m->name_off); |
2441 | | |
2442 | 65 | if (btf_is_ptr(t)) { |
2443 | 32 | __u32 res32; |
2444 | 32 | bool ret; |
2445 | | |
2446 | 32 | ret = get_map_field_int(map_name, btf, m, &res32); |
2447 | 32 | if (ret) |
2448 | 30 | *res = (__u64)res32; |
2449 | 32 | return ret; |
2450 | 32 | } |
2451 | | |
2452 | 33 | if (!btf_is_enum(t) && !btf_is_enum64(t)) { |
2453 | 13 | pr_warn("map '%s': attr '%s': expected ENUM or ENUM64, got %s.\n", |
2454 | 13 | map_name, name, btf_kind_str(t)); |
2455 | 13 | return false; |
2456 | 13 | } |
2457 | | |
2458 | 20 | if (btf_vlen(t) != 1) { |
2459 | 1 | pr_warn("map '%s': attr '%s': invalid __ulong\n", |
2460 | 1 | map_name, name); |
2461 | 1 | return false; |
2462 | 1 | } |
2463 | | |
2464 | 19 | if (btf_is_enum(t)) { |
2465 | 11 | const struct btf_enum *e = btf_enum(t); |
2466 | | |
2467 | 11 | *res = e->val; |
2468 | 11 | } else { |
2469 | 8 | const struct btf_enum64 *e = btf_enum64(t); |
2470 | | |
2471 | 8 | *res = btf_enum64_value(e); |
2472 | 8 | } |
2473 | 19 | return true; |
2474 | 20 | } |
2475 | | |
2476 | | static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name) |
2477 | 1 | { |
2478 | 1 | int len; |
2479 | | |
2480 | 1 | len = snprintf(buf, buf_sz, "%s/%s", path, name); |
2481 | 1 | if (len < 0) |
2482 | 0 | return -EINVAL; |
2483 | 1 | if (len >= buf_sz) |
2484 | 0 | return -ENAMETOOLONG; |
2485 | | |
2486 | 1 | return 0; |
2487 | 1 | } |
2488 | | |
2489 | | static int build_map_pin_path(struct bpf_map *map, const char *path) |
2490 | 1 | { |
2491 | 1 | char buf[PATH_MAX]; |
2492 | 1 | int err; |
2493 | | |
2494 | 1 | if (!path) |
2495 | 1 | path = BPF_FS_DEFAULT_PATH; |
2496 | | |
2497 | 1 | err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map)); |
2498 | 1 | if (err) |
2499 | 0 | return err; |
2500 | | |
2501 | 1 | return bpf_map__set_pin_path(map, buf); |
2502 | 1 | } |
2503 | | |
2504 | | /* should match definition in bpf_helpers.h */ |
2505 | | enum libbpf_pin_type { |
2506 | | LIBBPF_PIN_NONE, |
2507 | | /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ |
2508 | | LIBBPF_PIN_BY_NAME, |
2509 | | }; |
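/* Illustrative sketch (not part of libbpf): requesting pinning from BPF C,
 * assuming <bpf/bpf_helpers.h> for the __uint/__type/SEC macros. With
 * LIBBPF_PIN_BY_NAME the map below would be pinned at
 * /sys/fs/bpf/pinned_map unless the pin_root_path open option overrides
 * the prefix:
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_ARRAY);
 *           __uint(max_entries, 1);
 *           __type(key, __u32);
 *           __type(value, __u64);
 *           __uint(pinning, LIBBPF_PIN_BY_NAME);
 *   } pinned_map SEC(".maps");
 */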
2510 | | |
2511 | | int parse_btf_map_def(const char *map_name, struct btf *btf, |
2512 | | const struct btf_type *def_t, bool strict, |
2513 | | struct btf_map_def *map_def, struct btf_map_def *inner_def) |
2514 | 1.16k | { |
2515 | 1.16k | const struct btf_type *t; |
2516 | 1.16k | const struct btf_member *m; |
2517 | 1.16k | bool is_inner = inner_def == NULL; |
2518 | 1.16k | int vlen, i; |
2519 | | |
2520 | 1.16k | vlen = btf_vlen(def_t); |
2521 | 1.16k | m = btf_members(def_t); |
2522 | 2.51k | for (i = 0; i < vlen; i++, m++) { |
2523 | 2.39k | const char *name = btf__name_by_offset(btf, m->name_off); |
2524 | | |
2525 | 2.39k | if (!name) { |
2526 | 0 | pr_warn("map '%s': invalid field #%d.\n", map_name, i); |
2527 | 0 | return -EINVAL; |
2528 | 0 | } |
2529 | 2.39k | if (strcmp(name, "type") == 0) { |
2530 | 292 | if (!get_map_field_int(map_name, btf, m, &map_def->map_type)) |
2531 | 5 | return -EINVAL; |
2532 | 287 | map_def->parts |= MAP_DEF_MAP_TYPE; |
2533 | 2.10k | } else if (strcmp(name, "max_entries") == 0) { |
2534 | 15 | if (!get_map_field_int(map_name, btf, m, &map_def->max_entries)) |
2535 | 1 | return -EINVAL; |
2536 | 14 | map_def->parts |= MAP_DEF_MAX_ENTRIES; |
2537 | 2.08k | } else if (strcmp(name, "map_flags") == 0) { |
2538 | 13 | if (!get_map_field_int(map_name, btf, m, &map_def->map_flags)) |
2539 | 2 | return -EINVAL; |
2540 | 11 | map_def->parts |= MAP_DEF_MAP_FLAGS; |
2541 | 2.07k | } else if (strcmp(name, "numa_node") == 0) { |
2542 | 24 | if (!get_map_field_int(map_name, btf, m, &map_def->numa_node)) |
2543 | 3 | return -EINVAL; |
2544 | 21 | map_def->parts |= MAP_DEF_NUMA_NODE; |
2545 | 2.04k | } else if (strcmp(name, "key_size") == 0) { |
2546 | 154 | __u32 sz; |
2547 | | |
2548 | 154 | if (!get_map_field_int(map_name, btf, m, &sz)) |
2549 | 4 | return -EINVAL; |
2550 | 150 | if (map_def->key_size && map_def->key_size != sz) { |
2551 | 64 | pr_warn("map '%s': conflicting key size %u != %u.\n", |
2552 | 64 | map_name, map_def->key_size, sz); |
2553 | 64 | return -EINVAL; |
2554 | 64 | } |
2555 | 86 | map_def->key_size = sz; |
2556 | 86 | map_def->parts |= MAP_DEF_KEY_SIZE; |
2557 | 1.89k | } else if (strcmp(name, "key") == 0) { |
2558 | 376 | __s64 sz; |
2559 | | |
2560 | 376 | t = btf__type_by_id(btf, m->type); |
2561 | 376 | if (!t) { |
2562 | 0 | pr_warn("map '%s': key type [%d] not found.\n", |
2563 | 0 | map_name, m->type); |
2564 | 0 | return -EINVAL; |
2565 | 0 | } |
2566 | 376 | if (!btf_is_ptr(t)) { |
2567 | 8 | pr_warn("map '%s': key spec is not PTR: %s.\n", |
2568 | 8 | map_name, btf_kind_str(t)); |
2569 | 8 | return -EINVAL; |
2570 | 8 | } |
2571 | 368 | sz = btf__resolve_size(btf, t->type); |
2572 | 368 | if (sz < 0) { |
2573 | 11 | pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n", |
2574 | 11 | map_name, t->type, (ssize_t)sz); |
2575 | 11 | return sz; |
2576 | 11 | } |
2577 | 357 | if (map_def->key_size && map_def->key_size != sz) { |
2578 | 50 | pr_warn("map '%s': conflicting key size %u != %zd.\n", |
2579 | 50 | map_name, map_def->key_size, (ssize_t)sz); |
2580 | 50 | return -EINVAL; |
2581 | 50 | } |
2582 | 307 | map_def->key_size = sz; |
2583 | 307 | map_def->key_type_id = t->type; |
2584 | 307 | map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE; |
2585 | 1.51k | } else if (strcmp(name, "value_size") == 0) { |
2586 | 182 | __u32 sz; |
2587 | | |
2588 | 182 | if (!get_map_field_int(map_name, btf, m, &sz)) |
2589 | 7 | return -EINVAL; |
2590 | 175 | if (map_def->value_size && map_def->value_size != sz) { |
2591 | 65 | pr_warn("map '%s': conflicting value size %u != %u.\n", |
2592 | 65 | map_name, map_def->value_size, sz); |
2593 | 65 | return -EINVAL; |
2594 | 65 | } |
2595 | 110 | map_def->value_size = sz; |
2596 | 110 | map_def->parts |= MAP_DEF_VALUE_SIZE; |
2597 | 1.33k | } else if (strcmp(name, "value") == 0) { |
2598 | 500 | __s64 sz; |
2599 | | |
2600 | 500 | t = btf__type_by_id(btf, m->type); |
2601 | 500 | if (!t) { |
2602 | 0 | pr_warn("map '%s': value type [%d] not found.\n", |
2603 | 0 | map_name, m->type); |
2604 | 0 | return -EINVAL; |
2605 | 0 | } |
2606 | 500 | if (!btf_is_ptr(t)) { |
2607 | 7 | pr_warn("map '%s': value spec is not PTR: %s.\n", |
2608 | 7 | map_name, btf_kind_str(t)); |
2609 | 7 | return -EINVAL; |
2610 | 7 | } |
2611 | 493 | sz = btf__resolve_size(btf, t->type); |
2612 | 493 | if (sz < 0) { |
2613 | 7 | pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n", |
2614 | 7 | map_name, t->type, (ssize_t)sz); |
2615 | 7 | return sz; |
2616 | 7 | } |
2617 | 486 | if (map_def->value_size && map_def->value_size != sz) { |
2618 | 45 | pr_warn("map '%s': conflicting value size %u != %zd.\n", |
2619 | 45 | map_name, map_def->value_size, (ssize_t)sz); |
2620 | 45 | return -EINVAL; |
2621 | 45 | } |
2622 | 441 | map_def->value_size = sz; |
2623 | 441 | map_def->value_type_id = t->type; |
2624 | 441 | map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE; |
2625 | 441 | } |
2626 | 837 | else if (strcmp(name, "values") == 0) { |
2627 | 162 | bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type); |
2628 | 162 | bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY; |
2629 | 162 | const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value"; |
2630 | 162 | char inner_map_name[128]; |
2631 | 162 | int err; |
2632 | | |
2633 | 162 | if (is_inner) { |
2634 | 2 | pr_warn("map '%s': multi-level inner maps not supported.\n", |
2635 | 2 | map_name); |
2636 | 2 | return -ENOTSUP; |
2637 | 2 | } |
2638 | 160 | if (i != vlen - 1) { |
2639 | 11 | pr_warn("map '%s': '%s' member should be last.\n", |
2640 | 11 | map_name, name); |
2641 | 11 | return -EINVAL; |
2642 | 11 | } |
2643 | 149 | if (!is_map_in_map && !is_prog_array) { |
2644 | 46 | pr_warn("map '%s': should be map-in-map or prog-array.\n", |
2645 | 46 | map_name); |
2646 | 46 | return -ENOTSUP; |
2647 | 46 | } |
2648 | 103 | if (map_def->value_size && map_def->value_size != 4) { |
2649 | 47 | pr_warn("map '%s': conflicting value size %u != 4.\n", |
2650 | 47 | map_name, map_def->value_size); |
2651 | 47 | return -EINVAL; |
2652 | 47 | } |
2653 | 56 | map_def->value_size = 4; |
2654 | 56 | t = btf__type_by_id(btf, m->type); |
2655 | 56 | if (!t) { |
2656 | 0 | pr_warn("map '%s': %s type [%d] not found.\n", |
2657 | 0 | map_name, desc, m->type); |
2658 | 0 | return -EINVAL; |
2659 | 0 | } |
2660 | 56 | if (!btf_is_array(t) || btf_array(t)->nelems) { |
2661 | 41 | pr_warn("map '%s': %s spec is not a zero-sized array.\n", |
2662 | 41 | map_name, desc); |
2663 | 41 | return -EINVAL; |
2664 | 41 | } |
2665 | 15 | t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL); |
2666 | 15 | if (!btf_is_ptr(t)) { |
2667 | 3 | pr_warn("map '%s': %s def is of unexpected kind %s.\n", |
2668 | 3 | map_name, desc, btf_kind_str(t)); |
2669 | 3 | return -EINVAL; |
2670 | 3 | } |
2671 | 12 | t = skip_mods_and_typedefs(btf, t->type, NULL); |
2672 | 12 | if (is_prog_array) { |
2673 | 4 | if (!btf_is_func_proto(t)) { |
2674 | 3 | pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n", |
2675 | 3 | map_name, btf_kind_str(t)); |
2676 | 3 | return -EINVAL; |
2677 | 3 | } |
2678 | 1 | continue; |
2679 | 4 | } |
2680 | 8 | if (!btf_is_struct(t)) { |
2681 | 5 | pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", |
2682 | 5 | map_name, btf_kind_str(t)); |
2683 | 5 | return -EINVAL; |
2684 | 5 | } |
2685 | | |
2686 | 3 | snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name); |
2687 | 3 | err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL); |
2688 | 3 | if (err) |
2689 | 3 | return err; |
2690 | | |
2691 | 0 | map_def->parts |= MAP_DEF_INNER_MAP; |
2692 | 675 | } else if (strcmp(name, "pinning") == 0) { |
2693 | 74 | __u32 val; |
2694 | | |
2695 | 74 | if (is_inner) { |
2696 | 0 | pr_warn("map '%s': inner def can't be pinned.\n", map_name); |
2697 | 0 | return -EINVAL; |
2698 | 0 | } |
2699 | 74 | if (!get_map_field_int(map_name, btf, m, &val)) |
2700 | 1 | return -EINVAL; |
2701 | 73 | if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) { |
2702 | 51 | pr_warn("map '%s': invalid pinning value %u.\n", |
2703 | 51 | map_name, val); |
2704 | 51 | return -EINVAL; |
2705 | 51 | } |
2706 | 22 | map_def->pinning = val; |
2707 | 22 | map_def->parts |= MAP_DEF_PINNING; |
2708 | 601 | } else if (strcmp(name, "map_extra") == 0) { |
2709 | 65 | __u64 map_extra; |
2710 | | |
2711 | 65 | if (!get_map_field_long(map_name, btf, m, &map_extra)) |
2712 | 16 | return -EINVAL; |
2713 | 49 | map_def->map_extra = map_extra; |
2714 | 49 | map_def->parts |= MAP_DEF_MAP_EXTRA; |
2715 | 536 | } else { |
2716 | 536 | if (strict) { |
2717 | 536 | pr_warn("map '%s': unknown field '%s'.\n", map_name, name); |
2718 | 536 | return -ENOTSUP; |
2719 | 536 | } |
2720 | 0 | pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name); |
2721 | 0 | } |
2722 | 2.39k | } |
2723 | | |
2724 | 117 | if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) { |
2725 | 41 | pr_warn("map '%s': map type isn't specified.\n", map_name); |
2726 | 41 | return -EINVAL; |
2727 | 41 | } |
2728 | | |
2729 | 76 | return 0; |
2730 | 117 | } |
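/* Illustrative sketch (not part of libbpf): a map-in-map definition that
 * exercises the "values" branch above, assuming bpf_helpers.h's __array
 * macro; the zero-sized array of pointers to the inner struct is parsed
 * recursively under the "<name>.inner" name:
 *
 *   struct inner {
 *           __uint(type, BPF_MAP_TYPE_ARRAY);
 *           __uint(max_entries, 4);
 *           __type(key, __u32);
 *           __type(value, __u32);
 *   };
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *           __uint(max_entries, 8);
 *           __type(key, __u32);
 *           __array(values, struct inner);
 *   } outer SEC(".maps");
 */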
2731 | | |
2732 | | static size_t adjust_ringbuf_sz(size_t sz) |
2733 | 3 | { |
2734 | 3 | __u32 page_sz = sysconf(_SC_PAGE_SIZE); |
2735 | 3 | __u32 mul; |
2736 | | |
2737 | | /* if user forgot to set any size, make sure they see error */ |
2738 | 3 | if (sz == 0) |
2739 | 1 | return 0; |
2740 | | /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be |
2741 | | * a power-of-2 multiple of the kernel's page size. If the user diligently
2742 | | * satisfied these conditions, pass the size through.
2743 | | */ |
2744 | 2 | if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz)) |
2745 | 0 | return sz; |
2746 | | |
2747 | | /* Otherwise find closest (page_sz * power_of_2) product bigger than |
2748 | | * user-set size to satisfy both user size request and kernel |
2749 | | * requirements and substitute correct max_entries for map creation. |
2750 | | */ |
2751 | 2 | for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) { |
2752 | 2 | if (mul * page_sz > sz) |
2753 | 2 | return mul * page_sz; |
2754 | 2 | } |
2755 | | |
2756 | | /* if it's impossible to satisfy the conditions (i.e., user size is |
2757 | | * very close to UINT_MAX but is not a power-of-2 multiple of |
2758 | | * page_size) then just return original size and let kernel reject it |
2759 | | */ |
2760 | 0 | return sz; |
2761 | 2 | } |
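/* Worked example (not part of libbpf), assuming page_sz == 4096:
 *
 *   sz = 0      -> 0      (left as-is so the kernel rejects it)
 *   sz = 4096   -> 4096   (already page_sz * 2^0, passed through)
 *   sz = 5000   -> 8192   (rounded up to page_sz * 2^1)
 *   sz = 100000 -> 131072 (rounded up to page_sz * 2^5)
 */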
2762 | | |
2763 | | static bool map_is_ringbuf(const struct bpf_map *map) |
2764 | 76 | { |
2765 | 76 | return map->def.type == BPF_MAP_TYPE_RINGBUF || |
2766 | 76 | map->def.type == BPF_MAP_TYPE_USER_RINGBUF; |
2767 | 76 | } |
2768 | | |
2769 | | static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def) |
2770 | 76 | { |
2771 | 76 | map->def.type = def->map_type; |
2772 | 76 | map->def.key_size = def->key_size; |
2773 | 76 | map->def.value_size = def->value_size; |
2774 | 76 | map->def.max_entries = def->max_entries; |
2775 | 76 | map->def.map_flags = def->map_flags; |
2776 | 76 | map->map_extra = def->map_extra; |
2777 | | |
2778 | 76 | map->numa_node = def->numa_node; |
2779 | 76 | map->btf_key_type_id = def->key_type_id; |
2780 | 76 | map->btf_value_type_id = def->value_type_id; |
2781 | | |
2782 | | /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */ |
2783 | 76 | if (map_is_ringbuf(map)) |
2784 | 3 | map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries); |
2785 | | |
2786 | 76 | if (def->parts & MAP_DEF_MAP_TYPE) |
2787 | 76 | pr_debug("map '%s': found type = %u.\n", map->name, def->map_type); |
2788 | | |
2789 | 76 | if (def->parts & MAP_DEF_KEY_TYPE) |
2790 | 76 | pr_debug("map '%s': found key [%u], sz = %u.\n", |
2791 | 62 | map->name, def->key_type_id, def->key_size); |
2792 | 62 | else if (def->parts & MAP_DEF_KEY_SIZE) |
2793 | 3 | pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size); |
2794 | | |
2795 | 76 | if (def->parts & MAP_DEF_VALUE_TYPE) |
2796 | 76 | pr_debug("map '%s': found value [%u], sz = %u.\n", |
2797 | 32 | map->name, def->value_type_id, def->value_size); |
2798 | 32 | else if (def->parts & MAP_DEF_VALUE_SIZE) |
2799 | 15 | pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size); |
2800 | | |
2801 | 76 | if (def->parts & MAP_DEF_MAX_ENTRIES) |
2802 | 76 | pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries); |
2803 | 76 | if (def->parts & MAP_DEF_MAP_FLAGS) |
2804 | 76 | pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags); |
2805 | 76 | if (def->parts & MAP_DEF_MAP_EXTRA) |
2806 | 76 | pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name, |
2807 | 76 | (unsigned long long)def->map_extra); |
2808 | 76 | if (def->parts & MAP_DEF_PINNING) |
2809 | 76 | pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning); |
2810 | 76 | if (def->parts & MAP_DEF_NUMA_NODE) |
2811 | 76 | pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node); |
2812 | | |
2813 | 76 | if (def->parts & MAP_DEF_INNER_MAP) |
2814 | 76 | pr_debug("map '%s': found inner map definition.\n", map->name); |
2815 | 76 | } |
2816 | | |
2817 | | static const char *btf_var_linkage_str(__u32 linkage) |
2818 | 52 | { |
2819 | 52 | switch (linkage) { |
2820 | 1 | case BTF_VAR_STATIC: return "static"; |
2821 | 0 | case BTF_VAR_GLOBAL_ALLOCATED: return "global"; |
2822 | 1 | case BTF_VAR_GLOBAL_EXTERN: return "extern"; |
2823 | 50 | default: return "unknown"; |
2824 | 52 | } |
2825 | 52 | } |
2826 | | |
2827 | | static int bpf_object__init_user_btf_map(struct bpf_object *obj, |
2828 | | const struct btf_type *sec, |
2829 | | int var_idx, int sec_idx, |
2830 | | const Elf_Data *data, bool strict, |
2831 | | const char *pin_root_path) |
2832 | 1.23k | { |
2833 | 1.23k | struct btf_map_def map_def = {}, inner_def = {}; |
2834 | 1.23k | const struct btf_type *var, *def; |
2835 | 1.23k | const struct btf_var_secinfo *vi; |
2836 | 1.23k | const struct btf_var *var_extra; |
2837 | 1.23k | const char *map_name; |
2838 | 1.23k | struct bpf_map *map; |
2839 | 1.23k | int err; |
2840 | | |
2841 | 1.23k | vi = btf_var_secinfos(sec) + var_idx; |
2842 | 1.23k | var = btf__type_by_id(obj->btf, vi->type); |
2843 | 1.23k | var_extra = btf_var(var); |
2844 | 1.23k | map_name = btf__name_by_offset(obj->btf, var->name_off); |
2845 | | |
2846 | 1.23k | if (map_name == NULL || map_name[0] == '\0') { |
2847 | 1 | pr_warn("map #%d: empty name.\n", var_idx); |
2848 | 1 | return -EINVAL; |
2849 | 1 | } |
2850 | 1.23k | if ((__u64)vi->offset + vi->size > data->d_size) { |
2851 | 3 | pr_warn("map '%s' BTF data is corrupted.\n", map_name); |
2852 | 3 | return -EINVAL; |
2853 | 3 | } |
2854 | 1.22k | if (!btf_is_var(var)) { |
2855 | 0 | pr_warn("map '%s': unexpected var kind %s.\n", |
2856 | 0 | map_name, btf_kind_str(var)); |
2857 | 0 | return -EINVAL; |
2858 | 0 | } |
2859 | 1.22k | if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) { |
2860 | 52 | pr_warn("map '%s': unsupported map linkage %s.\n", |
2861 | 52 | map_name, btf_var_linkage_str(var_extra->linkage)); |
2862 | 52 | return -EOPNOTSUPP; |
2863 | 52 | } |
2864 | | |
2865 | 1.17k | def = skip_mods_and_typedefs(obj->btf, var->type, NULL); |
2866 | 1.17k | if (!btf_is_struct(def)) { |
2867 | 16 | pr_warn("map '%s': unexpected def kind %s.\n", |
2868 | 16 | map_name, btf_kind_str(var)); |
2869 | 16 | return -EINVAL; |
2870 | 16 | } |
2871 | 1.15k | if (def->size > vi->size) { |
2872 | 1 | pr_warn("map '%s': invalid def size.\n", map_name); |
2873 | 1 | return -EINVAL; |
2874 | 1 | } |
2875 | | |
2876 | 1.15k | map = bpf_object__add_map(obj); |
2877 | 1.15k | if (IS_ERR(map)) |
2878 | 0 | return PTR_ERR(map); |
2879 | 1.15k | map->name = strdup(map_name); |
2880 | 1.15k | if (!map->name) { |
2881 | 0 | pr_warn("map '%s': failed to alloc map name.\n", map_name); |
2882 | 0 | return -ENOMEM; |
2883 | 0 | } |
2884 | 1.15k | map->libbpf_type = LIBBPF_MAP_UNSPEC; |
2885 | 1.15k | map->def.type = BPF_MAP_TYPE_UNSPEC; |
2886 | 1.15k | map->sec_idx = sec_idx; |
2887 | 1.15k | map->sec_offset = vi->offset; |
2888 | 1.15k | map->btf_var_idx = var_idx; |
2889 | 1.15k | pr_debug("map '%s': at sec_idx %d, offset %zu.\n", |
2890 | 1.15k | map_name, map->sec_idx, map->sec_offset); |
2891 | | |
2892 | 1.15k | err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def); |
2893 | 1.15k | if (err) |
2894 | 1.08k | return err; |
2895 | | |
2896 | 76 | fill_map_from_def(map, &map_def); |
2897 | | |
2898 | 76 | if (map_def.pinning == LIBBPF_PIN_BY_NAME) { |
2899 | 1 | err = build_map_pin_path(map, pin_root_path); |
2900 | 1 | if (err) { |
2901 | 0 | pr_warn("map '%s': couldn't build pin path.\n", map->name); |
2902 | 0 | return err; |
2903 | 0 | } |
2904 | 1 | } |
2905 | | |
2906 | 76 | if (map_def.parts & MAP_DEF_INNER_MAP) { |
2907 | 0 | map->inner_map = calloc(1, sizeof(*map->inner_map)); |
2908 | 0 | if (!map->inner_map) |
2909 | 0 | return -ENOMEM; |
2910 | 0 | map->inner_map->fd = create_placeholder_fd(); |
2911 | 0 | if (map->inner_map->fd < 0) |
2912 | 0 | return map->inner_map->fd; |
2913 | 0 | map->inner_map->sec_idx = sec_idx; |
2914 | 0 | map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1); |
2915 | 0 | if (!map->inner_map->name) |
2916 | 0 | return -ENOMEM; |
2917 | 0 | sprintf(map->inner_map->name, "%s.inner", map_name); |
2918 | |
2919 | 0 | fill_map_from_def(map->inner_map, &inner_def); |
2920 | 0 | } |
2921 | | |
2922 | 76 | err = map_fill_btf_type_info(obj, map); |
2923 | 76 | if (err) |
2924 | 0 | return err; |
2925 | | |
2926 | 76 | return 0; |
2927 | 76 | } |
2928 | | |
2929 | | static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map, |
2930 | | const char *sec_name, int sec_idx, |
2931 | | void *data, size_t data_sz) |
2932 | 0 | { |
2933 | 0 | const long page_sz = sysconf(_SC_PAGE_SIZE); |
2934 | 0 | size_t mmap_sz; |
2935 | |
2936 | 0 | mmap_sz = bpf_map_mmap_sz(obj->arena_map); |
2937 | 0 | if (roundup(data_sz, page_sz) > mmap_sz) { |
2938 | 0 | pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n", |
2939 | 0 | sec_name, mmap_sz, data_sz); |
2940 | 0 | return -E2BIG; |
2941 | 0 | } |
2942 | | |
2943 | 0 | obj->arena_data = malloc(data_sz); |
2944 | 0 | if (!obj->arena_data) |
2945 | 0 | return -ENOMEM; |
2946 | 0 | memcpy(obj->arena_data, data, data_sz); |
2947 | 0 | obj->arena_data_sz = data_sz; |
2948 | | |
2949 | | /* make bpf_map__init_value() work for ARENA maps */ |
2950 | 0 | map->mmaped = obj->arena_data; |
2951 | |
2952 | 0 | return 0; |
2953 | 0 | } |
2954 | | |
2955 | | static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict, |
2956 | | const char *pin_root_path) |
2957 | 3.69k | { |
2958 | 3.69k | const struct btf_type *sec = NULL; |
2959 | 3.69k | int nr_types, i, vlen, err; |
2960 | 3.69k | const struct btf_type *t; |
2961 | 3.69k | const char *name; |
2962 | 3.69k | Elf_Data *data; |
2963 | 3.69k | Elf_Scn *scn; |
2964 | | |
2965 | 3.69k | if (obj->efile.btf_maps_shndx < 0) |
2966 | 2.41k | return 0; |
2967 | | |
2968 | 1.27k | scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx); |
2969 | 1.27k | data = elf_sec_data(obj, scn); |
2970 | 1.27k | if (!scn || !data) { |
2971 | 0 | pr_warn("elf: failed to get %s map definitions for %s\n", |
2972 | 0 | MAPS_ELF_SEC, obj->path); |
2973 | 0 | return -EINVAL; |
2974 | 0 | } |
2975 | | |
2976 | 1.27k | nr_types = btf__type_cnt(obj->btf); |
2977 | 15.7k | for (i = 1; i < nr_types; i++) { |
2978 | 15.6k | t = btf__type_by_id(obj->btf, i); |
2979 | 15.6k | if (!btf_is_datasec(t)) |
2980 | 13.9k | continue; |
2981 | 1.79k | name = btf__name_by_offset(obj->btf, t->name_off); |
2982 | 1.79k | if (strcmp(name, MAPS_ELF_SEC) == 0) { |
2983 | 1.23k | sec = t; |
2984 | 1.23k | obj->efile.btf_maps_sec_btf_id = i; |
2985 | 1.23k | break; |
2986 | 1.23k | } |
2987 | 1.79k | } |
2988 | | |
2989 | 1.27k | if (!sec) { |
2990 | 42 | pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC); |
2991 | 42 | return -ENOENT; |
2992 | 42 | } |
2993 | | |
2994 | 1.23k | vlen = btf_vlen(sec); |
2995 | 1.30k | for (i = 0; i < vlen; i++) { |
2996 | 1.23k | err = bpf_object__init_user_btf_map(obj, sec, i, |
2997 | 1.23k | obj->efile.btf_maps_shndx, |
2998 | 1.23k | data, strict, |
2999 | 1.23k | pin_root_path); |
3000 | 1.23k | if (err) |
3001 | 1.15k | return err; |
3002 | 1.23k | } |
3003 | | |
3004 | 154 | for (i = 0; i < obj->nr_maps; i++) { |
3005 | 76 | struct bpf_map *map = &obj->maps[i]; |
3006 | | |
3007 | 76 | if (map->def.type != BPF_MAP_TYPE_ARENA) |
3008 | 75 | continue; |
3009 | | |
3010 | 1 | if (obj->arena_map) { |
3011 | 0 | pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n", |
3012 | 0 | map->name, obj->arena_map->name); |
3013 | 0 | return -EINVAL; |
3014 | 0 | } |
3015 | 1 | obj->arena_map = map; |
3016 | | |
3017 | 1 | if (obj->efile.arena_data) { |
3018 | 0 | err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx, |
3019 | 0 | obj->efile.arena_data->d_buf, |
3020 | 0 | obj->efile.arena_data->d_size); |
3021 | 0 | if (err) |
3022 | 0 | return err; |
3023 | 0 | } |
3024 | 1 | } |
3025 | 78 | if (obj->efile.arena_data && !obj->arena_map) { |
3026 | 1 | pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n", |
3027 | 1 | ARENA_SEC); |
3028 | 1 | return -ENOENT; |
3029 | 1 | } |
3030 | | |
3031 | 77 | return 0; |
3032 | 78 | } |
3033 | | |
3034 | | static int bpf_object__init_maps(struct bpf_object *obj, |
3035 | | const struct bpf_object_open_opts *opts) |
3036 | 3.69k | { |
3037 | 3.69k | const char *pin_root_path; |
3038 | 3.69k | bool strict; |
3039 | 3.69k | int err = 0; |
3040 | | |
3041 | 3.69k | strict = !OPTS_GET(opts, relaxed_maps, false); |
3042 | 3.69k | pin_root_path = OPTS_GET(opts, pin_root_path, NULL); |
3043 | | |
3044 | 3.69k | err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path); |
3045 | 3.69k | err = err ?: bpf_object__init_global_data_maps(obj); |
3046 | 3.69k | err = err ?: bpf_object__init_kconfig_map(obj); |
3047 | 18.4E | err = err ?: bpf_object_init_struct_ops(obj); |
3048 | | |
3049 | 18.4E | return err; |
3050 | 1.27k | } |
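
On the userspace side, both knobs read here come straight from bpf_object_open_opts. A minimal usage sketch (the object file name is illustrative):

    #include <errno.h>
    #include <bpf/libbpf.h>

    static int open_with_opts(void)
    {
            LIBBPF_OPTS(bpf_object_open_opts, opts,
                    .relaxed_maps = true,                 /* strict = false above */
                    .pin_root_path = "/sys/fs/bpf/myapp", /* overrides BPF_FS_DEFAULT_PATH */
            );
            struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", &opts);

            if (!obj)
                    return -errno; /* libbpf 1.0+ returns NULL and sets errno */
            bpf_object__close(obj);
            return 0;
    }
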
3051 | | |
3052 | | static bool section_have_execinstr(struct bpf_object *obj, int idx) |
3053 | 2.04k | { |
3054 | 2.04k | Elf64_Shdr *sh; |
3055 | | |
3056 | 2.04k | sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx)); |
3057 | 2.04k | if (!sh) |
3058 | 0 | return false; |
3059 | | |
3060 | 2.04k | return sh->sh_flags & SHF_EXECINSTR; |
3061 | 2.04k | } |
3062 | | |
3063 | | static bool starts_with_qmark(const char *s) |
3064 | 0 | { |
3065 | 0 | return s && s[0] == '?'; |
3066 | 0 | } |
3067 | | |
3068 | | static bool btf_needs_sanitization(struct bpf_object *obj) |
3069 | 0 | { |
3070 | 0 | bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC); |
3071 | 0 | bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); |
3072 | 0 | bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); |
3073 | 0 | bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); |
3074 | 0 | bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); |
3075 | 0 | bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG); |
3076 | 0 | bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64); |
3077 | 0 | bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC); |
3078 | | 
3079 | 0 | return !has_func || !has_datasec || !has_func_global || !has_float || |
3080 | 0 | !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec; |
3081 | 0 | } |
3082 | | |
3083 | | static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) |
3084 | 0 | { |
3085 | 0 | bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC); |
3086 | 0 | bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); |
3087 | 0 | bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); |
3088 | 0 | bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); |
3089 | 0 | bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); |
3090 | 0 | bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG); |
3091 | 0 | bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64); |
3092 | 0 | bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC); |
3093 | 0 | int enum64_placeholder_id = 0; |
3094 | 0 | struct btf_type *t; |
3095 | 0 | int i, j, vlen; |
3096 | | 
3097 | 0 | for (i = 1; i < btf__type_cnt(btf); i++) { |
3098 | 0 | t = (struct btf_type *)btf__type_by_id(btf, i); |
3099 | | 
3100 | 0 | if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) { |
3101 | | /* replace VAR/DECL_TAG with INT */ |
3102 | 0 | t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0); |
3103 | | /* |
3104 | | * using size = 1 is the safest choice; 4 would be too
3105 | | * big and would cause a kernel BTF validation failure if
3106 | | * the original variable took less than 4 bytes
3107 | | */ |
3108 | 0 | t->size = 1; |
3109 | 0 | *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8); |
3110 | 0 | } else if (!has_datasec && btf_is_datasec(t)) { |
3111 | | /* replace DATASEC with STRUCT */ |
3112 | 0 | const struct btf_var_secinfo *v = btf_var_secinfos(t); |
3113 | 0 | struct btf_member *m = btf_members(t); |
3114 | 0 | struct btf_type *vt; |
3115 | 0 | char *name; |
3116 | | 
3117 | 0 | name = (char *)btf__name_by_offset(btf, t->name_off); |
3118 | 0 | while (*name) { |
3119 | 0 | if (*name == '.' || *name == '?') |
3120 | 0 | *name = '_'; |
3121 | 0 | name++; |
3122 | 0 | } |
3123 | | 
3124 | 0 | vlen = btf_vlen(t); |
3125 | 0 | t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen); |
3126 | 0 | for (j = 0; j < vlen; j++, v++, m++) { |
3127 | | /* order of field assignments is important */ |
3128 | 0 | m->offset = v->offset * 8; |
3129 | 0 | m->type = v->type; |
3130 | | /* preserve variable name as member name */ |
3131 | 0 | vt = (void *)btf__type_by_id(btf, v->type); |
3132 | 0 | m->name_off = vt->name_off; |
3133 | 0 | } |
3134 | 0 | } else if (!has_qmark_datasec && btf_is_datasec(t) && |
3135 | 0 | starts_with_qmark(btf__name_by_offset(btf, t->name_off))) { |
3136 | | /* replace '?' prefix with '_' for DATASEC names */ |
3137 | 0 | char *name; |
3138 | | 
3139 | 0 | name = (char *)btf__name_by_offset(btf, t->name_off); |
3140 | 0 | if (name[0] == '?') |
3141 | 0 | name[0] = '_'; |
3142 | 0 | } else if (!has_func && btf_is_func_proto(t)) { |
3143 | | /* replace FUNC_PROTO with ENUM */ |
3144 | 0 | vlen = btf_vlen(t); |
3145 | 0 | t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen); |
3146 | 0 | t->size = sizeof(__u32); /* kernel enforced */ |
3147 | 0 | } else if (!has_func && btf_is_func(t)) { |
3148 | | /* replace FUNC with TYPEDEF */ |
3149 | 0 | t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0); |
3150 | 0 | } else if (!has_func_global && btf_is_func(t)) { |
3151 | | /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */ |
3152 | 0 | t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0); |
3153 | 0 | } else if (!has_float && btf_is_float(t)) { |
3154 | | /* replace FLOAT with an equally-sized empty STRUCT; |
3155 | | * since C compilers do not accept e.g. "float" as a |
3156 | | * valid struct name, make it anonymous |
3157 | | */ |
3158 | 0 | t->name_off = 0; |
3159 | 0 | t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0); |
3160 | 0 | } else if (!has_type_tag && btf_is_type_tag(t)) { |
3161 | | /* replace TYPE_TAG with a CONST */ |
3162 | 0 | t->name_off = 0; |
3163 | 0 | t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0); |
3164 | 0 | } else if (!has_enum64 && btf_is_enum(t)) { |
3165 | | /* clear the kflag */ |
3166 | 0 | t->info = btf_type_info(btf_kind(t), btf_vlen(t), false); |
3167 | 0 | } else if (!has_enum64 && btf_is_enum64(t)) { |
3168 | | /* replace ENUM64 with a union */ |
3169 | 0 | struct btf_member *m; |
3170 | | 
3171 | 0 | if (enum64_placeholder_id == 0) { |
3172 | 0 | enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0); |
3173 | 0 | if (enum64_placeholder_id < 0) |
3174 | 0 | return enum64_placeholder_id; |
3175 | | |
3176 | 0 | t = (struct btf_type *)btf__type_by_id(btf, i); |
3177 | 0 | } |
3178 | | |
3179 | 0 | m = btf_members(t); |
3180 | 0 | vlen = btf_vlen(t); |
3181 | 0 | t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen); |
3182 | 0 | for (j = 0; j < vlen; j++, m++) { |
3183 | 0 | m->type = enum64_placeholder_id; |
3184 | 0 | m->offset = 0; |
3185 | 0 | } |
3186 | 0 | } |
3187 | 0 | } |
3188 | | |
3189 | 0 | return 0; |
3190 | 0 | } |
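
The sanitizer only downgrades kinds the running kernel cannot validate. As a rough diagnostic — not part of libbpf itself — the candidate types can be counted with the public BTF API:

    #include <bpf/btf.h>

    /* count VAR/DATASEC types that would be rewritten on a kernel
     * lacking FEAT_BTF_DATASEC support
     */
    static int count_datasec_candidates(const struct btf *btf)
    {
            int i, n = btf__type_cnt(btf), cnt = 0;

            for (i = 1; i < n; i++) {
                    const struct btf_type *t = btf__type_by_id(btf, i);

                    if (btf_is_var(t) || btf_is_datasec(t))
                            cnt++;
            }
            return cnt;
    }
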
3191 | | |
3192 | | static bool libbpf_needs_btf(const struct bpf_object *obj) |
3193 | 3.10k | { |
3194 | 3.10k | return obj->efile.btf_maps_shndx >= 0 || |
3195 | 3.10k | obj->efile.has_st_ops || |
3196 | 3.10k | obj->nr_extern > 0; |
3197 | 3.10k | } |
3198 | | |
3199 | | static bool kernel_needs_btf(const struct bpf_object *obj) |
3200 | 0 | { |
3201 | 0 | return obj->efile.has_st_ops; |
3202 | 0 | } |
3203 | | |
3204 | | static int bpf_object__init_btf(struct bpf_object *obj, |
3205 | | Elf_Data *btf_data, |
3206 | | Elf_Data *btf_ext_data) |
3207 | 6.98k | { |
3208 | 6.98k | int err = -ENOENT; |
3209 | | |
3210 | 6.98k | if (btf_data) { |
3211 | 5.06k | obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); |
3212 | 5.06k | err = libbpf_get_error(obj->btf); |
3213 | 5.06k | if (err) { |
3214 | 926 | obj->btf = NULL; |
3215 | 926 | pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err); |
3216 | 926 | goto out; |
3217 | 926 | } |
3218 | | /* enforce 8-byte pointers for BPF-targeted BTFs */ |
3219 | 4.13k | btf__set_pointer_size(obj->btf, 8); |
3220 | 4.13k | } |
3221 | 6.06k | if (btf_ext_data) { |
3222 | 301 | struct btf_ext_info *ext_segs[3]; |
3223 | 301 | int seg_num, sec_num; |
3224 | | |
3225 | 301 | if (!obj->btf) { |
3226 | 6 | pr_debug("Ignoring ELF section %s because the ELF section %s it depends on was not found.\n",
3227 | 6 | BTF_EXT_ELF_SEC, BTF_ELF_SEC); |
3228 | 6 | goto out; |
3229 | 6 | } |
3230 | 295 | obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); |
3231 | 295 | err = libbpf_get_error(obj->btf_ext); |
3232 | 295 | if (err) { |
3233 | 254 | pr_warn("Error loading ELF section %s: %d. Ignoring it and continuing.\n",
3234 | 254 | BTF_EXT_ELF_SEC, err); |
3235 | 254 | obj->btf_ext = NULL; |
3236 | 254 | goto out; |
3237 | 254 | } |
3238 | | |
3239 | | /* setup .BTF.ext to ELF section mapping */ |
3240 | 41 | ext_segs[0] = &obj->btf_ext->func_info; |
3241 | 41 | ext_segs[1] = &obj->btf_ext->line_info; |
3242 | 41 | ext_segs[2] = &obj->btf_ext->core_relo_info; |
3243 | 164 | for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) { |
3244 | 123 | struct btf_ext_info *seg = ext_segs[seg_num]; |
3245 | 123 | const struct btf_ext_info_sec *sec; |
3246 | 123 | const char *sec_name; |
3247 | 123 | Elf_Scn *scn; |
3248 | | |
3249 | 123 | if (seg->sec_cnt == 0) |
3250 | 74 | continue; |
3251 | | |
3252 | 49 | seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs)); |
3253 | 49 | if (!seg->sec_idxs) { |
3254 | 0 | err = -ENOMEM; |
3255 | 0 | goto out; |
3256 | 0 | } |
3257 | | |
3258 | 49 | sec_num = 0; |
3259 | 184 | for_each_btf_ext_sec(seg, sec) { |
3260 | | /* preventively increment index to avoid doing |
3261 | | * this before every continue below |
3262 | | */ |
3263 | 184 | sec_num++; |
3264 | | |
3265 | 184 | sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); |
3266 | 184 | if (str_is_empty(sec_name)) |
3267 | 111 | continue; |
3268 | 73 | scn = elf_sec_by_name(obj, sec_name); |
3269 | 73 | if (!scn) |
3270 | 56 | continue; |
3271 | | |
3272 | 17 | seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn); |
3273 | 17 | } |
3274 | 49 | } |
3275 | 41 | } |
3276 | 6.98k | out: |
3277 | 6.98k | if (err && libbpf_needs_btf(obj)) { |
3278 | 48 | pr_warn("BTF is required, but is missing or corrupted.\n"); |
3279 | 48 | return err; |
3280 | 48 | } |
3281 | 6.93k | return 0; |
3282 | 6.98k | } |
3283 | | |
3284 | | static int compare_vsi_off(const void *_a, const void *_b) |
3285 | 2.49k | { |
3286 | 2.49k | const struct btf_var_secinfo *a = _a; |
3287 | 2.49k | const struct btf_var_secinfo *b = _b; |
3288 | | |
3289 | 2.49k | return a->offset - b->offset; |
3290 | 2.49k | } |
3291 | | |
3292 | | static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf, |
3293 | | struct btf_type *t) |
3294 | 3.59k | { |
3295 | 3.59k | __u32 size = 0, i, vars = btf_vlen(t); |
3296 | 3.59k | const char *sec_name = btf__name_by_offset(btf, t->name_off); |
3297 | 3.59k | struct btf_var_secinfo *vsi; |
3298 | 3.59k | bool fixup_offsets = false; |
3299 | 3.59k | int err; |
3300 | | |
3301 | 3.59k | if (!sec_name) { |
3302 | 0 | pr_debug("No name found in string section for DATASEC kind.\n"); |
3303 | 0 | return -ENOENT; |
3304 | 0 | } |
3305 | | |
3306 | | /* Extern-backing datasecs (.ksyms, .kconfig) have their size and |
3307 | | * variable offsets set at the previous step. Further, not every |
3308 | | * extern BTF VAR has a corresponding ELF symbol preserved, so we skip
3309 | | * all fixups altogether for such sections and go straight to sorting |
3310 | | * VARs within their DATASEC. |
3311 | | */ |
3312 | 3.59k | if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0) |
3313 | 544 | goto sort_vars; |
3314 | | |
3315 | | /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to |
3316 | | * fix this up. But BPF static linker already fixes this up and fills |
3317 | | * all the sizes and offsets during static linking. So this step has |
3318 | | * to be optional. But the STV_HIDDEN handling is non-optional for any |
3319 | | * non-extern DATASEC, so the variable fixup loop below handles both |
3320 | | * functions at the same time, paying the cost of BTF VAR <-> ELF |
3321 | | * symbol matching just once. |
3322 | | */ |
3323 | 3.05k | if (t->size == 0) { |
3324 | 404 | err = find_elf_sec_sz(obj, sec_name, &size); |
3325 | 404 | if (err || !size) { |
3326 | 165 | pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n", |
3327 | 165 | sec_name, size, err); |
3328 | 165 | return -ENOENT; |
3329 | 165 | } |
3330 | | |
3331 | 239 | t->size = size; |
3332 | 239 | fixup_offsets = true; |
3333 | 239 | } |
3334 | | |
3335 | 4.98k | for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) { |
3336 | 2.44k | const struct btf_type *t_var; |
3337 | 2.44k | struct btf_var *var; |
3338 | 2.44k | const char *var_name; |
3339 | 2.44k | Elf64_Sym *sym; |
3340 | | |
3341 | 2.44k | t_var = btf__type_by_id(btf, vsi->type); |
3342 | 2.44k | if (!t_var || !btf_is_var(t_var)) { |
3343 | 121 | pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name); |
3344 | 121 | return -EINVAL; |
3345 | 121 | } |
3346 | | |
3347 | 2.32k | var = btf_var(t_var); |
3348 | 2.32k | if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN) |
3349 | 176 | continue; |
3350 | | |
3351 | 2.14k | var_name = btf__name_by_offset(btf, t_var->name_off); |
3352 | 2.14k | if (!var_name) { |
3353 | 0 | pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n", |
3354 | 0 | sec_name, i); |
3355 | 0 | return -ENOENT; |
3356 | 0 | } |
3357 | | |
3358 | 2.14k | sym = find_elf_var_sym(obj, var_name); |
3359 | 2.14k | if (IS_ERR(sym)) { |
3360 | 228 | pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n", |
3361 | 228 | sec_name, var_name); |
3362 | 228 | return -ENOENT; |
3363 | 228 | } |
3364 | | |
3365 | 1.92k | if (fixup_offsets) |
3366 | 106 | vsi->offset = sym->st_value; |
3367 | | |
3368 | | /* if variable is a global/weak symbol, but has restricted |
3369 | | * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR |
3370 | | * as static. This follows similar logic for functions (BPF |
3371 | | * subprogs) and influences libbpf's further decisions about |
3372 | | * whether to make global data BPF array maps as |
3373 | | * BPF_F_MMAPABLE. |
3374 | | */ |
3375 | 1.92k | if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN |
3376 | 1.92k | || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL) |
3377 | 100 | var->linkage = BTF_VAR_STATIC; |
3378 | 1.92k | } |
3379 | | |
3380 | 3.08k | sort_vars: |
3381 | 3.08k | qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off); |
3382 | 3.08k | return 0; |
3383 | 2.89k | } |
3384 | | |
3385 | | static int bpf_object_fixup_btf(struct bpf_object *obj) |
3386 | 4.20k | { |
3387 | 4.20k | int i, n, err = 0; |
3388 | | |
3389 | 4.20k | if (!obj->btf) |
3390 | 1.68k | return 0; |
3391 | | |
3392 | 2.52k | n = btf__type_cnt(obj->btf); |
3393 | 26.7k | for (i = 1; i < n; i++) { |
3394 | 24.7k | struct btf_type *t = btf_type_by_id(obj->btf, i); |
3395 | | |
3396 | | /* Loader needs to fix up some of the things compiler |
3397 | | * couldn't get its hands on while emitting BTF. This |
3398 | | * is section size and global variable offset. We use |
3399 | | * the info from the ELF itself for this purpose. |
3400 | | */ |
3401 | 24.7k | if (btf_is_datasec(t)) { |
3402 | 3.59k | err = btf_fixup_datasec(obj, obj->btf, t); |
3403 | 3.59k | if (err) |
3404 | 514 | return err; |
3405 | 3.59k | } |
3406 | 24.7k | } |
3407 | | |
3408 | 2.00k | return 0; |
3409 | 2.52k | } |
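
After this fixup, DATASEC sizes reflect the real ELF section sizes. A small sketch reading one back through public APIs (the section name is illustrative):

    #include <bpf/btf.h>

    static __s64 datasec_size(const struct btf *btf, const char *sec_name)
    {
            __s32 id = btf__find_by_name_kind(btf, sec_name, BTF_KIND_DATASEC);

            if (id < 0)
                    return id; /* e.g. -ENOENT */
            return btf__type_by_id(btf, id)->size; /* fixed up from ELF above */
    }

    /* usage: datasec_size(bpf_object__btf(obj), ".data") */
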
3410 | | |
3411 | | static bool prog_needs_vmlinux_btf(struct bpf_program *prog) |
3412 | 0 | { |
3413 | 0 | if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || |
3414 | 0 | prog->type == BPF_PROG_TYPE_LSM) |
3415 | 0 | return true; |
3416 | | |
3417 | | /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs |
3418 | | * also need vmlinux BTF |
3419 | | */ |
3420 | 0 | if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd) |
3421 | 0 | return true; |
3422 | | |
3423 | 0 | return false; |
3424 | 0 | } |
3425 | | |
3426 | | static bool map_needs_vmlinux_btf(struct bpf_map *map) |
3427 | 0 | { |
3428 | 0 | return bpf_map__is_struct_ops(map); |
3429 | 0 | } |
3430 | | |
3431 | | static bool obj_needs_vmlinux_btf(const struct bpf_object *obj) |
3432 | 0 | { |
3433 | 0 | struct bpf_program *prog; |
3434 | 0 | struct bpf_map *map; |
3435 | 0 | int i; |
3436 | | |
3437 | | /* CO-RE relocations need kernel BTF, only when btf_custom_path |
3438 | | * is not specified |
3439 | | */ |
3440 | 0 | if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path) |
3441 | 0 | return true; |
3442 | | |
3443 | | /* Support for typed ksyms needs kernel BTF */ |
3444 | 0 | for (i = 0; i < obj->nr_extern; i++) { |
3445 | 0 | const struct extern_desc *ext; |
3446 | | 
3447 | 0 | ext = &obj->externs[i]; |
3448 | 0 | if (ext->type == EXT_KSYM && ext->ksym.type_id) |
3449 | 0 | return true; |
3450 | 0 | } |
3451 | | |
3452 | 0 | bpf_object__for_each_program(prog, obj) { |
3453 | 0 | if (!prog->autoload) |
3454 | 0 | continue; |
3455 | 0 | if (prog_needs_vmlinux_btf(prog)) |
3456 | 0 | return true; |
3457 | 0 | } |
3458 | | |
3459 | 0 | bpf_object__for_each_map(map, obj) { |
3460 | 0 | if (map_needs_vmlinux_btf(map)) |
3461 | 0 | return true; |
3462 | 0 | } |
3463 | | |
3464 | 0 | return false; |
3465 | 0 | } |
3466 | | |
3467 | | static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force) |
3468 | 0 | { |
3469 | 0 | int err; |
3470 | | |
3471 | | /* btf_vmlinux could be loaded earlier */ |
3472 | 0 | if (obj->btf_vmlinux || obj->gen_loader) |
3473 | 0 | return 0; |
3474 | | |
3475 | 0 | if (!force && !obj_needs_vmlinux_btf(obj)) |
3476 | 0 | return 0; |
3477 | | |
3478 | 0 | obj->btf_vmlinux = btf__load_vmlinux_btf(); |
3479 | 0 | err = libbpf_get_error(obj->btf_vmlinux); |
3480 | 0 | if (err) { |
3481 | 0 | pr_warn("Error loading vmlinux BTF: %d\n", err); |
3482 | 0 | obj->btf_vmlinux = NULL; |
3483 | 0 | return err; |
3484 | 0 | } |
3485 | 0 | return 0; |
3486 | 0 | } |
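
Applications can perform the same lookup directly; btf__load_vmlinux_btf() searches /sys/kernel/btf/vmlinux and several fallback locations. A minimal sketch:

    #include <bpf/btf.h>
    #include <bpf/libbpf.h>

    static struct btf *get_kernel_btf(void)
    {
            struct btf *vmlinux_btf = btf__load_vmlinux_btf();

            /* mirror the error handling above, which also copes with
             * legacy ERR_PTR-style returns
             */
            if (libbpf_get_error(vmlinux_btf))
                    return NULL;
            return vmlinux_btf;
    }
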
3487 | | |
3488 | | static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) |
3489 | 0 | { |
3490 | 0 | struct btf *kern_btf = obj->btf; |
3491 | 0 | bool btf_mandatory, sanitize; |
3492 | 0 | int i, err = 0; |
3493 | | 
3494 | 0 | if (!obj->btf) |
3495 | 0 | return 0; |
3496 | | |
3497 | 0 | if (!kernel_supports(obj, FEAT_BTF)) { |
3498 | 0 | if (kernel_needs_btf(obj)) { |
3499 | 0 | err = -EOPNOTSUPP; |
3500 | 0 | goto report; |
3501 | 0 | } |
3502 | 0 | pr_debug("Kernel doesn't support BTF, skipping uploading it.\n"); |
3503 | 0 | return 0; |
3504 | 0 | } |
3505 | | |
3506 | | /* Even though some subprogs are global/weak, user might prefer more |
3507 | | * permissive BPF verification process that BPF verifier performs for |
3508 | | * static functions, taking into account more context from the caller |
3509 | | * functions. In such case, they need to mark such subprogs with |
3510 | | * __attribute__((visibility("hidden"))) and libbpf will adjust |
3511 | | * corresponding FUNC BTF type to be marked as static and trigger more |
3512 | | * involved BPF verification process. |
3513 | | */ |
3514 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
3515 | 0 | struct bpf_program *prog = &obj->programs[i]; |
3516 | 0 | struct btf_type *t; |
3517 | 0 | const char *name; |
3518 | 0 | int j, n; |
3519 | | 
3520 | 0 | if (!prog->mark_btf_static || !prog_is_subprog(obj, prog)) |
3521 | 0 | continue; |
3522 | | |
3523 | 0 | n = btf__type_cnt(obj->btf); |
3524 | 0 | for (j = 1; j < n; j++) { |
3525 | 0 | t = btf_type_by_id(obj->btf, j); |
3526 | 0 | if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) |
3527 | 0 | continue; |
3528 | | |
3529 | 0 | name = btf__str_by_offset(obj->btf, t->name_off); |
3530 | 0 | if (strcmp(name, prog->name) != 0) |
3531 | 0 | continue; |
3532 | | |
3533 | 0 | t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0); |
3534 | 0 | break; |
3535 | 0 | } |
3536 | 0 | } |
3537 | |
|
3538 | 0 | sanitize = btf_needs_sanitization(obj); |
3539 | 0 | if (sanitize) { |
3540 | 0 | const void *raw_data; |
3541 | 0 | __u32 sz; |
3542 | | |
3543 | | /* clone BTF to sanitize a copy and leave the original intact */ |
3544 | 0 | raw_data = btf__raw_data(obj->btf, &sz); |
3545 | 0 | kern_btf = btf__new(raw_data, sz); |
3546 | 0 | err = libbpf_get_error(kern_btf); |
3547 | 0 | if (err) |
3548 | 0 | return err; |
3549 | | |
3550 | | /* enforce 8-byte pointers for BPF-targeted BTFs */ |
3551 | 0 | btf__set_pointer_size(obj->btf, 8); |
3552 | 0 | err = bpf_object__sanitize_btf(obj, kern_btf); |
3553 | 0 | if (err) |
3554 | 0 | return err; |
3555 | 0 | } |
3556 | | |
3557 | 0 | if (obj->gen_loader) { |
3558 | 0 | __u32 raw_size = 0; |
3559 | 0 | const void *raw_data = btf__raw_data(kern_btf, &raw_size); |
3560 | | 
3561 | 0 | if (!raw_data) |
3562 | 0 | return -ENOMEM; |
3563 | 0 | bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size); |
3564 | | /* Pretend to have valid FD to pass various fd >= 0 checks. |
3565 | | * This fd == 0 will not be used with any syscall and will be reset to -1 eventually. |
3566 | | */ |
3567 | 0 | btf__set_fd(kern_btf, 0); |
3568 | 0 | } else { |
3569 | | /* currently BPF_BTF_LOAD only supports log_level 1 */ |
3570 | 0 | err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size, |
3571 | 0 | obj->log_level ? 1 : 0, obj->token_fd); |
3572 | 0 | } |
3573 | 0 | if (sanitize) { |
3574 | 0 | if (!err) { |
3575 | | /* move fd to libbpf's BTF */ |
3576 | 0 | btf__set_fd(obj->btf, btf__fd(kern_btf)); |
3577 | 0 | btf__set_fd(kern_btf, -1); |
3578 | 0 | } |
3579 | 0 | btf__free(kern_btf); |
3580 | 0 | } |
3581 | 0 | report: |
3582 | 0 | if (err) { |
3583 | 0 | btf_mandatory = kernel_needs_btf(obj); |
3584 | 0 | pr_warn("Error loading .BTF into kernel: %d. %s\n", err, |
3585 | 0 | btf_mandatory ? "BTF is mandatory, can't proceed." |
3586 | 0 | : "BTF is optional, ignoring."); |
3587 | 0 | if (!btf_mandatory) |
3588 | 0 | err = 0; |
3589 | 0 | } |
3590 | 0 | return err; |
3591 | 0 | } |
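
The visibility handling above pairs with BPF-side code like this sketch; __hidden comes from bpf_helpers.h, and the function and section names are illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* global subprog verified with caller context (stricter, "static-like")
     * because the loop above flips its FUNC BTF to BTF_FUNC_STATIC
     */
    __hidden int scale(int v)
    {
            return v * 2;
    }

    SEC("tp/syscalls/sys_enter_getpid")
    int handle(void *ctx)
    {
            return scale(1);
    }

    char LICENSE[] SEC("license") = "GPL";
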
3592 | | |
3593 | | static const char *elf_sym_str(const struct bpf_object *obj, size_t off) |
3594 | 28.8k | { |
3595 | 28.8k | const char *name; |
3596 | | |
3597 | 28.8k | name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off); |
3598 | 28.8k | if (!name) { |
3599 | 10.1k | pr_warn("elf: failed to get symbol name string at offset %zu from %s: %s\n",
3600 | 10.1k | off, obj->path, elf_errmsg(-1)); |
3601 | 10.1k | return NULL; |
3602 | 10.1k | } |
3603 | | |
3604 | 18.6k | return name; |
3605 | 28.8k | } |
3606 | | |
3607 | | static const char *elf_sec_str(const struct bpf_object *obj, size_t off) |
3608 | 48.4k | { |
3609 | 48.4k | const char *name; |
3610 | | |
3611 | 48.4k | name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off); |
3612 | 48.4k | if (!name) { |
3613 | 796 | pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", |
3614 | 796 | off, obj->path, elf_errmsg(-1)); |
3615 | 796 | return NULL; |
3616 | 796 | } |
3617 | | |
3618 | 47.6k | return name; |
3619 | 48.4k | } |
3620 | | |
3621 | | static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx) |
3622 | 14.6k | { |
3623 | 14.6k | Elf_Scn *scn; |
3624 | | |
3625 | 14.6k | scn = elf_getscn(obj->efile.elf, idx); |
3626 | 14.6k | if (!scn) { |
3627 | 0 | pr_warn("elf: failed to get section(%zu) from %s: %s\n", |
3628 | 0 | idx, obj->path, elf_errmsg(-1)); |
3629 | 0 | return NULL; |
3630 | 0 | } |
3631 | 14.6k | return scn; |
3632 | 14.6k | } |
3633 | | |
3634 | | static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name) |
3635 | 477 | { |
3636 | 477 | Elf_Scn *scn = NULL; |
3637 | 477 | Elf *elf = obj->efile.elf; |
3638 | 477 | const char *sec_name; |
3639 | | |
3640 | 2.35k | while ((scn = elf_nextscn(elf, scn)) != NULL) { |
3641 | 2.14k | sec_name = elf_sec_name(obj, scn); |
3642 | 2.14k | if (!sec_name) |
3643 | 0 | return NULL; |
3644 | | |
3645 | 2.14k | if (strcmp(sec_name, name) != 0) |
3646 | 1.88k | continue; |
3647 | | |
3648 | 262 | return scn; |
3649 | 2.14k | } |
3650 | 215 | return NULL; |
3651 | 477 | } |
3652 | | |
3653 | | static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn) |
3654 | 101k | { |
3655 | 101k | Elf64_Shdr *shdr; |
3656 | | |
3657 | 101k | if (!scn) |
3658 | 0 | return NULL; |
3659 | | |
3660 | 101k | shdr = elf64_getshdr(scn); |
3661 | 101k | if (!shdr) { |
3662 | 0 | pr_warn("elf: failed to get section(%zu) header from %s: %s\n", |
3663 | 0 | elf_ndxscn(scn), obj->path, elf_errmsg(-1)); |
3664 | 0 | return NULL; |
3665 | 0 | } |
3666 | | |
3667 | 101k | return shdr; |
3668 | 101k | } |
3669 | | |
3670 | | static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn) |
3671 | 6.95k | { |
3672 | 6.95k | const char *name; |
3673 | 6.95k | Elf64_Shdr *sh; |
3674 | | |
3675 | 6.95k | if (!scn) |
3676 | 0 | return NULL; |
3677 | | |
3678 | 6.95k | sh = elf_sec_hdr(obj, scn); |
3679 | 6.95k | if (!sh) |
3680 | 0 | return NULL; |
3681 | | |
3682 | 6.95k | name = elf_sec_str(obj, sh->sh_name); |
3683 | 6.95k | if (!name) { |
3684 | 696 | pr_warn("elf: failed to get section(%zu) name from %s: %s\n", |
3685 | 696 | elf_ndxscn(scn), obj->path, elf_errmsg(-1)); |
3686 | 696 | return NULL; |
3687 | 696 | } |
3688 | | |
3689 | 6.26k | return name; |
3690 | 6.95k | } |
3691 | | |
3692 | | static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn) |
3693 | 43.8k | { |
3694 | 43.8k | Elf_Data *data; |
3695 | | |
3696 | 43.8k | if (!scn) |
3697 | 159 | return NULL; |
3698 | | |
3699 | 43.7k | data = elf_getdata(scn, 0); |
3700 | 43.7k | if (!data) { |
3701 | 448 | pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n", |
3702 | 448 | elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>", |
3703 | 448 | obj->path, elf_errmsg(-1)); |
3704 | 448 | return NULL; |
3705 | 448 | } |
3706 | | |
3707 | 43.2k | return data; |
3708 | 43.7k | } |
3709 | | |
3710 | | static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx) |
3711 | 860k | { |
3712 | 860k | if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym)) |
3713 | 144 | return NULL; |
3714 | | |
3715 | 860k | return (Elf64_Sym *)obj->efile.symbols->d_buf + idx; |
3716 | 860k | } |
3717 | | |
3718 | | static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx) |
3719 | 8.53k | { |
3720 | 8.53k | if (idx >= data->d_size / sizeof(Elf64_Rel)) |
3721 | 0 | return NULL; |
3722 | | |
3723 | 8.53k | return (Elf64_Rel *)data->d_buf + idx; |
3724 | 8.53k | } |
3725 | | |
3726 | | static bool is_sec_name_dwarf(const char *name) |
3727 | 36.7k | { |
3728 | | /* approximation, but the actual list is too long */ |
3729 | 36.7k | return str_has_pfx(name, ".debug_"); |
3730 | 36.7k | } |
3731 | | |
3732 | | static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name) |
3733 | 40.3k | { |
3734 | | /* no special handling of .strtab */ |
3735 | 40.3k | if (hdr->sh_type == SHT_STRTAB) |
3736 | 5.53k | return true; |
3737 | | |
3738 | | /* ignore .llvm_addrsig section as well */ |
3739 | 34.8k | if (hdr->sh_type == SHT_LLVM_ADDRSIG) |
3740 | 66 | return true; |
3741 | | |
3742 | | /* no subprograms will lead to an empty .text section, ignore it */ |
3743 | 34.7k | if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 && |
3744 | 34.7k | strcmp(name, ".text") == 0) |
3745 | 39 | return true; |
3746 | | |
3747 | | /* DWARF sections */ |
3748 | 34.7k | if (is_sec_name_dwarf(name)) |
3749 | 2.17k | return true; |
3750 | | |
3751 | 32.5k | if (str_has_pfx(name, ".rel")) { |
3752 | 2.04k | name += sizeof(".rel") - 1; |
3753 | | /* DWARF section relocations */ |
3754 | 2.04k | if (is_sec_name_dwarf(name)) |
3755 | 194 | return true; |
3756 | | |
3757 | | /* .BTF and .BTF.ext don't need relocations */ |
3758 | 1.85k | if (strcmp(name, BTF_ELF_SEC) == 0 || |
3759 | 1.85k | strcmp(name, BTF_EXT_ELF_SEC) == 0) |
3760 | 416 | return true; |
3761 | 1.85k | } |
3762 | | |
3763 | 31.9k | return false; |
3764 | 32.5k | } |
3765 | | |
3766 | | static int cmp_progs(const void *_a, const void *_b) |
3767 | 44.8k | { |
3768 | 44.8k | const struct bpf_program *a = _a; |
3769 | 44.8k | const struct bpf_program *b = _b; |
3770 | | |
3771 | 44.8k | if (a->sec_idx != b->sec_idx) |
3772 | 531 | return a->sec_idx < b->sec_idx ? -1 : 1; |
3773 | | |
3774 | | /* sec_insn_off can't be the same within the section */ |
3775 | 44.3k | return a->sec_insn_off < b->sec_insn_off ? -1 : 1; |
3776 | 44.8k | } |
3777 | | |
3778 | | static int bpf_object__elf_collect(struct bpf_object *obj) |
3779 | 9.33k | { |
3780 | 9.33k | struct elf_sec_desc *sec_desc; |
3781 | 9.33k | Elf *elf = obj->efile.elf; |
3782 | 9.33k | Elf_Data *btf_ext_data = NULL; |
3783 | 9.33k | Elf_Data *btf_data = NULL; |
3784 | 9.33k | int idx = 0, err = 0; |
3785 | 9.33k | const char *name; |
3786 | 9.33k | Elf_Data *data; |
3787 | 9.33k | Elf_Scn *scn; |
3788 | 9.33k | Elf64_Shdr *sh; |
3789 | | |
3790 | | /* ELF section indices are 0-based, but sec #0 is special "invalid" |
3791 | | * section. Since section count retrieved by elf_getshdrnum() does |
3792 | | * include sec #0, it is already the necessary size of an array to keep |
3793 | | * all the sections. |
3794 | | */ |
3795 | 9.33k | if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) { |
3796 | 0 | pr_warn("elf: failed to get the number of sections for %s: %s\n", |
3797 | 0 | obj->path, elf_errmsg(-1)); |
3798 | 0 | return -LIBBPF_ERRNO__FORMAT; |
3799 | 0 | } |
3800 | 9.33k | obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs)); |
3801 | 9.33k | if (!obj->efile.secs) |
3802 | 0 | return -ENOMEM; |
3803 | | |
3804 | | /* a bunch of ELF parsing functionality depends on processing symbols, |
3805 | | * so do the first pass and find the symbol table |
3806 | | */ |
3807 | 9.33k | scn = NULL; |
3808 | 54.6k | while ((scn = elf_nextscn(elf, scn)) != NULL) { |
3809 | 45.5k | sh = elf_sec_hdr(obj, scn); |
3810 | 45.5k | if (!sh) |
3811 | 0 | return -LIBBPF_ERRNO__FORMAT; |
3812 | | |
3813 | 45.5k | if (sh->sh_type == SHT_SYMTAB) { |
3814 | 9.26k | if (obj->efile.symbols) { |
3815 | 1 | pr_warn("elf: multiple symbol tables in %s\n", obj->path); |
3816 | 1 | return -LIBBPF_ERRNO__FORMAT; |
3817 | 1 | } |
3818 | | |
3819 | 9.26k | data = elf_sec_data(obj, scn); |
3820 | 9.26k | if (!data) |
3821 | 249 | return -LIBBPF_ERRNO__FORMAT; |
3822 | | |
3823 | 9.01k | idx = elf_ndxscn(scn); |
3824 | | |
3825 | 9.01k | obj->efile.symbols = data; |
3826 | 9.01k | obj->efile.symbols_shndx = idx; |
3827 | 9.01k | obj->efile.strtabidx = sh->sh_link; |
3828 | 9.01k | } |
3829 | 45.5k | } |
3830 | | |
3831 | 9.08k | if (!obj->efile.symbols) { |
3832 | 72 | pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n", |
3833 | 72 | obj->path); |
3834 | 72 | return -ENOENT; |
3835 | 72 | } |
3836 | | |
3837 | 9.01k | scn = NULL; |
3838 | 48.2k | while ((scn = elf_nextscn(elf, scn)) != NULL) { |
3839 | 40.4k | idx = elf_ndxscn(scn); |
3840 | 40.4k | sec_desc = &obj->efile.secs[idx]; |
3841 | | |
3842 | 40.4k | sh = elf_sec_hdr(obj, scn); |
3843 | 40.4k | if (!sh) |
3844 | 0 | return -LIBBPF_ERRNO__FORMAT; |
3845 | | |
3846 | 40.4k | name = elf_sec_str(obj, sh->sh_name); |
3847 | 40.4k | if (!name) |
3848 | 100 | return -LIBBPF_ERRNO__FORMAT; |
3849 | | |
3850 | 40.3k | if (ignore_elf_section(sh, name)) |
3851 | 8.42k | continue; |
3852 | | |
3853 | 31.9k | data = elf_sec_data(obj, scn); |
3854 | 31.9k | if (!data) |
3855 | 183 | return -LIBBPF_ERRNO__FORMAT; |
3856 | | |
3857 | 31.7k | pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", |
3858 | 31.7k | idx, name, (unsigned long)data->d_size, |
3859 | 31.7k | (int)sh->sh_link, (unsigned long)sh->sh_flags, |
3860 | 31.7k | (int)sh->sh_type); |
3861 | | |
3862 | 31.7k | if (strcmp(name, "license") == 0) { |
3863 | 663 | err = bpf_object__init_license(obj, data->d_buf, data->d_size); |
3864 | 663 | if (err) |
3865 | 1 | return err; |
3866 | 31.0k | } else if (strcmp(name, "version") == 0) { |
3867 | 49 | err = bpf_object__init_kversion(obj, data->d_buf, data->d_size); |
3868 | 49 | if (err) |
3869 | 13 | return err; |
3870 | 31.0k | } else if (strcmp(name, "maps") == 0) { |
3871 | 7 | pr_warn("elf: legacy map definitions in 'maps' section are not supported by libbpf v1.0+\n"); |
3872 | 7 | return -ENOTSUP; |
3873 | 31.0k | } else if (strcmp(name, MAPS_ELF_SEC) == 0) { |
3874 | 1.69k | obj->efile.btf_maps_shndx = idx; |
3875 | 29.3k | } else if (strcmp(name, BTF_ELF_SEC) == 0) { |
3876 | 5.20k | if (sh->sh_type != SHT_PROGBITS) |
3877 | 55 | return -LIBBPF_ERRNO__FORMAT; |
3878 | 5.15k | btf_data = data; |
3879 | 24.1k | } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { |
3880 | 427 | if (sh->sh_type != SHT_PROGBITS) |
3881 | 52 | return -LIBBPF_ERRNO__FORMAT; |
3882 | 375 | btf_ext_data = data; |
3883 | 23.7k | } else if (sh->sh_type == SHT_SYMTAB) { |
3884 | | /* already processed during the first pass above */ |
3885 | 16.2k | } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) { |
3886 | 3.62k | if (sh->sh_flags & SHF_EXECINSTR) { |
3887 | 1.22k | if (strcmp(name, ".text") == 0) |
3888 | 233 | obj->efile.text_shndx = idx; |
3889 | 1.22k | err = bpf_object__add_programs(obj, data, name, idx); |
3890 | 1.22k | if (err) |
3891 | 280 | return err; |
3892 | 2.40k | } else if (strcmp(name, DATA_SEC) == 0 || |
3893 | 2.40k | str_has_pfx(name, DATA_SEC ".")) { |
3894 | 661 | sec_desc->sec_type = SEC_DATA; |
3895 | 661 | sec_desc->shdr = sh; |
3896 | 661 | sec_desc->data = data; |
3897 | 1.74k | } else if (strcmp(name, RODATA_SEC) == 0 || |
3898 | 1.74k | str_has_pfx(name, RODATA_SEC ".")) { |
3899 | 399 | sec_desc->sec_type = SEC_RODATA; |
3900 | 399 | sec_desc->shdr = sh; |
3901 | 399 | sec_desc->data = data; |
3902 | 1.34k | } else if (strcmp(name, STRUCT_OPS_SEC) == 0 || |
3903 | 1.34k | strcmp(name, STRUCT_OPS_LINK_SEC) == 0 || |
3904 | 1.34k | strcmp(name, "?" STRUCT_OPS_SEC) == 0 || |
3905 | 1.34k | strcmp(name, "?" STRUCT_OPS_LINK_SEC) == 0) { |
3906 | 537 | sec_desc->sec_type = SEC_ST_OPS; |
3907 | 537 | sec_desc->shdr = sh; |
3908 | 537 | sec_desc->data = data; |
3909 | 537 | obj->efile.has_st_ops = true; |
3910 | 805 | } else if (strcmp(name, ARENA_SEC) == 0) { |
3911 | 75 | obj->efile.arena_data = data; |
3912 | 75 | obj->efile.arena_data_shndx = idx; |
3913 | 730 | } else { |
3914 | 730 | pr_info("elf: skipping unrecognized data section(%d) %s\n", |
3915 | 730 | idx, name); |
3916 | 730 | } |
3917 | 12.5k | } else if (sh->sh_type == SHT_REL) { |
3918 | 2.23k | int targ_sec_idx = sh->sh_info; /* points to other section */ |
3919 | | |
3920 | 2.23k | if (sh->sh_entsize != sizeof(Elf64_Rel) || |
3921 | 2.23k | targ_sec_idx >= obj->efile.sec_cnt) |
3922 | 184 | return -LIBBPF_ERRNO__FORMAT; |
3923 | | |
3924 | | /* Only do relo for section with exec instructions */ |
3925 | 2.04k | if (!section_have_execinstr(obj, targ_sec_idx) && |
3926 | 2.04k | strcmp(name, ".rel" STRUCT_OPS_SEC) && |
3927 | 2.04k | strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) && |
3928 | 2.04k | strcmp(name, ".rel?" STRUCT_OPS_SEC) && |
3929 | 2.04k | strcmp(name, ".rel?" STRUCT_OPS_LINK_SEC) && |
3930 | 2.04k | strcmp(name, ".rel" MAPS_ELF_SEC)) { |
3931 | 572 | pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n", |
3932 | 572 | idx, name, targ_sec_idx, |
3933 | 572 | elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>"); |
3934 | 572 | continue; |
3935 | 572 | } |
3936 | | |
3937 | 1.47k | sec_desc->sec_type = SEC_RELO; |
3938 | 1.47k | sec_desc->shdr = sh; |
3939 | 1.47k | sec_desc->data = data; |
3940 | 10.3k | } else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 || |
3941 | 2.08k | str_has_pfx(name, BSS_SEC "."))) { |
3942 | 1.26k | sec_desc->sec_type = SEC_BSS; |
3943 | 1.26k | sec_desc->shdr = sh; |
3944 | 1.26k | sec_desc->data = data; |
3945 | 9.09k | } else { |
3946 | 9.09k | pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name, |
3947 | 9.09k | (size_t)sh->sh_size); |
3948 | 9.09k | } |
3949 | 31.7k | } |
3950 | | |
3951 | 8.00k | if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) { |
3952 | 1.15k | pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path); |
3953 | 1.15k | return -LIBBPF_ERRNO__FORMAT; |
3954 | 1.15k | } |
3955 | | |
3956 | | /* sort BPF programs by section index and in-section instruction offset
3957 | | * for faster search |
3958 | | */ |
3959 | 6.61k | if (obj->nr_programs) |
3960 | 632 | qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs); |
3961 | | |
3962 | 6.61k | return bpf_object__init_btf(obj, btf_data, btf_ext_data); |
3963 | 7.77k | } |
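
A stripped-down sketch of the first pass above, using the same libelf entry points (error handling elided; the path is illustrative):

    #include <fcntl.h>
    #include <unistd.h>
    #include <libelf.h>
    #include <gelf.h>

    static int find_symtab_idx(const char *path)
    {
            Elf_Scn *scn = NULL;
            int fd, idx = -1;
            GElf_Shdr sh;
            Elf *elf;

            elf_version(EV_CURRENT);
            fd = open(path, O_RDONLY);
            elf = elf_begin(fd, ELF_C_READ, NULL);
            while ((scn = elf_nextscn(elf, scn)) != NULL) {
                    if (gelf_getshdr(scn, &sh) && sh.sh_type == SHT_SYMTAB) {
                            idx = elf_ndxscn(scn); /* like obj->efile.symbols_shndx */
                            break;
                    }
            }
            elf_end(elf);
            close(fd);
            return idx;
    }
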
3964 | | |
3965 | | static bool sym_is_extern(const Elf64_Sym *sym) |
3966 | 572k | { |
3967 | 572k | int bind = ELF64_ST_BIND(sym->st_info); |
3968 | | /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */ |
3969 | 572k | return sym->st_shndx == SHN_UNDEF && |
3970 | 572k | (bind == STB_GLOBAL || bind == STB_WEAK) && |
3971 | 572k | ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE; |
3972 | 572k | } |
3973 | | |
3974 | | static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx) |
3975 | 1.22k | { |
3976 | 1.22k | int bind = ELF64_ST_BIND(sym->st_info); |
3977 | 1.22k | int type = ELF64_ST_TYPE(sym->st_info); |
3978 | | |
3979 | | /* in .text section */ |
3980 | 1.22k | if (sym->st_shndx != text_shndx) |
3981 | 554 | return false; |
3982 | | |
3983 | | /* local function */ |
3984 | 672 | if (bind == STB_LOCAL && type == STT_SECTION) |
3985 | 496 | return true; |
3986 | | |
3987 | | /* global function */ |
3988 | 176 | return bind == STB_GLOBAL && type == STT_FUNC; |
3989 | 672 | } |
3990 | | |
3991 | | static int find_extern_btf_id(const struct btf *btf, const char *ext_name) |
3992 | 4.71k | { |
3993 | 4.71k | const struct btf_type *t; |
3994 | 4.71k | const char *tname; |
3995 | 4.71k | int i, n; |
3996 | | |
3997 | 4.71k | if (!btf) |
3998 | 24 | return -ESRCH; |
3999 | | |
4000 | 4.69k | n = btf__type_cnt(btf); |
4001 | 27.0k | for (i = 1; i < n; i++) { |
4002 | 26.8k | t = btf__type_by_id(btf, i); |
4003 | | |
4004 | 26.8k | if (!btf_is_var(t) && !btf_is_func(t)) |
4005 | 18.7k | continue; |
4006 | | |
4007 | 8.11k | tname = btf__name_by_offset(btf, t->name_off); |
4008 | 8.11k | if (strcmp(tname, ext_name)) |
4009 | 3.60k | continue; |
4010 | | |
4011 | 4.51k | if (btf_is_var(t) && |
4012 | 4.51k | btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN) |
4013 | 53 | return -EINVAL; |
4014 | | |
4015 | 4.46k | if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN) |
4016 | 16 | return -EINVAL; |
4017 | | |
4018 | 4.44k | return i; |
4019 | 4.46k | } |
4020 | | |
4021 | 179 | return -ENOENT; |
4022 | 4.69k | } |
4023 | | |
4024 | 4.44k | static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) { |
4025 | 4.44k | const struct btf_var_secinfo *vs; |
4026 | 4.44k | const struct btf_type *t; |
4027 | 4.44k | int i, j, n; |
4028 | | |
4029 | 4.44k | if (!btf) |
4030 | 0 | return -ESRCH; |
4031 | | |
4032 | 4.44k | n = btf__type_cnt(btf); |
4033 | 29.8k | for (i = 1; i < n; i++) { |
4034 | 29.7k | t = btf__type_by_id(btf, i); |
4035 | | |
4036 | 29.7k | if (!btf_is_datasec(t)) |
4037 | 19.9k | continue; |
4038 | | |
4039 | 9.80k | vs = btf_var_secinfos(t); |
4040 | 21.6k | for (j = 0; j < btf_vlen(t); j++, vs++) { |
4041 | 16.2k | if (vs->type == ext_btf_id) |
4042 | 4.40k | return i; |
4043 | 16.2k | } |
4044 | 9.80k | } |
4045 | | |
4046 | 42 | return -ENOENT; |
4047 | 4.44k | } |
4048 | | |
4049 | | static enum kcfg_type find_kcfg_type(const struct btf *btf, int id, |
4050 | | bool *is_signed) |
4051 | 1.19k | { |
4052 | 1.19k | const struct btf_type *t; |
4053 | 1.19k | const char *name; |
4054 | | |
4055 | 1.19k | t = skip_mods_and_typedefs(btf, id, NULL); |
4056 | 1.19k | name = btf__name_by_offset(btf, t->name_off); |
4057 | | |
4058 | 1.19k | if (is_signed) |
4059 | 1.08k | *is_signed = false; |
4060 | 1.19k | switch (btf_kind(t)) { |
4061 | 769 | case BTF_KIND_INT: { |
4062 | 769 | int enc = btf_int_encoding(t); |
4063 | | |
4064 | 769 | if (enc & BTF_INT_BOOL) |
4065 | 283 | return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN; |
4066 | 486 | if (is_signed) |
4067 | 409 | *is_signed = enc & BTF_INT_SIGNED; |
4068 | 486 | if (t->size == 1) |
4069 | 269 | return KCFG_CHAR; |
4070 | 217 | if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1))) |
4071 | 41 | return KCFG_UNKNOWN; |
4072 | 176 | return KCFG_INT; |
4073 | 217 | } |
4074 | 122 | case BTF_KIND_ENUM: |
4075 | 122 | if (t->size != 4) |
4076 | 19 | return KCFG_UNKNOWN; |
4077 | 103 | if (strcmp(name, "libbpf_tristate")) |
4078 | 92 | return KCFG_UNKNOWN; |
4079 | 11 | return KCFG_TRISTATE; |
4080 | 143 | case BTF_KIND_ENUM64: |
4081 | 143 | if (strcmp(name, "libbpf_tristate")) |
4082 | 133 | return KCFG_UNKNOWN; |
4083 | 10 | return KCFG_TRISTATE; |
4084 | 110 | case BTF_KIND_ARRAY: |
4085 | 110 | if (btf_array(t)->nelems == 0) |
4086 | 0 | return KCFG_UNKNOWN; |
4087 | 110 | if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR) |
4088 | 33 | return KCFG_UNKNOWN; |
4089 | 77 | return KCFG_CHAR_ARR; |
4090 | 53 | default: |
4091 | 53 | return KCFG_UNKNOWN; |
4092 | 1.19k | } |
4093 | 1.19k | } |
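
The classification above maps onto BPF-side extern declarations like these — a sketch using the __kconfig and libbpf_tristate definitions from bpf_helpers.h:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    extern unsigned int LINUX_KERNEL_VERSION __kconfig;          /* KCFG_INT */
    extern _Bool CONFIG_BPF_SYSCALL __kconfig __weak;            /* KCFG_BOOL */
    extern enum libbpf_tristate CONFIG_MODULES __kconfig __weak; /* KCFG_TRISTATE */
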
4094 | | |
4095 | | static int cmp_externs(const void *_a, const void *_b) |
4096 | 8.34k | { |
4097 | 8.34k | const struct extern_desc *a = _a; |
4098 | 8.34k | const struct extern_desc *b = _b; |
4099 | | |
4100 | 8.34k | if (a->type != b->type) |
4101 | 0 | return a->type < b->type ? -1 : 1; |
4102 | | |
4103 | 8.34k | if (a->type == EXT_KCFG) { |
4104 | | /* descending order by alignment requirements */ |
4105 | 1.15k | if (a->kcfg.align != b->kcfg.align) |
4106 | 0 | return a->kcfg.align > b->kcfg.align ? -1 : 1; |
4107 | | /* ascending order by size, within same alignment class */ |
4108 | 1.15k | if (a->kcfg.sz != b->kcfg.sz) |
4109 | 0 | return a->kcfg.sz < b->kcfg.sz ? -1 : 1; |
4110 | 1.15k | } |
4111 | | |
4112 | | /* resolve ties by name */ |
4113 | 8.34k | return strcmp(a->name, b->name); |
4114 | 8.34k | } |
4115 | | |
4116 | | static int find_int_btf_id(const struct btf *btf) |
4117 | 831 | { |
4118 | 831 | const struct btf_type *t; |
4119 | 831 | int i, n; |
4120 | | |
4121 | 831 | n = btf__type_cnt(btf); |
4122 | 7.70k | for (i = 1; i < n; i++) { |
4123 | 6.96k | t = btf__type_by_id(btf, i); |
4124 | | |
4125 | 6.96k | if (btf_is_int(t) && btf_int_bits(t) == 32) |
4126 | 89 | return i; |
4127 | 6.96k | } |
4128 | | |
4129 | 742 | return 0; |
4130 | 831 | } |
4131 | | |
4132 | | static int add_dummy_ksym_var(struct btf *btf) |
4133 | 5.55k | { |
4134 | 5.55k | int i, int_btf_id, sec_btf_id, dummy_var_btf_id; |
4135 | 5.55k | const struct btf_var_secinfo *vs; |
4136 | 5.55k | const struct btf_type *sec; |
4137 | | |
4138 | 5.55k | if (!btf) |
4139 | 1.71k | return 0; |
4140 | | |
4141 | 3.84k | sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC, |
4142 | 3.84k | BTF_KIND_DATASEC); |
4143 | 3.84k | if (sec_btf_id < 0) |
4144 | 3.11k | return 0; |
4145 | | |
4146 | 729 | sec = btf__type_by_id(btf, sec_btf_id); |
4147 | 729 | vs = btf_var_secinfos(sec); |
4148 | 1.95k | for (i = 0; i < btf_vlen(sec); i++, vs++) { |
4149 | 1.75k | const struct btf_type *vt; |
4150 | | |
4151 | 1.75k | vt = btf__type_by_id(btf, vs->type); |
4152 | 1.75k | if (btf_is_func(vt)) |
4153 | 526 | break; |
4154 | 1.75k | } |
4155 | | |
4156 | | /* No func in ksyms sec. No need to add dummy var. */ |
4157 | 729 | if (i == btf_vlen(sec)) |
4158 | 203 | return 0; |
4159 | | |
4160 | 526 | int_btf_id = find_int_btf_id(btf); |
4161 | 526 | dummy_var_btf_id = btf__add_var(btf, |
4162 | 526 | "dummy_ksym", |
4163 | 526 | BTF_VAR_GLOBAL_ALLOCATED, |
4164 | 526 | int_btf_id); |
4165 | 526 | if (dummy_var_btf_id < 0) |
4166 | 526 | pr_warn("cannot create a dummy_ksym var\n"); |
4167 | | |
4168 | 526 | return dummy_var_btf_id; |
4169 | 729 | } |
4170 | | |
4171 | | static int bpf_object__collect_externs(struct bpf_object *obj) |
4172 | 6.93k | { |
4173 | 6.93k | struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL; |
4174 | 6.93k | const struct btf_type *t; |
4175 | 6.93k | struct extern_desc *ext; |
4176 | 6.93k | int i, n, off, dummy_var_btf_id; |
4177 | 6.93k | const char *ext_name, *sec_name; |
4178 | 6.93k | size_t ext_essent_len; |
4179 | 6.93k | Elf_Scn *scn; |
4180 | 6.93k | Elf64_Shdr *sh; |
4181 | | |
4182 | 6.93k | if (!obj->efile.symbols) |
4183 | 0 | return 0; |
4184 | | |
4185 | 6.93k | scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx); |
4186 | 6.93k | sh = elf_sec_hdr(obj, scn); |
4187 | 6.93k | if (!sh || sh->sh_entsize != sizeof(Elf64_Sym)) |
4188 | 1.38k | return -LIBBPF_ERRNO__FORMAT; |
4189 | | |
4190 | 5.55k | dummy_var_btf_id = add_dummy_ksym_var(obj->btf); |
4191 | 5.55k | if (dummy_var_btf_id < 0) |
4192 | 0 | return dummy_var_btf_id; |
4193 | | |
4194 | 5.55k | n = sh->sh_size / sh->sh_entsize; |
4195 | 5.55k | pr_debug("looking for externs among %d symbols...\n", n); |
4196 | | |
4197 | 575k | for (i = 0; i < n; i++) { |
4198 | 571k | Elf64_Sym *sym = elf_sym_by_idx(obj, i); |
4199 | | |
4200 | 571k | if (!sym) |
4201 | 0 | return -LIBBPF_ERRNO__FORMAT; |
4202 | 571k | if (!sym_is_extern(sym)) |
4203 | 562k | continue; |
4204 | 8.80k | ext_name = elf_sym_str(obj, sym->st_name); |
4205 | 8.80k | if (!ext_name || !ext_name[0]) |
4206 | 4.08k | continue; |
4207 | | |
4208 | 4.71k | ext = obj->externs; |
4209 | 4.71k | ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext)); |
4210 | 4.71k | if (!ext) |
4211 | 0 | return -ENOMEM; |
4212 | 4.71k | obj->externs = ext; |
4213 | 4.71k | ext = &ext[obj->nr_extern]; |
4214 | 4.71k | memset(ext, 0, sizeof(*ext)); |
4215 | 4.71k | obj->nr_extern++; |
4216 | | |
4217 | 4.71k | ext->btf_id = find_extern_btf_id(obj->btf, ext_name); |
4218 | 4.71k | if (ext->btf_id <= 0) { |
4219 | 272 | pr_warn("failed to find BTF for extern '%s': %d\n", |
4220 | 272 | ext_name, ext->btf_id); |
4221 | 272 | return ext->btf_id; |
4222 | 272 | } |
4223 | 4.44k | t = btf__type_by_id(obj->btf, ext->btf_id); |
4224 | 4.44k | ext->name = btf__name_by_offset(obj->btf, t->name_off); |
4225 | 4.44k | ext->sym_idx = i; |
4226 | 4.44k | ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK; |
4227 | | |
4228 | 4.44k | ext_essent_len = bpf_core_essential_name_len(ext->name); |
4229 | 4.44k | ext->essent_name = NULL; |
4230 | 4.44k | if (ext_essent_len != strlen(ext->name)) { |
4231 | 323 | ext->essent_name = strndup(ext->name, ext_essent_len); |
4232 | 323 | if (!ext->essent_name) |
4233 | 0 | return -ENOMEM; |
4234 | 323 | } |
4235 | | |
4236 | 4.44k | ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id); |
4237 | 4.44k | if (ext->sec_btf_id <= 0) { |
4238 | 42 | pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n", |
4239 | 42 | ext_name, ext->btf_id, ext->sec_btf_id); |
4240 | 42 | return ext->sec_btf_id; |
4241 | 42 | } |
4242 | 4.40k | sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id); |
4243 | 4.40k | sec_name = btf__name_by_offset(obj->btf, sec->name_off); |
4244 | | |
4245 | 4.40k | if (strcmp(sec_name, KCONFIG_SEC) == 0) { |
4246 | 1.32k | if (btf_is_func(t)) { |
4247 | 1 | pr_warn("extern function %s is unsupported under %s section\n", |
4248 | 1 | ext->name, KCONFIG_SEC); |
4249 | 1 | return -ENOTSUP; |
4250 | 1 | } |
4251 | 1.32k | kcfg_sec = sec; |
4252 | 1.32k | ext->type = EXT_KCFG; |
4253 | 1.32k | ext->kcfg.sz = btf__resolve_size(obj->btf, t->type); |
4254 | 1.32k | if (ext->kcfg.sz <= 0) { |
4255 | 188 | pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n", |
4256 | 188 | ext_name, ext->kcfg.sz); |
4257 | 188 | return ext->kcfg.sz; |
4258 | 188 | } |
4259 | 1.13k | ext->kcfg.align = btf__align_of(obj->btf, t->type); |
4260 | 1.13k | if (ext->kcfg.align <= 0) { |
4261 | 51 | pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n", |
4262 | 51 | ext_name, ext->kcfg.align); |
4263 | 51 | return -EINVAL; |
4264 | 51 | } |
4265 | 1.08k | ext->kcfg.type = find_kcfg_type(obj->btf, t->type, |
4266 | 1.08k | &ext->kcfg.is_signed); |
4267 | 1.08k | if (ext->kcfg.type == KCFG_UNKNOWN) { |
4268 | 366 | pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name); |
4269 | 366 | return -ENOTSUP; |
4270 | 366 | } |
4271 | 3.07k | } else if (strcmp(sec_name, KSYMS_SEC) == 0) { |
4272 | 2.98k | ksym_sec = sec; |
4273 | 2.98k | ext->type = EXT_KSYM; |
4274 | 2.98k | skip_mods_and_typedefs(obj->btf, t->type, |
4275 | 2.98k | &ext->ksym.type_id); |
4276 | 2.98k | } else { |
4277 | 89 | pr_warn("unrecognized extern section '%s'\n", sec_name); |
4278 | 89 | return -ENOTSUP; |
4279 | 89 | } |
4280 | 4.40k | } |
4281 | 4.54k | pr_debug("collected %d externs total\n", obj->nr_extern); |
4282 | | |
4283 | 4.54k | if (!obj->nr_extern) |
4284 | 4.06k | return 0; |
4285 | | |
4286 | | /* sort externs by type, for kcfg ones also by (align, size, name) */ |
4287 | 482 | qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs); |
4288 | | |
4289 | | /* for .ksyms section, we need to turn all externs into allocated |
4290 | | * variables in BTF to pass kernel verification; we do this by |
4291 | | * pretending that each extern is an int-sized (4-byte) variable
4292 | | */ |
4293 | 482 | if (ksym_sec) { |
4294 | | /* find existing 4-byte integer type in BTF to use for fake |
4295 | | * extern variables in DATASEC |
4296 | | */ |
4297 | 305 | int int_btf_id = find_int_btf_id(obj->btf); |
4298 | | /* For an extern function, the dummy_var added
4299 | | * earlier replaces vs->type, and its name string
4300 | | * is reused to fill in any missing parameter
4301 | | * names.
4302 | | */ |
4303 | 305 | const struct btf_type *dummy_var; |
4304 | | |
4305 | 305 | dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id); |
4306 | 3.09k | for (i = 0; i < obj->nr_extern; i++) { |
4307 | 2.79k | ext = &obj->externs[i]; |
4308 | 2.79k | if (ext->type != EXT_KSYM) |
4309 | 0 | continue; |
4310 | 2.79k | pr_debug("extern (ksym) #%d: symbol %d, name %s\n", |
4311 | 2.79k | i, ext->sym_idx, ext->name); |
4312 | 2.79k | } |
4313 | | |
4314 | 305 | sec = ksym_sec; |
4315 | 305 | n = btf_vlen(sec); |
4316 | 649 | for (i = 0, off = 0; i < n; i++, off += sizeof(int)) { |
4317 | 601 | struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; |
4318 | 601 | struct btf_type *vt; |
4319 | | |
4320 | 601 | vt = (void *)btf__type_by_id(obj->btf, vs->type); |
4321 | 601 | ext_name = btf__name_by_offset(obj->btf, vt->name_off); |
4322 | 601 | ext = find_extern_by_name(obj, ext_name); |
4323 | 601 | if (!ext) { |
4324 | 257 | pr_warn("failed to find extern definition for BTF %s '%s'\n", |
4325 | 257 | btf_kind_str(vt), ext_name); |
4326 | 257 | return -ESRCH; |
4327 | 257 | } |
4328 | 344 | if (btf_is_func(vt)) { |
4329 | 78 | const struct btf_type *func_proto; |
4330 | 78 | struct btf_param *param; |
4331 | 78 | int j; |
4332 | | |
4333 | 78 | func_proto = btf__type_by_id(obj->btf, |
4334 | 78 | vt->type); |
4335 | 78 | param = btf_params(func_proto); |
4336 | | /* Reuse the dummy_var string if the |
4337 | | * func proto lacks a param name.
4338 | | */ |
4339 | 339 | for (j = 0; j < btf_vlen(func_proto); j++) |
4340 | 261 | if (param[j].type && !param[j].name_off) |
4341 | 35 | param[j].name_off = |
4342 | 35 | dummy_var->name_off; |
4343 | 78 | vs->type = dummy_var_btf_id; |
4344 | 78 | vt->info &= ~0xffff; |
4345 | 78 | vt->info |= BTF_FUNC_GLOBAL; |
4346 | 266 | } else { |
4347 | 266 | btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED; |
4348 | 266 | vt->type = int_btf_id; |
4349 | 266 | } |
4350 | 344 | vs->offset = off; |
4351 | 344 | vs->size = sizeof(int); |
4352 | 344 | } |
4353 | 48 | sec->size = off; |
4354 | 48 | } |
4355 | | |
4356 | 225 | if (kcfg_sec) { |
4357 | 177 | sec = kcfg_sec; |
4358 | | /* for kcfg externs calculate their offsets within a .kconfig map */ |
4359 | 177 | off = 0; |
4360 | 865 | for (i = 0; i < obj->nr_extern; i++) { |
4361 | 688 | ext = &obj->externs[i]; |
4362 | 688 | if (ext->type != EXT_KCFG) |
4363 | 0 | continue; |
4364 | | |
4365 | 688 | ext->kcfg.data_off = roundup(off, ext->kcfg.align); |
4366 | 688 | off = ext->kcfg.data_off + ext->kcfg.sz; |
4367 | 688 | pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n", |
4368 | 688 | i, ext->sym_idx, ext->kcfg.data_off, ext->name); |
4369 | 688 | } |
4370 | 177 | sec->size = off; |
4371 | 177 | n = btf_vlen(sec); |
4372 | 393 | for (i = 0; i < n; i++) { |
4373 | 372 | struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; |
4374 | | |
4375 | 372 | t = btf__type_by_id(obj->btf, vs->type); |
4376 | 372 | ext_name = btf__name_by_offset(obj->btf, t->name_off); |
4377 | 372 | ext = find_extern_by_name(obj, ext_name); |
4378 | 372 | if (!ext) { |
4379 | 156 | pr_warn("failed to find extern definition for BTF var '%s'\n", |
4380 | 156 | ext_name); |
4381 | 156 | return -ESRCH; |
4382 | 156 | } |
4383 | 216 | btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; |
4384 | 216 | vs->offset = ext->kcfg.data_off; |
4385 | 216 | } |
4386 | 177 | } |
4387 | 69 | return 0; |
4388 | 225 | } |
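
On the BPF side, the .ksyms externs rewritten above look like this sketch (__ksym comes from bpf_helpers.h; vmlinux.h is assumed to be generated with bpftool, and the symbols require a recent kernel):

    #include "vmlinux.h" /* assumption: bpftool-generated kernel types */
    #include <bpf/bpf_helpers.h>

    /* typed ksym var: its BTF type id is what makes obj_needs_vmlinux_btf()
     * pull in kernel BTF
     */
    extern const struct rq runqueues __ksym __weak;

    /* extern kfunc: ends up as a FUNC in the ".ksyms" DATASEC, which is
     * why add_dummy_ksym_var() substitutes a dummy VAR for it
     */
    extern void bpf_rcu_read_lock(void) __ksym;
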
4389 | | |
4390 | | static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog) |
4391 | 8.78k | { |
4392 | 8.78k | return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1; |
4393 | 8.78k | } |
4394 | | |
4395 | | struct bpf_program * |
4396 | | bpf_object__find_program_by_name(const struct bpf_object *obj, |
4397 | | const char *name) |
4398 | 0 | { |
4399 | 0 | struct bpf_program *prog; |
4400 | | 
4401 | 0 | bpf_object__for_each_program(prog, obj) { |
4402 | 0 | if (prog_is_subprog(obj, prog)) |
4403 | 0 | continue; |
4404 | 0 | if (!strcmp(prog->name, name)) |
4405 | 0 | return prog; |
4406 | 0 | } |
4407 | 0 | return errno = ENOENT, NULL; |
4408 | 0 | } |
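
Typical userspace usage of this lookup — a sketch with illustrative names:

    #include <errno.h>
    #include <bpf/libbpf.h>

    static int enable_prog(struct bpf_object *obj)
    {
            struct bpf_program *prog;

            /* matches the C function name, never a subprog in .text */
            prog = bpf_object__find_program_by_name(obj, "handle_exec");
            if (!prog)
                    return -ENOENT;
            return bpf_program__set_autoload(prog, true);
    }
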
4409 | | |
4410 | | static bool bpf_object__shndx_is_data(const struct bpf_object *obj, |
4411 | | int shndx) |
4412 | 469 | { |
4413 | 469 | switch (obj->efile.secs[shndx].sec_type) { |
4414 | 98 | case SEC_BSS: |
4415 | 327 | case SEC_DATA: |
4416 | 468 | case SEC_RODATA: |
4417 | 468 | return true; |
4418 | 1 | default: |
4419 | 1 | return false; |
4420 | 469 | } |
4421 | 469 | } |
4422 | | |
4423 | | static bool bpf_object__shndx_is_maps(const struct bpf_object *obj, |
4424 | | int shndx) |
4425 | 21 | { |
4426 | 21 | return shndx == obj->efile.btf_maps_shndx; |
4427 | 21 | } |
4428 | | |
4429 | | static enum libbpf_map_type |
4430 | | bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx) |
4431 | 562 | { |
4432 | 562 | if (shndx == obj->efile.symbols_shndx) |
4433 | 1 | return LIBBPF_MAP_KCONFIG; |
4434 | | |
4435 | 561 | switch (obj->efile.secs[shndx].sec_type) { |
4436 | 98 | case SEC_BSS: |
4437 | 98 | return LIBBPF_MAP_BSS; |
4438 | 229 | case SEC_DATA: |
4439 | 229 | return LIBBPF_MAP_DATA; |
4440 | 141 | case SEC_RODATA: |
4441 | 141 | return LIBBPF_MAP_RODATA; |
4442 | 93 | default: |
4443 | 93 | return LIBBPF_MAP_UNSPEC; |
4444 | 561 | } |
4445 | 561 | } |
4446 | | |
4447 | | static int bpf_program__record_reloc(struct bpf_program *prog, |
4448 | | struct reloc_desc *reloc_desc, |
4449 | | __u32 insn_idx, const char *sym_name, |
4450 | | const Elf64_Sym *sym, const Elf64_Rel *rel) |
4451 | 1.72k | { |
4452 | 1.72k | struct bpf_insn *insn = &prog->insns[insn_idx]; |
4453 | 1.72k | size_t map_idx, nr_maps = prog->obj->nr_maps; |
4454 | 1.72k | struct bpf_object *obj = prog->obj; |
4455 | 1.72k | __u32 shdr_idx = sym->st_shndx; |
4456 | 1.72k | enum libbpf_map_type type; |
4457 | 1.72k | const char *sym_sec_name; |
4458 | 1.72k | struct bpf_map *map; |
4459 | | |
4460 | 1.72k | if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) { |
4461 | 49 | pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n", |
4462 | 49 | prog->name, sym_name, insn_idx, insn->code); |
4463 | 49 | return -LIBBPF_ERRNO__RELOC; |
4464 | 49 | } |
4465 | | |
4466 | 1.67k | if (sym_is_extern(sym)) { |
4467 | 1 | int sym_idx = ELF64_R_SYM(rel->r_info); |
4468 | 1 | int i, n = obj->nr_extern; |
4469 | 1 | struct extern_desc *ext; |
4470 | | |
4471 | 1 | for (i = 0; i < n; i++) { |
4472 | 0 | ext = &obj->externs[i]; |
4473 | 0 | if (ext->sym_idx == sym_idx) |
4474 | 0 | break; |
4475 | 0 | } |
4476 | 1 | if (i >= n) { |
4477 | 1 | pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n", |
4478 | 1 | prog->name, sym_name, sym_idx); |
4479 | 1 | return -LIBBPF_ERRNO__RELOC; |
4480 | 1 | } |
4481 | 0 | pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n", |
4482 | 0 | prog->name, i, ext->name, ext->sym_idx, insn_idx); |
4483 | 0 | if (insn->code == (BPF_JMP | BPF_CALL)) |
4484 | 0 | reloc_desc->type = RELO_EXTERN_CALL; |
4485 | 0 | else |
4486 | 0 | reloc_desc->type = RELO_EXTERN_LD64; |
4487 | 0 | reloc_desc->insn_idx = insn_idx; |
4488 | 0 | reloc_desc->ext_idx = i; |
4489 | 0 | return 0; |
4490 | 1 | } |
4491 | | |
4492 | | /* sub-program call relocation */ |
4493 | 1.67k | if (is_call_insn(insn)) { |
4494 | 448 | if (insn->src_reg != BPF_PSEUDO_CALL) { |
4495 | 6 | pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name); |
4496 | 6 | return -LIBBPF_ERRNO__RELOC; |
4497 | 6 | } |
4498 | | /* text_shndx can be 0, if no default "main" program exists */ |
4499 | 442 | if (!shdr_idx || shdr_idx != obj->efile.text_shndx) { |
4500 | 11 | sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); |
4501 | 11 | pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n", |
4502 | 11 | prog->name, sym_name, sym_sec_name); |
4503 | 11 | return -LIBBPF_ERRNO__RELOC; |
4504 | 11 | } |
4505 | 431 | if (sym->st_value % BPF_INSN_SZ) { |
4506 | 1 | pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n", |
4507 | 1 | prog->name, sym_name, (size_t)sym->st_value); |
4508 | 1 | return -LIBBPF_ERRNO__RELOC; |
4509 | 1 | } |
4510 | 430 | reloc_desc->type = RELO_CALL; |
4511 | 430 | reloc_desc->insn_idx = insn_idx; |
4512 | 430 | reloc_desc->sym_off = sym->st_value; |
4513 | 430 | return 0; |
4514 | 431 | } |
4515 | | |
4516 | 1.22k | if (!shdr_idx || shdr_idx >= SHN_LORESERVE) { |
4517 | 2 | pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n", |
4518 | 2 | prog->name, sym_name, shdr_idx); |
4519 | 2 | return -LIBBPF_ERRNO__RELOC; |
4520 | 2 | } |
4521 | | |
4522 | | /* loading subprog addresses */ |
4523 | 1.22k | if (sym_is_subprog(sym, obj->efile.text_shndx)) { |
4524 | | /* global_func: sym->st_value = offset in the section, insn->imm = 0. |
4525 | | * local_func: sym->st_value = 0, insn->imm = offset in the section. |
4526 | | */ |
4527 | 664 | if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) { |
4528 | 5 | pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n", |
4529 | 5 | prog->name, sym_name, (size_t)sym->st_value, insn->imm); |
4530 | 5 | return -LIBBPF_ERRNO__RELOC; |
4531 | 5 | } |
4532 | | |
4533 | 659 | reloc_desc->type = RELO_SUBPROG_ADDR; |
4534 | 659 | reloc_desc->insn_idx = insn_idx; |
4535 | 659 | reloc_desc->sym_off = sym->st_value; |
4536 | 659 | return 0; |
4537 | 664 | } |
4538 | | |
4539 | 562 | type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx); |
4540 | 562 | sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); |
4541 | | |
4542 | | /* arena data relocation */ |
4543 | 562 | if (shdr_idx == obj->efile.arena_data_shndx) { |
4544 | 72 | reloc_desc->type = RELO_DATA; |
4545 | 72 | reloc_desc->insn_idx = insn_idx; |
4546 | 72 | reloc_desc->map_idx = obj->arena_map - obj->maps; |
4547 | 72 | reloc_desc->sym_off = sym->st_value; |
4548 | 72 | return 0; |
4549 | 72 | } |
4550 | | |
4551 | | /* generic map reference relocation */ |
4552 | 490 | if (type == LIBBPF_MAP_UNSPEC) { |
4553 | 21 | if (!bpf_object__shndx_is_maps(obj, shdr_idx)) { |
4554 | 21 | pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n", |
4555 | 21 | prog->name, sym_name, sym_sec_name); |
4556 | 21 | return -LIBBPF_ERRNO__RELOC; |
4557 | 21 | } |
4558 | 0 | for (map_idx = 0; map_idx < nr_maps; map_idx++) { |
4559 | 0 | map = &obj->maps[map_idx]; |
4560 | 0 | if (map->libbpf_type != type || |
4561 | 0 | map->sec_idx != sym->st_shndx || |
4562 | 0 | map->sec_offset != sym->st_value) |
4563 | 0 | continue; |
4564 | 0 | pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n", |
4565 | 0 | prog->name, map_idx, map->name, map->sec_idx, |
4566 | 0 | map->sec_offset, insn_idx); |
4567 | 0 | break; |
4568 | 0 | } |
4569 | 0 | if (map_idx >= nr_maps) { |
4570 | 0 | pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n", |
4571 | 0 | prog->name, sym_sec_name, (size_t)sym->st_value); |
4572 | 0 | return -LIBBPF_ERRNO__RELOC; |
4573 | 0 | } |
4574 | 0 | reloc_desc->type = RELO_LD64; |
4575 | 0 | reloc_desc->insn_idx = insn_idx; |
4576 | 0 | reloc_desc->map_idx = map_idx; |
4577 | 0 | reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */ |
4578 | 0 | return 0; |
4579 | 0 | } |
4580 | | |
4581 | | /* global data map relocation */ |
4582 | 469 | if (!bpf_object__shndx_is_data(obj, shdr_idx)) { |
4583 | 1 | pr_warn("prog '%s': bad data relo against section '%s'\n", |
4584 | 1 | prog->name, sym_sec_name); |
4585 | 1 | return -LIBBPF_ERRNO__RELOC; |
4586 | 1 | } |
4587 | 704 | for (map_idx = 0; map_idx < nr_maps; map_idx++) { |
4588 | 702 | map = &obj->maps[map_idx]; |
4589 | 702 | if (map->libbpf_type != type || map->sec_idx != sym->st_shndx) |
4590 | 236 | continue; |
4591 | 466 | pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n", |
4592 | 466 | prog->name, map_idx, map->name, map->sec_idx, |
4593 | 466 | map->sec_offset, insn_idx); |
4594 | 466 | break; |
4595 | 702 | } |
4596 | 468 | if (map_idx >= nr_maps) { |
4597 | 2 | pr_warn("prog '%s': data relo failed to find map for section '%s'\n", |
4598 | 2 | prog->name, sym_sec_name); |
4599 | 2 | return -LIBBPF_ERRNO__RELOC; |
4600 | 2 | } |
4601 | | |
4602 | 466 | reloc_desc->type = RELO_DATA; |
4603 | 466 | reloc_desc->insn_idx = insn_idx; |
4604 | 466 | reloc_desc->map_idx = map_idx; |
4605 | 466 | reloc_desc->sym_off = sym->st_value; |
4606 | 466 | return 0; |
4607 | 468 | } |
4608 | | |
4609 | | static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx) |
4610 | 2.22k | { |
4611 | 2.22k | return insn_idx >= prog->sec_insn_off && |
4612 | 2.22k | insn_idx < prog->sec_insn_off + prog->sec_insn_cnt; |
4613 | 2.22k | } |
4614 | | |
4615 | | static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj, |
4616 | | size_t sec_idx, size_t insn_idx) |
4617 | 8.16k | { |
4618 | 8.16k | int l = 0, r = obj->nr_programs - 1, m; |
4619 | 8.16k | struct bpf_program *prog; |
4620 | | |
4621 | 8.16k | if (!obj->nr_programs) |
4622 | 5.43k | return NULL; |
4623 | | |
4624 | 3.96k | while (l < r) { |
4625 | 1.24k | m = l + (r - l + 1) / 2; |
4626 | 1.24k | prog = &obj->programs[m]; |
4627 | | |
4628 | 1.24k | if (prog->sec_idx < sec_idx || |
4629 | 1.24k | (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx)) |
4630 | 532 | l = m; |
4631 | 710 | else |
4632 | 710 | r = m - 1; |
4633 | 1.24k | } |
4634 | | /* matching program could be at index l, but it still might be the |
4635 | | * wrong one, so we need to double-check conditions one last time
4636 | | */ |
4637 | 2.72k | prog = &obj->programs[l]; |
4638 | 2.72k | if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx)) |
4639 | 1.72k | return prog; |
4640 | 1.00k | return NULL; |
4641 | 2.72k | } |
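/* find_prog_by_sec_insn() above is a "rightmost match" binary search:
 * biasing the midpoint up with m = l + (r - l + 1) / 2 guarantees progress
 * when only two candidates remain. The same invariant over a plain sorted
 * array, as a standalone sketch:
 */
static int find_last_le(const int *arr, int n, int key)
{
        int l = 0, r = n - 1, m;

        if (n == 0 || arr[0] > key)
                return -1;               /* no element <= key */
        while (l < r) {
                m = l + (r - l + 1) / 2; /* round up so l always advances */
                if (arr[m] <= key)
                        l = m;           /* arr[l] stays <= key */
                else
                        r = m - 1;
        }
        return l;                        /* index of last element <= key */
}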
4642 | | |
4643 | | static int |
4644 | | bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data) |
4645 | 1.01k | { |
4646 | 1.01k | const char *relo_sec_name, *sec_name; |
4647 | 1.01k | size_t sec_idx = shdr->sh_info, sym_idx; |
4648 | 1.01k | struct bpf_program *prog; |
4649 | 1.01k | struct reloc_desc *relos; |
4650 | 1.01k | int err, i, nrels; |
4651 | 1.01k | const char *sym_name; |
4652 | 1.01k | __u32 insn_idx; |
4653 | 1.01k | Elf_Scn *scn; |
4654 | 1.01k | Elf_Data *scn_data; |
4655 | 1.01k | Elf64_Sym *sym; |
4656 | 1.01k | Elf64_Rel *rel; |
4657 | | |
4658 | 1.01k | if (sec_idx >= obj->efile.sec_cnt) |
4659 | 0 | return -EINVAL; |
4660 | | |
4661 | 1.01k | scn = elf_sec_by_idx(obj, sec_idx); |
4662 | 1.01k | scn_data = elf_sec_data(obj, scn); |
4663 | 1.01k | if (!scn_data) |
4664 | 14 | return -LIBBPF_ERRNO__FORMAT; |
4665 | | |
4666 | 997 | relo_sec_name = elf_sec_str(obj, shdr->sh_name); |
4667 | 997 | sec_name = elf_sec_name(obj, scn); |
4668 | 997 | if (!relo_sec_name || !sec_name) |
4669 | 18 | return -EINVAL; |
4670 | | |
4671 | 979 | pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n", |
4672 | 979 | relo_sec_name, sec_idx, sec_name); |
4673 | 979 | nrels = shdr->sh_size / shdr->sh_entsize; |
4674 | | |
4675 | 8.53k | for (i = 0; i < nrels; i++) { |
4676 | 8.53k | rel = elf_rel_by_idx(data, i); |
4677 | 8.53k | if (!rel) { |
4678 | 0 | pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i); |
4679 | 0 | return -LIBBPF_ERRNO__FORMAT; |
4680 | 0 | } |
4681 | | |
4682 | 8.53k | sym_idx = ELF64_R_SYM(rel->r_info); |
4683 | 8.53k | sym = elf_sym_by_idx(obj, sym_idx); |
4684 | 8.53k | if (!sym) { |
4685 | 144 | pr_warn("sec '%s': symbol #%zu not found for relo #%d\n", |
4686 | 144 | relo_sec_name, sym_idx, i); |
4687 | 144 | return -LIBBPF_ERRNO__FORMAT; |
4688 | 144 | } |
4689 | | |
4690 | 8.39k | if (sym->st_shndx >= obj->efile.sec_cnt) { |
4691 | 28 | pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n", |
4692 | 28 | relo_sec_name, sym_idx, (size_t)sym->st_shndx, i); |
4693 | 28 | return -LIBBPF_ERRNO__FORMAT; |
4694 | 28 | } |
4695 | | |
4696 | 8.36k | if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) { |
4697 | 204 | pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n", |
4698 | 204 | relo_sec_name, (size_t)rel->r_offset, i); |
4699 | 204 | return -LIBBPF_ERRNO__FORMAT; |
4700 | 204 | } |
4701 | | |
4702 | 8.16k | insn_idx = rel->r_offset / BPF_INSN_SZ; |
4703 | | /* relocations against static functions are recorded as |
4704 | | * relocations against the section that contains a function; |
4705 | | * in such a case, the symbol will be STT_SECTION and sym.st_name
4706 | | * will point to an empty string (0), so fetch the section name
4707 | | * instead
4708 | | */ |
4709 | 8.16k | if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0) |
4710 | 452 | sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx)); |
4711 | 7.71k | else |
4712 | 7.71k | sym_name = elf_sym_str(obj, sym->st_name); |
4713 | 8.16k | sym_name = sym_name ?: "<?"; |
4714 | | |
4715 | 8.16k | pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n", |
4716 | 6.82k | relo_sec_name, i, insn_idx, sym_name); |
4717 | | |
4718 | 6.82k | prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); |
4719 | 6.82k | if (!prog) { |
4720 | 6.43k | pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n", |
4721 | 6.43k | relo_sec_name, i, sec_name, insn_idx); |
4722 | 6.43k | continue; |
4723 | 6.43k | } |
4724 | | |
4725 | 385 | relos = libbpf_reallocarray(prog->reloc_desc, |
4726 | 385 | prog->nr_reloc + 1, sizeof(*relos)); |
4727 | 385 | if (!relos) |
4728 | 0 | return -ENOMEM; |
4729 | 385 | prog->reloc_desc = relos; |
4730 | | |
4731 | | /* adjust insn_idx to local BPF program frame of reference */ |
4732 | 385 | insn_idx -= prog->sec_insn_off; |
4733 | 385 | err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc], |
4734 | 385 | insn_idx, sym_name, sym, rel); |
4735 | 385 | if (err) |
4736 | 99 | return err; |
4737 | | |
4738 | 286 | prog->nr_reloc++; |
4739 | 286 | } |
4740 | 18.4E | return 0; |
4741 | 979 | } |
4742 | | |
4743 | | static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map) |
4744 | 1.76k | { |
4745 | 1.76k | int id; |
4746 | | |
4747 | 1.76k | if (!obj->btf) |
4748 | 1.36k | return -ENOENT; |
4749 | | |
4750 | | /* if it's a BTF-defined map, we don't need to search for type IDs.
4751 | | * A struct_ops map does not need btf_key_type_id and
4752 | | * btf_value_type_id. |
4753 | | */ |
4754 | 402 | if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map)) |
4755 | 76 | return 0; |
4756 | | |
4757 | | /* |
4758 | | * LLVM annotates global data differently in BTF, that is, |
4759 | | * only as '.data', '.bss' or '.rodata'. |
4760 | | */ |
4761 | 326 | if (!bpf_map__is_internal(map)) |
4762 | 0 | return -ENOENT; |
4763 | | |
4764 | 326 | id = btf__find_by_name(obj->btf, map->real_name); |
4765 | 326 | if (id < 0) |
4766 | 157 | return id; |
4767 | | |
4768 | 169 | map->btf_key_type_id = 0; |
4769 | 169 | map->btf_value_type_id = id; |
4770 | 169 | return 0; |
4771 | 326 | } |
4772 | | |
4773 | | static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info) |
4774 | 0 | { |
4775 | 0 | char file[PATH_MAX], buff[4096]; |
4776 | 0 | FILE *fp; |
4777 | 0 | __u32 val; |
4778 | 0 | int err; |
4779 | |
4780 | 0 | snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd); |
4781 | 0 | memset(info, 0, sizeof(*info)); |
4782 | |
4783 | 0 | fp = fopen(file, "re"); |
4784 | 0 | if (!fp) { |
4785 | 0 | err = -errno; |
4786 | 0 | pr_warn("failed to open %s: %d. No procfs support?\n", file, |
4787 | 0 | err); |
4788 | 0 | return err; |
4789 | 0 | } |
4790 | | |
4791 | 0 | while (fgets(buff, sizeof(buff), fp)) { |
4792 | 0 | if (sscanf(buff, "map_type:\t%u", &val) == 1) |
4793 | 0 | info->type = val; |
4794 | 0 | else if (sscanf(buff, "key_size:\t%u", &val) == 1) |
4795 | 0 | info->key_size = val; |
4796 | 0 | else if (sscanf(buff, "value_size:\t%u", &val) == 1) |
4797 | 0 | info->value_size = val; |
4798 | 0 | else if (sscanf(buff, "max_entries:\t%u", &val) == 1) |
4799 | 0 | info->max_entries = val; |
4800 | 0 | else if (sscanf(buff, "map_flags:\t%i", &val) == 1) |
4801 | 0 | info->map_flags = val; |
4802 | 0 | } |
4803 | |
4804 | 0 | fclose(fp); |
4805 | |
4806 | 0 | return 0; |
4807 | 0 | } |
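/* For reference, the fdinfo file parsed above looks roughly like this for
 * a BPF map (exact field set varies by kernel version; values are
 * illustrative):
 *
 *   pos:          0
 *   flags:        02000002
 *   mnt_id:       15
 *   map_type:     1
 *   key_size:     4
 *   value_size:   8
 *   max_entries:  64
 *   map_flags:    0x0
 *
 * Only the map_type/key_size/value_size/max_entries/map_flags lines are
 * consumed; everything else falls through the sscanf() chain.
 */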
4808 | | |
4809 | | bool bpf_map__autocreate(const struct bpf_map *map) |
4810 | 0 | { |
4811 | 0 | return map->autocreate; |
4812 | 0 | } |
4813 | | |
4814 | | int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate) |
4815 | 0 | { |
4816 | 0 | if (map->obj->loaded) |
4817 | 0 | return libbpf_err(-EBUSY); |
4818 | | |
4819 | 0 | map->autocreate = autocreate; |
4820 | 0 | return 0; |
4821 | 0 | } |
4822 | | |
4823 | | int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach) |
4824 | 0 | { |
4825 | 0 | if (!bpf_map__is_struct_ops(map)) |
4826 | 0 | return libbpf_err(-EINVAL); |
4827 | | |
4828 | 0 | map->autoattach = autoattach; |
4829 | 0 | return 0; |
4830 | 0 | } |
4831 | | |
4832 | | bool bpf_map__autoattach(const struct bpf_map *map) |
4833 | 0 | { |
4834 | 0 | return map->autoattach; |
4835 | 0 | } |
4836 | | |
4837 | | int bpf_map__reuse_fd(struct bpf_map *map, int fd) |
4838 | 0 | { |
4839 | 0 | struct bpf_map_info info; |
4840 | 0 | __u32 len = sizeof(info), name_len; |
4841 | 0 | int new_fd, err; |
4842 | 0 | char *new_name; |
4843 | |
4844 | 0 | memset(&info, 0, len); |
4845 | 0 | err = bpf_map_get_info_by_fd(fd, &info, &len); |
4846 | 0 | if (err && errno == EINVAL) |
4847 | 0 | err = bpf_get_map_info_from_fdinfo(fd, &info); |
4848 | 0 | if (err) |
4849 | 0 | return libbpf_err(err); |
4850 | | |
4851 | 0 | name_len = strlen(info.name); |
4852 | 0 | if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0) |
4853 | 0 | new_name = strdup(map->name); |
4854 | 0 | else |
4855 | 0 | new_name = strdup(info.name); |
4856 | |
4857 | 0 | if (!new_name) |
4858 | 0 | return libbpf_err(-errno); |
4859 | | |
4860 | | /* |
4861 | | * Like dup(), but make sure new FD is >= 3 and has O_CLOEXEC set. |
4862 | | * This is similar to what we do in ensure_good_fd(), but without |
4863 | | * closing original FD. |
4864 | | */ |
4865 | 0 | new_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3); |
4866 | 0 | if (new_fd < 0) { |
4867 | 0 | err = -errno; |
4868 | 0 | goto err_free_new_name; |
4869 | 0 | } |
4870 | | |
4871 | 0 | err = reuse_fd(map->fd, new_fd); |
4872 | 0 | if (err) |
4873 | 0 | goto err_free_new_name; |
4874 | | |
4875 | 0 | free(map->name); |
4876 | |
4877 | 0 | map->name = new_name; |
4878 | 0 | map->def.type = info.type; |
4879 | 0 | map->def.key_size = info.key_size; |
4880 | 0 | map->def.value_size = info.value_size; |
4881 | 0 | map->def.max_entries = info.max_entries; |
4882 | 0 | map->def.map_flags = info.map_flags; |
4883 | 0 | map->btf_key_type_id = info.btf_key_type_id; |
4884 | 0 | map->btf_value_type_id = info.btf_value_type_id; |
4885 | 0 | map->reused = true; |
4886 | 0 | map->map_extra = info.map_extra; |
4887 | |
4888 | 0 | return 0; |
4889 | | |
4890 | 0 | err_free_new_name: |
4891 | 0 | free(new_name); |
4892 | 0 | return libbpf_err(err); |
4893 | 0 | } |
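/* Typical use of the reuse path above: adopt a map that another process
 * pinned in bpffs, before bpf_object__load(). A sketch with a hypothetical
 * pin path (map definitions on both sides must match):
 */
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int adopt_pinned_map(struct bpf_map *map)
{
        int pin_fd, err;

        pin_fd = bpf_obj_get("/sys/fs/bpf/my_shared_map");
        if (pin_fd < 0)
                return pin_fd;
        err = bpf_map__reuse_fd(map, pin_fd);
        close(pin_fd); /* reuse_fd dup()s internally; the original can go */
        return err;
}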
4894 | | |
4895 | | __u32 bpf_map__max_entries(const struct bpf_map *map) |
4896 | 0 | { |
4897 | 0 | return map->def.max_entries; |
4898 | 0 | } |
4899 | | |
4900 | | struct bpf_map *bpf_map__inner_map(struct bpf_map *map) |
4901 | 0 | { |
4902 | 0 | if (!bpf_map_type__is_map_in_map(map->def.type)) |
4903 | 0 | return errno = EINVAL, NULL; |
4904 | | |
4905 | 0 | return map->inner_map; |
4906 | 0 | } |
4907 | | |
4908 | | int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) |
4909 | 0 | { |
4910 | 0 | if (map->obj->loaded) |
4911 | 0 | return libbpf_err(-EBUSY); |
4912 | | |
4913 | 0 | map->def.max_entries = max_entries; |
4914 | | |
4915 | | /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */ |
4916 | 0 | if (map_is_ringbuf(map)) |
4917 | 0 | map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries); |
4918 | |
4919 | 0 | return 0; |
4920 | 0 | } |
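/* Thanks to the ringbuf auto-adjustment above, callers can request any
 * size before load and still end up with a kernel-acceptable (power-of-2,
 * page-aligned) ringbuf size. Sketch with a hypothetical map name:
 */
#include <bpf/libbpf.h>

static int grow_ringbuf(struct bpf_object *obj)
{
        struct bpf_map *rb = bpf_object__find_map_by_name(obj, "events");

        if (!rb)
                return -1;
        /* must be called before bpf_object__load() */
        return bpf_map__set_max_entries(rb, 1000000);
}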
4921 | | |
4922 | | static int bpf_object_prepare_token(struct bpf_object *obj) |
4923 | 0 | { |
4924 | 0 | const char *bpffs_path; |
4925 | 0 | int bpffs_fd = -1, token_fd, err; |
4926 | 0 | bool mandatory; |
4927 | 0 | enum libbpf_print_level level; |
4928 | | |
4929 | | /* token is explicitly prevented */ |
4930 | 0 | if (obj->token_path && obj->token_path[0] == '\0') { |
4931 | 0 | pr_debug("object '%s': token is prevented, skipping...\n", obj->name); |
4932 | 0 | return 0; |
4933 | 0 | } |
4934 | | |
4935 | 0 | mandatory = obj->token_path != NULL; |
4936 | 0 | level = mandatory ? LIBBPF_WARN : LIBBPF_DEBUG; |
4937 | |
4938 | 0 | bpffs_path = obj->token_path ?: BPF_FS_DEFAULT_PATH; |
4939 | 0 | bpffs_fd = open(bpffs_path, O_DIRECTORY, O_RDWR); |
4940 | 0 | if (bpffs_fd < 0) { |
4941 | 0 | err = -errno; |
4942 | 0 | __pr(level, "object '%s': failed (%d) to open BPF FS mount at '%s'%s\n", |
4943 | 0 | obj->name, err, bpffs_path, |
4944 | 0 | mandatory ? "" : ", skipping optional step..."); |
4945 | 0 | return mandatory ? err : 0; |
4946 | 0 | } |
4947 | | |
4948 | 0 | token_fd = bpf_token_create(bpffs_fd, 0); |
4949 | 0 | close(bpffs_fd); |
4950 | 0 | if (token_fd < 0) { |
4951 | 0 | if (!mandatory && token_fd == -ENOENT) { |
4952 | 0 | pr_debug("object '%s': BPF FS at '%s' doesn't have BPF token delegation set up, skipping...\n", |
4953 | 0 | obj->name, bpffs_path); |
4954 | 0 | return 0; |
4955 | 0 | } |
4956 | 0 | __pr(level, "object '%s': failed (%d) to create BPF token from '%s'%s\n", |
4957 | 0 | obj->name, token_fd, bpffs_path, |
4958 | 0 | mandatory ? "" : ", skipping optional step..."); |
4959 | 0 | return mandatory ? token_fd : 0; |
4960 | 0 | } |
4961 | | |
4962 | 0 | obj->feat_cache = calloc(1, sizeof(*obj->feat_cache)); |
4963 | 0 | if (!obj->feat_cache) { |
4964 | 0 | close(token_fd); |
4965 | 0 | return -ENOMEM; |
4966 | 0 | } |
4967 | | |
4968 | 0 | obj->token_fd = token_fd; |
4969 | 0 | obj->feat_cache->token_fd = token_fd; |
4970 | |
4971 | 0 | return 0; |
4972 | 0 | } |
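/* Callers reach the token logic above through open options: NULL means
 * "try /sys/fs/bpf but treat failure as optional", an empty string opts
 * out, and an explicit path makes the token mandatory. A sketch assuming
 * a libbpf version with bpf_token_path support and a delegating bpffs
 * mount at the given path:
 */
#include <bpf/libbpf.h>

static struct bpf_object *open_with_token(void)
{
        LIBBPF_OPTS(bpf_object_open_opts, opts,
                .bpf_token_path = "/sys/fs/bpf", /* token becomes mandatory */
        );

        return bpf_object__open_file("example.bpf.o", &opts); /* hypothetical file */
}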
4973 | | |
4974 | | static int |
4975 | | bpf_object__probe_loading(struct bpf_object *obj) |
4976 | 0 | { |
4977 | 0 | char *cp, errmsg[STRERR_BUFSIZE]; |
4978 | 0 | struct bpf_insn insns[] = { |
4979 | 0 | BPF_MOV64_IMM(BPF_REG_0, 0), |
4980 | 0 | BPF_EXIT_INSN(), |
4981 | 0 | }; |
4982 | 0 | int ret, insn_cnt = ARRAY_SIZE(insns); |
4983 | 0 | LIBBPF_OPTS(bpf_prog_load_opts, opts, |
4984 | 0 | .token_fd = obj->token_fd, |
4985 | 0 | .prog_flags = obj->token_fd ? BPF_F_TOKEN_FD : 0, |
4986 | 0 | ); |
4987 | |
4988 | 0 | if (obj->gen_loader) |
4989 | 0 | return 0; |
4990 | | |
4991 | 0 | ret = bump_rlimit_memlock(); |
4992 | 0 | if (ret) |
4993 | 0 | pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret); |
4994 | | |
4995 | | /* make sure basic loading works */ |
4996 | 0 | ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts); |
4997 | 0 | if (ret < 0) |
4998 | 0 | ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts); |
4999 | 0 | if (ret < 0) { |
5000 | 0 | ret = errno; |
5001 | 0 | cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); |
5002 | 0 | pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF " |
5003 | 0 | "program. Make sure your kernel supports BPF " |
5004 | 0 | "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is " |
5005 | 0 | "set to a big enough value.\n", __func__, cp, ret);
5006 | 0 | return -ret; |
5007 | 0 | } |
5008 | 0 | close(ret); |
5009 | |
5010 | 0 | return 0; |
5011 | 0 | } |
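/* On kernels without memcg-based accounting (pre-5.11), the RLIMIT_MEMLOCK
 * bump attempted above is what lets the probe succeed; the manual
 * equivalent is a sketch like this (needs root or CAP_SYS_RESOURCE):
 */
#include <sys/resource.h>

static int bump_memlock_rlimit(void)
{
        struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };

        return setrlimit(RLIMIT_MEMLOCK, &rl);
}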
5012 | | |
5013 | | bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id) |
5014 | 0 | { |
5015 | 0 | if (obj->gen_loader) |
5016 | | /* To generate a loader program, assume the latest kernel
5017 | | * to avoid doing extra prog_load, map_create syscalls. |
5018 | | */ |
5019 | 0 | return true; |
5020 | | |
5021 | 0 | if (obj->token_fd) |
5022 | 0 | return feat_supported(obj->feat_cache, feat_id); |
5023 | | |
5024 | 0 | return feat_supported(NULL, feat_id); |
5025 | 0 | } |
5026 | | |
5027 | | static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) |
5028 | 0 | { |
5029 | 0 | struct bpf_map_info map_info; |
5030 | 0 | char msg[STRERR_BUFSIZE]; |
5031 | 0 | __u32 map_info_len = sizeof(map_info); |
5032 | 0 | int err; |
5033 | |
5034 | 0 | memset(&map_info, 0, map_info_len); |
5035 | 0 | err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len); |
5036 | 0 | if (err && errno == EINVAL) |
5037 | 0 | err = bpf_get_map_info_from_fdinfo(map_fd, &map_info); |
5038 | 0 | if (err) { |
5039 | 0 | pr_warn("failed to get map info for map FD %d: %s\n", map_fd, |
5040 | 0 | libbpf_strerror_r(errno, msg, sizeof(msg))); |
5041 | 0 | return false; |
5042 | 0 | } |
5043 | | |
5044 | 0 | return (map_info.type == map->def.type && |
5045 | 0 | map_info.key_size == map->def.key_size && |
5046 | 0 | map_info.value_size == map->def.value_size && |
5047 | 0 | map_info.max_entries == map->def.max_entries && |
5048 | 0 | map_info.map_flags == map->def.map_flags && |
5049 | 0 | map_info.map_extra == map->map_extra); |
5050 | 0 | } |
5051 | | |
5052 | | static int |
5053 | | bpf_object__reuse_map(struct bpf_map *map) |
5054 | 0 | { |
5055 | 0 | char *cp, errmsg[STRERR_BUFSIZE]; |
5056 | 0 | int err, pin_fd; |
5057 | |
5058 | 0 | pin_fd = bpf_obj_get(map->pin_path); |
5059 | 0 | if (pin_fd < 0) { |
5060 | 0 | err = -errno; |
5061 | 0 | if (err == -ENOENT) { |
5062 | 0 | pr_debug("found no pinned map to reuse at '%s'\n", |
5063 | 0 | map->pin_path); |
5064 | 0 | return 0; |
5065 | 0 | } |
5066 | | |
5067 | 0 | cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); |
5068 | 0 | pr_warn("couldn't retrieve pinned map '%s': %s\n", |
5069 | 0 | map->pin_path, cp); |
5070 | 0 | return err; |
5071 | 0 | } |
5072 | | |
5073 | 0 | if (!map_is_reuse_compat(map, pin_fd)) { |
5074 | 0 | pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n", |
5075 | 0 | map->pin_path); |
5076 | 0 | close(pin_fd); |
5077 | 0 | return -EINVAL; |
5078 | 0 | } |
5079 | | |
5080 | 0 | err = bpf_map__reuse_fd(map, pin_fd); |
5081 | 0 | close(pin_fd); |
5082 | 0 | if (err) |
5083 | 0 | return err; |
5084 | | |
5085 | 0 | map->pinned = true; |
5086 | 0 | pr_debug("reused pinned map at '%s'\n", map->pin_path); |
5087 | |
5088 | 0 | return 0; |
5089 | 0 | } |
5090 | | |
5091 | | static int |
5092 | | bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) |
5093 | 0 | { |
5094 | 0 | enum libbpf_map_type map_type = map->libbpf_type; |
5095 | 0 | char *cp, errmsg[STRERR_BUFSIZE]; |
5096 | 0 | int err, zero = 0; |
5097 | |
5098 | 0 | if (obj->gen_loader) { |
5099 | 0 | bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps, |
5100 | 0 | map->mmaped, map->def.value_size); |
5101 | 0 | if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) |
5102 | 0 | bpf_gen__map_freeze(obj->gen_loader, map - obj->maps); |
5103 | 0 | return 0; |
5104 | 0 | } |
5105 | | |
5106 | 0 | err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0); |
5107 | 0 | if (err) { |
5108 | 0 | err = -errno; |
5109 | 0 | cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); |
5110 | 0 | pr_warn("Error setting initial map(%s) contents: %s\n", |
5111 | 0 | map->name, cp); |
5112 | 0 | return err; |
5113 | 0 | } |
5114 | | |
5115 | | /* Freeze .rodata and .kconfig map as read-only from syscall side. */ |
5116 | 0 | if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) { |
5117 | 0 | err = bpf_map_freeze(map->fd); |
5118 | 0 | if (err) { |
5119 | 0 | err = -errno; |
5120 | 0 | cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); |
5121 | 0 | pr_warn("Error freezing map(%s) as read-only: %s\n", |
5122 | 0 | map->name, cp); |
5123 | 0 | return err; |
5124 | 0 | } |
5125 | 0 | } |
5126 | 0 | return 0; |
5127 | 0 | } |
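/* The populate-and-freeze sequence above, expressed with the low-level
 * wrappers for a standalone map (hypothetical map name): global-data maps
 * are single-entry arrays written at key 0, and .rodata/.kconfig maps are
 * then frozen so the syscall side becomes read-only:
 */
#include <bpf/bpf.h>

static int create_frozen_blob(const void *blob, __u32 size)
{
        int fd, err, zero = 0;

        fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "my_rodata",
                            sizeof(int), size, 1, NULL);
        if (fd < 0)
                return fd;
        err = bpf_map_update_elem(fd, &zero, blob, 0);
        if (!err)
                err = bpf_map_freeze(fd); /* later syscall writes now fail */
        return err ?: fd;
}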
5128 | | |
5129 | | static void bpf_map__destroy(struct bpf_map *map); |
5130 | | |
5131 | | static bool map_is_created(const struct bpf_map *map) |
5132 | 0 | { |
5133 | 0 | return map->obj->loaded || map->reused; |
5134 | 0 | } |
5135 | | |
5136 | | static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner) |
5137 | 0 | { |
5138 | 0 | LIBBPF_OPTS(bpf_map_create_opts, create_attr); |
5139 | 0 | struct bpf_map_def *def = &map->def; |
5140 | 0 | const char *map_name = NULL; |
5141 | 0 | int err = 0, map_fd; |
5142 | |
5143 | 0 | if (kernel_supports(obj, FEAT_PROG_NAME)) |
5144 | 0 | map_name = map->name; |
5145 | 0 | create_attr.map_ifindex = map->map_ifindex; |
5146 | 0 | create_attr.map_flags = def->map_flags; |
5147 | 0 | create_attr.numa_node = map->numa_node; |
5148 | 0 | create_attr.map_extra = map->map_extra; |
5149 | 0 | create_attr.token_fd = obj->token_fd; |
5150 | 0 | if (obj->token_fd) |
5151 | 0 | create_attr.map_flags |= BPF_F_TOKEN_FD; |
5152 | |
5153 | 0 | if (bpf_map__is_struct_ops(map)) { |
5154 | 0 | create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; |
5155 | 0 | if (map->mod_btf_fd >= 0) { |
5156 | 0 | create_attr.value_type_btf_obj_fd = map->mod_btf_fd; |
5157 | 0 | create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD; |
5158 | 0 | } |
5159 | 0 | } |
5160 | |
5161 | 0 | if (obj->btf && btf__fd(obj->btf) >= 0) { |
5162 | 0 | create_attr.btf_fd = btf__fd(obj->btf); |
5163 | 0 | create_attr.btf_key_type_id = map->btf_key_type_id; |
5164 | 0 | create_attr.btf_value_type_id = map->btf_value_type_id; |
5165 | 0 | } |
5166 | |
5167 | 0 | if (bpf_map_type__is_map_in_map(def->type)) { |
5168 | 0 | if (map->inner_map) { |
5169 | 0 | err = map_set_def_max_entries(map->inner_map); |
5170 | 0 | if (err) |
5171 | 0 | return err; |
5172 | 0 | err = bpf_object__create_map(obj, map->inner_map, true); |
5173 | 0 | if (err) { |
5174 | 0 | pr_warn("map '%s': failed to create inner map: %d\n", |
5175 | 0 | map->name, err); |
5176 | 0 | return err; |
5177 | 0 | } |
5178 | 0 | map->inner_map_fd = map->inner_map->fd; |
5179 | 0 | } |
5180 | 0 | if (map->inner_map_fd >= 0) |
5181 | 0 | create_attr.inner_map_fd = map->inner_map_fd; |
5182 | 0 | } |
5183 | | |
5184 | 0 | switch (def->type) { |
5185 | 0 | case BPF_MAP_TYPE_PERF_EVENT_ARRAY: |
5186 | 0 | case BPF_MAP_TYPE_CGROUP_ARRAY: |
5187 | 0 | case BPF_MAP_TYPE_STACK_TRACE: |
5188 | 0 | case BPF_MAP_TYPE_ARRAY_OF_MAPS: |
5189 | 0 | case BPF_MAP_TYPE_HASH_OF_MAPS: |
5190 | 0 | case BPF_MAP_TYPE_DEVMAP: |
5191 | 0 | case BPF_MAP_TYPE_DEVMAP_HASH: |
5192 | 0 | case BPF_MAP_TYPE_CPUMAP: |
5193 | 0 | case BPF_MAP_TYPE_XSKMAP: |
5194 | 0 | case BPF_MAP_TYPE_SOCKMAP: |
5195 | 0 | case BPF_MAP_TYPE_SOCKHASH: |
5196 | 0 | case BPF_MAP_TYPE_QUEUE: |
5197 | 0 | case BPF_MAP_TYPE_STACK: |
5198 | 0 | case BPF_MAP_TYPE_ARENA: |
5199 | 0 | create_attr.btf_fd = 0; |
5200 | 0 | create_attr.btf_key_type_id = 0; |
5201 | 0 | create_attr.btf_value_type_id = 0; |
5202 | 0 | map->btf_key_type_id = 0; |
5203 | 0 | map->btf_value_type_id = 0; |
5204 | 0 | break; |
5205 | 0 | case BPF_MAP_TYPE_STRUCT_OPS: |
5206 | 0 | create_attr.btf_value_type_id = 0; |
5207 | 0 | break; |
5208 | 0 | default: |
5209 | 0 | break; |
5210 | 0 | } |
5211 | | |
5212 | 0 | if (obj->gen_loader) { |
5213 | 0 | bpf_gen__map_create(obj->gen_loader, def->type, map_name, |
5214 | 0 | def->key_size, def->value_size, def->max_entries, |
5215 | 0 | &create_attr, is_inner ? -1 : map - obj->maps); |
5216 | | /* We keep pretending we have a valid FD to pass various fd >= 0
5217 | | * checks by just keeping original placeholder FDs in place. |
5218 | | * See bpf_object__add_map() comment. |
5219 | | * This placeholder fd will not be used with any syscall and |
5220 | | * will be reset to -1 eventually. |
5221 | | */ |
5222 | 0 | map_fd = map->fd; |
5223 | 0 | } else { |
5224 | 0 | map_fd = bpf_map_create(def->type, map_name, |
5225 | 0 | def->key_size, def->value_size, |
5226 | 0 | def->max_entries, &create_attr); |
5227 | 0 | } |
5228 | 0 | if (map_fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) { |
5229 | 0 | char *cp, errmsg[STRERR_BUFSIZE]; |
5230 | |
5231 | 0 | err = -errno; |
5232 | 0 | cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); |
5233 | 0 | pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", |
5234 | 0 | map->name, cp, err); |
5235 | 0 | create_attr.btf_fd = 0; |
5236 | 0 | create_attr.btf_key_type_id = 0; |
5237 | 0 | create_attr.btf_value_type_id = 0; |
5238 | 0 | map->btf_key_type_id = 0; |
5239 | 0 | map->btf_value_type_id = 0; |
5240 | 0 | map_fd = bpf_map_create(def->type, map_name, |
5241 | 0 | def->key_size, def->value_size, |
5242 | 0 | def->max_entries, &create_attr); |
5243 | 0 | } |
5244 | |
|
5245 | 0 | if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) { |
5246 | 0 | if (obj->gen_loader) |
5247 | 0 | map->inner_map->fd = -1; |
5248 | 0 | bpf_map__destroy(map->inner_map); |
5249 | 0 | zfree(&map->inner_map); |
5250 | 0 | } |
5251 | |
5252 | 0 | if (map_fd < 0) |
5253 | 0 | return map_fd; |
5254 | | |
5255 | | /* obj->gen_loader case, prevent reuse_fd() from closing map_fd */ |
5256 | 0 | if (map->fd == map_fd) |
5257 | 0 | return 0; |
5258 | | |
5259 | | /* Keep placeholder FD value but now point it to the BPF map object. |
5260 | | * This way everything that relied on this map's FD (e.g., relocated |
5261 | | * ldimm64 instructions) will stay valid and won't need adjustments. |
5262 | | * map->fd stays valid but now points to what map_fd points to.
5263 | | */ |
5264 | 0 | return reuse_fd(map->fd, map_fd); |
5265 | 0 | } |
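/* For the map-in-map types handled above, raw creation needs a template
 * inner map FD at create time, mirroring the inner_map_fd plumbing in
 * bpf_object__create_map(). Standalone sketch:
 */
#include <unistd.h>
#include <bpf/bpf.h>

static int create_hash_of_maps(void)
{
        LIBBPF_OPTS(bpf_map_create_opts, opts);
        int inner_fd, outer_fd;

        inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner", 4, 4, 1, NULL);
        if (inner_fd < 0)
                return inner_fd;
        opts.inner_map_fd = inner_fd;
        outer_fd = bpf_map_create(BPF_MAP_TYPE_HASH_OF_MAPS, "outer",
                                  4, 4, 16, &opts);
        close(inner_fd); /* only a template; kernel keeps the value type */
        return outer_fd;
}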
5266 | | |
5267 | | static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map) |
5268 | 0 | { |
5269 | 0 | const struct bpf_map *targ_map; |
5270 | 0 | unsigned int i; |
5271 | 0 | int fd, err = 0; |
5272 | |
5273 | 0 | for (i = 0; i < map->init_slots_sz; i++) { |
5274 | 0 | if (!map->init_slots[i]) |
5275 | 0 | continue; |
5276 | | |
5277 | 0 | targ_map = map->init_slots[i]; |
5278 | 0 | fd = targ_map->fd; |
5279 | |
5280 | 0 | if (obj->gen_loader) { |
5281 | 0 | bpf_gen__populate_outer_map(obj->gen_loader, |
5282 | 0 | map - obj->maps, i, |
5283 | 0 | targ_map - obj->maps); |
5284 | 0 | } else { |
5285 | 0 | err = bpf_map_update_elem(map->fd, &i, &fd, 0); |
5286 | 0 | } |
5287 | 0 | if (err) { |
5288 | 0 | err = -errno; |
5289 | 0 | pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n", |
5290 | 0 | map->name, i, targ_map->name, fd, err); |
5291 | 0 | return err; |
5292 | 0 | } |
5293 | 0 | pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n", |
5294 | 0 | map->name, i, targ_map->name, fd); |
5295 | 0 | } |
5296 | | |
5297 | 0 | zfree(&map->init_slots); |
5298 | 0 | map->init_slots_sz = 0; |
5299 | |
5300 | 0 | return 0; |
5301 | 0 | } |
5302 | | |
5303 | | static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map) |
5304 | 0 | { |
5305 | 0 | const struct bpf_program *targ_prog; |
5306 | 0 | unsigned int i; |
5307 | 0 | int fd, err; |
5308 | |
5309 | 0 | if (obj->gen_loader) |
5310 | 0 | return -ENOTSUP; |
5311 | | |
5312 | 0 | for (i = 0; i < map->init_slots_sz; i++) { |
5313 | 0 | if (!map->init_slots[i]) |
5314 | 0 | continue; |
5315 | | |
5316 | 0 | targ_prog = map->init_slots[i]; |
5317 | 0 | fd = bpf_program__fd(targ_prog); |
5318 | |
5319 | 0 | err = bpf_map_update_elem(map->fd, &i, &fd, 0); |
5320 | 0 | if (err) { |
5321 | 0 | err = -errno; |
5322 | 0 | pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n", |
5323 | 0 | map->name, i, targ_prog->name, fd, err); |
5324 | 0 | return err; |
5325 | 0 | } |
5326 | 0 | pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n", |
5327 | 0 | map->name, i, targ_prog->name, fd); |
5328 | 0 | } |
5329 | | |
5330 | 0 | zfree(&map->init_slots); |
5331 | 0 | map->init_slots_sz = 0; |
5332 | |
5333 | 0 | return 0; |
5334 | 0 | } |
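/* The slot initialization above is driven by declarative initializers on
 * the BPF side; a PROG_ARRAY with preset tail-call targets looks like this
 * sketch (BPF-side C, hypothetical program names):
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc") int tail_a(struct __sk_buff *skb) { return 0; }
SEC("tc") int tail_b(struct __sk_buff *skb) { return 0; }

struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 2);
        __uint(key_size, sizeof(__u32));
        __array(values, int (void *));
} prog_table SEC(".maps") = {
        .values = {
                [0] = (void *)&tail_a,
                [1] = (void *)&tail_b,
        },
};

char LICENSE[] SEC("license") = "GPL";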
5335 | | |
5336 | | static int bpf_object_init_prog_arrays(struct bpf_object *obj) |
5337 | 0 | { |
5338 | 0 | struct bpf_map *map; |
5339 | 0 | int i, err; |
5340 | |
5341 | 0 | for (i = 0; i < obj->nr_maps; i++) { |
5342 | 0 | map = &obj->maps[i]; |
5343 | |
5344 | 0 | if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY) |
5345 | 0 | continue; |
5346 | | |
5347 | 0 | err = init_prog_array_slots(obj, map); |
5348 | 0 | if (err < 0) |
5349 | 0 | return err; |
5350 | 0 | } |
5351 | 0 | return 0; |
5352 | 0 | } |
5353 | | |
5354 | | static int map_set_def_max_entries(struct bpf_map *map) |
5355 | 0 | { |
5356 | 0 | if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) { |
5357 | 0 | int nr_cpus; |
5358 | |
5359 | 0 | nr_cpus = libbpf_num_possible_cpus(); |
5360 | 0 | if (nr_cpus < 0) { |
5361 | 0 | pr_warn("map '%s': failed to determine number of system CPUs: %d\n", |
5362 | 0 | map->name, nr_cpus); |
5363 | 0 | return nr_cpus; |
5364 | 0 | } |
5365 | 0 | pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus); |
5366 | 0 | map->def.max_entries = nr_cpus; |
5367 | 0 | } |
5368 | | |
5369 | 0 | return 0; |
5370 | 0 | } |
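/* The auto-sizing above lets BPF-side code declare a perf event array
 * without hardcoding the CPU count: leaving max_entries at 0 makes libbpf
 * fill in libbpf_num_possible_cpus() at load time. BPF-side sketch:
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(int));
        /* no max_entries: sized to the number of possible CPUs at load */
} events SEC(".maps");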
5371 | | |
5372 | | static int |
5373 | | bpf_object__create_maps(struct bpf_object *obj) |
5374 | 0 | { |
5375 | 0 | struct bpf_map *map; |
5376 | 0 | char *cp, errmsg[STRERR_BUFSIZE]; |
5377 | 0 | unsigned int i, j; |
5378 | 0 | int err; |
5379 | 0 | bool retried; |
5380 | |
5381 | 0 | for (i = 0; i < obj->nr_maps; i++) { |
5382 | 0 | map = &obj->maps[i]; |
5383 | | |
5384 | | /* To support old kernels, we skip creating global data maps |
5385 | | * (.rodata, .data, .kconfig, etc); later on, during program |
5386 | | * loading, if we detect that at least one of the to-be-loaded |
5387 | | * programs is referencing any global data map, we'll error |
5388 | | * out with program name and relocation index logged. |
5389 | | * This approach allows us to accommodate Clang emitting
5390 | | * unnecessary .rodata.str1.1 sections for string literals,
5391 | | * and it also allows CO-RE applications to use global
5392 | | * variables in some BPF programs, but not others.
5393 | | * If those global variable-using programs are not loaded at |
5394 | | * runtime due to bpf_program__set_autoload(prog, false), |
5395 | | * bpf_object loading will succeed just fine even on old |
5396 | | * kernels. |
5397 | | */ |
5398 | 0 | if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA)) |
5399 | 0 | map->autocreate = false; |
5400 | |
5401 | 0 | if (!map->autocreate) { |
5402 | 0 | pr_debug("map '%s': skipped auto-creating...\n", map->name); |
5403 | 0 | continue; |
5404 | 0 | } |
5405 | | |
5406 | 0 | err = map_set_def_max_entries(map); |
5407 | 0 | if (err) |
5408 | 0 | goto err_out; |
5409 | | |
5410 | 0 | retried = false; |
5411 | 0 | retry: |
5412 | 0 | if (map->pin_path) { |
5413 | 0 | err = bpf_object__reuse_map(map); |
5414 | 0 | if (err) { |
5415 | 0 | pr_warn("map '%s': error reusing pinned map\n", |
5416 | 0 | map->name); |
5417 | 0 | goto err_out; |
5418 | 0 | } |
5419 | 0 | if (retried && map->fd < 0) { |
5420 | 0 | pr_warn("map '%s': cannot find pinned map\n", |
5421 | 0 | map->name); |
5422 | 0 | err = -ENOENT; |
5423 | 0 | goto err_out; |
5424 | 0 | } |
5425 | 0 | } |
5426 | | |
5427 | 0 | if (map->reused) { |
5428 | 0 | pr_debug("map '%s': skipping creation (preset fd=%d)\n", |
5429 | 0 | map->name, map->fd); |
5430 | 0 | } else { |
5431 | 0 | err = bpf_object__create_map(obj, map, false); |
5432 | 0 | if (err) |
5433 | 0 | goto err_out; |
5434 | | |
5435 | 0 | pr_debug("map '%s': created successfully, fd=%d\n", |
5436 | 0 | map->name, map->fd); |
5437 | |
5438 | 0 | if (bpf_map__is_internal(map)) { |
5439 | 0 | err = bpf_object__populate_internal_map(obj, map); |
5440 | 0 | if (err < 0) |
5441 | 0 | goto err_out; |
5442 | 0 | } |
5443 | 0 | if (map->def.type == BPF_MAP_TYPE_ARENA) { |
5444 | 0 | map->mmaped = mmap((void *)(long)map->map_extra, |
5445 | 0 | bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE, |
5446 | 0 | map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED, |
5447 | 0 | map->fd, 0); |
5448 | 0 | if (map->mmaped == MAP_FAILED) { |
5449 | 0 | err = -errno; |
5450 | 0 | map->mmaped = NULL; |
5451 | 0 | pr_warn("map '%s': failed to mmap arena: %d\n", |
5452 | 0 | map->name, err); |
5453 | 0 | return err; |
5454 | 0 | } |
5455 | 0 | if (obj->arena_data) { |
5456 | 0 | memcpy(map->mmaped, obj->arena_data, obj->arena_data_sz); |
5457 | 0 | zfree(&obj->arena_data); |
5458 | 0 | } |
5459 | 0 | } |
5460 | 0 | if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) { |
5461 | 0 | err = init_map_in_map_slots(obj, map); |
5462 | 0 | if (err < 0) |
5463 | 0 | goto err_out; |
5464 | 0 | } |
5465 | 0 | } |
5466 | | |
5467 | 0 | if (map->pin_path && !map->pinned) { |
5468 | 0 | err = bpf_map__pin(map, NULL); |
5469 | 0 | if (err) { |
5470 | 0 | if (!retried && err == -EEXIST) { |
5471 | 0 | retried = true; |
5472 | 0 | goto retry; |
5473 | 0 | } |
5474 | 0 | pr_warn("map '%s': failed to auto-pin at '%s': %d\n", |
5475 | 0 | map->name, map->pin_path, err); |
5476 | 0 | goto err_out; |
5477 | 0 | } |
5478 | 0 | } |
5479 | 0 | } |
5480 | | |
5481 | 0 | return 0; |
5482 | | |
5483 | 0 | err_out: |
5484 | 0 | cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); |
5485 | 0 | pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err); |
5486 | 0 | pr_perm_msg(err); |
5487 | 0 | for (j = 0; j < i; j++) |
5488 | 0 | zclose(obj->maps[j].fd); |
5489 | 0 | return err; |
5490 | 0 | } |
5491 | | |
5492 | | static bool bpf_core_is_flavor_sep(const char *s) |
5493 | 34.3k | { |
5494 | | /* check X___Y name pattern, where X and Y are not underscores */ |
5495 | 34.3k | return s[0] != '_' && /* X */ |
5496 | 34.3k | s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */ |
5497 | 34.3k | s[4] != '_'; /* Y */ |
5498 | 34.3k | } |
5499 | | |
5500 | | /* Given 'some_struct_name___with_flavor' return the length of a name prefix |
5501 | | * before last triple underscore. Struct name part after last triple |
5502 | | * underscore is ignored by BPF CO-RE relocation during relocation matching. |
5503 | | */ |
5504 | | size_t bpf_core_essential_name_len(const char *name) |
5505 | 4.44k | { |
5506 | 4.44k | size_t n = strlen(name); |
5507 | 4.44k | int i; |
5508 | | |
5509 | 38.4k | for (i = n - 5; i >= 0; i--) { |
5510 | 34.3k | if (bpf_core_is_flavor_sep(name + i)) |
5511 | 323 | return i + 1; |
5512 | 34.3k | } |
5513 | 4.12k | return n; |
5514 | 4.44k | } |
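/* Worked examples for the essential-name rule above:
 *
 *   "task_struct"            -> 11 (no flavor suffix)
 *   "task_struct___old"      -> 11 ("___old" is ignored for matching)
 *   "thread_struct___v5_4_0" -> 13 (everything after the last "___" dropped)
 *   "x___y___z"              ->  5 (only the last separator counts: "x___y")
 *
 * So a local type task_struct___old is matched against the target type
 * "task_struct" during CO-RE candidate search.
 */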
5515 | | |
5516 | | void bpf_core_free_cands(struct bpf_core_cand_list *cands) |
5517 | 0 | { |
5518 | 0 | if (!cands) |
5519 | 0 | return; |
5520 | | |
5521 | 0 | free(cands->cands); |
5522 | 0 | free(cands); |
5523 | 0 | } |
5524 | | |
5525 | | int bpf_core_add_cands(struct bpf_core_cand *local_cand, |
5526 | | size_t local_essent_len, |
5527 | | const struct btf *targ_btf, |
5528 | | const char *targ_btf_name, |
5529 | | int targ_start_id, |
5530 | | struct bpf_core_cand_list *cands) |
5531 | 0 | { |
5532 | 0 | struct bpf_core_cand *new_cands, *cand; |
5533 | 0 | const struct btf_type *t, *local_t; |
5534 | 0 | const char *targ_name, *local_name; |
5535 | 0 | size_t targ_essent_len; |
5536 | 0 | int n, i; |
5537 | |
|
5538 | 0 | local_t = btf__type_by_id(local_cand->btf, local_cand->id); |
5539 | 0 | local_name = btf__str_by_offset(local_cand->btf, local_t->name_off); |
5540 | |
|
5541 | 0 | n = btf__type_cnt(targ_btf); |
5542 | 0 | for (i = targ_start_id; i < n; i++) { |
5543 | 0 | t = btf__type_by_id(targ_btf, i); |
5544 | 0 | if (!btf_kind_core_compat(t, local_t)) |
5545 | 0 | continue; |
5546 | | |
5547 | 0 | targ_name = btf__name_by_offset(targ_btf, t->name_off); |
5548 | 0 | if (str_is_empty(targ_name)) |
5549 | 0 | continue; |
5550 | | |
5551 | 0 | targ_essent_len = bpf_core_essential_name_len(targ_name); |
5552 | 0 | if (targ_essent_len != local_essent_len) |
5553 | 0 | continue; |
5554 | | |
5555 | 0 | if (strncmp(local_name, targ_name, local_essent_len) != 0) |
5556 | 0 | continue; |
5557 | | |
5558 | 0 | pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n", |
5559 | 0 | local_cand->id, btf_kind_str(local_t), |
5560 | 0 | local_name, i, btf_kind_str(t), targ_name, |
5561 | 0 | targ_btf_name); |
5562 | 0 | new_cands = libbpf_reallocarray(cands->cands, cands->len + 1, |
5563 | 0 | sizeof(*cands->cands)); |
5564 | 0 | if (!new_cands) |
5565 | 0 | return -ENOMEM; |
5566 | | |
5567 | 0 | cand = &new_cands[cands->len]; |
5568 | 0 | cand->btf = targ_btf; |
5569 | 0 | cand->id = i; |
5570 | |
5571 | 0 | cands->cands = new_cands; |
5572 | 0 | cands->len++; |
5573 | 0 | } |
5574 | 0 | return 0; |
5575 | 0 | } |
5576 | | |
5577 | | static int load_module_btfs(struct bpf_object *obj) |
5578 | 0 | { |
5579 | 0 | struct bpf_btf_info info; |
5580 | 0 | struct module_btf *mod_btf; |
5581 | 0 | struct btf *btf; |
5582 | 0 | char name[64]; |
5583 | 0 | __u32 id = 0, len; |
5584 | 0 | int err, fd; |
5585 | |
5586 | 0 | if (obj->btf_modules_loaded) |
5587 | 0 | return 0; |
5588 | | |
5589 | 0 | if (obj->gen_loader) |
5590 | 0 | return 0; |
5591 | | |
5592 | | /* don't do this again, even if we find no module BTFs */ |
5593 | 0 | obj->btf_modules_loaded = true; |
5594 | | |
5595 | | /* kernel too old to support module BTFs */ |
5596 | 0 | if (!kernel_supports(obj, FEAT_MODULE_BTF)) |
5597 | 0 | return 0; |
5598 | | |
5599 | 0 | while (true) { |
5600 | 0 | err = bpf_btf_get_next_id(id, &id); |
5601 | 0 | if (err && errno == ENOENT) |
5602 | 0 | return 0; |
5603 | 0 | if (err && errno == EPERM) { |
5604 | 0 | pr_debug("skipping module BTFs loading, missing privileges\n"); |
5605 | 0 | return 0; |
5606 | 0 | } |
5607 | 0 | if (err) { |
5608 | 0 | err = -errno; |
5609 | 0 | pr_warn("failed to iterate BTF objects: %d\n", err); |
5610 | 0 | return err; |
5611 | 0 | } |
5612 | | |
5613 | 0 | fd = bpf_btf_get_fd_by_id(id); |
5614 | 0 | if (fd < 0) { |
5615 | 0 | if (errno == ENOENT) |
5616 | 0 | continue; /* expected race: BTF was unloaded */ |
5617 | 0 | err = -errno; |
5618 | 0 | pr_warn("failed to get BTF object #%d FD: %d\n", id, err); |
5619 | 0 | return err; |
5620 | 0 | } |
5621 | | |
5622 | 0 | len = sizeof(info); |
5623 | 0 | memset(&info, 0, sizeof(info)); |
5624 | 0 | info.name = ptr_to_u64(name); |
5625 | 0 | info.name_len = sizeof(name); |
5626 | |
5627 | 0 | err = bpf_btf_get_info_by_fd(fd, &info, &len); |
5628 | 0 | if (err) { |
5629 | 0 | err = -errno; |
5630 | 0 | pr_warn("failed to get BTF object #%d info: %d\n", id, err); |
5631 | 0 | goto err_out; |
5632 | 0 | } |
5633 | | |
5634 | | /* ignore non-module BTFs */ |
5635 | 0 | if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) { |
5636 | 0 | close(fd); |
5637 | 0 | continue; |
5638 | 0 | } |
5639 | | |
5640 | 0 | btf = btf_get_from_fd(fd, obj->btf_vmlinux); |
5641 | 0 | err = libbpf_get_error(btf); |
5642 | 0 | if (err) { |
5643 | 0 | pr_warn("failed to load module [%s]'s BTF object #%d: %d\n", |
5644 | 0 | name, id, err); |
5645 | 0 | goto err_out; |
5646 | 0 | } |
5647 | | |
5648 | 0 | err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap, |
5649 | 0 | sizeof(*obj->btf_modules), obj->btf_module_cnt + 1); |
5650 | 0 | if (err) |
5651 | 0 | goto err_out; |
5652 | | |
5653 | 0 | mod_btf = &obj->btf_modules[obj->btf_module_cnt++]; |
5654 | |
5655 | 0 | mod_btf->btf = btf; |
5656 | 0 | mod_btf->id = id; |
5657 | 0 | mod_btf->fd = fd; |
5658 | 0 | mod_btf->name = strdup(name); |
5659 | 0 | if (!mod_btf->name) { |
5660 | 0 | err = -ENOMEM; |
5661 | 0 | goto err_out; |
5662 | 0 | } |
5663 | 0 | continue; |
5664 | | |
5665 | 0 | err_out: |
5666 | 0 | close(fd); |
5667 | 0 | return err; |
5668 | 0 | } |
5669 | | |
5670 | 0 | return 0; |
5671 | 0 | } |
5672 | | |
5673 | | static struct bpf_core_cand_list * |
5674 | | bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id) |
5675 | 0 | { |
5676 | 0 | struct bpf_core_cand local_cand = {}; |
5677 | 0 | struct bpf_core_cand_list *cands; |
5678 | 0 | const struct btf *main_btf; |
5679 | 0 | const struct btf_type *local_t; |
5680 | 0 | const char *local_name; |
5681 | 0 | size_t local_essent_len; |
5682 | 0 | int err, i; |
5683 | |
5684 | 0 | local_cand.btf = local_btf; |
5685 | 0 | local_cand.id = local_type_id; |
5686 | 0 | local_t = btf__type_by_id(local_btf, local_type_id); |
5687 | 0 | if (!local_t) |
5688 | 0 | return ERR_PTR(-EINVAL); |
5689 | | |
5690 | 0 | local_name = btf__name_by_offset(local_btf, local_t->name_off); |
5691 | 0 | if (str_is_empty(local_name)) |
5692 | 0 | return ERR_PTR(-EINVAL); |
5693 | 0 | local_essent_len = bpf_core_essential_name_len(local_name); |
5694 | |
5695 | 0 | cands = calloc(1, sizeof(*cands)); |
5696 | 0 | if (!cands) |
5697 | 0 | return ERR_PTR(-ENOMEM); |
5698 | | |
5699 | | /* Attempt to find target candidates in vmlinux BTF first */ |
5700 | 0 | main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux; |
5701 | 0 | err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands); |
5702 | 0 | if (err) |
5703 | 0 | goto err_out; |
5704 | | |
5705 | | /* if vmlinux BTF has any candidate, don't go for module BTFs */
5706 | 0 | if (cands->len) |
5707 | 0 | return cands; |
5708 | | |
5709 | | /* if vmlinux BTF was overridden, don't attempt to load module BTFs */ |
5710 | 0 | if (obj->btf_vmlinux_override) |
5711 | 0 | return cands; |
5712 | | |
5713 | | /* now look through module BTFs, trying to still find candidates */ |
5714 | 0 | err = load_module_btfs(obj); |
5715 | 0 | if (err) |
5716 | 0 | goto err_out; |
5717 | | |
5718 | 0 | for (i = 0; i < obj->btf_module_cnt; i++) { |
5719 | 0 | err = bpf_core_add_cands(&local_cand, local_essent_len, |
5720 | 0 | obj->btf_modules[i].btf, |
5721 | 0 | obj->btf_modules[i].name, |
5722 | 0 | btf__type_cnt(obj->btf_vmlinux), |
5723 | 0 | cands); |
5724 | 0 | if (err) |
5725 | 0 | goto err_out; |
5726 | 0 | } |
5727 | | |
5728 | 0 | return cands; |
5729 | 0 | err_out: |
5730 | 0 | bpf_core_free_cands(cands); |
5731 | 0 | return ERR_PTR(err); |
5732 | 0 | } |
5733 | | |
5734 | | /* Check local and target types for compatibility. This check is used for |
5735 | | * type-based CO-RE relocations and follows slightly different rules than
5736 | | * field-based relocations. This function assumes that root types were already |
5737 | | * checked for name match. Beyond that initial root-level name check, names |
5738 | | * are completely ignored. Compatibility rules are as follows: |
5739 | | * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but |
5740 | | * kind should match for local and target types (i.e., STRUCT is not |
5741 | | * compatible with UNION); |
5742 | | * - for ENUMs, the size is ignored; |
5743 | | * - for INT, size and signedness are ignored; |
5744 | | * - for ARRAY, dimensionality is ignored, element types are checked for |
5745 | | * compatibility recursively; |
5746 | | * - CONST/VOLATILE/RESTRICT modifiers are ignored; |
5747 | | * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
5748 | | * - FUNC_PROTOs are compatible if they have compatible signature: same |
5749 | | * number of input args and compatible return and argument types. |
5750 | | * These rules are not set in stone and probably will be adjusted as we get |
5751 | | * more experience with using BPF CO-RE relocations. |
5752 | | */ |
5753 | | int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, |
5754 | | const struct btf *targ_btf, __u32 targ_id) |
5755 | 0 | { |
5756 | 0 | return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32); |
5757 | 0 | } |
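/* The rules above are what make CO-RE "flavors" usable in practice: a
 * BPF-side local type defined under a ___suffix name still relocates
 * against the suffix-less kernel type. BPF-side sketch with an
 * illustrative field layout:
 */
struct task_struct___old {
        long state; /* as laid out on some older kernel */
} __attribute__((preserve_access_index));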
5758 | | |
5759 | | int bpf_core_types_match(const struct btf *local_btf, __u32 local_id, |
5760 | | const struct btf *targ_btf, __u32 targ_id) |
5761 | 0 | { |
5762 | 0 | return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32); |
5763 | 0 | } |
5764 | | |
5765 | | static size_t bpf_core_hash_fn(const long key, void *ctx) |
5766 | 0 | { |
5767 | 0 | return key; |
5768 | 0 | } |
5769 | | |
5770 | | static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx) |
5771 | 0 | { |
5772 | 0 | return k1 == k2; |
5773 | 0 | } |
5774 | | |
5775 | | static int record_relo_core(struct bpf_program *prog, |
5776 | | const struct bpf_core_relo *core_relo, int insn_idx) |
5777 | 0 | { |
5778 | 0 | struct reloc_desc *relos, *relo; |
5779 | |
|
5780 | 0 | relos = libbpf_reallocarray(prog->reloc_desc, |
5781 | 0 | prog->nr_reloc + 1, sizeof(*relos)); |
5782 | 0 | if (!relos) |
5783 | 0 | return -ENOMEM; |
5784 | 0 | relo = &relos[prog->nr_reloc]; |
5785 | 0 | relo->type = RELO_CORE; |
5786 | 0 | relo->insn_idx = insn_idx; |
5787 | 0 | relo->core_relo = core_relo; |
5788 | 0 | prog->reloc_desc = relos; |
5789 | 0 | prog->nr_reloc++; |
5790 | 0 | return 0; |
5791 | 0 | } |
5792 | | |
5793 | | static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx) |
5794 | 0 | { |
5795 | 0 | struct reloc_desc *relo; |
5796 | 0 | int i; |
5797 | |
5798 | 0 | for (i = 0; i < prog->nr_reloc; i++) { |
5799 | 0 | relo = &prog->reloc_desc[i]; |
5800 | 0 | if (relo->type != RELO_CORE || relo->insn_idx != insn_idx) |
5801 | 0 | continue; |
5802 | | |
5803 | 0 | return relo->core_relo; |
5804 | 0 | } |
5805 | | |
5806 | 0 | return NULL; |
5807 | 0 | } |
5808 | | |
5809 | | static int bpf_core_resolve_relo(struct bpf_program *prog, |
5810 | | const struct bpf_core_relo *relo, |
5811 | | int relo_idx, |
5812 | | const struct btf *local_btf, |
5813 | | struct hashmap *cand_cache, |
5814 | | struct bpf_core_relo_res *targ_res) |
5815 | 0 | { |
5816 | 0 | struct bpf_core_spec specs_scratch[3] = {}; |
5817 | 0 | struct bpf_core_cand_list *cands = NULL; |
5818 | 0 | const char *prog_name = prog->name; |
5819 | 0 | const struct btf_type *local_type; |
5820 | 0 | const char *local_name; |
5821 | 0 | __u32 local_id = relo->type_id; |
5822 | 0 | int err; |
5823 | |
5824 | 0 | local_type = btf__type_by_id(local_btf, local_id); |
5825 | 0 | if (!local_type) |
5826 | 0 | return -EINVAL; |
5827 | | |
5828 | 0 | local_name = btf__name_by_offset(local_btf, local_type->name_off); |
5829 | 0 | if (!local_name) |
5830 | 0 | return -EINVAL; |
5831 | | |
5832 | 0 | if (relo->kind != BPF_CORE_TYPE_ID_LOCAL && |
5833 | 0 | !hashmap__find(cand_cache, local_id, &cands)) { |
5834 | 0 | cands = bpf_core_find_cands(prog->obj, local_btf, local_id); |
5835 | 0 | if (IS_ERR(cands)) { |
5836 | 0 | pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n", |
5837 | 0 | prog_name, relo_idx, local_id, btf_kind_str(local_type), |
5838 | 0 | local_name, PTR_ERR(cands)); |
5839 | 0 | return PTR_ERR(cands); |
5840 | 0 | } |
5841 | 0 | err = hashmap__set(cand_cache, local_id, cands, NULL, NULL); |
5842 | 0 | if (err) { |
5843 | 0 | bpf_core_free_cands(cands); |
5844 | 0 | return err; |
5845 | 0 | } |
5846 | 0 | } |
5847 | | |
5848 | 0 | return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch, |
5849 | 0 | targ_res); |
5850 | 0 | } |
5851 | | |
5852 | | static int |
5853 | | bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) |
5854 | 0 | { |
5855 | 0 | const struct btf_ext_info_sec *sec; |
5856 | 0 | struct bpf_core_relo_res targ_res; |
5857 | 0 | const struct bpf_core_relo *rec; |
5858 | 0 | const struct btf_ext_info *seg; |
5859 | 0 | struct hashmap_entry *entry; |
5860 | 0 | struct hashmap *cand_cache = NULL; |
5861 | 0 | struct bpf_program *prog; |
5862 | 0 | struct bpf_insn *insn; |
5863 | 0 | const char *sec_name; |
5864 | 0 | int i, err = 0, insn_idx, sec_idx, sec_num; |
5865 | |
5866 | 0 | if (obj->btf_ext->core_relo_info.len == 0) |
5867 | 0 | return 0; |
5868 | | |
5869 | 0 | if (targ_btf_path) { |
5870 | 0 | obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL); |
5871 | 0 | err = libbpf_get_error(obj->btf_vmlinux_override); |
5872 | 0 | if (err) { |
5873 | 0 | pr_warn("failed to parse target BTF: %d\n", err); |
5874 | 0 | return err; |
5875 | 0 | } |
5876 | 0 | } |
5877 | | |
5878 | 0 | cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL); |
5879 | 0 | if (IS_ERR(cand_cache)) { |
5880 | 0 | err = PTR_ERR(cand_cache); |
5881 | 0 | goto out; |
5882 | 0 | } |
5883 | | |
5884 | 0 | seg = &obj->btf_ext->core_relo_info; |
5885 | 0 | sec_num = 0; |
5886 | 0 | for_each_btf_ext_sec(seg, sec) { |
5887 | 0 | sec_idx = seg->sec_idxs[sec_num]; |
5888 | 0 | sec_num++; |
5889 | |
5890 | 0 | sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); |
5891 | 0 | if (str_is_empty(sec_name)) { |
5892 | 0 | err = -EINVAL; |
5893 | 0 | goto out; |
5894 | 0 | } |
5895 | | |
5896 | 0 | pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info); |
5897 | |
5898 | 0 | for_each_btf_ext_rec(seg, sec, i, rec) { |
5899 | 0 | if (rec->insn_off % BPF_INSN_SZ) |
5900 | 0 | return -EINVAL; |
5901 | 0 | insn_idx = rec->insn_off / BPF_INSN_SZ; |
5902 | 0 | prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); |
5903 | 0 | if (!prog) { |
5904 | | /* When a __weak subprog is "overridden" by another instance
5905 | | * of the subprog from a different object file, the linker still
5906 | | * appends all the .BTF.ext info that used to belong to that
5907 | | * eliminated subprogram.
5908 | | * This is similar to what the x86-64 linker does for relocations.
5909 | | * So just ignore such relocations, just like we ignore
5910 | | * subprog instructions when discovering subprograms.
5911 | | */ |
5912 | 0 | pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n", |
5913 | 0 | sec_name, i, insn_idx); |
5914 | 0 | continue; |
5915 | 0 | } |
5916 | | /* no need to apply CO-RE relocation if the program is |
5917 | | * not going to be loaded |
5918 | | */ |
5919 | 0 | if (!prog->autoload) |
5920 | 0 | continue; |
5921 | | |
5922 | | /* adjust insn_idx from section frame of reference to the local |
5923 | | * program's frame of reference; (sub-)program code is not yet |
5924 | | * relocated, so it's enough to just subtract in-section offset |
5925 | | */ |
5926 | 0 | insn_idx = insn_idx - prog->sec_insn_off; |
5927 | 0 | if (insn_idx >= prog->insns_cnt) |
5928 | 0 | return -EINVAL; |
5929 | 0 | insn = &prog->insns[insn_idx]; |
5930 | |
5931 | 0 | err = record_relo_core(prog, rec, insn_idx); |
5932 | 0 | if (err) { |
5933 | 0 | pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n", |
5934 | 0 | prog->name, i, err); |
5935 | 0 | goto out; |
5936 | 0 | } |
5937 | | |
5938 | 0 | if (prog->obj->gen_loader) |
5939 | 0 | continue; |
5940 | | |
5941 | 0 | err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res); |
5942 | 0 | if (err) { |
5943 | 0 | pr_warn("prog '%s': relo #%d: failed to relocate: %d\n", |
5944 | 0 | prog->name, i, err); |
5945 | 0 | goto out; |
5946 | 0 | } |
5947 | | |
5948 | 0 | err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res); |
5949 | 0 | if (err) { |
5950 | 0 | pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n", |
5951 | 0 | prog->name, i, insn_idx, err); |
5952 | 0 | goto out; |
5953 | 0 | } |
5954 | 0 | } |
5955 | 0 | } |
5956 | | |
5957 | 0 | out: |
5958 | | /* obj->btf_vmlinux and module BTFs are freed after object load */ |
5959 | 0 | btf__free(obj->btf_vmlinux_override); |
5960 | 0 | obj->btf_vmlinux_override = NULL; |
5961 | |
5962 | 0 | if (!IS_ERR_OR_NULL(cand_cache)) { |
5963 | 0 | hashmap__for_each_entry(cand_cache, entry, i) { |
5964 | 0 | bpf_core_free_cands(entry->pvalue); |
5965 | 0 | } |
5966 | 0 | hashmap__free(cand_cache); |
5967 | 0 | } |
5968 | 0 | return err; |
5969 | 0 | } |
5970 | | |
5971 | | /* base map load ldimm64 special constant, used also for log fixup logic */ |
5972 | 0 | #define POISON_LDIMM64_MAP_BASE 2001000000 |
5973 | | #define POISON_LDIMM64_MAP_PFX "200100" |
5974 | | |
5975 | | static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx, |
5976 | | int insn_idx, struct bpf_insn *insn, |
5977 | | int map_idx, const struct bpf_map *map) |
5978 | 0 | { |
5979 | 0 | int i; |
5980 | |
5981 | 0 | pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n", |
5982 | 0 | prog->name, relo_idx, insn_idx, map_idx, map->name); |
5983 | | |
5984 | |	/* we turn a single ldimm64 into two identical invalid calls */
5985 | 0 | for (i = 0; i < 2; i++) { |
5986 | 0 | insn->code = BPF_JMP | BPF_CALL; |
5987 | 0 | insn->dst_reg = 0; |
5988 | 0 | insn->src_reg = 0; |
5989 | 0 | insn->off = 0; |
5990 | |		/* if this instruction is reachable (not dead code), the
5991 | |		 * verifier will complain with something like:
5992 | |		 *   invalid func unknown#2001000123
5993 | |		 * where the lower 123 is the map index into the obj->maps[] array
5994 | | */ |
5995 | 0 | insn->imm = POISON_LDIMM64_MAP_BASE + map_idx; |
5996 | |
5997 | 0 | insn++; |
5998 | 0 | } |
5999 | 0 | } |
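 | |/* For illustration (a sketch, not kernel-verified output): with map_idx 3,
 | | * both poisoned calls get imm = 2001000003, and a reachable instruction
 | | * produces a verifier log line like:
 | | *   invalid func unknown#2001000003
 | | * The log fixup logic keys off the POISON_LDIMM64_MAP_PFX ("200100")
 | | * prefix to point the user at the map that wasn't created.
 | | */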
6000 | | |
6001 | | /* unresolved kfunc call special constant, used also for log fixup logic */ |
6002 | 0 | #define POISON_CALL_KFUNC_BASE 2002000000 |
6003 | | #define POISON_CALL_KFUNC_PFX "2002" |
6004 | | |
6005 | | static void poison_kfunc_call(struct bpf_program *prog, int relo_idx, |
6006 | | int insn_idx, struct bpf_insn *insn, |
6007 | | int ext_idx, const struct extern_desc *ext) |
6008 | 0 | { |
6009 | 0 | pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n", |
6010 | 0 | prog->name, relo_idx, insn_idx, ext->name); |
6011 | | |
6012 | |	/* we turn the kfunc call into an invalid helper call with an identifiable constant */
6013 | 0 | insn->code = BPF_JMP | BPF_CALL; |
6014 | 0 | insn->dst_reg = 0; |
6015 | 0 | insn->src_reg = 0; |
6016 | 0 | insn->off = 0; |
6017 | |	/* if this instruction is reachable (not dead code), the
6018 | |	 * verifier will complain with something like:
6019 | |	 *   invalid func unknown#2002000123
6020 | |	 * where the lower 123 is the extern index into the obj->externs[] array
6021 | | */ |
6022 | 0 | insn->imm = POISON_CALL_KFUNC_BASE + ext_idx; |
6023 | 0 | } |
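 | |/* Analogous illustration (sketch): with ext_idx 5 the call gets
 | | * imm = 2002000005, yielding a verifier log line like
 | | *   invalid func unknown#2002000005
 | | * which the log fixup logic recognizes by the POISON_CALL_KFUNC_PFX
 | | * ("2002") prefix and maps back to the unresolved kfunc.
 | | */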
6024 | | |
6025 | | /* Relocate data references within program code: |
6026 | | * - map references; |
6027 | | * - global variable references; |
6028 | | * - extern references. |
6029 | | */ |
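 | |/* Illustrative sketch of the RELO_LD64 case below ('my_map' is a
 | | * hypothetical name): a C access like
 | | *   bpf_map_lookup_elem(&my_map, &key);
 | | * compiles to a two-slot ldimm64 loading a placeholder map address;
 | | * after relocation, insn[0] carries src_reg = BPF_PSEUDO_MAP_FD and
 | | * imm = map->fd, which is how the kernel resolves the map reference
 | | * at program load time.
 | | */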
6030 | | static int |
6031 | | bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog) |
6032 | 0 | { |
6033 | 0 | int i; |
6034 | |
6035 | 0 | for (i = 0; i < prog->nr_reloc; i++) { |
6036 | 0 | struct reloc_desc *relo = &prog->reloc_desc[i]; |
6037 | 0 | struct bpf_insn *insn = &prog->insns[relo->insn_idx]; |
6038 | 0 | const struct bpf_map *map; |
6039 | 0 | struct extern_desc *ext; |
6040 | |
6041 | 0 | switch (relo->type) { |
6042 | 0 | case RELO_LD64: |
6043 | 0 | map = &obj->maps[relo->map_idx]; |
6044 | 0 | if (obj->gen_loader) { |
6045 | 0 | insn[0].src_reg = BPF_PSEUDO_MAP_IDX; |
6046 | 0 | insn[0].imm = relo->map_idx; |
6047 | 0 | } else if (map->autocreate) { |
6048 | 0 | insn[0].src_reg = BPF_PSEUDO_MAP_FD; |
6049 | 0 | insn[0].imm = map->fd; |
6050 | 0 | } else { |
6051 | 0 | poison_map_ldimm64(prog, i, relo->insn_idx, insn, |
6052 | 0 | relo->map_idx, map); |
6053 | 0 | } |
6054 | 0 | break; |
6055 | 0 | case RELO_DATA: |
6056 | 0 | map = &obj->maps[relo->map_idx]; |
6057 | 0 | insn[1].imm = insn[0].imm + relo->sym_off; |
6058 | 0 | if (obj->gen_loader) { |
6059 | 0 | insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE; |
6060 | 0 | insn[0].imm = relo->map_idx; |
6061 | 0 | } else if (map->autocreate) { |
6062 | 0 | insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; |
6063 | 0 | insn[0].imm = map->fd; |
6064 | 0 | } else { |
6065 | 0 | poison_map_ldimm64(prog, i, relo->insn_idx, insn, |
6066 | 0 | relo->map_idx, map); |
6067 | 0 | } |
6068 | 0 | break; |
6069 | 0 | case RELO_EXTERN_LD64: |
6070 | 0 | ext = &obj->externs[relo->ext_idx]; |
6071 | 0 | if (ext->type == EXT_KCFG) { |
6072 | 0 | if (obj->gen_loader) { |
6073 | 0 | insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE; |
6074 | 0 | insn[0].imm = obj->kconfig_map_idx; |
6075 | 0 | } else { |
6076 | 0 | insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; |
6077 | 0 | insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; |
6078 | 0 | } |
6079 | 0 | insn[1].imm = ext->kcfg.data_off; |
6080 | 0 | } else /* EXT_KSYM */ { |
6081 | 0 | if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */ |
6082 | 0 | insn[0].src_reg = BPF_PSEUDO_BTF_ID; |
6083 | 0 | insn[0].imm = ext->ksym.kernel_btf_id; |
6084 | 0 | insn[1].imm = ext->ksym.kernel_btf_obj_fd; |
6085 | 0 | } else { /* typeless ksyms or unresolved typed ksyms */ |
6086 | 0 | insn[0].imm = (__u32)ext->ksym.addr; |
6087 | 0 | insn[1].imm = ext->ksym.addr >> 32; |
6088 | 0 | } |
6089 | 0 | } |
6090 | 0 | break; |
6091 | 0 | case RELO_EXTERN_CALL: |
6092 | 0 | ext = &obj->externs[relo->ext_idx]; |
6093 | 0 | insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL; |
6094 | 0 | if (ext->is_set) { |
6095 | 0 | insn[0].imm = ext->ksym.kernel_btf_id; |
6096 | 0 | insn[0].off = ext->ksym.btf_fd_idx; |
6097 | 0 | } else { /* unresolved weak kfunc call */ |
6098 | 0 | poison_kfunc_call(prog, i, relo->insn_idx, insn, |
6099 | 0 | relo->ext_idx, ext); |
6100 | 0 | } |
6101 | 0 | break; |
6102 | 0 | case RELO_SUBPROG_ADDR: |
6103 | 0 | if (insn[0].src_reg != BPF_PSEUDO_FUNC) { |
6104 | 0 | pr_warn("prog '%s': relo #%d: bad insn\n", |
6105 | 0 | prog->name, i); |
6106 | 0 | return -EINVAL; |
6107 | 0 | } |
6108 | | /* handled already */ |
6109 | 0 | break; |
6110 | 0 | case RELO_CALL: |
6111 | | /* handled already */ |
6112 | 0 | break; |
6113 | 0 | case RELO_CORE: |
6114 | | /* will be handled by bpf_program_record_relos() */ |
6115 | 0 | break; |
6116 | 0 | default: |
6117 | 0 | pr_warn("prog '%s': relo #%d: bad relo type %d\n", |
6118 | 0 | prog->name, i, relo->type); |
6119 | 0 | return -EINVAL; |
6120 | 0 | } |
6121 | 0 | } |
6122 | | |
6123 | 0 | return 0; |
6124 | 0 | } |
6125 | | |
6126 | | static int adjust_prog_btf_ext_info(const struct bpf_object *obj, |
6127 | | const struct bpf_program *prog, |
6128 | | const struct btf_ext_info *ext_info, |
6129 | | void **prog_info, __u32 *prog_rec_cnt, |
6130 | | __u32 *prog_rec_sz) |
6131 | 0 | { |
6132 | 0 | void *copy_start = NULL, *copy_end = NULL; |
6133 | 0 | void *rec, *rec_end, *new_prog_info; |
6134 | 0 | const struct btf_ext_info_sec *sec; |
6135 | 0 | size_t old_sz, new_sz; |
6136 | 0 | int i, sec_num, sec_idx, off_adj; |
6137 | |
6138 | 0 | sec_num = 0; |
6139 | 0 | for_each_btf_ext_sec(ext_info, sec) { |
6140 | 0 | sec_idx = ext_info->sec_idxs[sec_num]; |
6141 | 0 | sec_num++; |
6142 | 0 | if (prog->sec_idx != sec_idx) |
6143 | 0 | continue; |
6144 | | |
6145 | 0 | for_each_btf_ext_rec(ext_info, sec, i, rec) { |
6146 | 0 | __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ; |
6147 | |
6148 | 0 | if (insn_off < prog->sec_insn_off) |
6149 | 0 | continue; |
6150 | 0 | if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt) |
6151 | 0 | break; |
6152 | | |
6153 | 0 | if (!copy_start) |
6154 | 0 | copy_start = rec; |
6155 | 0 | copy_end = rec + ext_info->rec_size; |
6156 | 0 | } |
6157 | |
6158 | 0 | if (!copy_start) |
6159 | 0 | return -ENOENT; |
6160 | | |
6161 | | /* append func/line info of a given (sub-)program to the main |
6162 | |		 * program's func/line info
6163 | | */ |
6164 | 0 | old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size; |
6165 | 0 | new_sz = old_sz + (copy_end - copy_start); |
6166 | 0 | new_prog_info = realloc(*prog_info, new_sz); |
6167 | 0 | if (!new_prog_info) |
6168 | 0 | return -ENOMEM; |
6169 | 0 | *prog_info = new_prog_info; |
6170 | 0 | *prog_rec_cnt = new_sz / ext_info->rec_size; |
6171 | 0 | memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start); |
6172 | | |
6173 | | /* Kernel instruction offsets are in units of 8-byte |
6174 | | * instructions, while .BTF.ext instruction offsets generated |
6175 | | * by Clang are in units of bytes. So convert Clang offsets |
6176 | |		 * into kernel offsets and adjust the offset according to the
6177 | |		 * program's relocated position.
6178 | | */ |
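 | |		/* Worked example (illustrative numbers): a record with a Clang
 | |		 * byte offset of 24 in a subprog with sec_insn_off 3, appended
 | |		 * at sub_insn_off 40, ends up at kernel insn offset
 | |		 * 24 / 8 + (40 - 3) = 40.
 | |		 */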
6179 | 0 | off_adj = prog->sub_insn_off - prog->sec_insn_off; |
6180 | 0 | rec = new_prog_info + old_sz; |
6181 | 0 | rec_end = new_prog_info + new_sz; |
6182 | 0 | for (; rec < rec_end; rec += ext_info->rec_size) { |
6183 | 0 | __u32 *insn_off = rec; |
6184 | |
6185 | 0 | *insn_off = *insn_off / BPF_INSN_SZ + off_adj; |
6186 | 0 | } |
6187 | 0 | *prog_rec_sz = ext_info->rec_size; |
6188 | 0 | return 0; |
6189 | 0 | } |
6190 | | |
6191 | 0 | return -ENOENT; |
6192 | 0 | } |
6193 | | |
6194 | | static int |
6195 | | reloc_prog_func_and_line_info(const struct bpf_object *obj, |
6196 | | struct bpf_program *main_prog, |
6197 | | const struct bpf_program *prog) |
6198 | 0 | { |
6199 | 0 | int err; |
6200 | | |
6201 | | /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't |
6202 | | * support func/line info |
6203 | | */ |
6204 | 0 | if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC)) |
6205 | 0 | return 0; |
6206 | | |
6207 | | /* only attempt func info relocation if main program's func_info |
6208 | | * relocation was successful |
6209 | | */ |
6210 | 0 | if (main_prog != prog && !main_prog->func_info) |
6211 | 0 | goto line_info; |
6212 | | |
6213 | 0 | err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info, |
6214 | 0 | &main_prog->func_info, |
6215 | 0 | &main_prog->func_info_cnt, |
6216 | 0 | &main_prog->func_info_rec_size); |
6217 | 0 | if (err) { |
6218 | 0 | if (err != -ENOENT) { |
6219 | 0 | pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n", |
6220 | 0 | prog->name, err); |
6221 | 0 | return err; |
6222 | 0 | } |
6223 | 0 | if (main_prog->func_info) { |
6224 | | /* |
6225 | |			 * Some info has already been found, but the last
6226 | |			 * btf_ext reloc had a problem, so we must error out.
6227 | | */ |
6228 | 0 | pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name); |
6229 | 0 | return err; |
6230 | 0 | } |
6231 | |		/* We had a problem loading the very first info; ignore the rest. */
6232 | 0 | pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n", |
6233 | 0 | prog->name); |
6234 | 0 | } |
6235 | | |
6236 | 0 | line_info: |
6237 | | /* don't relocate line info if main program's relocation failed */ |
6238 | 0 | if (main_prog != prog && !main_prog->line_info) |
6239 | 0 | return 0; |
6240 | | |
6241 | 0 | err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info, |
6242 | 0 | &main_prog->line_info, |
6243 | 0 | &main_prog->line_info_cnt, |
6244 | 0 | &main_prog->line_info_rec_size); |
6245 | 0 | if (err) { |
6246 | 0 | if (err != -ENOENT) { |
6247 | 0 | pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n", |
6248 | 0 | prog->name, err); |
6249 | 0 | return err; |
6250 | 0 | } |
6251 | 0 | if (main_prog->line_info) { |
6252 | | /* |
6253 | |			 * Some info has already been found, but the last
6254 | |			 * btf_ext reloc had a problem, so we must error out.
6255 | | */ |
6256 | 0 | pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name); |
6257 | 0 | return err; |
6258 | 0 | } |
6259 | |		/* We had a problem loading the very first info; ignore the rest. */
6260 | 0 | pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n", |
6261 | 0 | prog->name); |
6262 | 0 | } |
6263 | 0 | return 0; |
6264 | 0 | } |
6265 | | |
6266 | | static int cmp_relo_by_insn_idx(const void *key, const void *elem) |
6267 | 0 | { |
6268 | 0 | size_t insn_idx = *(const size_t *)key; |
6269 | 0 | const struct reloc_desc *relo = elem; |
6270 | |
6271 | 0 | if (insn_idx == relo->insn_idx) |
6272 | 0 | return 0; |
6273 | 0 | return insn_idx < relo->insn_idx ? -1 : 1; |
6274 | 0 | } |
6275 | | |
6276 | | static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx) |
6277 | 0 | { |
6278 | 0 | if (!prog->nr_reloc) |
6279 | 0 | return NULL; |
6280 | 0 | return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc, |
6281 | 0 | sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx); |
6282 | 0 | } |
6283 | | |
6284 | | static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog) |
6285 | 0 | { |
6286 | 0 | int new_cnt = main_prog->nr_reloc + subprog->nr_reloc; |
6287 | 0 | struct reloc_desc *relos; |
6288 | 0 | int i; |
6289 | |
6290 | 0 | if (main_prog == subprog) |
6291 | 0 | return 0; |
6292 | 0 | relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos)); |
6293 | | /* if new count is zero, reallocarray can return a valid NULL result; |
6294 | | * in this case the previous pointer will be freed, so we *have to* |
6295 | | * reassign old pointer to the new value (even if it's NULL) |
6296 | | */ |
6297 | 0 | if (!relos && new_cnt) |
6298 | 0 | return -ENOMEM; |
6299 | 0 | if (subprog->nr_reloc) |
6300 | 0 | memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc, |
6301 | 0 | sizeof(*relos) * subprog->nr_reloc); |
6302 | |
6303 | 0 | for (i = main_prog->nr_reloc; i < new_cnt; i++) |
6304 | 0 | relos[i].insn_idx += subprog->sub_insn_off; |
6305 | | /* After insn_idx adjustment the 'relos' array is still sorted |
6306 | | * by insn_idx and doesn't break bsearch. |
6307 | | */ |
6308 | 0 | main_prog->reloc_desc = relos; |
6309 | 0 | main_prog->nr_reloc = new_cnt; |
6310 | 0 | return 0; |
6311 | 0 | } |
6312 | | |
6313 | | static int |
6314 | | bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog, |
6315 | | struct bpf_program *subprog) |
6316 | 0 | { |
6317 | 0 | struct bpf_insn *insns; |
6318 | 0 | size_t new_cnt; |
6319 | 0 | int err; |
6320 | |
6321 | 0 | subprog->sub_insn_off = main_prog->insns_cnt; |
6322 | |
6323 | 0 | new_cnt = main_prog->insns_cnt + subprog->insns_cnt; |
6324 | 0 | insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns)); |
6325 | 0 | if (!insns) { |
6326 | 0 | pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name); |
6327 | 0 | return -ENOMEM; |
6328 | 0 | } |
6329 | 0 | main_prog->insns = insns; |
6330 | 0 | main_prog->insns_cnt = new_cnt; |
6331 | |
|
6332 | 0 | memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns, |
6333 | 0 | subprog->insns_cnt * sizeof(*insns)); |
6334 | |
6335 | 0 | pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n", |
6336 | 0 | main_prog->name, subprog->insns_cnt, subprog->name); |
6337 | | |
6338 | | /* The subprog insns are now appended. Append its relos too. */ |
6339 | 0 | err = append_subprog_relos(main_prog, subprog); |
6340 | 0 | if (err) |
6341 | 0 | return err; |
6342 | 0 | return 0; |
6343 | 0 | } |
6344 | | |
6345 | | static int |
6346 | | bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog, |
6347 | | struct bpf_program *prog) |
6348 | 0 | { |
6349 | 0 | size_t sub_insn_idx, insn_idx; |
6350 | 0 | struct bpf_program *subprog; |
6351 | 0 | struct reloc_desc *relo; |
6352 | 0 | struct bpf_insn *insn; |
6353 | 0 | int err; |
6354 | |
6355 | 0 | err = reloc_prog_func_and_line_info(obj, main_prog, prog); |
6356 | 0 | if (err) |
6357 | 0 | return err; |
6358 | | |
6359 | 0 | for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) { |
6360 | 0 | insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; |
6361 | 0 | if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn)) |
6362 | 0 | continue; |
6363 | | |
6364 | 0 | relo = find_prog_insn_relo(prog, insn_idx); |
6365 | 0 | if (relo && relo->type == RELO_EXTERN_CALL) |
6366 | | /* kfunc relocations will be handled later |
6367 | | * in bpf_object__relocate_data() |
6368 | | */ |
6369 | 0 | continue; |
6370 | 0 | if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) { |
6371 | 0 | pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n", |
6372 | 0 | prog->name, insn_idx, relo->type); |
6373 | 0 | return -LIBBPF_ERRNO__RELOC; |
6374 | 0 | } |
6375 | 0 | if (relo) { |
6376 | | /* sub-program instruction index is a combination of |
6377 | | * an offset of a symbol pointed to by relocation and |
6378 | | * call instruction's imm field; for global functions, |
6379 | | * call always has imm = -1, but for static functions |
6380 | | * relocation is against STT_SECTION and insn->imm |
6381 | | * points to a start of a static function |
6382 | | * |
6383 | | * for subprog addr relocation, the relo->sym_off + insn->imm is |
6384 | | * the byte offset in the corresponding section. |
6385 | | */ |
6386 | 0 | if (relo->type == RELO_CALL) |
6387 | 0 | sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1; |
6388 | 0 | else |
6389 | 0 | sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ; |
6390 | 0 | } else if (insn_is_pseudo_func(insn)) { |
6391 | | /* |
6392 | | * RELO_SUBPROG_ADDR relo is always emitted even if both |
6393 | | * functions are in the same section, so it shouldn't reach here. |
6394 | | */ |
6395 | 0 | pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n", |
6396 | 0 | prog->name, insn_idx); |
6397 | 0 | return -LIBBPF_ERRNO__RELOC; |
6398 | 0 | } else { |
6399 | | /* if subprogram call is to a static function within |
6400 | | * the same ELF section, there won't be any relocation |
6401 | | * emitted, but it also means there is no additional |
6402 | |			 * offset necessary; insn->imm is relative to the
6403 | |			 * instruction's original position within the section
6404 | | */ |
6405 | 0 | sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1; |
6406 | 0 | } |
6407 | | |
6408 | |		/* we enforce that sub-programs are in the .text section */
6409 | 0 | subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx); |
6410 | 0 | if (!subprog) { |
6411 | 0 | pr_warn("prog '%s': no .text section found yet sub-program call exists\n", |
6412 | 0 | prog->name); |
6413 | 0 | return -LIBBPF_ERRNO__RELOC; |
6414 | 0 | } |
6415 | | |
6416 | | /* if it's the first call instruction calling into this |
6417 | | * subprogram (meaning this subprog hasn't been processed |
6418 | | * yet) within the context of current main program: |
6419 | |		 *   - append it at the end of the main program's instruction block;
6420 | |		 *   - process it recursively, while the current program is put on hold;
6421 | |		 *   - if that subprogram calls some other not-yet-processed
6422 | |		 *   subprogram, the same thing happens recursively until
6423 | |		 *   there are no more unprocessed subprograms left to append
6424 | | * and relocate. |
6425 | | */ |
6426 | 0 | if (subprog->sub_insn_off == 0) { |
6427 | 0 | err = bpf_object__append_subprog_code(obj, main_prog, subprog); |
6428 | 0 | if (err) |
6429 | 0 | return err; |
6430 | 0 | err = bpf_object__reloc_code(obj, main_prog, subprog); |
6431 | 0 | if (err) |
6432 | 0 | return err; |
6433 | 0 | } |
6434 | | |
6435 | | /* main_prog->insns memory could have been re-allocated, so |
6436 | | * calculate pointer again |
6437 | | */ |
6438 | 0 | insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; |
6439 | | /* calculate correct instruction position within current main |
6440 | | * prog; each main prog can have a different set of |
6441 | | * subprograms appended (potentially in different order as |
6442 | | * well), so position of any subprog can be different for |
6443 | | * different main programs |
6444 | | */ |
6445 | 0 | insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1; |
6446 | |
6447 | 0 | pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n", |
6448 | 0 | prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off); |
6449 | 0 | } |
6450 | | |
6451 | 0 | return 0; |
6452 | 0 | } |
6453 | | |
6454 | | /* |
6455 | | * Relocate sub-program calls. |
6456 | | * |
6457 | | * The algorithm operates as follows. Each entry-point BPF program (referred to
6458 | | * as a main prog) is processed separately. Each subprog (a non-entry function,
6459 | | * which can be called from either entry progs or other subprogs) gets its
6460 | | * sub_insn_off reset to zero. This serves as an indicator that this subprogram
6461 | | * hasn't yet been appended and relocated within the current main prog. Once it's
6462 | | * relocated, sub_insn_off will point at the position within the current main prog
6463 | | * where the given subprog was appended. This will further be used to relocate all
6464 | | * the call instructions jumping into this subprog. |
6465 | | * |
6466 | | * We start with main program and process all call instructions. If the call |
6467 | | * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off |
6468 | | * is zero), subprog instructions are appended at the end of main program's |
6469 | | * instruction array. Then main program is "put on hold" while we recursively |
6470 | | * process newly appended subprogram. If that subprogram calls into another |
6471 | | * subprogram that hasn't been appended, new subprogram is appended again to |
6472 | | * the *main* prog's instructions (subprog's instructions are always left |
6473 | | * untouched, as they need to be in unmodified state for subsequent main progs |
6474 | | * and subprog instructions are always sent only as part of a main prog) and |
6475 | | * the process continues recursively. Once all the subprogs called from a main |
6476 | | * prog or any of its subprogs are appended (and relocated), all their |
6477 | | * positions within finalized instructions array are known, so it's easy to |
6478 | | * rewrite call instructions with correct relative offsets, corresponding to |
6479 | | * desired target subprog. |
6480 | | * |
6481 | | * It's important to realize that some subprogs might not be called from a
6482 | | * given main prog or any of its called/used subprogs. Those will keep their
6483 | | * subprog->sub_insn_off as zero at all times, won't be appended to the current
6484 | | * main prog, and won't be relocated within the context of the current main prog.
6485 | | * They might still be used from other main progs later. |
6486 | | * |
6487 | | * Visually this process can be shown as below. Suppose we have two main |
6488 | | * programs mainA and mainB and BPF object contains three subprogs: subA, |
6489 | | * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and |
6490 | | * subC both call subB: |
6491 | | * |
6492 | | * +--------+ +-------+ |
6493 | | * | v v | |
6494 | | * +--+---+ +--+-+-+ +---+--+ |
6495 | | * | subA | | subB | | subC | |
6496 | | * +--+---+ +------+ +---+--+ |
6497 | | * ^ ^ |
6498 | | * | | |
6499 | | * +---+-------+ +------+----+ |
6500 | | * | mainA | | mainB | |
6501 | | * +-----------+ +-----------+ |
6502 | | * |
6503 | | * We'll start relocating mainA, will find subA, append it and start |
6504 | | * processing subA recursively:
6505 | | * |
6506 | | * +-----------+------+ |
6507 | | * | mainA | subA | |
6508 | | * +-----------+------+ |
6509 | | * |
6510 | | * At this point we notice that subB is used from subA, so we append it and |
6511 | | * relocate (there are no further subcalls from subB): |
6512 | | * |
6513 | | * +-----------+------+------+ |
6514 | | * | mainA | subA | subB | |
6515 | | * +-----------+------+------+ |
6516 | | * |
6517 | | * At this point, we relocate subA calls, then go one level up and finish with |
6518 | | * relocating mainA calls. mainA is done.
6519 | | * |
6520 | | * For mainB the process is similar but results in a different order. We start with
6521 | | * mainB and skip subA and subB, as mainB never calls them (at least |
6522 | | * directly), but we see subC is needed, so we append and start processing it: |
6523 | | * |
6524 | | * +-----------+------+ |
6525 | | * | mainB | subC | |
6526 | | * +-----------+------+ |
6527 | | * Now we see subC needs subB, so we go back to it, append and relocate it: |
6528 | | * |
6529 | | * +-----------+------+------+ |
6530 | | * | mainB | subC | subB | |
6531 | | * +-----------+------+------+ |
6532 | | * |
6533 | | * At this point we unwind recursion, relocate calls in subC, then in mainB. |
6534 | | */ |
6535 | | static int |
6536 | | bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog) |
6537 | 0 | { |
6538 | 0 | struct bpf_program *subprog; |
6539 | 0 | int i, err; |
6540 | | |
6541 | | /* mark all subprogs as not relocated (yet) within the context of |
6542 | | * current main program |
6543 | | */ |
6544 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
6545 | 0 | subprog = &obj->programs[i]; |
6546 | 0 | if (!prog_is_subprog(obj, subprog)) |
6547 | 0 | continue; |
6548 | | |
6549 | 0 | subprog->sub_insn_off = 0; |
6550 | 0 | } |
6551 | |
6552 | 0 | err = bpf_object__reloc_code(obj, prog, prog); |
6553 | 0 | if (err) |
6554 | 0 | return err; |
6555 | | |
6556 | 0 | return 0; |
6557 | 0 | } |
6558 | | |
6559 | | static void |
6560 | | bpf_object__free_relocs(struct bpf_object *obj) |
6561 | 0 | { |
6562 | 0 | struct bpf_program *prog; |
6563 | 0 | int i; |
6564 | | |
6565 | | /* free up relocation descriptors */ |
6566 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
6567 | 0 | prog = &obj->programs[i]; |
6568 | 0 | zfree(&prog->reloc_desc); |
6569 | 0 | prog->nr_reloc = 0; |
6570 | 0 | } |
6571 | 0 | } |
6572 | | |
6573 | | static int cmp_relocs(const void *_a, const void *_b) |
6574 | 4.12k | { |
6575 | 4.12k | const struct reloc_desc *a = _a; |
6576 | 4.12k | const struct reloc_desc *b = _b; |
6577 | | |
6578 | 4.12k | if (a->insn_idx != b->insn_idx) |
6579 | 457 | return a->insn_idx < b->insn_idx ? -1 : 1; |
6580 | | |
6581 | | /* no two relocations should have the same insn_idx, but ... */ |
6582 | 3.66k | if (a->type != b->type) |
6583 | 256 | return a->type < b->type ? -1 : 1; |
6584 | | |
6585 | 3.40k | return 0; |
6586 | 3.66k | } |
6587 | | |
6588 | | static void bpf_object__sort_relos(struct bpf_object *obj) |
6589 | 1.79k | { |
6590 | 1.79k | int i; |
6591 | | |
6592 | 9.45k | for (i = 0; i < obj->nr_programs; i++) { |
6593 | 7.65k | struct bpf_program *p = &obj->programs[i]; |
6594 | | |
6595 | 7.65k | if (!p->nr_reloc) |
6596 | 7.58k | continue; |
6597 | | |
6598 | 73 | qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); |
6599 | 73 | } |
6600 | 1.79k | } |
6601 | | |
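 | |/* Illustrative sketch (hypothetical names) of the BPF-side construct the
 | | * function below scans for; selftests typically wrap the attribute in an
 | | * __exception_cb(name) convenience macro:
 | | *
 | | *   __attribute__((btf_decl_tag("exception_callback:my_cb")))
 | | *   SEC("fentry/foo") int prog_main(void *ctx) { ... }
 | | *
 | | *   __noinline int my_cb(u64 cookie) { return 0; }
 | | *
 | | * where my_cb must be a global, non-hidden subprog, as enforced below.
 | | */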
6602 | | static int bpf_prog_assign_exc_cb(struct bpf_object *obj, struct bpf_program *prog) |
6603 | 0 | { |
6604 | 0 | const char *str = "exception_callback:"; |
6605 | 0 | size_t pfx_len = strlen(str); |
6606 | 0 | int i, j, n; |
6607 | |
6608 | 0 | if (!obj->btf || !kernel_supports(obj, FEAT_BTF_DECL_TAG)) |
6609 | 0 | return 0; |
6610 | | |
6611 | 0 | n = btf__type_cnt(obj->btf); |
6612 | 0 | for (i = 1; i < n; i++) { |
6613 | 0 | const char *name; |
6614 | 0 | struct btf_type *t; |
6615 | |
6616 | 0 | t = btf_type_by_id(obj->btf, i); |
6617 | 0 | if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1) |
6618 | 0 | continue; |
6619 | | |
6620 | 0 | name = btf__str_by_offset(obj->btf, t->name_off); |
6621 | 0 | if (strncmp(name, str, pfx_len) != 0) |
6622 | 0 | continue; |
6623 | | |
6624 | 0 | t = btf_type_by_id(obj->btf, t->type); |
6625 | 0 | if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) { |
6626 | 0 | pr_warn("prog '%s': exception_callback:<value> decl tag not applied to the main program\n", |
6627 | 0 | prog->name); |
6628 | 0 | return -EINVAL; |
6629 | 0 | } |
6630 | 0 | if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)) != 0) |
6631 | 0 | continue; |
6632 | |		/* If multiple callbacks are specified for the same prog,
6633 | |		 * the verifier will eventually return an error for this
6634 | |		 * case, so simply skip appending a subprog.
6635 | | */ |
6636 | 0 | if (prog->exception_cb_idx >= 0) { |
6637 | 0 | prog->exception_cb_idx = -1; |
6638 | 0 | break; |
6639 | 0 | } |
6640 | | |
6641 | 0 | name += pfx_len; |
6642 | 0 | if (str_is_empty(name)) { |
6643 | 0 | pr_warn("prog '%s': exception_callback:<value> decl tag contains empty value\n", |
6644 | 0 | prog->name); |
6645 | 0 | return -EINVAL; |
6646 | 0 | } |
6647 | | |
6648 | 0 | for (j = 0; j < obj->nr_programs; j++) { |
6649 | 0 | struct bpf_program *subprog = &obj->programs[j]; |
6650 | |
6651 | 0 | if (!prog_is_subprog(obj, subprog)) |
6652 | 0 | continue; |
6653 | 0 | if (strcmp(name, subprog->name) != 0) |
6654 | 0 | continue; |
6655 | |			/* Enforce non-hidden, as from the verifier's point
6656 | |			 * of view it expects global functions, whereas
6657 | |			 * mark_btf_static fixes up linkage as static.
6658 | | */ |
6659 | 0 | if (!subprog->sym_global || subprog->mark_btf_static) { |
6660 | 0 | pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n", |
6661 | 0 | prog->name, subprog->name); |
6662 | 0 | return -EINVAL; |
6663 | 0 | } |
6664 | | /* Let's see if we already saw a static exception callback with the same name */ |
6665 | 0 | if (prog->exception_cb_idx >= 0) { |
6666 | 0 | pr_warn("prog '%s': multiple subprogs with same name as exception callback '%s'\n", |
6667 | 0 | prog->name, subprog->name); |
6668 | 0 | return -EINVAL; |
6669 | 0 | } |
6670 | 0 | prog->exception_cb_idx = j; |
6671 | 0 | break; |
6672 | 0 | } |
6673 | | |
6674 | 0 | if (prog->exception_cb_idx >= 0) |
6675 | 0 | continue; |
6676 | | |
6677 | 0 | pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name); |
6678 | 0 | return -ENOENT; |
6679 | 0 | } |
6680 | | |
6681 | 0 | return 0; |
6682 | 0 | } |
6683 | | |
6684 | | static struct { |
6685 | | enum bpf_prog_type prog_type; |
6686 | | const char *ctx_name; |
6687 | | } global_ctx_map[] = { |
6688 | | { BPF_PROG_TYPE_CGROUP_DEVICE, "bpf_cgroup_dev_ctx" }, |
6689 | | { BPF_PROG_TYPE_CGROUP_SKB, "__sk_buff" }, |
6690 | | { BPF_PROG_TYPE_CGROUP_SOCK, "bpf_sock" }, |
6691 | | { BPF_PROG_TYPE_CGROUP_SOCK_ADDR, "bpf_sock_addr" }, |
6692 | | { BPF_PROG_TYPE_CGROUP_SOCKOPT, "bpf_sockopt" }, |
6693 | | { BPF_PROG_TYPE_CGROUP_SYSCTL, "bpf_sysctl" }, |
6694 | | { BPF_PROG_TYPE_FLOW_DISSECTOR, "__sk_buff" }, |
6695 | | { BPF_PROG_TYPE_KPROBE, "bpf_user_pt_regs_t" }, |
6696 | | { BPF_PROG_TYPE_LWT_IN, "__sk_buff" }, |
6697 | | { BPF_PROG_TYPE_LWT_OUT, "__sk_buff" }, |
6698 | | { BPF_PROG_TYPE_LWT_SEG6LOCAL, "__sk_buff" }, |
6699 | | { BPF_PROG_TYPE_LWT_XMIT, "__sk_buff" }, |
6700 | | { BPF_PROG_TYPE_NETFILTER, "bpf_nf_ctx" }, |
6701 | | { BPF_PROG_TYPE_PERF_EVENT, "bpf_perf_event_data" }, |
6702 | | { BPF_PROG_TYPE_RAW_TRACEPOINT, "bpf_raw_tracepoint_args" }, |
6703 | | { BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, "bpf_raw_tracepoint_args" }, |
6704 | | { BPF_PROG_TYPE_SCHED_ACT, "__sk_buff" }, |
6705 | | { BPF_PROG_TYPE_SCHED_CLS, "__sk_buff" }, |
6706 | | { BPF_PROG_TYPE_SK_LOOKUP, "bpf_sk_lookup" }, |
6707 | | { BPF_PROG_TYPE_SK_MSG, "sk_msg_md" }, |
6708 | | { BPF_PROG_TYPE_SK_REUSEPORT, "sk_reuseport_md" }, |
6709 | | { BPF_PROG_TYPE_SK_SKB, "__sk_buff" }, |
6710 | | { BPF_PROG_TYPE_SOCK_OPS, "bpf_sock_ops" }, |
6711 | | { BPF_PROG_TYPE_SOCKET_FILTER, "__sk_buff" }, |
6712 | | { BPF_PROG_TYPE_XDP, "xdp_md" }, |
6713 | | /* all other program types don't have "named" context structs */ |
6714 | | }; |
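 | |/* Illustrative sketch of what this table enables ('handle' is a
 | | * hypothetical name; bpf_helpers.h defines __arg_ctx as
 | | * __attribute__((btf_decl_tag("arg:ctx")))):
 | | *
 | | *   __noinline int handle(struct xdp_md *ctx __arg_ctx)
 | | *   { return ctx->ingress_ifindex; }
 | | *
 | | * On kernels without native arg:ctx support,
 | | * bpf_program_fixup_func_info() below rewrites the subprog's BTF so the
 | | * verifier recognizes the argument as PTR_TO_CTX, matching "xdp_md" for
 | | * BPF_PROG_TYPE_XDP programs.
 | | */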
6715 | | |
6716 | | /* forward declarations for arch-specific underlying types of bpf_user_pt_regs_t typedef, |
6717 | | * for below __builtin_types_compatible_p() checks; |
6718 | | * with this approach we don't need any extra arch-specific #ifdef guards |
6719 | | */ |
6720 | | struct pt_regs; |
6721 | | struct user_pt_regs; |
6722 | | struct user_regs_struct; |
6723 | | |
6724 | | static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_program *prog, |
6725 | | const char *subprog_name, int arg_idx, |
6726 | | int arg_type_id, const char *ctx_name) |
6727 | 0 | { |
6728 | 0 | const struct btf_type *t; |
6729 | 0 | const char *tname; |
6730 | | |
6731 | | /* check if existing parameter already matches verifier expectations */ |
6732 | 0 | t = skip_mods_and_typedefs(btf, arg_type_id, NULL); |
6733 | 0 | if (!btf_is_ptr(t)) |
6734 | 0 | goto out_warn; |
6735 | | |
6736 | | /* typedef bpf_user_pt_regs_t is a special PITA case, valid for kprobe |
6737 | | * and perf_event programs, so check this case early on and forget |
6738 | | * about it for subsequent checks |
6739 | | */ |
6740 | 0 | while (btf_is_mod(t)) |
6741 | 0 | t = btf__type_by_id(btf, t->type); |
6742 | 0 | if (btf_is_typedef(t) && |
6743 | 0 | (prog->type == BPF_PROG_TYPE_KPROBE || prog->type == BPF_PROG_TYPE_PERF_EVENT)) { |
6744 | 0 | tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>"; |
6745 | 0 | if (strcmp(tname, "bpf_user_pt_regs_t") == 0) |
6746 | 0 | return false; /* canonical type for kprobe/perf_event */ |
6747 | 0 | } |
6748 | | |
6749 | | /* now we can ignore typedefs moving forward */ |
6750 | 0 | t = skip_mods_and_typedefs(btf, t->type, NULL); |
6751 | | |
6752 | | /* if it's `void *`, definitely fix up BTF info */ |
6753 | 0 | if (btf_is_void(t)) |
6754 | 0 | return true; |
6755 | | |
6756 | | /* if it's already proper canonical type, no need to fix up */ |
6757 | 0 | tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>"; |
6758 | 0 | if (btf_is_struct(t) && strcmp(tname, ctx_name) == 0) |
6759 | 0 | return false; |
6760 | | |
6761 | | /* special cases */ |
6762 | 0 | switch (prog->type) { |
6763 | 0 | case BPF_PROG_TYPE_KPROBE: |
6764 | | /* `struct pt_regs *` is expected, but we need to fix up */ |
6765 | 0 | if (btf_is_struct(t) && strcmp(tname, "pt_regs") == 0) |
6766 | 0 | return true; |
6767 | 0 | break; |
6768 | 0 | case BPF_PROG_TYPE_PERF_EVENT: |
6769 | 0 | if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) && |
6770 | 0 | btf_is_struct(t) && strcmp(tname, "pt_regs") == 0) |
6771 | 0 | return true; |
6772 | 0 | if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) && |
6773 | 0 | btf_is_struct(t) && strcmp(tname, "user_pt_regs") == 0) |
6774 | 0 | return true; |
6775 | 0 | if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) && |
6776 | 0 | btf_is_struct(t) && strcmp(tname, "user_regs_struct") == 0) |
6777 | 0 | return true; |
6778 | 0 | break; |
6779 | 0 | case BPF_PROG_TYPE_RAW_TRACEPOINT: |
6780 | 0 | case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: |
6781 | | /* allow u64* as ctx */ |
6782 | 0 | if (btf_is_int(t) && t->size == 8) |
6783 | 0 | return true; |
6784 | 0 | break; |
6785 | 0 | default: |
6786 | 0 | break; |
6787 | 0 | } |
6788 | | |
6789 | 0 | out_warn: |
6790 | 0 | pr_warn("prog '%s': subprog '%s' arg#%d is expected to be of `struct %s *` type\n", |
6791 | 0 | prog->name, subprog_name, arg_idx, ctx_name); |
6792 | 0 | return false; |
6793 | 0 | } |
6794 | | |
6795 | | static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_program *prog) |
6796 | 0 | { |
6797 | 0 | int fn_id, fn_proto_id, ret_type_id, orig_proto_id; |
6798 | 0 | int i, err, arg_cnt, fn_name_off, linkage; |
6799 | 0 | struct btf_type *fn_t, *fn_proto_t, *t; |
6800 | 0 | struct btf_param *p; |
6801 | | |
6802 | | /* caller already validated FUNC -> FUNC_PROTO validity */ |
6803 | 0 | fn_t = btf_type_by_id(btf, orig_fn_id); |
6804 | 0 | fn_proto_t = btf_type_by_id(btf, fn_t->type); |
6805 | | |
6806 | | /* Note that each btf__add_xxx() operation invalidates |
6807 | | * all btf_type and string pointers, so we need to be |
6808 | | * very careful when cloning BTF types. BTF type |
6809 | | * pointers have to be always refetched. And to avoid |
6810 | | * problems with invalidated string pointers, we |
6811 | | * add empty strings initially, then just fix up |
6812 | | * name_off offsets in place. Offsets are stable for |
6813 | | * existing strings, so that works out. |
6814 | | */ |
6815 | 0 | fn_name_off = fn_t->name_off; /* we are about to invalidate fn_t */ |
6816 | 0 | linkage = btf_func_linkage(fn_t); |
6817 | 0 | orig_proto_id = fn_t->type; /* original FUNC_PROTO ID */ |
6818 | 0 | ret_type_id = fn_proto_t->type; /* fn_proto_t will be invalidated */ |
6819 | 0 | arg_cnt = btf_vlen(fn_proto_t); |
6820 | | |
6821 | | /* clone FUNC_PROTO and its params */ |
6822 | 0 | fn_proto_id = btf__add_func_proto(btf, ret_type_id); |
6823 | 0 | if (fn_proto_id < 0) |
6824 | 0 | return -EINVAL; |
6825 | | |
6826 | 0 | for (i = 0; i < arg_cnt; i++) { |
6827 | 0 | int name_off; |
6828 | | |
6829 | | /* copy original parameter data */ |
6830 | 0 | t = btf_type_by_id(btf, orig_proto_id); |
6831 | 0 | p = &btf_params(t)[i]; |
6832 | 0 | name_off = p->name_off; |
6833 | |
6834 | 0 | err = btf__add_func_param(btf, "", p->type); |
6835 | 0 | if (err) |
6836 | 0 | return err; |
6837 | | |
6838 | 0 | fn_proto_t = btf_type_by_id(btf, fn_proto_id); |
6839 | 0 | p = &btf_params(fn_proto_t)[i]; |
6840 | 0 | p->name_off = name_off; /* use remembered str offset */ |
6841 | 0 | } |
6842 | | |
6843 | | /* clone FUNC now, btf__add_func() enforces non-empty name, so use |
6844 | | * entry program's name as a placeholder, which we replace immediately |
6845 | | * with original name_off |
6846 | | */ |
6847 | 0 | fn_id = btf__add_func(btf, prog->name, linkage, fn_proto_id); |
6848 | 0 | if (fn_id < 0) |
6849 | 0 | return -EINVAL; |
6850 | | |
6851 | 0 | fn_t = btf_type_by_id(btf, fn_id); |
6852 | 0 | fn_t->name_off = fn_name_off; /* reuse original string */ |
6853 | |
6854 | 0 | return fn_id; |
6855 | 0 | } |
6856 | | |
6857 | |/* Check if the main program's or a global subprog's function prototype has
6858 | | * `arg:ctx` argument tags and, if necessary, substitute the correct type to
6859 | | * match what the BPF verifier expects, taking the specific program type into
6860 | | * account. This allows supporting the __arg_ctx tag transparently on old
6861 | | * kernels that don't yet have native support for it in the verifier, making
6862 | | * the user's life much easier.
6863 | | */ |
6864 | | static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_program *prog) |
6865 | 0 | { |
6866 | 0 | const char *ctx_name = NULL, *ctx_tag = "arg:ctx", *fn_name; |
6867 | 0 | struct bpf_func_info_min *func_rec; |
6868 | 0 | struct btf_type *fn_t, *fn_proto_t; |
6869 | 0 | struct btf *btf = obj->btf; |
6870 | 0 | const struct btf_type *t; |
6871 | 0 | struct btf_param *p; |
6872 | 0 | int ptr_id = 0, struct_id, tag_id, orig_fn_id; |
6873 | 0 | int i, n, arg_idx, arg_cnt, err, rec_idx; |
6874 | 0 | int *orig_ids; |
6875 | | |
6876 | | /* no .BTF.ext, no problem */ |
6877 | 0 | if (!obj->btf_ext || !prog->func_info) |
6878 | 0 | return 0; |
6879 | | |
6880 | | /* don't do any fix ups if kernel natively supports __arg_ctx */ |
6881 | 0 | if (kernel_supports(obj, FEAT_ARG_CTX_TAG)) |
6882 | 0 | return 0; |
6883 | | |
6884 | | /* some BPF program types just don't have named context structs, so |
6885 | | * this fallback mechanism doesn't work for them |
6886 | | */ |
6887 | 0 | for (i = 0; i < ARRAY_SIZE(global_ctx_map); i++) { |
6888 | 0 | if (global_ctx_map[i].prog_type != prog->type) |
6889 | 0 | continue; |
6890 | 0 | ctx_name = global_ctx_map[i].ctx_name; |
6891 | 0 | break; |
6892 | 0 | } |
6893 | 0 | if (!ctx_name) |
6894 | 0 | return 0; |
6895 | | |
6896 | | /* remember original func BTF IDs to detect if we already cloned them */ |
6897 | 0 | orig_ids = calloc(prog->func_info_cnt, sizeof(*orig_ids)); |
6898 | 0 | if (!orig_ids) |
6899 | 0 | return -ENOMEM; |
6900 | 0 | for (i = 0; i < prog->func_info_cnt; i++) { |
6901 | 0 | func_rec = prog->func_info + prog->func_info_rec_size * i; |
6902 | 0 | orig_ids[i] = func_rec->type_id; |
6903 | 0 | } |
6904 | | |
6905 | | /* go through each DECL_TAG with "arg:ctx" and see if it points to one |
6906 | | * of our subprogs; if yes and subprog is global and needs adjustment, |
6907 | | * clone and adjust FUNC -> FUNC_PROTO combo |
6908 | | */ |
6909 | 0 | for (i = 1, n = btf__type_cnt(btf); i < n; i++) { |
6910 | | /* only DECL_TAG with "arg:ctx" value are interesting */ |
6911 | 0 | t = btf__type_by_id(btf, i); |
6912 | 0 | if (!btf_is_decl_tag(t)) |
6913 | 0 | continue; |
6914 | 0 | if (strcmp(btf__str_by_offset(btf, t->name_off), ctx_tag) != 0) |
6915 | 0 | continue; |
6916 | | |
6917 | | /* only global funcs need adjustment, if at all */ |
6918 | 0 | orig_fn_id = t->type; |
6919 | 0 | fn_t = btf_type_by_id(btf, orig_fn_id); |
6920 | 0 | if (!btf_is_func(fn_t) || btf_func_linkage(fn_t) != BTF_FUNC_GLOBAL) |
6921 | 0 | continue; |
6922 | | |
6923 | | /* sanity check FUNC -> FUNC_PROTO chain, just in case */ |
6924 | 0 | fn_proto_t = btf_type_by_id(btf, fn_t->type); |
6925 | 0 | if (!fn_proto_t || !btf_is_func_proto(fn_proto_t)) |
6926 | 0 | continue; |
6927 | | |
6928 | | /* find corresponding func_info record */ |
6929 | 0 | func_rec = NULL; |
6930 | 0 | for (rec_idx = 0; rec_idx < prog->func_info_cnt; rec_idx++) { |
6931 | 0 | if (orig_ids[rec_idx] == t->type) { |
6932 | 0 | func_rec = prog->func_info + prog->func_info_rec_size * rec_idx; |
6933 | 0 | break; |
6934 | 0 | } |
6935 | 0 | } |
6936 | | /* current main program doesn't call into this subprog */ |
6937 | 0 | if (!func_rec) |
6938 | 0 | continue; |
6939 | | |
6940 | | /* some more sanity checking of DECL_TAG */ |
6941 | 0 | arg_cnt = btf_vlen(fn_proto_t); |
6942 | 0 | arg_idx = btf_decl_tag(t)->component_idx; |
6943 | 0 | if (arg_idx < 0 || arg_idx >= arg_cnt) |
6944 | 0 | continue; |
6945 | | |
6946 | | /* check if we should fix up argument type */ |
6947 | 0 | p = &btf_params(fn_proto_t)[arg_idx]; |
6948 | 0 | fn_name = btf__str_by_offset(btf, fn_t->name_off) ?: "<anon>"; |
6949 | 0 | if (!need_func_arg_type_fixup(btf, prog, fn_name, arg_idx, p->type, ctx_name)) |
6950 | 0 | continue; |
6951 | | |
6952 | | /* clone fn/fn_proto, unless we already did it for another arg */ |
6953 | 0 | if (func_rec->type_id == orig_fn_id) { |
6954 | 0 | int fn_id; |
6955 | |
6956 | 0 | fn_id = clone_func_btf_info(btf, orig_fn_id, prog); |
6957 | 0 | if (fn_id < 0) { |
6958 | 0 | err = fn_id; |
6959 | 0 | goto err_out; |
6960 | 0 | } |
6961 | | |
6962 | | /* point func_info record to a cloned FUNC type */ |
6963 | 0 | func_rec->type_id = fn_id; |
6964 | 0 | } |
6965 | | |
6966 | | /* create PTR -> STRUCT type chain to mark PTR_TO_CTX argument; |
6967 | | * we do it just once per main BPF program, as all global |
6968 | |		 * funcs share the same program type, so we need only one
6969 | |		 * PTR -> STRUCT type chain
6970 | | */ |
6971 | 0 | if (ptr_id == 0) { |
6972 | 0 | struct_id = btf__add_struct(btf, ctx_name, 0); |
6973 | 0 | ptr_id = btf__add_ptr(btf, struct_id); |
6974 | 0 | if (ptr_id < 0 || struct_id < 0) { |
6975 | 0 | err = -EINVAL; |
6976 | 0 | goto err_out; |
6977 | 0 | } |
6978 | 0 | } |
6979 | | |
6980 | | /* for completeness, clone DECL_TAG and point it to cloned param */ |
6981 | 0 | tag_id = btf__add_decl_tag(btf, ctx_tag, func_rec->type_id, arg_idx); |
6982 | 0 | if (tag_id < 0) { |
6983 | 0 | err = -EINVAL; |
6984 | 0 | goto err_out; |
6985 | 0 | } |
6986 | | |
6987 | | /* all the BTF manipulations invalidated pointers, refetch them */ |
6988 | 0 | fn_t = btf_type_by_id(btf, func_rec->type_id); |
6989 | 0 | fn_proto_t = btf_type_by_id(btf, fn_t->type); |
6990 | | |
6991 | | /* fix up type ID pointed to by param */ |
6992 | 0 | p = &btf_params(fn_proto_t)[arg_idx]; |
6993 | 0 | p->type = ptr_id; |
6994 | 0 | } |
6995 | | |
6996 | 0 | free(orig_ids); |
6997 | 0 | return 0; |
6998 | 0 | err_out: |
6999 | 0 | free(orig_ids); |
7000 | 0 | return err; |
7001 | 0 | } |
7002 | | |
7003 | | static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) |
7004 | 0 | { |
7005 | 0 | struct bpf_program *prog; |
7006 | 0 | size_t i, j; |
7007 | 0 | int err; |
7008 | |
7009 | 0 | if (obj->btf_ext) { |
7010 | 0 | err = bpf_object__relocate_core(obj, targ_btf_path); |
7011 | 0 | if (err) { |
7012 | 0 | pr_warn("failed to perform CO-RE relocations: %d\n", |
7013 | 0 | err); |
7014 | 0 | return err; |
7015 | 0 | } |
7016 | 0 | bpf_object__sort_relos(obj); |
7017 | 0 | } |
7018 | | |
7019 | |	/* Before relocating calls, pre-process relocations and mark
7020 | |	 * the few ld_imm64 instructions that point to subprogs.
7021 | |	 * Otherwise bpf_object__reloc_code() later would have to consider
7022 | |	 * all ld_imm64 insns as relocation candidates. That would
7023 | |	 * slow down relocation, since the number of find_prog_insn_relo()
7024 | |	 * calls would increase and most of them would fail to find a relo.
7025 | | */ |
7026 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
7027 | 0 | prog = &obj->programs[i]; |
7028 | 0 | for (j = 0; j < prog->nr_reloc; j++) { |
7029 | 0 | struct reloc_desc *relo = &prog->reloc_desc[j]; |
7030 | 0 | struct bpf_insn *insn = &prog->insns[relo->insn_idx]; |
7031 | | |
7032 | | /* mark the insn, so it's recognized by insn_is_pseudo_func() */ |
7033 | 0 | if (relo->type == RELO_SUBPROG_ADDR) |
7034 | 0 | insn[0].src_reg = BPF_PSEUDO_FUNC; |
7035 | 0 | } |
7036 | 0 | } |
7037 | | |
7038 | | /* relocate subprogram calls and append used subprograms to main |
7039 | | * programs; each copy of subprogram code needs to be relocated |
7040 | | * differently for each main program, because its code location might |
7041 | | * have changed. |
7042 | | * Append subprog relos to main programs to allow data relos to be |
7043 | | * processed after text is completely relocated. |
7044 | | */ |
7045 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
7046 | 0 | prog = &obj->programs[i]; |
7047 | | /* sub-program's sub-calls are relocated within the context of |
7048 | | * its main program only |
7049 | | */ |
7050 | 0 | if (prog_is_subprog(obj, prog)) |
7051 | 0 | continue; |
7052 | 0 | if (!prog->autoload) |
7053 | 0 | continue; |
7054 | | |
7055 | 0 | err = bpf_object__relocate_calls(obj, prog); |
7056 | 0 | if (err) { |
7057 | 0 | pr_warn("prog '%s': failed to relocate calls: %d\n", |
7058 | 0 | prog->name, err); |
7059 | 0 | return err; |
7060 | 0 | } |
7061 | | |
7062 | 0 | err = bpf_prog_assign_exc_cb(obj, prog); |
7063 | 0 | if (err) |
7064 | 0 | return err; |
7065 | | /* Now, also append exception callback if it has not been done already. */ |
7066 | 0 | if (prog->exception_cb_idx >= 0) { |
7067 | 0 | struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx]; |
7068 | | |
7069 | |			/* Calling the exception callback directly is disallowed; the
7070 | |			 * verifier will reject it later. If it was already processed,
7071 | |			 * we can skip this step; otherwise, for all other valid cases,
7072 | |			 * we have to append the exception callback now.
7073 | | */ |
7074 | 0 | if (subprog->sub_insn_off == 0) { |
7075 | 0 | err = bpf_object__append_subprog_code(obj, prog, subprog); |
7076 | 0 | if (err) |
7077 | 0 | return err; |
7078 | 0 | err = bpf_object__reloc_code(obj, prog, subprog); |
7079 | 0 | if (err) |
7080 | 0 | return err; |
7081 | 0 | } |
7082 | 0 | } |
7083 | 0 | } |
7084 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
7085 | 0 | prog = &obj->programs[i]; |
7086 | 0 | if (prog_is_subprog(obj, prog)) |
7087 | 0 | continue; |
7088 | 0 | if (!prog->autoload) |
7089 | 0 | continue; |
7090 | | |
7091 | | /* Process data relos for main programs */ |
7092 | 0 | err = bpf_object__relocate_data(obj, prog); |
7093 | 0 | if (err) { |
7094 | 0 | pr_warn("prog '%s': failed to relocate data references: %d\n", |
7095 | 0 | prog->name, err); |
7096 | 0 | return err; |
7097 | 0 | } |
7098 | | |
7099 | | /* Fix up .BTF.ext information, if necessary */ |
7100 | 0 | err = bpf_program_fixup_func_info(obj, prog); |
7101 | 0 | if (err) { |
7102 | 0 | pr_warn("prog '%s': failed to perform .BTF.ext fix ups: %d\n", |
7103 | 0 | prog->name, err); |
7104 | 0 | return err; |
7105 | 0 | } |
7106 | 0 | } |
7107 | | |
7108 | 0 | return 0; |
7109 | 0 | } |
7110 | | |
7111 | | static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, |
7112 | | Elf64_Shdr *shdr, Elf_Data *data); |
7113 | | |
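 | |/* Illustrative sketch (hypothetical names) of the BPF-side construct
 | | * parsed below:
 | | *
 | | *   struct {
 | | *       __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 | | *       __uint(max_entries, 2);
 | | *       __array(values, struct inner_map);
 | | *   } outer SEC(".maps") = {
 | | *       .values = { &inner_a, &inner_b },
 | | *   };
 | | *
 | | * Each &inner_X initializer emits an ELF relocation against the .maps
 | | * section, which is resolved here into map->init_slots[].
 | | */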
7114 | | static int bpf_object__collect_map_relos(struct bpf_object *obj, |
7115 | | Elf64_Shdr *shdr, Elf_Data *data) |
7116 | 0 | { |
7117 | 0 | const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *); |
7118 | 0 | int i, j, nrels, new_sz; |
7119 | 0 | const struct btf_var_secinfo *vi = NULL; |
7120 | 0 | const struct btf_type *sec, *var, *def; |
7121 | 0 | struct bpf_map *map = NULL, *targ_map = NULL; |
7122 | 0 | struct bpf_program *targ_prog = NULL; |
7123 | 0 | bool is_prog_array, is_map_in_map; |
7124 | 0 | const struct btf_member *member; |
7125 | 0 | const char *name, *mname, *type; |
7126 | 0 | unsigned int moff; |
7127 | 0 | Elf64_Sym *sym; |
7128 | 0 | Elf64_Rel *rel; |
7129 | 0 | void *tmp; |
7130 | |
7131 | 0 | if (!obj->efile.btf_maps_sec_btf_id || !obj->btf) |
7132 | 0 | return -EINVAL; |
7133 | 0 | sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id); |
7134 | 0 | if (!sec) |
7135 | 0 | return -EINVAL; |
7136 | | |
7137 | 0 | nrels = shdr->sh_size / shdr->sh_entsize; |
7138 | 0 | for (i = 0; i < nrels; i++) { |
7139 | 0 | rel = elf_rel_by_idx(data, i); |
7140 | 0 | if (!rel) { |
7141 | 0 | pr_warn(".maps relo #%d: failed to get ELF relo\n", i); |
7142 | 0 | return -LIBBPF_ERRNO__FORMAT; |
7143 | 0 | } |
7144 | | |
7145 | 0 | sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); |
7146 | 0 | if (!sym) { |
7147 | 0 | pr_warn(".maps relo #%d: symbol %zx not found\n", |
7148 | 0 | i, (size_t)ELF64_R_SYM(rel->r_info)); |
7149 | 0 | return -LIBBPF_ERRNO__FORMAT; |
7150 | 0 | } |
7151 | 0 | name = elf_sym_str(obj, sym->st_name) ?: "<?>"; |
7152 | |
7153 | 0 | pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n", |
7154 | 0 | i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value, |
7155 | 0 | (size_t)rel->r_offset, sym->st_name, name); |
7156 | |
7157 | 0 | for (j = 0; j < obj->nr_maps; j++) { |
7158 | 0 | map = &obj->maps[j]; |
7159 | 0 | if (map->sec_idx != obj->efile.btf_maps_shndx) |
7160 | 0 | continue; |
7161 | | |
7162 | 0 | vi = btf_var_secinfos(sec) + map->btf_var_idx; |
7163 | 0 | if (vi->offset <= rel->r_offset && |
7164 | 0 | rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size) |
7165 | 0 | break; |
7166 | 0 | } |
7167 | 0 | if (j == obj->nr_maps) { |
7168 | 0 | pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n", |
7169 | 0 | i, name, (size_t)rel->r_offset); |
7170 | 0 | return -EINVAL; |
7171 | 0 | } |
7172 | | |
7173 | 0 | is_map_in_map = bpf_map_type__is_map_in_map(map->def.type); |
7174 | 0 | is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY; |
7175 | 0 | type = is_map_in_map ? "map" : "prog"; |
7176 | 0 | if (is_map_in_map) { |
7177 | 0 | if (sym->st_shndx != obj->efile.btf_maps_shndx) { |
7178 | 0 | pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", |
7179 | 0 | i, name); |
7180 | 0 | return -LIBBPF_ERRNO__RELOC; |
7181 | 0 | } |
7182 | 0 | if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS && |
7183 | 0 | map->def.key_size != sizeof(int)) { |
7184 | 0 | pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n", |
7185 | 0 | i, map->name, sizeof(int)); |
7186 | 0 | return -EINVAL; |
7187 | 0 | } |
7188 | 0 | targ_map = bpf_object__find_map_by_name(obj, name); |
7189 | 0 | if (!targ_map) { |
7190 | 0 | pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n", |
7191 | 0 | i, name); |
7192 | 0 | return -ESRCH; |
7193 | 0 | } |
7194 | 0 | } else if (is_prog_array) { |
7195 | 0 | targ_prog = bpf_object__find_program_by_name(obj, name); |
7196 | 0 | if (!targ_prog) { |
7197 | 0 | pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n", |
7198 | 0 | i, name); |
7199 | 0 | return -ESRCH; |
7200 | 0 | } |
7201 | 0 | if (targ_prog->sec_idx != sym->st_shndx || |
7202 | 0 | targ_prog->sec_insn_off * 8 != sym->st_value || |
7203 | 0 | prog_is_subprog(obj, targ_prog)) { |
7204 | 0 | pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n", |
7205 | 0 | i, name); |
7206 | 0 | return -LIBBPF_ERRNO__RELOC; |
7207 | 0 | } |
7208 | 0 | } else { |
7209 | 0 | return -EINVAL; |
7210 | 0 | } |
7211 | | |
7212 | 0 | var = btf__type_by_id(obj->btf, vi->type); |
7213 | 0 | def = skip_mods_and_typedefs(obj->btf, var->type, NULL); |
7214 | 0 | if (btf_vlen(def) == 0) |
7215 | 0 | return -EINVAL; |
7216 | 0 | member = btf_members(def) + btf_vlen(def) - 1; |
7217 | 0 | mname = btf__name_by_offset(obj->btf, member->name_off); |
7218 | 0 | if (strcmp(mname, "values")) |
7219 | 0 | return -EINVAL; |
7220 | | |
7221 | 0 | moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8; |
7222 | 0 | if (rel->r_offset - vi->offset < moff) |
7223 | 0 | return -EINVAL; |
7224 | | |
7225 | 0 | moff = rel->r_offset - vi->offset - moff; |
7226 | |		/* here we use the BPF pointer size, which is always 64-bit, as we
7227 | |		 * are parsing an ELF that was built for the BPF target
7228 | | */ |
7229 | 0 | if (moff % bpf_ptr_sz) |
7230 | 0 | return -EINVAL; |
7231 | 0 | moff /= bpf_ptr_sz; |
7232 | 0 | if (moff >= map->init_slots_sz) { |
7233 | 0 | new_sz = moff + 1; |
7234 | 0 | tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz); |
7235 | 0 | if (!tmp) |
7236 | 0 | return -ENOMEM; |
7237 | 0 | map->init_slots = tmp; |
7238 | 0 | memset(map->init_slots + map->init_slots_sz, 0, |
7239 | 0 | (new_sz - map->init_slots_sz) * host_ptr_sz); |
7240 | 0 | map->init_slots_sz = new_sz; |
7241 | 0 | } |
7242 | 0 | map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog; |
7243 | |
7244 | 0 | pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n", |
7245 | 0 | i, map->name, moff, type, name); |
7246 | 0 | } |
7247 | | |
7248 | 0 | return 0; |
7249 | 0 | } |
7250 | | |
7251 | | static int bpf_object__collect_relos(struct bpf_object *obj) |
7252 | 2.30k | { |
7253 | 2.30k | int i, err; |
7254 | | |
7255 | 17.1k | for (i = 0; i < obj->efile.sec_cnt; i++) { |
7256 | 15.3k | struct elf_sec_desc *sec_desc = &obj->efile.secs[i]; |
7257 | 15.3k | Elf64_Shdr *shdr; |
7258 | 15.3k | Elf_Data *data; |
7259 | 15.3k | int idx; |
7260 | | |
7261 | 15.3k | if (sec_desc->sec_type != SEC_RELO) |
7262 | 14.2k | continue; |
7263 | | |
7264 | 1.01k | shdr = sec_desc->shdr; |
7265 | 1.01k | data = sec_desc->data; |
7266 | 1.01k | idx = shdr->sh_info; |
7267 | | |
7268 | 1.01k | if (shdr->sh_type != SHT_REL || idx < 0 || idx >= obj->efile.sec_cnt) { |
7269 | 0 | pr_warn("internal error at %d\n", __LINE__); |
7270 | 0 | return -LIBBPF_ERRNO__INTERNAL; |
7271 | 0 | } |
7272 | | |
7273 | 1.01k | if (obj->efile.secs[idx].sec_type == SEC_ST_OPS) |
7274 | 0 | err = bpf_object__collect_st_ops_relos(obj, shdr, data); |
7275 | 1.01k | else if (idx == obj->efile.btf_maps_shndx) |
7276 | 0 | err = bpf_object__collect_map_relos(obj, shdr, data); |
7277 | 1.01k | else |
7278 | 1.01k | err = bpf_object__collect_prog_relos(obj, shdr, data); |
7279 | 1.01k | if (err) |
7280 | 507 | return err; |
7281 | 1.01k | } |
7282 | | |
7283 | 1.79k | bpf_object__sort_relos(obj); |
7284 | 1.79k | return 0; |
7285 | 2.30k | } |
7286 | | |
7287 | | static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id) |
7288 | 0 | { |
7289 | 0 | if (BPF_CLASS(insn->code) == BPF_JMP && |
7290 | 0 | BPF_OP(insn->code) == BPF_CALL && |
7291 | 0 | BPF_SRC(insn->code) == BPF_K && |
7292 | 0 | insn->src_reg == 0 && |
7293 | 0 | insn->dst_reg == 0) { |
7294 | 0 | *func_id = insn->imm; |
7295 | 0 | return true; |
7296 | 0 | } |
7297 | 0 | return false; |
7298 | 0 | } |
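 | | /* Editor's illustration (not part of libbpf.c; a minimal sketch using only
 | |  * uapi <linux/bpf.h> definitions): the kind of instruction that
 | |  * insn_is_helper_call() above matches is BPF_JMP|BPF_CALL with a BPF_K
 | |  * source, both registers zero, and the helper ID in the immediate:
 | |  */
 | | static const struct bpf_insn example_helper_call_insn = {
 | | 	.code = BPF_JMP | BPF_CALL, /* BPF_SRC() of this code is BPF_K (0) */
 | | 	.dst_reg = 0,
 | | 	.src_reg = 0, /* non-zero src_reg (BPF_PSEUDO_CALL) would mark a subprog call instead */
 | | 	.off = 0,
 | | 	.imm = BPF_FUNC_probe_read_kernel, /* enum bpf_func_id, reported via *func_id */
 | | };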
7299 | | |
7300 | | static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog) |
7301 | 0 | { |
7302 | 0 | struct bpf_insn *insn = prog->insns; |
7303 | 0 | enum bpf_func_id func_id; |
7304 | 0 | int i; |
7305 | |
|
7306 | 0 | if (obj->gen_loader) |
7307 | 0 | return 0; |
7308 | | |
7309 | 0 | for (i = 0; i < prog->insns_cnt; i++, insn++) { |
7310 | 0 | if (!insn_is_helper_call(insn, &func_id)) |
7311 | 0 | continue; |
7312 | | |
7313 | | /* on kernels that don't yet support the
7314 | | * bpf_probe_read_{kernel,user}[_str] helpers, fall back
7315 | | * to bpf_probe_read(), which is available on old kernels
7316 | | */
7317 | 0 | switch (func_id) { |
7318 | 0 | case BPF_FUNC_probe_read_kernel: |
7319 | 0 | case BPF_FUNC_probe_read_user: |
7320 | 0 | if (!kernel_supports(obj, FEAT_PROBE_READ_KERN)) |
7321 | 0 | insn->imm = BPF_FUNC_probe_read; |
7322 | 0 | break; |
7323 | 0 | case BPF_FUNC_probe_read_kernel_str: |
7324 | 0 | case BPF_FUNC_probe_read_user_str: |
7325 | 0 | if (!kernel_supports(obj, FEAT_PROBE_READ_KERN)) |
7326 | 0 | insn->imm = BPF_FUNC_probe_read_str; |
7327 | 0 | break; |
7328 | 0 | default: |
7329 | 0 | break; |
7330 | 0 | } |
7331 | 0 | } |
7332 | 0 | return 0; |
7333 | 0 | } |
7334 | | |
7335 | | static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name, |
7336 | | int *btf_obj_fd, int *btf_type_id); |
7337 | | |
7338 | | /* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */ |
7339 | | static int libbpf_prepare_prog_load(struct bpf_program *prog, |
7340 | | struct bpf_prog_load_opts *opts, long cookie) |
7341 | 0 | { |
7342 | 0 | enum sec_def_flags def = cookie; |
7343 | | |
7344 | | /* old kernels might not support specifying expected_attach_type */ |
7345 | 0 | if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE)) |
7346 | 0 | opts->expected_attach_type = 0; |
7347 | |
|
7348 | 0 | if (def & SEC_SLEEPABLE) |
7349 | 0 | opts->prog_flags |= BPF_F_SLEEPABLE; |
7350 | |
|
7351 | 0 | if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS)) |
7352 | 0 | opts->prog_flags |= BPF_F_XDP_HAS_FRAGS; |
7353 | | |
7354 | | /* special-case USDT programs to use the uprobe_multi link when supported */
7355 | 0 | if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK)) |
7356 | 0 | prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI; |
7357 | |
|
7358 | 0 | if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) { |
7359 | 0 | int btf_obj_fd = 0, btf_type_id = 0, err; |
7360 | 0 | const char *attach_name; |
7361 | |
|
7362 | 0 | attach_name = strchr(prog->sec_name, '/'); |
7363 | 0 | if (!attach_name) { |
7364 | | /* if the BPF program is annotated with just SEC("fentry")
7365 | | * (or similar) without declaratively specifying a
7366 | | * target, then the target is expected to be set with
7367 | | * bpf_program__set_attach_target() at runtime, before
7368 | | * the BPF object load step. If it isn't, then there is
7369 | | * nothing to load into the kernel, as the BPF
7370 | | * verifier won't be able to validate BPF program
7371 | | * correctness anyway.
7372 | | */
7373 | 0 | pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n", |
7374 | 0 | prog->name); |
7375 | 0 | return -EINVAL; |
7376 | 0 | } |
7377 | 0 | attach_name++; /* skip over / */ |
7378 | |
|
7379 | 0 | err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id); |
7380 | 0 | if (err) |
7381 | 0 | return err; |
7382 | | |
7383 | | /* cache resolved BTF FD and BTF type ID in the prog */ |
7384 | 0 | prog->attach_btf_obj_fd = btf_obj_fd; |
7385 | 0 | prog->attach_btf_id = btf_type_id; |
7386 | | |
7387 | | /* but by now, libbpf's common logic no longer uses
7388 | | * prog->attach_btf_obj_fd/prog->attach_btf_id, because
7389 | | * this callback is called after opts were populated by
7390 | | * libbpf, so this callback has to update opts explicitly here
7391 | | */
7392 | 0 | opts->attach_btf_obj_fd = btf_obj_fd; |
7393 | 0 | opts->attach_btf_id = btf_type_id; |
7394 | 0 | } |
7395 | 0 | return 0; |
7396 | 0 | } |
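 | | /* Editor's usage sketch (not part of libbpf.c): per the warning above, a
 | |  * program annotated with plain SEC("fentry") (no "/<target>" suffix) must
 | |  * have its attach target set between open and load; "vfs_read" is just a
 | |  * placeholder kernel function name:
 | |  */
 | | static int example_set_attach_target(struct bpf_program *prog)
 | | {
 | | 	/* attach_prog_fd == 0 means the target is a kernel function
 | | 	 * (resolved against vmlinux/module BTF), not another BPF program
 | | 	 */
 | | 	return bpf_program__set_attach_target(prog, 0, "vfs_read");
 | | }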
7397 | | |
7398 | | static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz); |
7399 | | |
7400 | | static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog, |
7401 | | struct bpf_insn *insns, int insns_cnt, |
7402 | | const char *license, __u32 kern_version, int *prog_fd) |
7403 | 0 | { |
7404 | 0 | LIBBPF_OPTS(bpf_prog_load_opts, load_attr); |
7405 | 0 | const char *prog_name = NULL; |
7406 | 0 | char *cp, errmsg[STRERR_BUFSIZE]; |
7407 | 0 | size_t log_buf_size = 0; |
7408 | 0 | char *log_buf = NULL, *tmp; |
7409 | 0 | bool own_log_buf = true; |
7410 | 0 | __u32 log_level = prog->log_level; |
7411 | 0 | int ret, err; |
7412 | | |
7413 | | /* Be more helpful by rejecting early the programs that we know can't
7414 | | * be validated, with a more meaningful and actionable error message.
7415 | | */
7416 | 0 | switch (prog->type) { |
7417 | 0 | case BPF_PROG_TYPE_UNSPEC: |
7418 | | /* |
7419 | | * The program type must be set. Most likely we couldn't find a proper |
7420 | | * section definition at load time, and thus we didn't infer the type. |
7421 | | */ |
7422 | 0 | pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n", |
7423 | 0 | prog->name, prog->sec_name); |
7424 | 0 | return -EINVAL; |
7425 | 0 | case BPF_PROG_TYPE_STRUCT_OPS: |
7426 | 0 | if (prog->attach_btf_id == 0) { |
7427 | 0 | pr_warn("prog '%s': SEC(\"struct_ops\") program isn't referenced anywhere, did you forget to use it?\n", |
7428 | 0 | prog->name); |
7429 | 0 | return -EINVAL; |
7430 | 0 | } |
7431 | 0 | break; |
7432 | 0 | default: |
7433 | 0 | break; |
7434 | 0 | } |
7435 | | |
7436 | 0 | if (!insns || !insns_cnt) |
7437 | 0 | return -EINVAL; |
7438 | | |
7439 | 0 | if (kernel_supports(obj, FEAT_PROG_NAME)) |
7440 | 0 | prog_name = prog->name; |
7441 | 0 | load_attr.attach_prog_fd = prog->attach_prog_fd; |
7442 | 0 | load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd; |
7443 | 0 | load_attr.attach_btf_id = prog->attach_btf_id; |
7444 | 0 | load_attr.kern_version = kern_version; |
7445 | 0 | load_attr.prog_ifindex = prog->prog_ifindex; |
7446 | | |
7447 | | /* specify func_info/line_info only if kernel supports them */ |
7448 | 0 | if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) { |
7449 | 0 | load_attr.prog_btf_fd = btf__fd(obj->btf); |
7450 | 0 | load_attr.func_info = prog->func_info; |
7451 | 0 | load_attr.func_info_rec_size = prog->func_info_rec_size; |
7452 | 0 | load_attr.func_info_cnt = prog->func_info_cnt; |
7453 | 0 | load_attr.line_info = prog->line_info; |
7454 | 0 | load_attr.line_info_rec_size = prog->line_info_rec_size; |
7455 | 0 | load_attr.line_info_cnt = prog->line_info_cnt; |
7456 | 0 | } |
7457 | 0 | load_attr.log_level = log_level; |
7458 | 0 | load_attr.prog_flags = prog->prog_flags; |
7459 | 0 | load_attr.fd_array = obj->fd_array; |
7460 | |
|
7461 | 0 | load_attr.token_fd = obj->token_fd; |
7462 | 0 | if (obj->token_fd) |
7463 | 0 | load_attr.prog_flags |= BPF_F_TOKEN_FD; |
7464 | | |
7465 | | /* adjust load_attr if sec_def provides a custom prepare-load callback */
7466 | 0 | if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) { |
7467 | 0 | err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie); |
7468 | 0 | if (err < 0) { |
7469 | 0 | pr_warn("prog '%s': failed to prepare load attributes: %d\n", |
7470 | 0 | prog->name, err); |
7471 | 0 | return err; |
7472 | 0 | } |
7473 | 0 | insns = prog->insns; |
7474 | 0 | insns_cnt = prog->insns_cnt; |
7475 | 0 | } |
7476 | | |
7477 | | /* allow prog_prepare_load_fn to change expected_attach_type */ |
7478 | 0 | load_attr.expected_attach_type = prog->expected_attach_type; |
7479 | |
|
7480 | 0 | if (obj->gen_loader) { |
7481 | 0 | bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name, |
7482 | 0 | license, insns, insns_cnt, &load_attr, |
7483 | 0 | prog - obj->programs); |
7484 | 0 | *prog_fd = -1; |
7485 | 0 | return 0; |
7486 | 0 | } |
7487 | | |
7488 | 0 | retry_load: |
7489 | | /* if log_level is zero, we don't request logs initially, even if a
7490 | | * custom log_buf is specified; if the program load fails, we'll
7491 | | * bump log_level to 1 and either use the custom log_buf or allocate
7492 | | * our own, then retry the load to get details on what failed
7493 | | */
7494 | 0 | if (log_level) { |
7495 | 0 | if (prog->log_buf) { |
7496 | 0 | log_buf = prog->log_buf; |
7497 | 0 | log_buf_size = prog->log_size; |
7498 | 0 | own_log_buf = false; |
7499 | 0 | } else if (obj->log_buf) { |
7500 | 0 | log_buf = obj->log_buf; |
7501 | 0 | log_buf_size = obj->log_size; |
7502 | 0 | own_log_buf = false; |
7503 | 0 | } else { |
7504 | 0 | log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2); |
7505 | 0 | tmp = realloc(log_buf, log_buf_size); |
7506 | 0 | if (!tmp) { |
7507 | 0 | ret = -ENOMEM; |
7508 | 0 | goto out; |
7509 | 0 | } |
7510 | 0 | log_buf = tmp; |
7511 | 0 | log_buf[0] = '\0'; |
7512 | 0 | own_log_buf = true; |
7513 | 0 | } |
7514 | 0 | } |
7515 | | |
7516 | 0 | load_attr.log_buf = log_buf; |
7517 | 0 | load_attr.log_size = log_buf_size; |
7518 | 0 | load_attr.log_level = log_level; |
7519 | |
|
7520 | 0 | ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr); |
7521 | 0 | if (ret >= 0) { |
7522 | 0 | if (log_level && own_log_buf) { |
7523 | 0 | pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n", |
7524 | 0 | prog->name, log_buf); |
7525 | 0 | } |
7526 | |
|
7527 | 0 | if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) { |
7528 | 0 | struct bpf_map *map; |
7529 | 0 | int i; |
7530 | |
|
7531 | 0 | for (i = 0; i < obj->nr_maps; i++) { |
7532 | 0 | map = &prog->obj->maps[i]; |
7533 | 0 | if (map->libbpf_type != LIBBPF_MAP_RODATA) |
7534 | 0 | continue; |
7535 | | |
7536 | 0 | if (bpf_prog_bind_map(ret, map->fd, NULL)) { |
7537 | 0 | cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); |
7538 | 0 | pr_warn("prog '%s': failed to bind map '%s': %s\n", |
7539 | 0 | prog->name, map->real_name, cp); |
7540 | | /* Don't fail hard if can't bind rodata. */ |
7541 | 0 | } |
7542 | 0 | } |
7543 | 0 | } |
7544 | |
|
7545 | 0 | *prog_fd = ret; |
7546 | 0 | ret = 0; |
7547 | 0 | goto out; |
7548 | 0 | } |
7549 | | |
7550 | 0 | if (log_level == 0) { |
7551 | 0 | log_level = 1; |
7552 | 0 | goto retry_load; |
7553 | 0 | } |
7554 | | /* On ENOSPC, increase the log buffer size and retry, unless a custom
7555 | | * log_buf is specified.
7556 | | * Be careful not to overflow u32, though. The kernel's log buf size limit
7557 | | * isn't part of UAPI, so it can always be bumped to a full 4GB. So don't
7558 | | * multiply by 2 unless we are sure we'll fit within 32 bits.
7559 | | * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2).
7560 | | */
7561 | 0 | if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2) |
7562 | 0 | goto retry_load; |
7563 | | |
7564 | 0 | ret = -errno; |
7565 | | |
7566 | | /* post-process verifier log to improve error descriptions */ |
7567 | 0 | fixup_verifier_log(prog, log_buf, log_buf_size); |
7568 | |
|
7569 | 0 | cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); |
7570 | 0 | pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp); |
7571 | 0 | pr_perm_msg(ret); |
7572 | |
|
7573 | 0 | if (own_log_buf && log_buf && log_buf[0] != '\0') { |
7574 | 0 | pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n", |
7575 | 0 | prog->name, log_buf); |
7576 | 0 | } |
7577 | |
|
7578 | 0 | out: |
7579 | 0 | if (own_log_buf) |
7580 | 0 | free(log_buf); |
7581 | 0 | return ret; |
7582 | 0 | } |
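 | | /* Editor's usage sketch (not part of libbpf.c): the prog->log_buf and
 | |  * obj->log_buf branches above are fed by public setters/open options;
 | |  * note that with a user-supplied buffer the ENOSPC grow-and-retry loop
 | |  * above is intentionally skipped:
 | |  */
 | | static char example_verifier_log[1024 * 1024];
 | | 
 | | static void example_capture_verifier_log(struct bpf_program *prog)
 | | {
 | | 	bpf_program__set_log_buf(prog, example_verifier_log, sizeof(example_verifier_log));
 | | 	bpf_program__set_log_level(prog, 1); /* request the log even on success */
 | | }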
7583 | | |
7584 | | static char *find_prev_line(char *buf, char *cur) |
7585 | 0 | { |
7586 | 0 | char *p; |
7587 | |
|
7588 | 0 | if (cur == buf) /* reached the start of the log buf */
7589 | 0 | return NULL; |
7590 | | |
7591 | 0 | p = cur - 1; |
7592 | 0 | while (p - 1 >= buf && *(p - 1) != '\n') |
7593 | 0 | p--; |
7594 | |
|
7595 | 0 | return p; |
7596 | 0 | } |
7597 | | |
7598 | | static void patch_log(char *buf, size_t buf_sz, size_t log_sz, |
7599 | | char *orig, size_t orig_sz, const char *patch) |
7600 | 0 | { |
7601 | | /* size of the remaining log content to the right from the to-be-replaced part */ |
7602 | 0 | size_t rem_sz = (buf + log_sz) - (orig + orig_sz); |
7603 | 0 | size_t patch_sz = strlen(patch); |
7604 | |
|
7605 | 0 | if (patch_sz != orig_sz) { |
7606 | | /* If the patch line(s) are longer than the original piece of verifier log,
7607 | | * shift log contents by (patch_sz - orig_sz) bytes to the right,
7608 | | * starting from after the to-be-replaced part of the log.
7609 | | *
7610 | | * If the patch line(s) are shorter than the original piece of verifier log,
7611 | | * shift log contents by (orig_sz - patch_sz) bytes to the left,
7612 | | * starting from after the to-be-replaced part of the log.
7613 | | *
7614 | | * We need to be careful about not overflowing the available
7615 | | * buf_sz capacity. If that's the case, we'll truncate the end
7616 | | * of the original log, as necessary.
7617 | | */
7618 | 0 | if (patch_sz > orig_sz) { |
7619 | 0 | if (orig + patch_sz >= buf + buf_sz) { |
7620 | | /* patch is big enough to cover remaining space completely */ |
7621 | 0 | patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1; |
7622 | 0 | rem_sz = 0; |
7623 | 0 | } else if (patch_sz - orig_sz > buf_sz - log_sz) { |
7624 | | /* patch causes part of remaining log to be truncated */ |
7625 | 0 | rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz); |
7626 | 0 | } |
7627 | 0 | } |
7628 | | /* shift remaining log to the right by calculated amount */ |
7629 | 0 | memmove(orig + patch_sz, orig + orig_sz, rem_sz); |
7630 | 0 | } |
7631 | |
|
7632 | 0 | memcpy(orig, patch, patch_sz); |
7633 | 0 | } |
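 | | /* Editor's worked example for patch_log() (hypothetical buffer contents):
 | |  * with buf = "a\nORIG\nrest\n" (log_sz = 13, including the NUL), orig
 | |  * pointing at "ORIG\n" (orig_sz = 5) and patch = "PATCHED\n" (patch_sz = 8),
 | |  * rem_sz = 13 - (2 + 5) = 6 covers "rest\n" plus the NUL; the memmove()
 | |  * shifts that tail right by 3 bytes and the memcpy() writes the patch,
 | |  * yielding "a\nPATCHED\nrest\n".
 | |  */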
7634 | | |
7635 | | static void fixup_log_failed_core_relo(struct bpf_program *prog, |
7636 | | char *buf, size_t buf_sz, size_t log_sz, |
7637 | | char *line1, char *line2, char *line3) |
7638 | 0 | { |
7639 | | /* Expected log for failed and not properly guarded CO-RE relocation: |
7640 | | * line1 -> 123: (85) call unknown#195896080 |
7641 | | * line2 -> invalid func unknown#195896080 |
7642 | | * line3 -> <anything else or end of buffer> |
7643 | | * |
7644 | | * "123" is the index of the instruction that was poisoned. We extract |
7645 | | * instruction index to find corresponding CO-RE relocation and |
7646 | | * replace this part of the log with more relevant information about |
7647 | | * failed CO-RE relocation. |
7648 | | */ |
7649 | 0 | const struct bpf_core_relo *relo; |
7650 | 0 | struct bpf_core_spec spec; |
7651 | 0 | char patch[512], spec_buf[256]; |
7652 | 0 | int insn_idx, err, spec_len; |
7653 | |
|
7654 | 0 | if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1) |
7655 | 0 | return; |
7656 | | |
7657 | 0 | relo = find_relo_core(prog, insn_idx); |
7658 | 0 | if (!relo) |
7659 | 0 | return; |
7660 | | |
7661 | 0 | err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec); |
7662 | 0 | if (err) |
7663 | 0 | return; |
7664 | | |
7665 | 0 | spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec); |
7666 | 0 | snprintf(patch, sizeof(patch), |
7667 | 0 | "%d: <invalid CO-RE relocation>\n" |
7668 | 0 | "failed to resolve CO-RE relocation %s%s\n", |
7669 | 0 | insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : ""); |
7670 | |
|
7671 | 0 | patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); |
7672 | 0 | } |
7673 | | |
7674 | | static void fixup_log_missing_map_load(struct bpf_program *prog, |
7675 | | char *buf, size_t buf_sz, size_t log_sz, |
7676 | | char *line1, char *line2, char *line3) |
7677 | 0 | { |
7678 | | /* Expected log for failed and not properly guarded map reference: |
7679 | | * line1 -> 123: (85) call unknown#2001000345 |
7680 | | * line2 -> invalid func unknown#2001000345 |
7681 | | * line3 -> <anything else or end of buffer> |
7682 | | * |
7683 | | * "123" is the index of the instruction that was poisoned. |
7684 | | * "345" in "2001000345" is a map index in obj->maps to fetch map name. |
7685 | | */ |
7686 | 0 | struct bpf_object *obj = prog->obj; |
7687 | 0 | const struct bpf_map *map; |
7688 | 0 | int insn_idx, map_idx; |
7689 | 0 | char patch[128]; |
7690 | |
|
7691 | 0 | if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2) |
7692 | 0 | return; |
7693 | | |
7694 | 0 | map_idx -= POISON_LDIMM64_MAP_BASE; |
7695 | 0 | if (map_idx < 0 || map_idx >= obj->nr_maps) |
7696 | 0 | return; |
7697 | 0 | map = &obj->maps[map_idx]; |
7698 | |
|
7699 | 0 | snprintf(patch, sizeof(patch), |
7700 | 0 | "%d: <invalid BPF map reference>\n" |
7701 | 0 | "BPF map '%s' is referenced but wasn't created\n", |
7702 | 0 | insn_idx, map->name); |
7703 | |
|
7704 | 0 | patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); |
7705 | 0 | } |
7706 | | |
7707 | | static void fixup_log_missing_kfunc_call(struct bpf_program *prog, |
7708 | | char *buf, size_t buf_sz, size_t log_sz, |
7709 | | char *line1, char *line2, char *line3) |
7710 | 0 | { |
7711 | | /* Expected log for failed and not properly guarded kfunc call: |
7712 | | * line1 -> 123: (85) call unknown#2002000345 |
7713 | | * line2 -> invalid func unknown#2002000345 |
7714 | | * line3 -> <anything else or end of buffer> |
7715 | | * |
7716 | | * "123" is the index of the instruction that was poisoned. |
7717 | | * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name. |
7718 | | */ |
7719 | 0 | struct bpf_object *obj = prog->obj; |
7720 | 0 | const struct extern_desc *ext; |
7721 | 0 | int insn_idx, ext_idx; |
7722 | 0 | char patch[128]; |
7723 | |
|
7724 | 0 | if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2) |
7725 | 0 | return; |
7726 | | |
7727 | 0 | ext_idx -= POISON_CALL_KFUNC_BASE; |
7728 | 0 | if (ext_idx < 0 || ext_idx >= obj->nr_extern) |
7729 | 0 | return; |
7730 | 0 | ext = &obj->externs[ext_idx]; |
7731 | |
|
7732 | 0 | snprintf(patch, sizeof(patch), |
7733 | 0 | "%d: <invalid kfunc call>\n" |
7734 | 0 | "kfunc '%s' is referenced but wasn't resolved\n", |
7735 | 0 | insn_idx, ext->name); |
7736 | |
|
7737 | 0 | patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); |
7738 | 0 | } |
7739 | | |
7740 | | static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz) |
7741 | 0 | { |
7742 | | /* look for familiar error patterns in last N lines of the log */ |
7743 | 0 | const size_t max_last_line_cnt = 10; |
7744 | 0 | char *prev_line, *cur_line, *next_line; |
7745 | 0 | size_t log_sz; |
7746 | 0 | int i; |
7747 | |
|
7748 | 0 | if (!buf) |
7749 | 0 | return; |
7750 | | |
7751 | 0 | log_sz = strlen(buf) + 1; |
7752 | 0 | next_line = buf + log_sz - 1; |
7753 | |
|
7754 | 0 | for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) { |
7755 | 0 | cur_line = find_prev_line(buf, next_line); |
7756 | 0 | if (!cur_line) |
7757 | 0 | return; |
7758 | | |
7759 | 0 | if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) { |
7760 | 0 | prev_line = find_prev_line(buf, cur_line); |
7761 | 0 | if (!prev_line) |
7762 | 0 | continue; |
7763 | | |
7764 | | /* failed CO-RE relocation case */ |
7765 | 0 | fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz, |
7766 | 0 | prev_line, cur_line, next_line); |
7767 | 0 | return; |
7768 | 0 | } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) { |
7769 | 0 | prev_line = find_prev_line(buf, cur_line); |
7770 | 0 | if (!prev_line) |
7771 | 0 | continue; |
7772 | | |
7773 | | /* reference to uncreated BPF map */ |
7774 | 0 | fixup_log_missing_map_load(prog, buf, buf_sz, log_sz, |
7775 | 0 | prev_line, cur_line, next_line); |
7776 | 0 | return; |
7777 | 0 | } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) { |
7778 | 0 | prev_line = find_prev_line(buf, cur_line); |
7779 | 0 | if (!prev_line) |
7780 | 0 | continue; |
7781 | | |
7782 | | /* reference to unresolved kfunc */ |
7783 | 0 | fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz, |
7784 | 0 | prev_line, cur_line, next_line); |
7785 | 0 | return; |
7786 | 0 | } |
7787 | 0 | } |
7788 | 0 | } |
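 | | /* Editor's example (derived from the expected-log comments above): for a
 | |  * failed CO-RE relocation, the log tail
 | |  *   123: (85) call unknown#195896080
 | |  *   invalid func unknown#195896080
 | |  * is patched in place into something like
 | |  *   123: <invalid CO-RE relocation>
 | |  *   failed to resolve CO-RE relocation <access spec>
 | |  * with a "..." suffix appended if the formatted spec was truncated.
 | |  */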
7789 | | |
7790 | | static int bpf_program_record_relos(struct bpf_program *prog) |
7791 | 0 | { |
7792 | 0 | struct bpf_object *obj = prog->obj; |
7793 | 0 | int i; |
7794 | |
|
7795 | 0 | for (i = 0; i < prog->nr_reloc; i++) { |
7796 | 0 | struct reloc_desc *relo = &prog->reloc_desc[i]; |
7797 | 0 | struct extern_desc *ext = &obj->externs[relo->ext_idx]; |
7798 | 0 | int kind; |
7799 | |
|
7800 | 0 | switch (relo->type) { |
7801 | 0 | case RELO_EXTERN_LD64: |
7802 | 0 | if (ext->type != EXT_KSYM) |
7803 | 0 | continue; |
7804 | 0 | kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ? |
7805 | 0 | BTF_KIND_VAR : BTF_KIND_FUNC; |
7806 | 0 | bpf_gen__record_extern(obj->gen_loader, ext->name, |
7807 | 0 | ext->is_weak, !ext->ksym.type_id, |
7808 | 0 | true, kind, relo->insn_idx); |
7809 | 0 | break; |
7810 | 0 | case RELO_EXTERN_CALL: |
7811 | 0 | bpf_gen__record_extern(obj->gen_loader, ext->name, |
7812 | 0 | ext->is_weak, false, false, BTF_KIND_FUNC, |
7813 | 0 | relo->insn_idx); |
7814 | 0 | break; |
7815 | 0 | case RELO_CORE: { |
7816 | 0 | struct bpf_core_relo cr = { |
7817 | 0 | .insn_off = relo->insn_idx * 8, |
7818 | 0 | .type_id = relo->core_relo->type_id, |
7819 | 0 | .access_str_off = relo->core_relo->access_str_off, |
7820 | 0 | .kind = relo->core_relo->kind, |
7821 | 0 | }; |
7822 | |
|
7823 | 0 | bpf_gen__record_relo_core(obj->gen_loader, &cr); |
7824 | 0 | break; |
7825 | 0 | } |
7826 | 0 | default: |
7827 | 0 | continue; |
7828 | 0 | } |
7829 | 0 | } |
7830 | 0 | return 0; |
7831 | 0 | } |
7832 | | |
7833 | | static int |
7834 | | bpf_object__load_progs(struct bpf_object *obj, int log_level) |
7835 | 0 | { |
7836 | 0 | struct bpf_program *prog; |
7837 | 0 | size_t i; |
7838 | 0 | int err; |
7839 | |
|
7840 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
7841 | 0 | prog = &obj->programs[i]; |
7842 | 0 | err = bpf_object__sanitize_prog(obj, prog); |
7843 | 0 | if (err) |
7844 | 0 | return err; |
7845 | 0 | } |
7846 | | |
7847 | 0 | for (i = 0; i < obj->nr_programs; i++) { |
7848 | 0 | prog = &obj->programs[i]; |
7849 | 0 | if (prog_is_subprog(obj, prog)) |
7850 | 0 | continue; |
7851 | 0 | if (!prog->autoload) { |
7852 | 0 | pr_debug("prog '%s': skipped loading\n", prog->name); |
7853 | 0 | continue; |
7854 | 0 | } |
7855 | 0 | prog->log_level |= log_level; |
7856 | |
|
7857 | 0 | if (obj->gen_loader) |
7858 | 0 | bpf_program_record_relos(prog); |
7859 | |
|
7860 | 0 | err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt, |
7861 | 0 | obj->license, obj->kern_version, &prog->fd); |
7862 | 0 | if (err) { |
7863 | 0 | pr_warn("prog '%s': failed to load: %d\n", prog->name, err); |
7864 | 0 | return err; |
7865 | 0 | } |
7866 | 0 | } |
7867 | | |
7868 | 0 | bpf_object__free_relocs(obj); |
7869 | 0 | return 0; |
7870 | 0 | } |
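 | | /* Editor's usage sketch (not part of libbpf.c): the prog->autoload check
 | |  * above is controlled through this public setter, which has to be called
 | |  * between open and load:
 | |  */
 | | static void example_disable_prog(struct bpf_program *prog)
 | | {
 | | 	bpf_program__set_autoload(prog, false); /* prog is then skipped above */
 | | }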
7871 | | |
7872 | | static const struct bpf_sec_def *find_sec_def(const char *sec_name); |
7873 | | |
7874 | | static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts) |
7875 | 2.30k | { |
7876 | 2.30k | struct bpf_program *prog; |
7877 | 2.30k | int err; |
7878 | | |
7879 | 7.51k | bpf_object__for_each_program(prog, obj) { |
7880 | 7.51k | prog->sec_def = find_sec_def(prog->sec_name); |
7881 | 7.51k | if (!prog->sec_def) { |
7882 | | /* couldn't guess, but user might manually specify */ |
7883 | 6.86k | pr_debug("prog '%s': unrecognized ELF section name '%s'\n", |
7884 | 6.86k | prog->name, prog->sec_name); |
7885 | 6.86k | continue; |
7886 | 6.86k | } |
7887 | | |
7888 | 650 | prog->type = prog->sec_def->prog_type; |
7889 | 650 | prog->expected_attach_type = prog->sec_def->expected_attach_type; |
7890 | | |
7891 | | /* sec_def can have custom callback which should be called |
7892 | | * after bpf_program is initialized to adjust its properties |
7893 | | */ |
7894 | 650 | if (prog->sec_def->prog_setup_fn) { |
7895 | 0 | err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie); |
7896 | 0 | if (err < 0) { |
7897 | 0 | pr_warn("prog '%s': failed to initialize: %d\n", |
7898 | 0 | prog->name, err); |
7899 | 0 | return err; |
7900 | 0 | } |
7901 | 0 | } |
7902 | 650 | } |
7903 | | |
7904 | 2.30k | return 0; |
7905 | 2.30k | } |
7906 | | |
7907 | | static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz, |
7908 | | const char *obj_name, |
7909 | | const struct bpf_object_open_opts *opts) |
7910 | 11.7k | { |
7911 | 11.7k | const char *kconfig, *btf_tmp_path, *token_path; |
7912 | 11.7k | struct bpf_object *obj; |
7913 | 11.7k | int err; |
7914 | 11.7k | char *log_buf; |
7915 | 11.7k | size_t log_size; |
7916 | 11.7k | __u32 log_level; |
7917 | | |
7918 | 11.7k | if (obj_buf && !obj_name) |
7919 | 0 | return ERR_PTR(-EINVAL); |
7920 | | |
7921 | 11.7k | if (elf_version(EV_CURRENT) == EV_NONE) { |
7922 | 0 | pr_warn("failed to init libelf for %s\n", |
7923 | 0 | path ? : "(mem buf)"); |
7924 | 0 | return ERR_PTR(-LIBBPF_ERRNO__LIBELF); |
7925 | 0 | } |
7926 | | |
7927 | 11.7k | if (!OPTS_VALID(opts, bpf_object_open_opts)) |
7928 | 0 | return ERR_PTR(-EINVAL); |
7929 | | |
7930 | 11.7k | obj_name = OPTS_GET(opts, object_name, NULL) ?: obj_name; |
7931 | 11.7k | if (obj_buf) { |
7932 | 11.7k | path = obj_name; |
7933 | 11.7k | pr_debug("loading object '%s' from buffer\n", obj_name); |
7934 | 18.4E | } else { |
7935 | 18.4E | pr_debug("loading object from %s\n", path); |
7936 | 18.4E | } |
7937 | |
|
7938 | 0 | log_buf = OPTS_GET(opts, kernel_log_buf, NULL); |
7939 | 0 | log_size = OPTS_GET(opts, kernel_log_size, 0); |
7940 | 0 | log_level = OPTS_GET(opts, kernel_log_level, 0); |
7941 | 0 | if (log_size > UINT_MAX) |
7942 | 0 | return ERR_PTR(-EINVAL); |
7943 | 0 | if (log_size && !log_buf) |
7944 | 0 | return ERR_PTR(-EINVAL); |
7945 | | |
7946 | 0 | token_path = OPTS_GET(opts, bpf_token_path, NULL); |
7947 | | /* if user didn't specify bpf_token_path explicitly, check if |
7948 | | * LIBBPF_BPF_TOKEN_PATH envvar was set and treat it as bpf_token_path |
7949 | | * option |
7950 | | */ |
7951 | 0 | if (!token_path) |
7952 | 11.7k | token_path = getenv("LIBBPF_BPF_TOKEN_PATH"); |
7953 | 0 | if (token_path && strlen(token_path) >= PATH_MAX) |
7954 | 0 | return ERR_PTR(-ENAMETOOLONG); |
7955 | | |
7956 | 0 | obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name); |
7957 | 0 | if (IS_ERR(obj)) |
7958 | 0 | return obj; |
7959 | | |
7960 | 0 | obj->log_buf = log_buf; |
7961 | 0 | obj->log_size = log_size; |
7962 | 0 | obj->log_level = log_level; |
7963 | |
|
7964 | 0 | if (token_path) { |
7965 | 0 | obj->token_path = strdup(token_path); |
7966 | 0 | if (!obj->token_path) { |
7967 | 0 | err = -ENOMEM; |
7968 | 0 | goto out; |
7969 | 0 | } |
7970 | 0 | } |
7971 | | |
7972 | 0 | btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL); |
7973 | 0 | if (btf_tmp_path) { |
7974 | 0 | if (strlen(btf_tmp_path) >= PATH_MAX) { |
7975 | 0 | err = -ENAMETOOLONG; |
7976 | 0 | goto out; |
7977 | 0 | } |
7978 | 0 | obj->btf_custom_path = strdup(btf_tmp_path); |
7979 | 0 | if (!obj->btf_custom_path) { |
7980 | 0 | err = -ENOMEM; |
7981 | 0 | goto out; |
7982 | 0 | } |
7983 | 0 | } |
7984 | | |
7985 | 0 | kconfig = OPTS_GET(opts, kconfig, NULL); |
7986 | 0 | if (kconfig) { |
7987 | 0 | obj->kconfig = strdup(kconfig); |
7988 | 0 | if (!obj->kconfig) { |
7989 | 0 | err = -ENOMEM; |
7990 | 0 | goto out; |
7991 | 0 | } |
7992 | 0 | } |
7993 | | |
7994 | 0 | err = bpf_object__elf_init(obj); |
7995 | 18.4E | err = err ? : bpf_object__check_endianness(obj); |
7996 | 18.4E | err = err ? : bpf_object__elf_collect(obj); |
7997 | 18.4E | err = err ? : bpf_object__collect_externs(obj); |
7998 | 18.4E | err = err ? : bpf_object_fixup_btf(obj); |
7999 | 18.4E | err = err ? : bpf_object__init_maps(obj, opts); |
8000 | 18.4E | err = err ? : bpf_object_init_progs(obj, opts); |
8001 | 18.4E | err = err ? : bpf_object__collect_relos(obj); |
8002 | 18.4E | if (err) |
8003 | 9.93k | goto out; |
8004 | | |
8005 | 18.4E | bpf_object__elf_finish(obj); |
8006 | | |
8007 | 18.4E | return obj; |
8008 | 9.93k | out: |
8009 | 9.93k | bpf_object__close(obj); |
8010 | 9.93k | return ERR_PTR(err); |
8011 | 18.4E | } |
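 | | /* Editor's note on the "err = err ?: f()" chains above: they rely on the
 | |  * GNU ?: extension, where
 | |  *   err = err ?: f();
 | |  * is equivalent to
 | |  *   if (!err) err = f();
 | |  * so the first failing step short-circuits the rest of the pipeline while
 | |  * keeping a single error path.
 | |  */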
8012 | | |
8013 | | struct bpf_object * |
8014 | | bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) |
8015 | 0 | { |
8016 | 0 | if (!path) |
8017 | 0 | return libbpf_err_ptr(-EINVAL); |
8018 | | |
8019 | 0 | return libbpf_ptr(bpf_object_open(path, NULL, 0, NULL, opts)); |
8020 | 0 | } |
8021 | | |
8022 | | struct bpf_object *bpf_object__open(const char *path) |
8023 | 0 | { |
8024 | 0 | return bpf_object__open_file(path, NULL); |
8025 | 0 | } |
8026 | | |
8027 | | struct bpf_object * |
8028 | | bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, |
8029 | | const struct bpf_object_open_opts *opts) |
8030 | 11.7k | { |
8031 | 11.7k | char tmp_name[64]; |
8032 | | |
8033 | 11.7k | if (!obj_buf || obj_buf_sz == 0) |
8034 | 0 | return libbpf_err_ptr(-EINVAL); |
8035 | | |
8036 | | /* create a (quite useless) default "name" for this memory buffer object */ |
8037 | 11.7k | snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz); |
8038 | | |
8039 | 11.7k | return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, opts)); |
8040 | 11.7k | } |
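 | | /* Editor's usage sketch (not part of libbpf.c): a typical open -> load ->
 | |  * close sequence built from the entry points above; "prog.bpf.o" is a
 | |  * placeholder object path:
 | |  */
 | | static int example_open_and_load(void)
 | | {
 | | 	LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_level = 1);
 | | 	struct bpf_object *obj;
 | | 	int err;
 | | 
 | | 	obj = bpf_object__open_file("prog.bpf.o", &opts);
 | | 	if (!obj)
 | | 		return -errno; /* libbpf 1.0 convention: NULL + errno */
 | | 
 | | 	err = bpf_object__load(obj);
 | | 	bpf_object__close(obj); /* also unloads progs and frees resources */
 | | 	return err;
 | | }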
8041 | | |
8042 | | static int bpf_object_unload(struct bpf_object *obj) |
8043 | 11.7k | { |
8044 | 11.7k | size_t i; |
8045 | | |
8046 | 11.7k | if (!obj) |
8047 | 0 | return libbpf_err(-EINVAL); |
8048 | | |
8049 | 14.6k | for (i = 0; i < obj->nr_maps; i++) { |
8050 | 2.92k | zclose(obj->maps[i].fd); |
8051 | 2.92k | if (obj->maps[i].st_ops) |
8052 | 71 | zfree(&obj->maps[i].st_ops->kern_vdata); |
8053 | 2.92k | } |
8054 | | |
8055 | 20.8k | for (i = 0; i < obj->nr_programs; i++) |
8056 | 9.15k | bpf_program__unload(&obj->programs[i]); |
8057 | | |
8058 | 11.7k | return 0; |
8059 | 11.7k | } |
8060 | | |
8061 | | static int bpf_object__sanitize_maps(struct bpf_object *obj) |
8062 | 0 | { |
8063 | 0 | struct bpf_map *m; |
8064 | |
|
8065 | 0 | bpf_object__for_each_map(m, obj) { |
8066 | 0 | if (!bpf_map__is_internal(m)) |
8067 | 0 | continue; |
8068 | 0 | if (!kernel_supports(obj, FEAT_ARRAY_MMAP)) |
8069 | 0 | m->def.map_flags &= ~BPF_F_MMAPABLE; |
8070 | 0 | } |
8071 | |
|
8072 | 0 | return 0; |
8073 | 0 | } |
8074 | | |
8075 | | typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type, |
8076 | | const char *sym_name, void *ctx); |
8077 | | |
8078 | | static int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx) |
8079 | 0 | { |
8080 | 0 | char sym_type, sym_name[500]; |
8081 | 0 | unsigned long long sym_addr; |
8082 | 0 | int ret, err = 0; |
8083 | 0 | FILE *f; |
8084 | |
|
8085 | 0 | f = fopen("/proc/kallsyms", "re"); |
8086 | 0 | if (!f) { |
8087 | 0 | err = -errno; |
8088 | 0 | pr_warn("failed to open /proc/kallsyms: %d\n", err); |
8089 | 0 | return err; |
8090 | 0 | } |
8091 | | |
8092 | 0 | while (true) { |
8093 | 0 | ret = fscanf(f, "%llx %c %499s%*[^\n]\n", |
8094 | 0 | &sym_addr, &sym_type, sym_name); |
8095 | 0 | if (ret == EOF && feof(f)) |
8096 | 0 | break; |
8097 | 0 | if (ret != 3) { |
8098 | 0 | pr_warn("failed to read kallsyms entry: %d\n", ret); |
8099 | 0 | err = -EINVAL; |
8100 | 0 | break; |
8101 | 0 | } |
8102 | | |
8103 | 0 | err = cb(sym_addr, sym_type, sym_name, ctx); |
8104 | 0 | if (err) |
8105 | 0 | break; |
8106 | 0 | } |
8107 | |
|
8108 | 0 | fclose(f); |
8109 | 0 | return err; |
8110 | 0 | } |
8111 | | |
8112 | | static int kallsyms_cb(unsigned long long sym_addr, char sym_type, |
8113 | | const char *sym_name, void *ctx) |
8114 | 0 | { |
8115 | 0 | struct bpf_object *obj = ctx; |
8116 | 0 | const struct btf_type *t; |
8117 | 0 | struct extern_desc *ext; |
8118 | 0 | char *res; |
8119 | |
|
8120 | 0 | res = strstr(sym_name, ".llvm."); |
8121 | 0 | if (sym_type == 'd' && res) |
8122 | 0 | ext = find_extern_by_name_with_len(obj, sym_name, res - sym_name); |
8123 | 0 | else |
8124 | 0 | ext = find_extern_by_name(obj, sym_name); |
8125 | 0 | if (!ext || ext->type != EXT_KSYM) |
8126 | 0 | return 0; |
8127 | | |
8128 | 0 | t = btf__type_by_id(obj->btf, ext->btf_id); |
8129 | 0 | if (!btf_is_var(t)) |
8130 | 0 | return 0; |
8131 | | |
8132 | 0 | if (ext->is_set && ext->ksym.addr != sym_addr) { |
8133 | 0 | pr_warn("extern (ksym) '%s': resolution is ambiguous: 0x%llx or 0x%llx\n", |
8134 | 0 | sym_name, ext->ksym.addr, sym_addr); |
8135 | 0 | return -EINVAL; |
8136 | 0 | } |
8137 | 0 | if (!ext->is_set) { |
8138 | 0 | ext->is_set = true; |
8139 | 0 | ext->ksym.addr = sym_addr; |
8140 | 0 | pr_debug("extern (ksym) '%s': set to 0x%llx\n", sym_name, sym_addr); |
8141 | 0 | } |
8142 | 0 | return 0; |
8143 | 0 | } |
8144 | | |
8145 | | static int bpf_object__read_kallsyms_file(struct bpf_object *obj) |
8146 | 0 | { |
8147 | 0 | return libbpf_kallsyms_parse(kallsyms_cb, obj); |
8148 | 0 | } |
8149 | | |
8150 | | static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, |
8151 | | __u16 kind, struct btf **res_btf, |
8152 | | struct module_btf **res_mod_btf) |
8153 | 0 | { |
8154 | 0 | struct module_btf *mod_btf; |
8155 | 0 | struct btf *btf; |
8156 | 0 | int i, id, err; |
8157 | |
|
8158 | 0 | btf = obj->btf_vmlinux; |
8159 | 0 | mod_btf = NULL; |
8160 | 0 | id = btf__find_by_name_kind(btf, ksym_name, kind); |
8161 | |
|
8162 | 0 | if (id == -ENOENT) { |
8163 | 0 | err = load_module_btfs(obj); |
8164 | 0 | if (err) |
8165 | 0 | return err; |
8166 | | |
8167 | 0 | for (i = 0; i < obj->btf_module_cnt; i++) { |
8168 | | /* we assume module_btf's BTF FD is always >0 */ |
8169 | 0 | mod_btf = &obj->btf_modules[i]; |
8170 | 0 | btf = mod_btf->btf; |
8171 | 0 | id = btf__find_by_name_kind_own(btf, ksym_name, kind); |
8172 | 0 | if (id != -ENOENT) |
8173 | 0 | break; |
8174 | 0 | } |
8175 | 0 | } |
8176 | 0 | if (id <= 0) |
8177 | 0 | return -ESRCH; |
8178 | | |
8179 | 0 | *res_btf = btf; |
8180 | 0 | *res_mod_btf = mod_btf; |
8181 | 0 | return id; |
8182 | 0 | } |
8183 | | |
8184 | | static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj, |
8185 | | struct extern_desc *ext) |
8186 | 0 | { |
8187 | 0 | const struct btf_type *targ_var, *targ_type; |
8188 | 0 | __u32 targ_type_id, local_type_id; |
8189 | 0 | struct module_btf *mod_btf = NULL; |
8190 | 0 | const char *targ_var_name; |
8191 | 0 | struct btf *btf = NULL; |
8192 | 0 | int id, err; |
8193 | |
|
8194 | 0 | id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf); |
8195 | 0 | if (id < 0) { |
8196 | 0 | if (id == -ESRCH && ext->is_weak) |
8197 | 0 | return 0; |
8198 | 0 | pr_warn("extern (var ksym) '%s': not found in kernel BTF\n", |
8199 | 0 | ext->name); |
8200 | 0 | return id; |
8201 | 0 | } |
8202 | | |
8203 | | /* find local type_id */ |
8204 | 0 | local_type_id = ext->ksym.type_id; |
8205 | | |
8206 | | /* find target type_id */ |
8207 | 0 | targ_var = btf__type_by_id(btf, id); |
8208 | 0 | targ_var_name = btf__name_by_offset(btf, targ_var->name_off); |
8209 | 0 | targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id); |
8210 | |
|
8211 | 0 | err = bpf_core_types_are_compat(obj->btf, local_type_id, |
8212 | 0 | btf, targ_type_id); |
8213 | 0 | if (err <= 0) { |
8214 | 0 | const struct btf_type *local_type; |
8215 | 0 | const char *targ_name, *local_name; |
8216 | |
|
8217 | 0 | local_type = btf__type_by_id(obj->btf, local_type_id); |
8218 | 0 | local_name = btf__name_by_offset(obj->btf, local_type->name_off); |
8219 | 0 | targ_name = btf__name_by_offset(btf, targ_type->name_off); |
8220 | |
|
8221 | 0 | pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n", |
8222 | 0 | ext->name, local_type_id, |
8223 | 0 | btf_kind_str(local_type), local_name, targ_type_id, |
8224 | 0 | btf_kind_str(targ_type), targ_name); |
8225 | 0 | return -EINVAL; |
8226 | 0 | } |
8227 | | |
8228 | 0 | ext->is_set = true; |
8229 | 0 | ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0; |
8230 | 0 | ext->ksym.kernel_btf_id = id; |
8231 | 0 | pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n", |
8232 | 0 | ext->name, id, btf_kind_str(targ_var), targ_var_name); |
8233 | |
|
8234 | 0 | return 0; |
8235 | 0 | } |
8236 | | |
8237 | | static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, |
8238 | | struct extern_desc *ext) |
8239 | 0 | { |
8240 | 0 | int local_func_proto_id, kfunc_proto_id, kfunc_id; |
8241 | 0 | struct module_btf *mod_btf = NULL; |
8242 | 0 | const struct btf_type *kern_func; |
8243 | 0 | struct btf *kern_btf = NULL; |
8244 | 0 | int ret; |
8245 | |
|
8246 | 0 | local_func_proto_id = ext->ksym.type_id; |
8247 | |
|
8248 | 0 | kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf, |
8249 | 0 | &mod_btf); |
8250 | 0 | if (kfunc_id < 0) { |
8251 | 0 | if (kfunc_id == -ESRCH && ext->is_weak) |
8252 | 0 | return 0; |
8253 | 0 | pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n", |
8254 | 0 | ext->name); |
8255 | 0 | return kfunc_id; |
8256 | 0 | } |
8257 | | |
8258 | 0 | kern_func = btf__type_by_id(kern_btf, kfunc_id); |
8259 | 0 | kfunc_proto_id = kern_func->type; |
8260 | |
|
8261 | 0 | ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id, |
8262 | 0 | kern_btf, kfunc_proto_id); |
8263 | 0 | if (ret <= 0) { |
8264 | 0 | if (ext->is_weak) |
8265 | 0 | return 0; |
8266 | | |
8267 | 0 | pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n", |
8268 | 0 | ext->name, local_func_proto_id, |
8269 | 0 | mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id); |
8270 | 0 | return -EINVAL; |
8271 | 0 | } |
8272 | | |
8273 | | /* set index for module BTF fd in fd_array, if unset */ |
8274 | 0 | if (mod_btf && !mod_btf->fd_array_idx) { |
8275 | | /* insn->off is s16 */ |
8276 | 0 | if (obj->fd_array_cnt == INT16_MAX) { |
8277 | 0 | pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n", |
8278 | 0 | ext->name, mod_btf->fd_array_idx); |
8279 | 0 | return -E2BIG; |
8280 | 0 | } |
8281 | | /* Cannot use index 0 for module BTF fd */ |
8282 | 0 | if (!obj->fd_array_cnt) |
8283 | 0 | obj->fd_array_cnt = 1; |
8284 | |
|
8285 | 0 | ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int), |
8286 | 0 | obj->fd_array_cnt + 1); |
8287 | 0 | if (ret) |
8288 | 0 | return ret; |
8289 | 0 | mod_btf->fd_array_idx = obj->fd_array_cnt; |
8290 | | /* we assume module BTF FD is always >0 */ |
8291 | 0 | obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd; |
8292 | 0 | } |
8293 | | |
8294 | 0 | ext->is_set = true; |
8295 | 0 | ext->ksym.kernel_btf_id = kfunc_id; |
8296 | 0 | ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0; |
8297 | | /* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data() |
8298 | | * populates FD into ld_imm64 insn when it's used to point to kfunc. |
8299 | | * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call. |
8300 | | * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64. |
8301 | | */ |
8302 | 0 | ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0; |
8303 | 0 | pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n", |
8304 | 0 | ext->name, mod_btf ? mod_btf->name : "vmlinux", kfunc_id); |
8305 | |
|
8306 | 0 | return 0; |
8307 | 0 | } |
8308 | | |
8309 | | static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj) |
8310 | 0 | { |
8311 | 0 | const struct btf_type *t; |
8312 | 0 | struct extern_desc *ext; |
8313 | 0 | int i, err; |
8314 | |
|
8315 | 0 | for (i = 0; i < obj->nr_extern; i++) { |
8316 | 0 | ext = &obj->externs[i]; |
8317 | 0 | if (ext->type != EXT_KSYM || !ext->ksym.type_id) |
8318 | 0 | continue; |
8319 | | |
8320 | 0 | if (obj->gen_loader) { |
8321 | 0 | ext->is_set = true; |
8322 | 0 | ext->ksym.kernel_btf_obj_fd = 0; |
8323 | 0 | ext->ksym.kernel_btf_id = 0; |
8324 | 0 | continue; |
8325 | 0 | } |
8326 | 0 | t = btf__type_by_id(obj->btf, ext->btf_id); |
8327 | 0 | if (btf_is_var(t)) |
8328 | 0 | err = bpf_object__resolve_ksym_var_btf_id(obj, ext); |
8329 | 0 | else |
8330 | 0 | err = bpf_object__resolve_ksym_func_btf_id(obj, ext); |
8331 | 0 | if (err) |
8332 | 0 | return err; |
8333 | 0 | } |
8334 | 0 | return 0; |
8335 | 0 | } |
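 | | /* Editor's sketch of the BPF-program side (not part of libbpf.c): the
 | |  * typed ksym externs resolved above are declared with __ksym from
 | |  * bpf_helpers.h; "runqueues" and bpf_rcu_read_lock() are examples of a
 | |  * variable ksym and a kfunc, assuming the running kernel provides them:
 | |  *
 | |  *   extern const struct rq runqueues __ksym;           // BTF_KIND_VAR
 | |  *   extern void bpf_rcu_read_lock(void) __ksym __weak; // BTF_KIND_FUNC
 | |  */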
8336 | | |
8337 | | static int bpf_object__resolve_externs(struct bpf_object *obj, |
8338 | | const char *extra_kconfig) |
8339 | 0 | { |
8340 | 0 | bool need_config = false, need_kallsyms = false; |
8341 | 0 | bool need_vmlinux_btf = false; |
8342 | 0 | struct extern_desc *ext; |
8343 | 0 | void *kcfg_data = NULL; |
8344 | 0 | int err, i; |
8345 | |
|
8346 | 0 | if (obj->nr_extern == 0) |
8347 | 0 | return 0; |
8348 | | |
8349 | 0 | if (obj->kconfig_map_idx >= 0) |
8350 | 0 | kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped; |
8351 | |
|
8352 | 0 | for (i = 0; i < obj->nr_extern; i++) { |
8353 | 0 | ext = &obj->externs[i]; |
8354 | |
|
8355 | 0 | if (ext->type == EXT_KSYM) { |
8356 | 0 | if (ext->ksym.type_id) |
8357 | 0 | need_vmlinux_btf = true; |
8358 | 0 | else |
8359 | 0 | need_kallsyms = true; |
8360 | 0 | continue; |
8361 | 0 | } else if (ext->type == EXT_KCFG) { |
8362 | 0 | void *ext_ptr = kcfg_data + ext->kcfg.data_off; |
8363 | 0 | __u64 value = 0; |
8364 | | |
8365 | | /* Kconfig externs need actual /proc/config.gz */ |
8366 | 0 | if (str_has_pfx(ext->name, "CONFIG_")) { |
8367 | 0 | need_config = true; |
8368 | 0 | continue; |
8369 | 0 | } |
8370 | | |
8371 | | /* Virtual kcfg externs are handled specially by libbpf */
8372 | 0 | if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) { |
8373 | 0 | value = get_kernel_version(); |
8374 | 0 | if (!value) { |
8375 | 0 | pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name); |
8376 | 0 | return -EINVAL; |
8377 | 0 | } |
8378 | 0 | } else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) { |
8379 | 0 | value = kernel_supports(obj, FEAT_BPF_COOKIE); |
8380 | 0 | } else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) { |
8381 | 0 | value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER); |
8382 | 0 | } else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) { |
8383 | | /* Currently libbpf supports only CONFIG_- and LINUX_-prefixed
8384 | | * __kconfig externs, where the LINUX_ ones are virtual and filled out
8385 | | * by libbpf itself (their values don't come from Kconfig).
8386 | | * If a LINUX_xxx variable is not recognized by libbpf but is marked
8387 | | * __weak, it defaults to zero, just like CONFIG_xxx
8388 | | * externs.
8389 | | */
8390 | 0 | pr_warn("extern (kcfg) '%s': unrecognized virtual extern\n", ext->name); |
8391 | 0 | return -EINVAL; |
8392 | 0 | } |
8393 | | |
8394 | 0 | err = set_kcfg_value_num(ext, ext_ptr, value); |
8395 | 0 | if (err) |
8396 | 0 | return err; |
8397 | 0 | pr_debug("extern (kcfg) '%s': set to 0x%llx\n", |
8398 | 0 | ext->name, (long long)value); |
8399 | 0 | } else { |
8400 | 0 | pr_warn("extern '%s': unrecognized extern kind\n", ext->name); |
8401 | 0 | return -EINVAL; |
8402 | 0 | } |
8403 | 0 | } |
8404 | 0 | if (need_config && extra_kconfig) { |
8405 | 0 | err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data); |
8406 | 0 | if (err) |
8407 | 0 | return -EINVAL; |
8408 | 0 | need_config = false; |
8409 | 0 | for (i = 0; i < obj->nr_extern; i++) { |
8410 | 0 | ext = &obj->externs[i]; |
8411 | 0 | if (ext->type == EXT_KCFG && !ext->is_set) { |
8412 | 0 | need_config = true; |
8413 | 0 | break; |
8414 | 0 | } |
8415 | 0 | } |
8416 | 0 | } |
8417 | 0 | if (need_config) { |
8418 | 0 | err = bpf_object__read_kconfig_file(obj, kcfg_data); |
8419 | 0 | if (err) |
8420 | 0 | return -EINVAL; |
8421 | 0 | } |
8422 | 0 | if (need_kallsyms) { |
8423 | 0 | err = bpf_object__read_kallsyms_file(obj); |
8424 | 0 | if (err) |
8425 | 0 | return -EINVAL; |
8426 | 0 | } |
8427 | 0 | if (need_vmlinux_btf) { |
8428 | 0 | err = bpf_object__resolve_ksyms_btf_id(obj); |
8429 | 0 | if (err) |
8430 | 0 | return -EINVAL; |
8431 | 0 | } |
8432 | 0 | for (i = 0; i < obj->nr_extern; i++) { |
8433 | 0 | ext = &obj->externs[i]; |
8434 | |
|
8435 | 0 | if (!ext->is_set && !ext->is_weak) { |
8436 | 0 | pr_warn("extern '%s' (strong): not resolved\n", ext->name); |
8437 | 0 | return -ESRCH; |
8438 | 0 | } else if (!ext->is_set) { |
8439 | 0 | pr_debug("extern '%s' (weak): not resolved, defaulting to zero\n", |
8440 | 0 | ext->name); |
8441 | 0 | } |
8442 | 0 | } |
8443 | | |
8444 | 0 | return 0; |
8445 | 0 | } |
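 | | /* Editor's sketch of the BPF-program side (not part of libbpf.c): the
 | |  * kcfg externs resolved above are declared with __kconfig from
 | |  * bpf_helpers.h; CONFIG_HZ is an example of a real Kconfig value, the
 | |  * LINUX_* ones are the virtual externs filled in by libbpf itself:
 | |  *
 | |  *   extern unsigned long CONFIG_HZ __kconfig;
 | |  *   extern unsigned int LINUX_KERNEL_VERSION __kconfig;
 | |  *   extern bool LINUX_HAS_BPF_COOKIE __kconfig __weak; // 0 if unknown
 | |  */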
8446 | | |
8447 | | static void bpf_map_prepare_vdata(const struct bpf_map *map) |
8448 | 0 | { |
8449 | 0 | const struct btf_type *type; |
8450 | 0 | struct bpf_struct_ops *st_ops; |
8451 | 0 | __u32 i; |
8452 | |
|
8453 | 0 | st_ops = map->st_ops; |
8454 | 0 | type = btf__type_by_id(map->obj->btf, st_ops->type_id); |
8455 | 0 | for (i = 0; i < btf_vlen(type); i++) { |
8456 | 0 | struct bpf_program *prog = st_ops->progs[i]; |
8457 | 0 | void *kern_data; |
8458 | 0 | int prog_fd; |
8459 | |
|
8460 | 0 | if (!prog) |
8461 | 0 | continue; |
8462 | | |
8463 | 0 | prog_fd = bpf_program__fd(prog); |
8464 | 0 | kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i]; |
8465 | 0 | *(unsigned long *)kern_data = prog_fd; |
8466 | 0 | } |
8467 | 0 | } |
8468 | | |
8469 | | static int bpf_object_prepare_struct_ops(struct bpf_object *obj) |
8470 | 0 | { |
8471 | 0 | struct bpf_map *map; |
8472 | 0 | int i; |
8473 | |
|
8474 | 0 | for (i = 0; i < obj->nr_maps; i++) { |
8475 | 0 | map = &obj->maps[i]; |
8476 | |
|
8477 | 0 | if (!bpf_map__is_struct_ops(map)) |
8478 | 0 | continue; |
8479 | | |
8480 | 0 | if (!map->autocreate) |
8481 | 0 | continue; |
8482 | | |
8483 | 0 | bpf_map_prepare_vdata(map); |
8484 | 0 | } |
8485 | |
|
8486 | 0 | return 0; |
8487 | 0 | } |
8488 | | |
8489 | | static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path) |
8490 | 0 | { |
8491 | 0 | int err, i; |
8492 | |
|
8493 | 0 | if (!obj) |
8494 | 0 | return libbpf_err(-EINVAL); |
8495 | | |
8496 | 0 | if (obj->loaded) { |
8497 | 0 | pr_warn("object '%s': load can't be attempted twice\n", obj->name); |
8498 | 0 | return libbpf_err(-EINVAL); |
8499 | 0 | } |
8500 | | |
8501 | 0 | if (obj->gen_loader) |
8502 | 0 | bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps); |
8503 | |
|
8504 | 0 | err = bpf_object_prepare_token(obj); |
8505 | 0 | err = err ? : bpf_object__probe_loading(obj); |
8506 | 0 | err = err ? : bpf_object__load_vmlinux_btf(obj, false); |
8507 | 0 | err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); |
8508 | 0 | err = err ? : bpf_object__sanitize_maps(obj); |
8509 | 0 | err = err ? : bpf_object__init_kern_struct_ops_maps(obj); |
8510 | 0 | err = err ? : bpf_object_adjust_struct_ops_autoload(obj); |
8511 | 0 | err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path); |
8512 | 0 | err = err ? : bpf_object__sanitize_and_load_btf(obj); |
8513 | 0 | err = err ? : bpf_object__create_maps(obj); |
8514 | 0 | err = err ? : bpf_object__load_progs(obj, extra_log_level); |
8515 | 0 | err = err ? : bpf_object_init_prog_arrays(obj); |
8516 | 0 | err = err ? : bpf_object_prepare_struct_ops(obj); |
8517 | |
|
8518 | 0 | if (obj->gen_loader) { |
8519 | | /* reset FDs */ |
8520 | 0 | if (obj->btf) |
8521 | 0 | btf__set_fd(obj->btf, -1); |
8522 | 0 | if (!err) |
8523 | 0 | err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps); |
8524 | 0 | } |
8525 | | |
8526 | | /* clean up fd_array */ |
8527 | 0 | zfree(&obj->fd_array); |
8528 | | |
8529 | | /* clean up module BTFs */ |
8530 | 0 | for (i = 0; i < obj->btf_module_cnt; i++) { |
8531 | 0 | close(obj->btf_modules[i].fd); |
8532 | 0 | btf__free(obj->btf_modules[i].btf); |
8533 | 0 | free(obj->btf_modules[i].name); |
8534 | 0 | } |
8535 | 0 | free(obj->btf_modules); |
8536 | | |
8537 | | /* clean up vmlinux BTF */ |
8538 | 0 | btf__free(obj->btf_vmlinux); |
8539 | 0 | obj->btf_vmlinux = NULL; |
8540 | |
|
8541 | 0 | obj->loaded = true; /* regardless of whether the load succeeded */
8542 | |
|
8543 | 0 | if (err) |
8544 | 0 | goto out; |
8545 | | |
8546 | 0 | return 0; |
8547 | 0 | out: |
8548 | | /* unpin any maps that were auto-pinned during load */ |
8549 | 0 | for (i = 0; i < obj->nr_maps; i++) |
8550 | 0 | if (obj->maps[i].pinned && !obj->maps[i].reused) |
8551 | 0 | bpf_map__unpin(&obj->maps[i], NULL); |
8552 | |
|
8553 | 0 | bpf_object_unload(obj); |
8554 | 0 | pr_warn("failed to load object '%s'\n", obj->path); |
8555 | 0 | return libbpf_err(err); |
8556 | 0 | } |
8557 | | |
8558 | | int bpf_object__load(struct bpf_object *obj) |
8559 | 0 | { |
8560 | 0 | return bpf_object_load(obj, 0, NULL); |
8561 | 0 | } |
8562 | | |
8563 | | static int make_parent_dir(const char *path) |
8564 | 0 | { |
8565 | 0 | char *cp, errmsg[STRERR_BUFSIZE]; |
8566 | 0 | char *dname, *dir; |
8567 | 0 | int err = 0; |
8568 | |
|
8569 | 0 | dname = strdup(path); |
8570 | 0 | if (dname == NULL) |
8571 | 0 | return -ENOMEM; |
8572 | | |
8573 | 0 | dir = dirname(dname); |
8574 | 0 | if (mkdir(dir, 0700) && errno != EEXIST) |
8575 | 0 | err = -errno; |
8576 | |
|
8577 | 0 | free(dname); |
8578 | 0 | if (err) { |
8579 | 0 | cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); |
8580 | 0 | pr_warn("failed to mkdir %s: %s\n", path, cp); |
8581 | 0 | } |
8582 | 0 | return err; |
8583 | 0 | } |
8584 | | |
8585 | | static int check_path(const char *path) |
8586 | 0 | { |
8587 | 0 | char *cp, errmsg[STRERR_BUFSIZE]; |
8588 | 0 | struct statfs st_fs; |
8589 | 0 | char *dname, *dir; |
8590 | 0 | int err = 0; |
8591 | |
|
8592 | 0 | if (path == NULL) |
8593 | 0 | return -EINVAL; |
8594 | | |
8595 | 0 | dname = strdup(path); |
8596 | 0 | if (dname == NULL) |
8597 | 0 | return -ENOMEM; |
8598 | | |
8599 | 0 | dir = dirname(dname); |
8600 | 0 | if (statfs(dir, &st_fs)) { |
8601 | 0 | cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); |
8602 | 0 | pr_warn("failed to statfs %s: %s\n", dir, cp); |
8603 | 0 | err = -errno; |
8604 | 0 | } |
8605 | 0 | free(dname); |
8606 | |
|
8607 | 0 | if (!err && st_fs.f_type != BPF_FS_MAGIC) { |
8608 | 0 | pr_warn("specified path %s is not on BPF FS\n", path); |
8609 | 0 | err = -EINVAL; |
8610 | 0 | } |
8611 | |
|
8612 | 0 | return err; |
8613 | 0 | } |
8614 | | |
8615 | | int bpf_program__pin(struct bpf_program *prog, const char *path) |
8616 | 0 | { |
8617 | 0 | char *cp, errmsg[STRERR_BUFSIZE]; |
8618 | 0 | int err; |
8619 | |
|
8620 | 0 | if (prog->fd < 0) { |
8621 | 0 | pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name); |
8622 | 0 | return libbpf_err(-EINVAL); |
8623 | 0 | } |
8624 | | |
8625 | 0 | err = make_parent_dir(path); |
8626 | 0 | if (err) |
8627 | 0 | return libbpf_err(err); |
8628 | | |
8629 | 0 | err = check_path(path); |
8630 | 0 | if (err) |
8631 | 0 | return libbpf_err(err); |
8632 | | |
8633 | 0 | if (bpf_obj_pin(prog->fd, path)) { |
8634 | 0 | err = -errno; |
8635 | 0 | cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); |
8636 | 0 | pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, cp); |
8637 | 0 | return libbpf_err(err); |
8638 | 0 | } |
8639 | | |
8640 | 0 | pr_debug("prog '%s': pinned at '%s'\n", prog->name, path); |
8641 | 0 | return 0; |
8642 | 0 | } |
8643 | | |
8644 | | int bpf_program__unpin(struct bpf_program *prog, const char *path) |
8645 | 0 | { |
8646 | 0 | int err; |
8647 | |
|
8648 | 0 | if (prog->fd < 0) { |
8649 | 0 | pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name); |
8650 | 0 | return libbpf_err(-EINVAL); |
8651 | 0 | } |
8652 | | |
8653 | 0 | err = check_path(path); |
8654 | 0 | if (err) |
8655 | 0 | return libbpf_err(err); |
8656 | | |
8657 | 0 | err = unlink(path); |
8658 | 0 | if (err) |
8659 | 0 | return libbpf_err(-errno); |
8660 | | |
8661 | 0 | pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path); |
8662 | 0 | return 0; |
8663 | 0 | } |
8664 | | |
8665 | | int bpf_map__pin(struct bpf_map *map, const char *path) |
8666 | 0 | { |
8667 | 0 | char *cp, errmsg[STRERR_BUFSIZE]; |
8668 | 0 | int err; |
8669 | |
|
8670 | 0 | if (map == NULL) { |
8671 | 0 | pr_warn("invalid map pointer\n"); |
8672 | 0 | return libbpf_err(-EINVAL); |
8673 | 0 | } |
8674 | | |
8675 | 0 | if (map->fd < 0) { |
8676 | 0 | pr_warn("map '%s': can't pin BPF map without FD (was it created?)\n", map->name); |
8677 | 0 | return libbpf_err(-EINVAL); |
8678 | 0 | } |
8679 | | |
8680 | 0 | if (map->pin_path) { |
8681 | 0 | if (path && strcmp(path, map->pin_path)) { |
8682 | 0 | pr_warn("map '%s' already has pin path '%s' different from '%s'\n", |
8683 | 0 | bpf_map__name(map), map->pin_path, path); |
8684 | 0 | return libbpf_err(-EINVAL); |
8685 | 0 | } else if (map->pinned) { |
8686 | 0 | pr_debug("map '%s' already pinned at '%s'; not re-pinning\n", |
8687 | 0 | bpf_map__name(map), map->pin_path); |
8688 | 0 | return 0; |
8689 | 0 | } |
8690 | 0 | } else { |
8691 | 0 | if (!path) { |
8692 | 0 | pr_warn("missing a path to pin map '%s' at\n", |
8693 | 0 | bpf_map__name(map)); |
8694 | 0 | return libbpf_err(-EINVAL); |
8695 | 0 | } else if (map->pinned) { |
8696 | 0 | pr_warn("map '%s' already pinned\n", bpf_map__name(map)); |
8697 | 0 | return libbpf_err(-EEXIST); |
8698 | 0 | } |
8699 | | |
8700 | 0 | map->pin_path = strdup(path); |
8701 | 0 | if (!map->pin_path) { |
8702 | 0 | err = -errno; |
8703 | 0 | goto out_err; |
8704 | 0 | } |
8705 | 0 | } |
8706 | | |
8707 | 0 | err = make_parent_dir(map->pin_path); |
8708 | 0 | if (err) |
8709 | 0 | return libbpf_err(err); |
8710 | | |
8711 | 0 | err = check_path(map->pin_path); |
8712 | 0 | if (err) |
8713 | 0 | return libbpf_err(err); |
8714 | | |
8715 | 0 | if (bpf_obj_pin(map->fd, map->pin_path)) { |
8716 | 0 | err = -errno; |
8717 | 0 | goto out_err; |
8718 | 0 | } |
8719 | | |
8720 | 0 | map->pinned = true; |
8721 | 0 | pr_debug("pinned map '%s'\n", map->pin_path); |
8722 | |
|
8723 | 0 | return 0; |
8724 | | |
8725 | 0 | out_err: |
8726 | 0 | cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); |
8727 | 0 | pr_warn("failed to pin map: %s\n", cp); |
8728 | 0 | return libbpf_err(err); |
8729 | 0 | } |
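 | | /* Editor's usage sketch (not part of libbpf.c): pinning a map under the
 | |  * default bpffs mount checked by check_path() above; "my_map" is a
 | |  * placeholder map name:
 | |  */
 | | static int example_pin_map(struct bpf_object *obj)
 | | {
 | | 	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 | | 
 | | 	if (!map)
 | | 		return -ESRCH;
 | | 	return bpf_map__pin(map, "/sys/fs/bpf/my_map");
 | | }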
8730 | | |
8731 | | int bpf_map__unpin(struct bpf_map *map, const char *path) |
8732 | 0 | { |
8733 | 0 | int err; |
8734 | |
|
8735 | 0 | if (map == NULL) { |
8736 | 0 | pr_warn("invalid map pointer\n"); |
8737 | 0 | return libbpf_err(-EINVAL); |
8738 | 0 | } |
8739 | | |
8740 | 0 | if (map->pin_path) { |
8741 | 0 | if (path && strcmp(path, map->pin_path)) { |
8742 | 0 | pr_warn("map '%s' already has pin path '%s' different from '%s'\n", |
8743 | 0 | bpf_map__name(map), map->pin_path, path); |
8744 | 0 | return libbpf_err(-EINVAL); |
8745 | 0 | } |
8746 | 0 | path = map->pin_path; |
8747 | 0 | } else if (!path) { |
8748 | 0 | pr_warn("no path to unpin map '%s' from\n", |
8749 | 0 | bpf_map__name(map)); |
8750 | 0 | return libbpf_err(-EINVAL); |
8751 | 0 | } |
8752 | | |
8753 | 0 | err = check_path(path); |
8754 | 0 | if (err) |
8755 | 0 | return libbpf_err(err); |
8756 | | |
8757 | 0 | err = unlink(path); |
8758 | 0 | if (err != 0) |
8759 | 0 | return libbpf_err(-errno); |
8760 | | |
8761 | 0 | map->pinned = false; |
8762 | 0 | pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path); |
8763 | |
|
8764 | 0 | return 0; |
8765 | 0 | } |
8766 | | |
8767 | | int bpf_map__set_pin_path(struct bpf_map *map, const char *path) |
8768 | 1 | { |
8769 | 1 | char *new = NULL; |
8770 | | |
8771 | 1 | if (path) { |
8772 | 1 | new = strdup(path); |
8773 | 1 | if (!new) |
8774 | 0 | return libbpf_err(-errno); |
8775 | 1 | } |
8776 | | |
8777 | 1 | free(map->pin_path); |
8778 | 1 | map->pin_path = new; |
8779 | 1 | return 0; |
8780 | 1 | } |
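/* Sketch: calling bpf_map__set_pin_path() before load opts the map into
 * libbpf's pin-or-reuse flow -- at load time a compatible map already
 * pinned at that path is reused, otherwise the freshly created map is
 * pinned there. Map name and path are assumptions.
 */
static int set_pin_before_load(struct bpf_object *obj)
{
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
	int err;

	if (!map)
		return -ENOENT;
	err = bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
	if (err)
		return err;
	return bpf_object__load(obj);
}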
8781 | | |
8782 | | __alias(bpf_map__pin_path) |
8783 | | const char *bpf_map__get_pin_path(const struct bpf_map *map); |
8784 | | |
8785 | | const char *bpf_map__pin_path(const struct bpf_map *map) |
8786 | 0 | { |
8787 | 0 | return map->pin_path; |
8788 | 0 | } |
8789 | | |
8790 | | bool bpf_map__is_pinned(const struct bpf_map *map) |
8791 | 0 | { |
8792 | 0 | return map->pinned; |
8793 | 0 | } |
8794 | | |
8795 | | static void sanitize_pin_path(char *s) |
8796 | 0 | { |
8797 | | /* bpffs disallows periods in path names */ |
8798 | 0 | while (*s) { |
8799 | 0 | if (*s == '.') |
8800 | 0 | *s = '_'; |
8801 | 0 | s++; |
8802 | 0 | } |
8803 | 0 | } |
8804 | | |
8805 | | int bpf_object__pin_maps(struct bpf_object *obj, const char *path) |
8806 | 0 | { |
8807 | 0 | struct bpf_map *map; |
8808 | 0 | int err; |
8809 | |
8810 | 0 | if (!obj) |
8811 | 0 | return libbpf_err(-ENOENT); |
8812 | | |
8813 | 0 | if (!obj->loaded) { |
8814 | 0 | pr_warn("object not yet loaded; load it first\n"); |
8815 | 0 | return libbpf_err(-ENOENT); |
8816 | 0 | } |
8817 | | |
8818 | 0 | bpf_object__for_each_map(map, obj) { |
8819 | 0 | char *pin_path = NULL; |
8820 | 0 | char buf[PATH_MAX]; |
8821 | |
8822 | 0 | if (!map->autocreate) |
8823 | 0 | continue; |
8824 | | |
8825 | 0 | if (path) { |
8826 | 0 | err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map)); |
8827 | 0 | if (err) |
8828 | 0 | goto err_unpin_maps; |
8829 | 0 | sanitize_pin_path(buf); |
8830 | 0 | pin_path = buf; |
8831 | 0 | } else if (!map->pin_path) { |
8832 | 0 | continue; |
8833 | 0 | } |
8834 | | |
8835 | 0 | err = bpf_map__pin(map, pin_path); |
8836 | 0 | if (err) |
8837 | 0 | goto err_unpin_maps; |
8838 | 0 | } |
8839 | | |
8840 | 0 | return 0; |
8841 | | |
8842 | 0 | err_unpin_maps: |
8843 | 0 | while ((map = bpf_object__prev_map(obj, map))) { |
8844 | 0 | if (!map->pin_path) |
8845 | 0 | continue; |
8846 | | |
8847 | 0 | bpf_map__unpin(map, NULL); |
8848 | 0 | } |
8849 | |
8850 | 0 | return libbpf_err(err); |
8851 | 0 | } |
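/* Sketch: pin every auto-created map of a loaded object under one bpffs
 * directory (the directory is an assumption). Map names are sanitized as
 * above, since bpffs rejects '.' in path names, and a mid-way failure
 * rolls back by unpinning the maps pinned so far.
 */
static int pin_all_maps(struct bpf_object *obj)
{
	int err = bpf_object__load(obj); /* maps must exist before pinning */

	if (err)
		return err;
	return bpf_object__pin_maps(obj, "/sys/fs/bpf/myapp");
}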
8852 | | |
8853 | | int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) |
8854 | 0 | { |
8855 | 0 | struct bpf_map *map; |
8856 | 0 | int err; |
8857 | |
8858 | 0 | if (!obj) |
8859 | 0 | return libbpf_err(-ENOENT); |
8860 | | |
8861 | 0 | bpf_object__for_each_map(map, obj) { |
8862 | 0 | char *pin_path = NULL; |
8863 | 0 | char buf[PATH_MAX]; |
8864 | |
8865 | 0 | if (path) { |
8866 | 0 | err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map)); |
8867 | 0 | if (err) |
8868 | 0 | return libbpf_err(err); |
8869 | 0 | sanitize_pin_path(buf); |
8870 | 0 | pin_path = buf; |
8871 | 0 | } else if (!map->pin_path) { |
8872 | 0 | continue; |
8873 | 0 | } |
8874 | | |
8875 | 0 | err = bpf_map__unpin(map, pin_path); |
8876 | 0 | if (err) |
8877 | 0 | return libbpf_err(err); |
8878 | 0 | } |
8879 | | |
8880 | 0 | return 0; |
8881 | 0 | } |
8882 | | |
8883 | | int bpf_object__pin_programs(struct bpf_object *obj, const char *path) |
8884 | 0 | { |
8885 | 0 | struct bpf_program *prog; |
8886 | 0 | char buf[PATH_MAX]; |
8887 | 0 | int err; |
8888 | |
8889 | 0 | if (!obj) |
8890 | 0 | return libbpf_err(-ENOENT); |
8891 | | |
8892 | 0 | if (!obj->loaded) { |
8893 | 0 | pr_warn("object not yet loaded; load it first\n"); |
8894 | 0 | return libbpf_err(-ENOENT); |
8895 | 0 | } |
8896 | | |
8897 | 0 | bpf_object__for_each_program(prog, obj) { |
8898 | 0 | err = pathname_concat(buf, sizeof(buf), path, prog->name); |
8899 | 0 | if (err) |
8900 | 0 | goto err_unpin_programs; |
8901 | | |
8902 | 0 | err = bpf_program__pin(prog, buf); |
8903 | 0 | if (err) |
8904 | 0 | goto err_unpin_programs; |
8905 | 0 | } |
8906 | | |
8907 | 0 | return 0; |
8908 | | |
8909 | 0 | err_unpin_programs: |
8910 | 0 | while ((prog = bpf_object__prev_program(obj, prog))) { |
8911 | 0 | if (pathname_concat(buf, sizeof(buf), path, prog->name)) |
8912 | 0 | continue; |
8913 | | |
8914 | 0 | bpf_program__unpin(prog, buf); |
8915 | 0 | } |
8916 | |
8917 | 0 | return libbpf_err(err); |
8918 | 0 | } |
8919 | | |
8920 | | int bpf_object__unpin_programs(struct bpf_object *obj, const char *path) |
8921 | 0 | { |
8922 | 0 | struct bpf_program *prog; |
8923 | 0 | int err; |
8924 | |
8925 | 0 | if (!obj) |
8926 | 0 | return libbpf_err(-ENOENT); |
8927 | | |
8928 | 0 | bpf_object__for_each_program(prog, obj) { |
8929 | 0 | char buf[PATH_MAX]; |
8930 | |
8931 | 0 | err = pathname_concat(buf, sizeof(buf), path, prog->name); |
8932 | 0 | if (err) |
8933 | 0 | return libbpf_err(err); |
8934 | | |
8935 | 0 | err = bpf_program__unpin(prog, buf); |
8936 | 0 | if (err) |
8937 | 0 | return libbpf_err(err); |
8938 | 0 | } |
8939 | | |
8940 | 0 | return 0; |
8941 | 0 | } |
8942 | | |
8943 | | int bpf_object__pin(struct bpf_object *obj, const char *path) |
8944 | 0 | { |
8945 | 0 | int err; |
8946 | |
8947 | 0 | err = bpf_object__pin_maps(obj, path); |
8948 | 0 | if (err) |
8949 | 0 | return libbpf_err(err); |
8950 | | |
8951 | 0 | err = bpf_object__pin_programs(obj, path); |
8952 | 0 | if (err) { |
8953 | 0 | bpf_object__unpin_maps(obj, path); |
8954 | 0 | return libbpf_err(err); |
8955 | 0 | } |
8956 | | |
8957 | 0 | return 0; |
8958 | 0 | } |
8959 | | |
8960 | | int bpf_object__unpin(struct bpf_object *obj, const char *path) |
8961 | 0 | { |
8962 | 0 | int err; |
8963 | |
8964 | 0 | err = bpf_object__unpin_programs(obj, path); |
8965 | 0 | if (err) |
8966 | 0 | return libbpf_err(err); |
8967 | | |
8968 | 0 | err = bpf_object__unpin_maps(obj, path); |
8969 | 0 | if (err) |
8970 | 0 | return libbpf_err(err); |
8971 | | |
8972 | 0 | return 0; |
8973 | 0 | } |
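/* Sketch of the combined lifecycle: bpf_object__pin() pins maps first and
 * rolls them back if pinning programs fails; bpf_object__unpin() removes
 * programs, then maps. The directory is an assumption.
 */
static void pin_unpin_cycle(struct bpf_object *obj)
{
	if (!bpf_object__pin(obj, "/sys/fs/bpf/myapp"))
		bpf_object__unpin(obj, "/sys/fs/bpf/myapp");
}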
8974 | | |
8975 | | static void bpf_map__destroy(struct bpf_map *map) |
8976 | 2.92k | { |
8977 | 2.92k | if (map->inner_map) { |
8978 | 0 | bpf_map__destroy(map->inner_map); |
8979 | 0 | zfree(&map->inner_map); |
8980 | 0 | } |
8981 | | |
8982 | 2.92k | zfree(&map->init_slots); |
8983 | 2.92k | map->init_slots_sz = 0; |
8984 | | |
8985 | 2.92k | if (map->mmaped && map->mmaped != map->obj->arena_data) |
8986 | 1.59k | munmap(map->mmaped, bpf_map_mmap_sz(map)); |
8987 | 2.92k | map->mmaped = NULL; |
8988 | | |
8989 | 2.92k | if (map->st_ops) { |
8990 | 71 | zfree(&map->st_ops->data); |
8991 | 71 | zfree(&map->st_ops->progs); |
8992 | 71 | zfree(&map->st_ops->kern_func_off); |
8993 | 71 | zfree(&map->st_ops); |
8994 | 71 | } |
8995 | | |
8996 | 2.92k | zfree(&map->name); |
8997 | 2.92k | zfree(&map->real_name); |
8998 | 2.92k | zfree(&map->pin_path); |
8999 | | |
9000 | 2.92k | if (map->fd >= 0) |
9001 | 0 | zclose(map->fd); |
9002 | 2.92k | } |
9003 | | |
9004 | | void bpf_object__close(struct bpf_object *obj) |
9005 | 11.7k | { |
9006 | 11.7k | size_t i; |
9007 | | |
9008 | 11.7k | if (IS_ERR_OR_NULL(obj)) |
9009 | 0 | return; |
9010 | | |
9011 | 11.7k | usdt_manager_free(obj->usdt_man); |
9012 | 11.7k | obj->usdt_man = NULL; |
9013 | | |
9014 | 11.7k | bpf_gen__free(obj->gen_loader); |
9015 | 11.7k | bpf_object__elf_finish(obj); |
9016 | 11.7k | bpf_object_unload(obj); |
9017 | 11.7k | btf__free(obj->btf); |
9018 | 11.7k | btf__free(obj->btf_vmlinux); |
9019 | 11.7k | btf_ext__free(obj->btf_ext); |
9020 | | |
9021 | 14.6k | for (i = 0; i < obj->nr_maps; i++) |
9022 | 2.92k | bpf_map__destroy(&obj->maps[i]); |
9023 | | |
9024 | 11.7k | zfree(&obj->btf_custom_path); |
9025 | 11.7k | zfree(&obj->kconfig); |
9026 | | |
9027 | 16.4k | for (i = 0; i < obj->nr_extern; i++) |
9028 | 4.71k | zfree(&obj->externs[i].essent_name); |
9029 | | |
9030 | 11.7k | zfree(&obj->externs); |
9031 | 11.7k | obj->nr_extern = 0; |
9032 | | |
9033 | 11.7k | zfree(&obj->maps); |
9034 | 11.7k | obj->nr_maps = 0; |
9035 | | |
9036 | 11.7k | if (obj->programs && obj->nr_programs) { |
9037 | 9.83k | for (i = 0; i < obj->nr_programs; i++) |
9038 | 9.15k | bpf_program__exit(&obj->programs[i]); |
9039 | 683 | } |
9040 | 11.7k | zfree(&obj->programs); |
9041 | | |
9042 | 11.7k | zfree(&obj->feat_cache); |
9043 | 11.7k | zfree(&obj->token_path); |
9044 | 11.7k | if (obj->token_fd > 0) |
9045 | 0 | close(obj->token_fd); |
9046 | | |
9047 | 11.7k | zfree(&obj->arena_data); |
9048 | | |
9049 | 11.7k | free(obj); |
9050 | 11.7k | } |
9051 | | |
9052 | | const char *bpf_object__name(const struct bpf_object *obj) |
9053 | 0 | { |
9054 | 0 | return obj ? obj->name : libbpf_err_ptr(-EINVAL); |
9055 | 0 | } |
9056 | | |
9057 | | unsigned int bpf_object__kversion(const struct bpf_object *obj) |
9058 | 0 | { |
9059 | 0 | return obj ? obj->kern_version : 0; |
9060 | 0 | } |
9061 | | |
9062 | | struct btf *bpf_object__btf(const struct bpf_object *obj) |
9063 | 0 | { |
9064 | 0 | return obj ? obj->btf : NULL; |
9065 | 0 | } |
9066 | | |
9067 | | int bpf_object__btf_fd(const struct bpf_object *obj) |
9068 | 0 | { |
9069 | 0 | return obj->btf ? btf__fd(obj->btf) : -1; |
9070 | 0 | } |
9071 | | |
9072 | | int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version) |
9073 | 0 | { |
9074 | 0 | if (obj->loaded) |
9075 | 0 | return libbpf_err(-EINVAL); |
9076 | | |
9077 | 0 | obj->kern_version = kern_version; |
9078 | |
9079 | 0 | return 0; |
9080 | 0 | } |
9081 | | |
9082 | | int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts) |
9083 | 0 | { |
9084 | 0 | struct bpf_gen *gen; |
9085 | |
9086 | 0 | if (!opts) |
9087 | 0 | return -EFAULT; |
9088 | 0 | if (!OPTS_VALID(opts, gen_loader_opts)) |
9089 | 0 | return -EINVAL; |
9090 | 0 | gen = calloc(1, sizeof(*gen));
9091 | 0 | if (!gen) |
9092 | 0 | return -ENOMEM; |
9093 | 0 | gen->opts = opts; |
9094 | 0 | obj->gen_loader = gen; |
9095 | 0 | return 0; |
9096 | 0 | } |
9097 | | |
9098 | | static struct bpf_program * |
9099 | | __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj, |
9100 | | bool forward) |
9101 | 11.0k | { |
9102 | 11.0k | size_t nr_programs = obj->nr_programs; |
9103 | 11.0k | ssize_t idx; |
9104 | | |
9105 | 11.0k | if (!nr_programs) |
9106 | 1.67k | return NULL; |
9107 | | |
9108 | 9.41k | if (!p) |
9109 | | /* Iter from the beginning */ |
9110 | 627 | return forward ? &obj->programs[0] : |
9111 | 627 | &obj->programs[nr_programs - 1]; |
9112 | | |
9113 | 8.78k | if (p->obj != obj) { |
9114 | 0 | pr_warn("error: program handler doesn't match object\n"); |
9115 | 0 | return errno = EINVAL, NULL; |
9116 | 0 | } |
9117 | | |
9118 | 8.78k | idx = (p - obj->programs) + (forward ? 1 : -1); |
9119 | 8.78k | if (idx >= obj->nr_programs || idx < 0) |
9120 | 627 | return NULL; |
9121 | 8.16k | return &obj->programs[idx]; |
9122 | 8.78k | } |
9123 | | |
9124 | | struct bpf_program * |
9125 | | bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev) |
9126 | 9.81k | { |
9127 | 9.81k | struct bpf_program *prog = prev; |
9128 | | |
9129 | 11.0k | do { |
9130 | 11.0k | prog = __bpf_program__iter(prog, obj, true); |
9131 | 11.0k | } while (prog && prog_is_subprog(obj, prog)); |
9132 | | |
9133 | 9.81k | return prog; |
9134 | 9.81k | } |
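/* Sketch: bpf_object__next_program() visits only entry-point programs
 * (subprograms are filtered out above); this is the primitive the
 * bpf_object__for_each_program() convenience macro is built on.
 */
static void list_entry_progs(const struct bpf_object *obj)
{
	struct bpf_program *prog = NULL;

	while ((prog = bpf_object__next_program(obj, prog)))
		printf("%s (SEC %s)\n", bpf_program__name(prog),
		       bpf_program__section_name(prog));
}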
9135 | | |
9136 | | struct bpf_program * |
9137 | | bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next) |
9138 | 0 | { |
9139 | 0 | struct bpf_program *prog = next; |
9140 | |
9141 | 0 | do { |
9142 | 0 | prog = __bpf_program__iter(prog, obj, false); |
9143 | 0 | } while (prog && prog_is_subprog(obj, prog)); |
9144 | |
9145 | 0 | return prog; |
9146 | 0 | } |
9147 | | |
9148 | | void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex) |
9149 | 0 | { |
9150 | 0 | prog->prog_ifindex = ifindex; |
9151 | 0 | } |
9152 | | |
9153 | | const char *bpf_program__name(const struct bpf_program *prog) |
9154 | 0 | { |
9155 | 0 | return prog->name; |
9156 | 0 | } |
9157 | | |
9158 | | const char *bpf_program__section_name(const struct bpf_program *prog) |
9159 | 0 | { |
9160 | 0 | return prog->sec_name; |
9161 | 0 | } |
9162 | | |
9163 | | bool bpf_program__autoload(const struct bpf_program *prog) |
9164 | 0 | { |
9165 | 0 | return prog->autoload; |
9166 | 0 | } |
9167 | | |
9168 | | int bpf_program__set_autoload(struct bpf_program *prog, bool autoload) |
9169 | 0 | { |
9170 | 0 | if (prog->obj->loaded) |
9171 | 0 | return libbpf_err(-EINVAL); |
9172 | | |
9173 | 0 | prog->autoload = autoload; |
9174 | 0 | return 0; |
9175 | 0 | } |
9176 | | |
9177 | | bool bpf_program__autoattach(const struct bpf_program *prog) |
9178 | 0 | { |
9179 | 0 | return prog->autoattach; |
9180 | 0 | } |
9181 | | |
9182 | | void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach) |
9183 | 0 | { |
9184 | 0 | prog->autoattach = autoattach; |
9185 | 0 | } |
9186 | | |
9187 | | const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog) |
9188 | 0 | { |
9189 | 0 | return prog->insns; |
9190 | 0 | } |
9191 | | |
9192 | | size_t bpf_program__insn_cnt(const struct bpf_program *prog) |
9193 | 0 | { |
9194 | 0 | return prog->insns_cnt; |
9195 | 0 | } |
9196 | | |
9197 | | int bpf_program__set_insns(struct bpf_program *prog, |
9198 | | struct bpf_insn *new_insns, size_t new_insn_cnt) |
9199 | 0 | { |
9200 | 0 | struct bpf_insn *insns; |
9201 | |
9202 | 0 | if (prog->obj->loaded) |
9203 | 0 | return -EBUSY; |
9204 | | |
9205 | 0 | insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns)); |
9206 | | /* NULL is a valid return from reallocarray if the new count is zero */ |
9207 | 0 | if (!insns && new_insn_cnt) { |
9208 | 0 | pr_warn("prog '%s': failed to realloc prog code\n", prog->name); |
9209 | 0 | return -ENOMEM; |
9210 | 0 | } |
9211 | 0 | memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns)); |
9212 | |
9213 | 0 | prog->insns = insns; |
9214 | 0 | prog->insns_cnt = new_insn_cnt; |
9215 | 0 | return 0; |
9216 | 0 | } |
9217 | | |
9218 | | int bpf_program__fd(const struct bpf_program *prog) |
9219 | 0 | { |
9220 | 0 | if (!prog) |
9221 | 0 | return libbpf_err(-EINVAL); |
9222 | | |
9223 | 0 | if (prog->fd < 0) |
9224 | 0 | return libbpf_err(-ENOENT); |
9225 | | |
9226 | 0 | return prog->fd; |
9227 | 0 | } |
9228 | | |
9229 | | __alias(bpf_program__type) |
9230 | | enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog); |
9231 | | |
9232 | | enum bpf_prog_type bpf_program__type(const struct bpf_program *prog) |
9233 | 0 | { |
9234 | 0 | return prog->type; |
9235 | 0 | } |
9236 | | |
9237 | | static size_t custom_sec_def_cnt; |
9238 | | static struct bpf_sec_def *custom_sec_defs; |
9239 | | static struct bpf_sec_def custom_fallback_def; |
9240 | | static bool has_custom_fallback_def; |
9241 | | static int last_custom_sec_def_handler_id; |
9242 | | |
9243 | | int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type) |
9244 | 0 | { |
9245 | 0 | if (prog->obj->loaded) |
9246 | 0 | return libbpf_err(-EBUSY); |
9247 | | |
9248 | | /* if type is not changed, do nothing */ |
9249 | 0 | if (prog->type == type) |
9250 | 0 | return 0; |
9251 | | |
9252 | 0 | prog->type = type; |
9253 | | |
9254 | | /* If a program type was changed, we need to reset associated SEC() |
9255 | | * handler, as it will be invalid now. The only exception is a generic |
9256 | | * fallback handler, which by definition is program type-agnostic and |
9257 | | * is a catch-all custom handler, optionally set by the application, |
9258 | | * so should be able to handle any type of BPF program. |
9259 | | */ |
9260 | 0 | if (prog->sec_def != &custom_fallback_def) |
9261 | 0 | prog->sec_def = NULL; |
9262 | 0 | return 0; |
9263 | 0 | } |
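/* Sketch: overriding the program type before load; since this resets the
 * SEC()-derived handler, the expected attach type typically needs to be
 * set explicitly as well. The chosen types are illustrative assumptions.
 */
static int force_fentry(struct bpf_program *prog)
{
	int err = bpf_program__set_type(prog, BPF_PROG_TYPE_TRACING);

	if (err)
		return err;
	return bpf_program__set_expected_attach_type(prog, BPF_TRACE_FENTRY);
}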
9264 | | |
9265 | | __alias(bpf_program__expected_attach_type) |
9266 | | enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog); |
9267 | | |
9268 | | enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog) |
9269 | 0 | { |
9270 | 0 | return prog->expected_attach_type; |
9271 | 0 | } |
9272 | | |
9273 | | int bpf_program__set_expected_attach_type(struct bpf_program *prog, |
9274 | | enum bpf_attach_type type) |
9275 | 0 | { |
9276 | 0 | if (prog->obj->loaded) |
9277 | 0 | return libbpf_err(-EBUSY); |
9278 | | |
9279 | 0 | prog->expected_attach_type = type; |
9280 | 0 | return 0; |
9281 | 0 | } |
9282 | | |
9283 | | __u32 bpf_program__flags(const struct bpf_program *prog) |
9284 | 0 | { |
9285 | 0 | return prog->prog_flags; |
9286 | 0 | } |
9287 | | |
9288 | | int bpf_program__set_flags(struct bpf_program *prog, __u32 flags) |
9289 | 0 | { |
9290 | 0 | if (prog->obj->loaded) |
9291 | 0 | return libbpf_err(-EBUSY); |
9292 | | |
9293 | 0 | prog->prog_flags = flags; |
9294 | 0 | return 0; |
9295 | 0 | } |
9296 | | |
9297 | | __u32 bpf_program__log_level(const struct bpf_program *prog) |
9298 | 0 | { |
9299 | 0 | return prog->log_level; |
9300 | 0 | } |
9301 | | |
9302 | | int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level) |
9303 | 0 | { |
9304 | 0 | if (prog->obj->loaded) |
9305 | 0 | return libbpf_err(-EBUSY); |
9306 | | |
9307 | 0 | prog->log_level = log_level; |
9308 | 0 | return 0; |
9309 | 0 | } |
9310 | | |
9311 | | const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size) |
9312 | 0 | { |
9313 | 0 | *log_size = prog->log_size; |
9314 | 0 | return prog->log_buf; |
9315 | 0 | } |
9316 | | |
9317 | | int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size) |
9318 | 0 | { |
9319 | 0 | if (log_size && !log_buf) |
9320 | 0 | return -EINVAL; |
9321 | 0 | if (prog->log_size > UINT_MAX) |
9322 | 0 | return -EINVAL; |
9323 | 0 | if (prog->obj->loaded) |
9324 | 0 | return -EBUSY; |
9325 | | |
9326 | 0 | prog->log_buf = log_buf; |
9327 | 0 | prog->log_size = log_size; |
9328 | 0 | return 0; |
9329 | 0 | } |
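/* Sketch: routing one program's verifier log into a caller-owned buffer;
 * both setters must run before the object is loaded. The buffer size is
 * an arbitrary assumption.
 */
static char verifier_log[64 * 1024];

static int capture_verifier_log(struct bpf_program *prog)
{
	int err = bpf_program__set_log_level(prog, 1);

	if (err)
		return err;
	return bpf_program__set_log_buf(prog, verifier_log,
					sizeof(verifier_log));
}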
9330 | | |
9331 | | #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \ |
9332 | | .sec = (char *)sec_pfx, \ |
9333 | | .prog_type = BPF_PROG_TYPE_##ptype, \ |
9334 | | .expected_attach_type = atype, \ |
9335 | | .cookie = (long)(flags), \ |
9336 | | .prog_prepare_load_fn = libbpf_prepare_prog_load, \ |
9337 | | __VA_ARGS__ \ |
9338 | | } |
9339 | | |
9340 | | static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9341 | | static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9342 | | static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9343 | | static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9344 | | static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9345 | | static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9346 | | static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9347 | | static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9348 | | static int attach_kprobe_session(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9349 | | static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9350 | | static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9351 | | static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link); |
9352 | | |
9353 | | static const struct bpf_sec_def section_defs[] = { |
9354 | | SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE), |
9355 | | SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE), |
9356 | | SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE), |
9357 | | SEC_DEF("kprobe+", KPROBE, 0, SEC_NONE, attach_kprobe), |
9358 | | SEC_DEF("uprobe+", KPROBE, 0, SEC_NONE, attach_uprobe), |
9359 | | SEC_DEF("uprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe), |
9360 | | SEC_DEF("kretprobe+", KPROBE, 0, SEC_NONE, attach_kprobe), |
9361 | | SEC_DEF("uretprobe+", KPROBE, 0, SEC_NONE, attach_uprobe), |
9362 | | SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe), |
9363 | | SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), |
9364 | | SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), |
9365 | | SEC_DEF("kprobe.session+", KPROBE, BPF_TRACE_KPROBE_SESSION, SEC_NONE, attach_kprobe_session), |
9366 | | SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi), |
9367 | | SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi), |
9368 | | SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), |
9369 | | SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), |
9370 | | SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), |
9371 | | SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), |
9372 | | SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt), |
9373 | | SEC_DEF("usdt.s+", KPROBE, 0, SEC_USDT | SEC_SLEEPABLE, attach_usdt), |
9374 | | SEC_DEF("tc/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), /* alias for tcx */ |
9375 | | SEC_DEF("tc/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), /* alias for tcx */ |
9376 | | SEC_DEF("tcx/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), |
9377 | | SEC_DEF("tcx/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), |
9378 | | SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */ |
9379 | | SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */ |
9380 | | SEC_DEF("action", SCHED_ACT, 0, SEC_NONE), /* deprecated / legacy, use tcx */ |
9381 | | SEC_DEF("netkit/primary", SCHED_CLS, BPF_NETKIT_PRIMARY, SEC_NONE), |
9382 | | SEC_DEF("netkit/peer", SCHED_CLS, BPF_NETKIT_PEER, SEC_NONE), |
9383 | | SEC_DEF("tracepoint+", TRACEPOINT, 0, SEC_NONE, attach_tp), |
9384 | | SEC_DEF("tp+", TRACEPOINT, 0, SEC_NONE, attach_tp), |
9385 | | SEC_DEF("raw_tracepoint+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp), |
9386 | | SEC_DEF("raw_tp+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp), |
9387 | | SEC_DEF("raw_tracepoint.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp), |
9388 | | SEC_DEF("raw_tp.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp), |
9389 | | SEC_DEF("tp_btf+", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace), |
9390 | | SEC_DEF("fentry+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace), |
9391 | | SEC_DEF("fmod_ret+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace), |
9392 | | SEC_DEF("fexit+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace), |
9393 | | SEC_DEF("fentry.s+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), |
9394 | | SEC_DEF("fmod_ret.s+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), |
9395 | | SEC_DEF("fexit.s+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), |
9396 | | SEC_DEF("freplace+", EXT, 0, SEC_ATTACH_BTF, attach_trace), |
9397 | | SEC_DEF("lsm+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm), |
9398 | | SEC_DEF("lsm.s+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm), |
9399 | | SEC_DEF("lsm_cgroup+", LSM, BPF_LSM_CGROUP, SEC_ATTACH_BTF), |
9400 | | SEC_DEF("iter+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter), |
9401 | | SEC_DEF("iter.s+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter), |
9402 | | SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE), |
9403 | | SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS), |
9404 | | SEC_DEF("xdp/devmap", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE), |
9405 | | SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS), |
9406 | | SEC_DEF("xdp/cpumap", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE), |
9407 | | SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS), |
9408 | | SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT), |
9409 | | SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE), |
9410 | | SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE), |
9411 | | SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE), |
9412 | | SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE), |
9413 | | SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE), |
9414 | | SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT), |
9415 | | SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT), |
9416 | | SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT), |
9417 | | SEC_DEF("sk_skb/verdict", SK_SKB, BPF_SK_SKB_VERDICT, SEC_ATTACHABLE_OPT), |
9418 | | SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE), |
9419 | | SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT), |
9420 | | SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT), |
9421 | | SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT), |
9422 | | SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT), |
9423 | | SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT), |
9424 | | SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE), |
9425 | | SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE), |
9426 | | SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE), |
9427 | | SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT), |
9428 | | SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE), |
9429 | | SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE), |
9430 | | SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE), |
9431 | | SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE), |
9432 | | SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE), |
9433 | | SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE), |
9434 | | SEC_DEF("cgroup/connect_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_CONNECT, SEC_ATTACHABLE), |
9435 | | SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE), |
9436 | | SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE), |
9437 | | SEC_DEF("cgroup/sendmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_SENDMSG, SEC_ATTACHABLE), |
9438 | | SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE), |
9439 | | SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE), |
9440 | | SEC_DEF("cgroup/recvmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_RECVMSG, SEC_ATTACHABLE), |
9441 | | SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE), |
9442 | | SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE), |
9443 | | SEC_DEF("cgroup/getpeername_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETPEERNAME, SEC_ATTACHABLE), |
9444 | | SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE), |
9445 | | SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE), |
9446 | | SEC_DEF("cgroup/getsockname_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETSOCKNAME, SEC_ATTACHABLE), |
9447 | | SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE), |
9448 | | SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE), |
9449 | | SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE), |
9450 | | SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT), |
9451 | | SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE), |
9452 | | SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE), |
9453 | | SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE), |
9454 | | SEC_DEF("netfilter", NETFILTER, BPF_NETFILTER, SEC_NONE), |
9455 | | }; |
9456 | | |
9457 | | int libbpf_register_prog_handler(const char *sec, |
9458 | | enum bpf_prog_type prog_type, |
9459 | | enum bpf_attach_type exp_attach_type, |
9460 | | const struct libbpf_prog_handler_opts *opts) |
9461 | 0 | { |
9462 | 0 | struct bpf_sec_def *sec_def; |
9463 | |
9464 | 0 | if (!OPTS_VALID(opts, libbpf_prog_handler_opts)) |
9465 | 0 | return libbpf_err(-EINVAL); |
9466 | | |
9467 | 0 | if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */ |
9468 | 0 | return libbpf_err(-E2BIG); |
9469 | | |
9470 | 0 | if (sec) { |
9471 | 0 | sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1, |
9472 | 0 | sizeof(*sec_def)); |
9473 | 0 | if (!sec_def) |
9474 | 0 | return libbpf_err(-ENOMEM); |
9475 | | |
9476 | 0 | custom_sec_defs = sec_def; |
9477 | 0 | sec_def = &custom_sec_defs[custom_sec_def_cnt]; |
9478 | 0 | } else { |
9479 | 0 | if (has_custom_fallback_def) |
9480 | 0 | return libbpf_err(-EBUSY); |
9481 | | |
9482 | 0 | sec_def = &custom_fallback_def; |
9483 | 0 | } |
9484 | | |
9485 | 0 | sec_def->sec = sec ? strdup(sec) : NULL; |
9486 | 0 | if (sec && !sec_def->sec) |
9487 | 0 | return libbpf_err(-ENOMEM); |
9488 | | |
9489 | 0 | sec_def->prog_type = prog_type; |
9490 | 0 | sec_def->expected_attach_type = exp_attach_type; |
9491 | 0 | sec_def->cookie = OPTS_GET(opts, cookie, 0); |
9492 | |
9493 | 0 | sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL); |
9494 | 0 | sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL); |
9495 | 0 | sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL); |
9496 | |
9497 | 0 | sec_def->handler_id = ++last_custom_sec_def_handler_id; |
9498 | |
9499 | 0 | if (sec) |
9500 | 0 | custom_sec_def_cnt++; |
9501 | 0 | else |
9502 | 0 | has_custom_fallback_def = true; |
9503 | |
9504 | 0 | return sec_def->handler_id; |
9505 | 0 | } |
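/* Sketch: registering a custom SEC("myapp/...") handler; "myapp+" obeys
 * the same "type+" matching rules as the built-in table above, and the
 * returned positive id can later be passed to
 * libbpf_unregister_prog_handler(). The section prefix and types are
 * assumptions.
 */
static int register_myapp_sections(void)
{
	LIBBPF_OPTS(libbpf_prog_handler_opts, opts);

	return libbpf_register_prog_handler("myapp+", BPF_PROG_TYPE_XDP,
					    BPF_XDP, &opts);
}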
9506 | | |
9507 | | int libbpf_unregister_prog_handler(int handler_id) |
9508 | 0 | { |
9509 | 0 | struct bpf_sec_def *sec_defs; |
9510 | 0 | int i; |
9511 | |
9512 | 0 | if (handler_id <= 0) |
9513 | 0 | return libbpf_err(-EINVAL); |
9514 | | |
9515 | 0 | if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) { |
9516 | 0 | memset(&custom_fallback_def, 0, sizeof(custom_fallback_def)); |
9517 | 0 | has_custom_fallback_def = false; |
9518 | 0 | return 0; |
9519 | 0 | } |
9520 | | |
9521 | 0 | for (i = 0; i < custom_sec_def_cnt; i++) { |
9522 | 0 | if (custom_sec_defs[i].handler_id == handler_id) |
9523 | 0 | break; |
9524 | 0 | } |
9525 | |
9526 | 0 | if (i == custom_sec_def_cnt) |
9527 | 0 | return libbpf_err(-ENOENT); |
9528 | | |
9529 | 0 | free(custom_sec_defs[i].sec); |
9530 | 0 | for (i = i + 1; i < custom_sec_def_cnt; i++) |
9531 | 0 | custom_sec_defs[i - 1] = custom_sec_defs[i]; |
9532 | 0 | custom_sec_def_cnt--; |
9533 | | |
9534 | | /* try to shrink the array, but it's ok if we couldn't */ |
9535 | 0 | sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs)); |
9536 | | /* if new count is zero, reallocarray can return a valid NULL result; |
9537 | | * in this case the previous pointer will be freed, so we *have to* |
9538 | | * reassign old pointer to the new value (even if it's NULL) |
9539 | | */ |
9540 | 0 | if (sec_defs || custom_sec_def_cnt == 0) |
9541 | 0 | custom_sec_defs = sec_defs; |
9542 | |
9543 | 0 | return 0; |
9544 | 0 | } |
9545 | | |
9546 | | static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name) |
9547 | 717k | { |
9548 | 717k | size_t len = strlen(sec_def->sec); |
9549 | | |
9550 | | /* "type/" always has to have proper SEC("type/extras") form */ |
9551 | 717k | if (sec_def->sec[len - 1] == '/') { |
9552 | 0 | if (str_has_pfx(sec_name, sec_def->sec)) |
9553 | 0 | return true; |
9554 | 0 | return false; |
9555 | 0 | } |
9556 | | |
9557 | | /* "type+" means it can be either exact SEC("type") or |
9558 | | * well-formed SEC("type/extras") with proper '/' separator |
9559 | | */ |
9560 | 717k | if (sec_def->sec[len - 1] == '+') { |
9561 | 278k | len--; |
9562 | | /* not even a prefix */ |
9563 | 278k | if (strncmp(sec_name, sec_def->sec, len) != 0) |
9564 | 276k | return false; |
9565 | | /* exact match or has '/' separator */ |
9566 | 1.90k | if (sec_name[len] == '\0' || sec_name[len] == '/') |
9567 | 629 | return true; |
9568 | 1.28k | return false; |
9569 | 1.90k | } |
9570 | | |
9571 | 439k | return strcmp(sec_name, sec_def->sec) == 0; |
9572 | 717k | } |
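/* Illustrative matches for the rules above: a "kprobe+" definition
 * accepts SEC("kprobe") and SEC("kprobe/do_unlinkat") but rejects
 * SEC("kprobex") (no '\0' or '/' right after the prefix); a definition
 * ending in '/' matches purely by prefix; anything else, e.g. "xdp",
 * requires an exact string match (longer variants like "xdp.frags" have
 * their own entries earlier in the table).
 */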
9573 | | |
9574 | | static const struct bpf_sec_def *find_sec_def(const char *sec_name) |
9575 | 7.51k | { |
9576 | 7.51k | const struct bpf_sec_def *sec_def; |
9577 | 7.51k | int i, n; |
9578 | | |
9579 | 7.51k | n = custom_sec_def_cnt; |
9580 | 7.51k | for (i = 0; i < n; i++) { |
9581 | 0 | sec_def = &custom_sec_defs[i]; |
9582 | 0 | if (sec_def_matches(sec_def, sec_name)) |
9583 | 0 | return sec_def; |
9584 | 0 | } |
9585 | | |
9586 | 7.51k | n = ARRAY_SIZE(section_defs); |
9587 | 724k | for (i = 0; i < n; i++) { |
9588 | 717k | sec_def = §ion_defs[i]; |
9589 | 717k | if (sec_def_matches(sec_def, sec_name)) |
9590 | 650 | return sec_def; |
9591 | 717k | } |
9592 | | |
9593 | 6.86k | if (has_custom_fallback_def) |
9594 | 0 | return &custom_fallback_def; |
9595 | | |
9596 | 6.86k | return NULL; |
9597 | 6.86k | } |
9598 | | |
9599 | 0 | #define MAX_TYPE_NAME_SIZE 32 |
9600 | | |
9601 | | static char *libbpf_get_type_names(bool attach_type) |
9602 | 0 | { |
9603 | 0 | int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE; |
9604 | 0 | char *buf; |
9605 | |
9606 | 0 | buf = malloc(len); |
9607 | 0 | if (!buf) |
9608 | 0 | return NULL; |
9609 | | |
9610 | 0 | buf[0] = '\0'; |
9611 | | /* Forge string buf with all available names */ |
9612 | 0 | for (i = 0; i < ARRAY_SIZE(section_defs); i++) { |
9613 | 0 | const struct bpf_sec_def *sec_def = §ion_defs[i]; |
9614 | |
9615 | 0 | if (attach_type) { |
9616 | 0 | if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load) |
9617 | 0 | continue; |
9618 | | |
9619 | 0 | if (!(sec_def->cookie & SEC_ATTACHABLE)) |
9620 | 0 | continue; |
9621 | 0 | } |
9622 | | |
9623 | 0 | if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) { |
9624 | 0 | free(buf); |
9625 | 0 | return NULL; |
9626 | 0 | } |
9627 | 0 | strcat(buf, " "); |
9628 | 0 | strcat(buf, section_defs[i].sec); |
9629 | 0 | } |
9630 | | |
9631 | 0 | return buf; |
9632 | 0 | } |
9633 | | |
9634 | | int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, |
9635 | | enum bpf_attach_type *expected_attach_type) |
9636 | 0 | { |
9637 | 0 | const struct bpf_sec_def *sec_def; |
9638 | 0 | char *type_names; |
9639 | |
9640 | 0 | if (!name) |
9641 | 0 | return libbpf_err(-EINVAL); |
9642 | | |
9643 | 0 | sec_def = find_sec_def(name); |
9644 | 0 | if (sec_def) { |
9645 | 0 | *prog_type = sec_def->prog_type; |
9646 | 0 | *expected_attach_type = sec_def->expected_attach_type; |
9647 | 0 | return 0; |
9648 | 0 | } |
9649 | | |
9650 | 0 | pr_debug("failed to guess program type from ELF section '%s'\n", name); |
9651 | 0 | type_names = libbpf_get_type_names(false); |
9652 | 0 | if (type_names != NULL) { |
9653 | 0 | pr_debug("supported section(type) names are:%s\n", type_names); |
9654 | 0 | free(type_names); |
9655 | 0 | } |
9656 | |
9657 | 0 | return libbpf_err(-ESRCH); |
9658 | 0 | } |
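/* Sketch: mapping an ELF section name to program/attach types, the same
 * lookup performed for each SEC() annotation when an object is opened.
 */
static void guess_types(void)
{
	enum bpf_prog_type ptype;
	enum bpf_attach_type atype;

	if (!libbpf_prog_type_by_name("xdp", &ptype, &atype))
		printf("xdp -> prog type %d, attach type %d\n", ptype, atype);
}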
9659 | | |
9660 | | const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t) |
9661 | 0 | { |
9662 | 0 | if (t < 0 || t >= ARRAY_SIZE(attach_type_name)) |
9663 | 0 | return NULL; |
9664 | | |
9665 | 0 | return attach_type_name[t]; |
9666 | 0 | } |
9667 | | |
9668 | | const char *libbpf_bpf_link_type_str(enum bpf_link_type t) |
9669 | 0 | { |
9670 | 0 | if (t < 0 || t >= ARRAY_SIZE(link_type_name)) |
9671 | 0 | return NULL; |
9672 | | |
9673 | 0 | return link_type_name[t]; |
9674 | 0 | } |
9675 | | |
9676 | | const char *libbpf_bpf_map_type_str(enum bpf_map_type t) |
9677 | 0 | { |
9678 | 0 | if (t < 0 || t >= ARRAY_SIZE(map_type_name)) |
9679 | 0 | return NULL; |
9680 | | |
9681 | 0 | return map_type_name[t]; |
9682 | 0 | } |
9683 | | |
9684 | | const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t) |
9685 | 0 | { |
9686 | 0 | if (t < 0 || t >= ARRAY_SIZE(prog_type_name)) |
9687 | 0 | return NULL; |
9688 | | |
9689 | 0 | return prog_type_name[t]; |
9690 | 0 | } |
9691 | | |
9692 | | static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj, |
9693 | | int sec_idx, |
9694 | | size_t offset) |
9695 | 0 | { |
9696 | 0 | struct bpf_map *map; |
9697 | 0 | size_t i; |
9698 | |
9699 | 0 | for (i = 0; i < obj->nr_maps; i++) { |
9700 | 0 | map = &obj->maps[i]; |
9701 | 0 | if (!bpf_map__is_struct_ops(map)) |
9702 | 0 | continue; |
9703 | 0 | if (map->sec_idx == sec_idx && |
9704 | 0 | map->sec_offset <= offset && |
9705 | 0 | offset - map->sec_offset < map->def.value_size) |
9706 | 0 | return map; |
9707 | 0 | } |
9708 | | |
9709 | 0 | return NULL; |
9710 | 0 | } |
9711 | | |
9712 | | /* Collect the reloc from ELF, populate the st_ops->progs[], and update |
9713 | | * st_ops->data for shadow type. |
9714 | | */ |
9715 | | static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, |
9716 | | Elf64_Shdr *shdr, Elf_Data *data) |
9717 | 0 | { |
9718 | 0 | const struct btf_type *type; |
9719 | 0 | const struct btf_member *member; |
9720 | 0 | struct bpf_struct_ops *st_ops; |
9721 | 0 | struct bpf_program *prog; |
9722 | 0 | unsigned int shdr_idx; |
9723 | 0 | const struct btf *btf; |
9724 | 0 | struct bpf_map *map; |
9725 | 0 | unsigned int moff, insn_idx; |
9726 | 0 | const char *name; |
9727 | 0 | __u32 member_idx; |
9728 | 0 | Elf64_Sym *sym; |
9729 | 0 | Elf64_Rel *rel; |
9730 | 0 | int i, nrels; |
9731 | |
9732 | 0 | btf = obj->btf; |
9733 | 0 | nrels = shdr->sh_size / shdr->sh_entsize; |
9734 | 0 | for (i = 0; i < nrels; i++) { |
9735 | 0 | rel = elf_rel_by_idx(data, i); |
9736 | 0 | if (!rel) { |
9737 | 0 | pr_warn("struct_ops reloc: failed to get %d reloc\n", i); |
9738 | 0 | return -LIBBPF_ERRNO__FORMAT; |
9739 | 0 | } |
9740 | | |
9741 | 0 | sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); |
9742 | 0 | if (!sym) { |
9743 | 0 | pr_warn("struct_ops reloc: symbol %zx not found\n", |
9744 | 0 | (size_t)ELF64_R_SYM(rel->r_info)); |
9745 | 0 | return -LIBBPF_ERRNO__FORMAT; |
9746 | 0 | } |
9747 | | |
9748 | 0 | name = elf_sym_str(obj, sym->st_name) ?: "<?>"; |
9749 | 0 | map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset); |
9750 | 0 | if (!map) { |
9751 | 0 | pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n", |
9752 | 0 | (size_t)rel->r_offset); |
9753 | 0 | return -EINVAL; |
9754 | 0 | } |
9755 | | |
9756 | 0 | moff = rel->r_offset - map->sec_offset; |
9757 | 0 | shdr_idx = sym->st_shndx; |
9758 | 0 | st_ops = map->st_ops; |
9759 | 0 | pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n", |
9760 | 0 | map->name, |
9761 | 0 | (long long)(rel->r_info >> 32), |
9762 | 0 | (long long)sym->st_value, |
9763 | 0 | shdr_idx, (size_t)rel->r_offset, |
9764 | 0 | map->sec_offset, sym->st_name, name); |
9765 | |
9766 | 0 | if (shdr_idx >= SHN_LORESERVE) { |
9767 | 0 | pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n", |
9768 | 0 | map->name, (size_t)rel->r_offset, shdr_idx); |
9769 | 0 | return -LIBBPF_ERRNO__RELOC; |
9770 | 0 | } |
9771 | 0 | if (sym->st_value % BPF_INSN_SZ) { |
9772 | 0 | pr_warn("struct_ops reloc %s: invalid target program offset %llu\n", |
9773 | 0 | map->name, (unsigned long long)sym->st_value); |
9774 | 0 | return -LIBBPF_ERRNO__FORMAT; |
9775 | 0 | } |
9776 | 0 | insn_idx = sym->st_value / BPF_INSN_SZ; |
9777 | |
9778 | 0 | type = btf__type_by_id(btf, st_ops->type_id); |
9779 | 0 | member = find_member_by_offset(type, moff * 8); |
9780 | 0 | if (!member) { |
9781 | 0 | pr_warn("struct_ops reloc %s: cannot find member at moff %u\n", |
9782 | 0 | map->name, moff); |
9783 | 0 | return -EINVAL; |
9784 | 0 | } |
9785 | 0 | member_idx = member - btf_members(type); |
9786 | 0 | name = btf__name_by_offset(btf, member->name_off); |
9787 | |
9788 | 0 | if (!resolve_func_ptr(btf, member->type, NULL)) { |
9789 | 0 | pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n", |
9790 | 0 | map->name, name); |
9791 | 0 | return -EINVAL; |
9792 | 0 | } |
9793 | | |
9794 | 0 | prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx); |
9795 | 0 | if (!prog) { |
9796 | 0 | pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n", |
9797 | 0 | map->name, shdr_idx, name); |
9798 | 0 | return -EINVAL; |
9799 | 0 | } |
9800 | | |
9801 | | /* prevent the use of BPF prog with invalid type */ |
9802 | 0 | if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) { |
9803 | 0 | pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n", |
9804 | 0 | map->name, prog->name); |
9805 | 0 | return -EINVAL; |
9806 | 0 | } |
9807 | | |
9808 | 0 | st_ops->progs[member_idx] = prog; |
9809 | | |
9810 | | /* st_ops->data will be exposed to users, being returned by |
9811 | | * bpf_map__initial_value() as a pointer to the shadow |
9812 | | * type. All function pointers in the original struct type |
9813 | | * should be converted to a pointer to struct bpf_program |
9814 | | * in the shadow type. |
9815 | | */ |
9816 | 0 | *((struct bpf_program **)(st_ops->data + moff)) = prog; |
9817 | 0 | } |
9818 | | |
9819 | 0 | return 0; |
9820 | 0 | } |
9821 | | |
9822 | 0 | #define BTF_TRACE_PREFIX "btf_trace_" |
9823 | 0 | #define BTF_LSM_PREFIX "bpf_lsm_" |
9824 | 0 | #define BTF_ITER_PREFIX "bpf_iter_" |
9825 | | #define BTF_MAX_NAME_SIZE 128 |
9826 | | |
9827 | | void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type, |
9828 | | const char **prefix, int *kind) |
9829 | 0 | { |
9830 | 0 | switch (attach_type) { |
9831 | 0 | case BPF_TRACE_RAW_TP: |
9832 | 0 | *prefix = BTF_TRACE_PREFIX; |
9833 | 0 | *kind = BTF_KIND_TYPEDEF; |
9834 | 0 | break; |
9835 | 0 | case BPF_LSM_MAC: |
9836 | 0 | case BPF_LSM_CGROUP: |
9837 | 0 | *prefix = BTF_LSM_PREFIX; |
9838 | 0 | *kind = BTF_KIND_FUNC; |
9839 | 0 | break; |
9840 | 0 | case BPF_TRACE_ITER: |
9841 | 0 | *prefix = BTF_ITER_PREFIX; |
9842 | 0 | *kind = BTF_KIND_FUNC; |
9843 | 0 | break; |
9844 | 0 | default: |
9845 | 0 | *prefix = ""; |
9846 | 0 | *kind = BTF_KIND_FUNC; |
9847 | 0 | } |
9848 | 0 | } |
9849 | | |
9850 | | static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, |
9851 | | const char *name, __u32 kind) |
9852 | 0 | { |
9853 | 0 | char btf_type_name[BTF_MAX_NAME_SIZE]; |
9854 | 0 | int ret; |
9855 | |
9856 | 0 | ret = snprintf(btf_type_name, sizeof(btf_type_name), |
9857 | 0 | "%s%s", prefix, name); |
9858 | | /* snprintf returns the number of characters written excluding the |
9859 | | * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it |
9860 | | * indicates truncation. |
9861 | | */ |
9862 | 0 | if (ret < 0 || ret >= sizeof(btf_type_name)) |
9863 | 0 | return -ENAMETOOLONG; |
9864 | 0 | return btf__find_by_name_kind(btf, btf_type_name, kind); |
9865 | 0 | } |
9866 | | |
9867 | | static inline int find_attach_btf_id(struct btf *btf, const char *name, |
9868 | | enum bpf_attach_type attach_type) |
9869 | 0 | { |
9870 | 0 | const char *prefix; |
9871 | 0 | int kind; |
9872 | |
9873 | 0 | btf_get_kernel_prefix_kind(attach_type, &prefix, &kind); |
9874 | 0 | return find_btf_by_prefix_kind(btf, prefix, name, kind); |
9875 | 0 | } |
9876 | | |
9877 | | int libbpf_find_vmlinux_btf_id(const char *name, |
9878 | | enum bpf_attach_type attach_type) |
9879 | 0 | { |
9880 | 0 | struct btf *btf; |
9881 | 0 | int err; |
9882 | |
9883 | 0 | btf = btf__load_vmlinux_btf(); |
9884 | 0 | err = libbpf_get_error(btf); |
9885 | 0 | if (err) { |
9886 | 0 | pr_warn("vmlinux BTF is not found\n"); |
9887 | 0 | return libbpf_err(err); |
9888 | 0 | } |
9889 | | |
9890 | 0 | err = find_attach_btf_id(btf, name, attach_type); |
9891 | 0 | if (err <= 0) |
9892 | 0 | pr_warn("%s is not found in vmlinux BTF\n", name); |
9893 | |
9894 | 0 | btf__free(btf); |
9895 | 0 | return libbpf_err(err); |
9896 | 0 | } |
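/* Sketch: resolving the vmlinux BTF id an fentry program would attach
 * to; requires kernel BTF (/sys/kernel/btf/vmlinux), and the symbol name
 * is an assumption. A positive return value is the BTF type id.
 */
static int fentry_target_id(void)
{
	return libbpf_find_vmlinux_btf_id("do_unlinkat", BPF_TRACE_FENTRY);
}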
9897 | | |
9898 | | static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) |
9899 | 0 | { |
9900 | 0 | struct bpf_prog_info info; |
9901 | 0 | __u32 info_len = sizeof(info); |
9902 | 0 | struct btf *btf; |
9903 | 0 | int err; |
9904 | |
9905 | 0 | memset(&info, 0, info_len); |
9906 | 0 | err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len); |
9907 | 0 | if (err) { |
9908 | 0 | pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n", |
9909 | 0 | attach_prog_fd, err); |
9910 | 0 | return err; |
9911 | 0 | } |
9912 | | |
9913 | 0 | err = -EINVAL; |
9914 | 0 | if (!info.btf_id) { |
9915 | 0 | pr_warn("The target program doesn't have BTF\n"); |
9916 | 0 | goto out; |
9917 | 0 | } |
9918 | 0 | btf = btf__load_from_kernel_by_id(info.btf_id); |
9919 | 0 | err = libbpf_get_error(btf); |
9920 | 0 | if (err) { |
9921 | 0 | pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err); |
9922 | 0 | goto out; |
9923 | 0 | } |
9924 | 0 | err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); |
9925 | 0 | btf__free(btf); |
9926 | 0 | if (err <= 0) { |
9927 | 0 | pr_warn("%s is not found in prog's BTF\n", name); |
9928 | 0 | goto out; |
9929 | 0 | } |
9930 | 0 | out: |
9931 | 0 | return err; |
9932 | 0 | } |
9933 | | |
9934 | | static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name, |
9935 | | enum bpf_attach_type attach_type, |
9936 | | int *btf_obj_fd, int *btf_type_id) |
9937 | 0 | { |
9938 | 0 | int ret, i, mod_len; |
9939 | 0 | const char *fn_name, *mod_name = NULL; |
9940 | |
9941 | 0 | fn_name = strchr(attach_name, ':'); |
9942 | 0 | if (fn_name) { |
9943 | 0 | mod_name = attach_name; |
9944 | 0 | mod_len = fn_name - mod_name; |
9945 | 0 | fn_name++; |
9946 | 0 | } |
9947 | |
9948 | 0 | if (!mod_name || strncmp(mod_name, "vmlinux", mod_len) == 0) { |
9949 | 0 | ret = find_attach_btf_id(obj->btf_vmlinux, |
9950 | 0 | mod_name ? fn_name : attach_name, |
9951 | 0 | attach_type); |
9952 | 0 | if (ret > 0) { |
9953 | 0 | *btf_obj_fd = 0; /* vmlinux BTF */ |
9954 | 0 | *btf_type_id = ret; |
9955 | 0 | return 0; |
9956 | 0 | } |
9957 | 0 | if (ret != -ENOENT) |
9958 | 0 | return ret; |
9959 | 0 | } |
9960 | | |
9961 | 0 | ret = load_module_btfs(obj); |
9962 | 0 | if (ret) |
9963 | 0 | return ret; |
9964 | | |
9965 | 0 | for (i = 0; i < obj->btf_module_cnt; i++) { |
9966 | 0 | const struct module_btf *mod = &obj->btf_modules[i]; |
9967 | |
9968 | 0 | if (mod_name && strncmp(mod->name, mod_name, mod_len) != 0) |
9969 | 0 | continue; |
9970 | | |
9971 | 0 | ret = find_attach_btf_id(mod->btf, |
9972 | 0 | mod_name ? fn_name : attach_name, |
9973 | 0 | attach_type); |
9974 | 0 | if (ret > 0) { |
9975 | 0 | *btf_obj_fd = mod->fd; |
9976 | 0 | *btf_type_id = ret; |
9977 | 0 | return 0; |
9978 | 0 | } |
9979 | 0 | if (ret == -ENOENT) |
9980 | 0 | continue; |
9981 | | |
9982 | 0 | return ret; |
9983 | 0 | } |
9984 | | |
9985 | 0 | return -ESRCH; |
9986 | 0 | } |
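/* Note on the attach_name syntax handled above: an optional "module:"
 * prefix (e.g. "vmlinux:do_unlinkat" or "mymod:my_func") restricts the
 * search; without it, vmlinux BTF is tried first and then each loaded
 * module's BTF, with -ESRCH if nothing matches.
 */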
9987 | | |
9988 | | static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name, |
9989 | | int *btf_obj_fd, int *btf_type_id) |
9990 | 0 | { |
9991 | 0 | enum bpf_attach_type attach_type = prog->expected_attach_type; |
9992 | 0 | __u32 attach_prog_fd = prog->attach_prog_fd; |
9993 | 0 | int err = 0; |
9994 | | |
9995 | | /* BPF program's BTF ID */ |
9996 | 0 | if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) { |
9997 | 0 | if (!attach_prog_fd) { |
9998 | 0 | pr_warn("prog '%s': attach program FD is not set\n", prog->name); |
9999 | 0 | return -EINVAL; |
10000 | 0 | } |
10001 | 0 | err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd); |
10002 | 0 | if (err < 0) { |
10003 | 0 | pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %d\n", |
10004 | 0 | prog->name, attach_prog_fd, attach_name, err); |
10005 | 0 | return err; |
10006 | 0 | } |
10007 | 0 | *btf_obj_fd = 0; |
10008 | 0 | *btf_type_id = err; |
10009 | 0 | return 0; |
10010 | 0 | } |
10011 | | |
10012 | | /* kernel/module BTF ID */ |
10013 | 0 | if (prog->obj->gen_loader) { |
10014 | 0 | bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type); |
10015 | 0 | *btf_obj_fd = 0; |
10016 | 0 | *btf_type_id = 1; |
10017 | 0 | } else { |
10018 | 0 | err = find_kernel_btf_id(prog->obj, attach_name, |
10019 | 0 | attach_type, btf_obj_fd, |
10020 | 0 | btf_type_id); |
10021 | 0 | } |
10022 | 0 | if (err) { |
10023 | 0 | pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n", |
10024 | 0 | prog->name, attach_name, err); |
10025 | 0 | return err; |
10026 | 0 | } |
10027 | 0 | return 0; |
10028 | 0 | } |
10029 | | |
10030 | | int libbpf_attach_type_by_name(const char *name, |
10031 | | enum bpf_attach_type *attach_type) |
10032 | 0 | { |
10033 | 0 | char *type_names; |
10034 | 0 | const struct bpf_sec_def *sec_def; |
10035 | |
10036 | 0 | if (!name) |
10037 | 0 | return libbpf_err(-EINVAL); |
10038 | | |
10039 | 0 | sec_def = find_sec_def(name); |
10040 | 0 | if (!sec_def) { |
10041 | 0 | pr_debug("failed to guess attach type based on ELF section name '%s'\n", name); |
10042 | 0 | type_names = libbpf_get_type_names(true); |
10043 | 0 | if (type_names != NULL) { |
10044 | 0 | pr_debug("attachable section(type) names are:%s\n", type_names); |
10045 | 0 | free(type_names); |
10046 | 0 | } |
10047 | |
10048 | 0 | return libbpf_err(-EINVAL); |
10049 | 0 | } |
10050 | | |
10051 | 0 | if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load) |
10052 | 0 | return libbpf_err(-EINVAL); |
10053 | 0 | if (!(sec_def->cookie & SEC_ATTACHABLE)) |
10054 | 0 | return libbpf_err(-EINVAL); |
10055 | | |
10056 | 0 | *attach_type = sec_def->expected_attach_type; |
10057 | 0 | return 0; |
10058 | 0 | } |
10059 | | |
10060 | | int bpf_map__fd(const struct bpf_map *map) |
10061 | 0 | { |
10062 | 0 | if (!map) |
10063 | 0 | return libbpf_err(-EINVAL); |
10064 | 0 | if (!map_is_created(map)) |
10065 | 0 | return -1; |
10066 | 0 | return map->fd; |
10067 | 0 | } |
10068 | | |
10069 | | static bool map_uses_real_name(const struct bpf_map *map) |
10070 | 1 | { |
10071 | | /* Since libbpf started to support custom .data.* and .rodata.* maps, |
10072 | | * their user-visible name differs from kernel-visible name. Users see |
10073 | | * such map's corresponding ELF section name as a map name. |
10074 | | * This check distinguishes .data/.rodata from .data.* and .rodata.* |
10075 | | * maps to know which name has to be returned to the user. |
10076 | | */ |
10077 | 1 | if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0) |
10078 | 0 | return true; |
10079 | 1 | if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0) |
10080 | 0 | return true; |
10081 | 1 | return false; |
10082 | 1 | } |
10083 | | |
10084 | | const char *bpf_map__name(const struct bpf_map *map) |
10085 | 1 | { |
10086 | 1 | if (!map) |
10087 | 0 | return NULL; |
10088 | | |
10089 | 1 | if (map_uses_real_name(map)) |
10090 | 0 | return map->real_name; |
10091 | | |
10092 | 1 | return map->name; |
10093 | 1 | } |
10094 | | |
10095 | | enum bpf_map_type bpf_map__type(const struct bpf_map *map) |
10096 | 0 | { |
10097 | 0 | return map->def.type; |
10098 | 0 | } |
10099 | | |
10100 | | int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type) |
10101 | 0 | { |
10102 | 0 | if (map_is_created(map)) |
10103 | 0 | return libbpf_err(-EBUSY); |
10104 | 0 | map->def.type = type; |
10105 | 0 | return 0; |
10106 | 0 | } |
10107 | | |
10108 | | __u32 bpf_map__map_flags(const struct bpf_map *map) |
10109 | 0 | { |
10110 | 0 | return map->def.map_flags; |
10111 | 0 | } |
10112 | | |
10113 | | int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags) |
10114 | 0 | { |
10115 | 0 | if (map_is_created(map)) |
10116 | 0 | return libbpf_err(-EBUSY); |
10117 | 0 | map->def.map_flags = flags; |
10118 | 0 | return 0; |
10119 | 0 | } |
10120 | | |
10121 | | __u64 bpf_map__map_extra(const struct bpf_map *map) |
10122 | 0 | { |
10123 | 0 | return map->map_extra; |
10124 | 0 | } |
10125 | | |
10126 | | int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra) |
10127 | 0 | { |
10128 | 0 | if (map_is_created(map)) |
10129 | 0 | return libbpf_err(-EBUSY); |
10130 | 0 | map->map_extra = map_extra; |
10131 | 0 | return 0; |
10132 | 0 | } |
10133 | | |
10134 | | __u32 bpf_map__numa_node(const struct bpf_map *map) |
10135 | 0 | { |
10136 | 0 | return map->numa_node; |
10137 | 0 | } |
10138 | | |
10139 | | int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node) |
10140 | 0 | { |
10141 | 0 | if (map_is_created(map)) |
10142 | 0 | return libbpf_err(-EBUSY); |
10143 | 0 | map->numa_node = numa_node; |
10144 | 0 | return 0; |
10145 | 0 | } |
10146 | | |
10147 | | __u32 bpf_map__key_size(const struct bpf_map *map) |
10148 | 0 | { |
10149 | 0 | return map->def.key_size; |
10150 | 0 | } |
10151 | | |
10152 | | int bpf_map__set_key_size(struct bpf_map *map, __u32 size) |
10153 | 0 | { |
10154 | 0 | if (map_is_created(map)) |
10155 | 0 | return libbpf_err(-EBUSY); |
10156 | 0 | map->def.key_size = size; |
10157 | 0 | return 0; |
10158 | 0 | } |
10159 | | |
10160 | | __u32 bpf_map__value_size(const struct bpf_map *map) |
10161 | 0 | { |
10162 | 0 | return map->def.value_size; |
10163 | 0 | } |
10164 | | |
10165 | | static int map_btf_datasec_resize(struct bpf_map *map, __u32 size) |
10166 | 0 | { |
10167 | 0 | struct btf *btf; |
10168 | 0 | struct btf_type *datasec_type, *var_type; |
10169 | 0 | struct btf_var_secinfo *var; |
10170 | 0 | const struct btf_type *array_type; |
10171 | 0 | const struct btf_array *array; |
10172 | 0 | int vlen, element_sz, new_array_id; |
10173 | 0 | __u32 nr_elements; |
10174 | | |
10175 | | /* check btf existence */ |
10176 | 0 | btf = bpf_object__btf(map->obj); |
10177 | 0 | if (!btf) |
10178 | 0 | return -ENOENT; |
10179 | | |
10180 | | /* verify map is datasec */ |
10181 | 0 | datasec_type = btf_type_by_id(btf, bpf_map__btf_value_type_id(map)); |
10182 | 0 | if (!btf_is_datasec(datasec_type)) { |
10183 | 0 | pr_warn("map '%s': cannot be resized, map value type is not a datasec\n", |
10184 | 0 | bpf_map__name(map)); |
10185 | 0 | return -EINVAL; |
10186 | 0 | } |
10187 | | |
10188 | | /* verify datasec has at least one var */ |
10189 | 0 | vlen = btf_vlen(datasec_type); |
10190 | 0 | if (vlen == 0) { |
10191 | 0 | pr_warn("map '%s': cannot be resized, map value datasec is empty\n", |
10192 | 0 | bpf_map__name(map)); |
10193 | 0 | return -EINVAL; |
10194 | 0 | } |
10195 | | |
10196 | | /* verify last var in the datasec is an array */ |
10197 | 0 | var = &btf_var_secinfos(datasec_type)[vlen - 1]; |
10198 | 0 | var_type = btf_type_by_id(btf, var->type); |
10199 | 0 | array_type = skip_mods_and_typedefs(btf, var_type->type, NULL); |
10200 | 0 | if (!btf_is_array(array_type)) { |
10201 | 0 | pr_warn("map '%s': cannot be resized, last var must be an array\n", |
10202 | 0 | bpf_map__name(map)); |
10203 | 0 | return -EINVAL; |
10204 | 0 | } |
10205 | | |
10206 | | /* verify request size aligns with array */ |
10207 | 0 | array = btf_array(array_type); |
10208 | 0 | element_sz = btf__resolve_size(btf, array->type); |
10209 | 0 | if (element_sz <= 0 || (size - var->offset) % element_sz != 0) { |
10210 | 0 | pr_warn("map '%s': cannot be resized, element size (%d) doesn't align with new total size (%u)\n", |
10211 | 0 | bpf_map__name(map), element_sz, size); |
10212 | 0 | return -EINVAL; |
10213 | 0 | } |
10214 | | |
10215 | | /* create a new array based on the existing array, but with new length */ |
10216 | 0 | nr_elements = (size - var->offset) / element_sz; |
10217 | 0 | new_array_id = btf__add_array(btf, array->index_type, array->type, nr_elements); |
10218 | 0 | if (new_array_id < 0) |
10219 | 0 | return new_array_id; |
10220 | | |
10221 | | /* adding a new btf type invalidates existing pointers to btf objects, |
10222 | | * so refresh pointers before proceeding |
10223 | | */ |
10224 | 0 | datasec_type = btf_type_by_id(btf, map->btf_value_type_id); |
10225 | 0 | var = &btf_var_secinfos(datasec_type)[vlen - 1]; |
10226 | 0 | var_type = btf_type_by_id(btf, var->type); |
10227 | | |
10228 | | /* finally update btf info */ |
10229 | 0 | datasec_type->size = size; |
10230 | 0 | var->size = size - var->offset; |
10231 | 0 | var_type->type = new_array_id; |
10232 | |
10233 | 0 | return 0; |
10234 | 0 | } |
10235 | | |
10236 | | int bpf_map__set_value_size(struct bpf_map *map, __u32 size) |
10237 | 0 | { |
10238 | 0 | if (map->obj->loaded || map->reused) |
10239 | 0 | return libbpf_err(-EBUSY); |
10240 | | |
10241 | 0 | if (map->mmaped) { |
10242 | 0 | size_t mmap_old_sz, mmap_new_sz; |
10243 | 0 | int err; |
10244 | |
10245 | 0 | if (map->def.type != BPF_MAP_TYPE_ARRAY) |
10246 | 0 | return -EOPNOTSUPP; |
10247 | | |
10248 | 0 | mmap_old_sz = bpf_map_mmap_sz(map); |
10249 | 0 | mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries); |
10250 | 0 | err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz); |
10251 | 0 | if (err) { |
10252 | 0 | pr_warn("map '%s': failed to resize memory-mapped region: %d\n", |
10253 | 0 | bpf_map__name(map), err); |
10254 | 0 | return err; |
10255 | 0 | } |
10256 | 0 | err = map_btf_datasec_resize(map, size); |
10257 | 0 | if (err && err != -ENOENT) { |
10258 | 0 | pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %d\n", |
10259 | 0 | bpf_map__name(map), err); |
10260 | 0 | map->btf_value_type_id = 0; |
10261 | 0 | map->btf_key_type_id = 0; |
10262 | 0 | } |
10263 | 0 | } |
10264 | | |
10265 | 0 | map->def.value_size = size; |
10266 | 0 | return 0; |
10267 | 0 | } |
10268 | | |
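A minimal usage sketch of the resize path above, under the assumption of a BPF object whose datasec ends in a global array (the names "prog.bpf.o", ".data.my_buf" and my_buf are hypothetical, not from this file):

    /* assumed BPF-side declaration: int my_buf[1] SEC(".data.my_buf"); */
    struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
    struct bpf_map *map = bpf_object__find_map_by_name(obj, ".data.my_buf");

    /* grow to 4096 ints before load; the mmap region and the trailing
     * BTF array in the datasec are adjusted as implemented above */
    if (map && !bpf_map__set_value_size(map, 4096 * sizeof(int)))
        bpf_object__load(obj);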
10269 | | __u32 bpf_map__btf_key_type_id(const struct bpf_map *map) |
10270 | 0 | { |
10271 | 0 | return map ? map->btf_key_type_id : 0; |
10272 | 0 | } |
10273 | | |
10274 | | __u32 bpf_map__btf_value_type_id(const struct bpf_map *map) |
10275 | 0 | { |
10276 | 0 | return map ? map->btf_value_type_id : 0; |
10277 | 0 | } |
10278 | | |
10279 | | int bpf_map__set_initial_value(struct bpf_map *map, |
10280 | | const void *data, size_t size) |
10281 | 0 | { |
10282 | 0 | size_t actual_sz; |
10283 | |
10284 | 0 | if (map->obj->loaded || map->reused) |
10285 | 0 | return libbpf_err(-EBUSY); |
10286 | | |
10287 | 0 | if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG) |
10288 | 0 | return libbpf_err(-EINVAL); |
10289 | | |
10290 | 0 | if (map->def.type == BPF_MAP_TYPE_ARENA) |
10291 | 0 | actual_sz = map->obj->arena_data_sz; |
10292 | 0 | else |
10293 | 0 | actual_sz = map->def.value_size; |
10294 | 0 | if (size != actual_sz) |
10295 | 0 | return libbpf_err(-EINVAL); |
10296 | | |
10297 | 0 | memcpy(map->mmaped, data, size); |
10298 | 0 | return 0; |
10299 | 0 | } |
10300 | | |
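A hedged sketch of seeding a global-data map before load ('obj' and the ".data.cfg" map are assumed to exist; per the checks above, the size must equal the map's value size, or arena_data_sz for BPF_MAP_TYPE_ARENA maps, and the object must not be loaded yet):

    struct config { int debug; int max_depth; } cfg = { 1, 8 };
    struct bpf_map *data_map = bpf_object__find_map_by_name(obj, ".data.cfg");

    if (data_map)
        bpf_map__set_initial_value(data_map, &cfg, sizeof(cfg));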
10301 | | void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize) |
10302 | 0 | { |
10303 | 0 | if (bpf_map__is_struct_ops(map)) { |
10304 | 0 | if (psize) |
10305 | 0 | *psize = map->def.value_size; |
10306 | 0 | return map->st_ops->data; |
10307 | 0 | } |
10308 | | |
10309 | 0 | if (!map->mmaped) |
10310 | 0 | return NULL; |
10311 | | |
10312 | 0 | if (map->def.type == BPF_MAP_TYPE_ARENA) |
10313 | 0 | *psize = map->obj->arena_data_sz; |
10314 | 0 | else |
10315 | 0 | *psize = map->def.value_size; |
10316 | |
10317 | 0 | return map->mmaped; |
10318 | 0 | } |
10319 | | |
10320 | | bool bpf_map__is_internal(const struct bpf_map *map) |
10321 | 326 | { |
10322 | 326 | return map->libbpf_type != LIBBPF_MAP_UNSPEC; |
10323 | 326 | } |
10324 | | |
10325 | | __u32 bpf_map__ifindex(const struct bpf_map *map) |
10326 | 0 | { |
10327 | 0 | return map->map_ifindex; |
10328 | 0 | } |
10329 | | |
10330 | | int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) |
10331 | 0 | { |
10332 | 0 | if (map_is_created(map)) |
10333 | 0 | return libbpf_err(-EBUSY); |
10334 | 0 | map->map_ifindex = ifindex; |
10335 | 0 | return 0; |
10336 | 0 | } |
10337 | | |
10338 | | int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) |
10339 | 0 | { |
10340 | 0 | if (!bpf_map_type__is_map_in_map(map->def.type)) { |
10341 | 0 | pr_warn("error: unsupported map type\n"); |
10342 | 0 | return libbpf_err(-EINVAL); |
10343 | 0 | } |
10344 | 0 | if (map->inner_map_fd != -1) { |
10345 | 0 | pr_warn("error: inner_map_fd already specified\n"); |
10346 | 0 | return libbpf_err(-EINVAL); |
10347 | 0 | } |
10348 | 0 | if (map->inner_map) { |
10349 | 0 | bpf_map__destroy(map->inner_map); |
10350 | 0 | zfree(&map->inner_map); |
10351 | 0 | } |
10352 | 0 | map->inner_map_fd = fd; |
10353 | 0 | return 0; |
10354 | 0 | } |
10355 | | |
10356 | | static struct bpf_map * |
10357 | | __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i) |
10358 | 0 | { |
10359 | 0 | ssize_t idx; |
10360 | 0 | struct bpf_map *s, *e; |
10361 | |
10362 | 0 | if (!obj || !obj->maps) |
10363 | 0 | return errno = EINVAL, NULL; |
10364 | | |
10365 | 0 | s = obj->maps; |
10366 | 0 | e = obj->maps + obj->nr_maps; |
10367 | |
10368 | 0 | if ((m < s) || (m >= e)) { |
10369 | 0 | pr_warn("error in %s: map handler doesn't belong to object\n", |
10370 | 0 | __func__); |
10371 | 0 | return errno = EINVAL, NULL; |
10372 | 0 | } |
10373 | | |
10374 | 0 | idx = (m - obj->maps) + i; |
10375 | 0 | if (idx >= obj->nr_maps || idx < 0) |
10376 | 0 | return NULL; |
10377 | 0 | return &obj->maps[idx]; |
10378 | 0 | } |
10379 | | |
10380 | | struct bpf_map * |
10381 | | bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev) |
10382 | 0 | { |
10383 | 0 | if (prev == NULL && obj != NULL) |
10384 | 0 | return obj->maps; |
10385 | | |
10386 | 0 | return __bpf_map__iter(prev, obj, 1); |
10387 | 0 | } |
10388 | | |
10389 | | struct bpf_map * |
10390 | | bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next) |
10391 | 0 | { |
10392 | 0 | if (next == NULL && obj != NULL) { |
10393 | 0 | if (!obj->nr_maps) |
10394 | 0 | return NULL; |
10395 | 0 | return obj->maps + obj->nr_maps - 1; |
10396 | 0 | } |
10397 | | |
10398 | 0 | return __bpf_map__iter(next, obj, -1); |
10399 | 0 | } |
10400 | | |
10401 | | struct bpf_map * |
10402 | | bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name) |
10403 | 0 | { |
10404 | 0 | struct bpf_map *pos; |
10405 | |
10406 | 0 | bpf_object__for_each_map(pos, obj) { |
10407 | | /* if it's a special internal map name (which always starts |
10408 | | * with dot) then check if that special name matches the |
10409 | | * real map name (ELF section name) |
10410 | | */ |
10411 | 0 | if (name[0] == '.') { |
10412 | 0 | if (pos->real_name && strcmp(pos->real_name, name) == 0) |
10413 | 0 | return pos; |
10414 | 0 | continue; |
10415 | 0 | } |
10416 | | /* otherwise map name has to be an exact match */ |
10417 | 0 | if (map_uses_real_name(pos)) { |
10418 | 0 | if (strcmp(pos->real_name, name) == 0) |
10419 | 0 | return pos; |
10420 | 0 | continue; |
10421 | 0 | } |
10422 | 0 | if (strcmp(pos->name, name) == 0) |
10423 | 0 | return pos; |
10424 | 0 | } |
10425 | 0 | return errno = ENOENT, NULL; |
10426 | 0 | } |
10427 | | |
10428 | | int |
10429 | | bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name) |
10430 | 0 | { |
10431 | 0 | return bpf_map__fd(bpf_object__find_map_by_name(obj, name)); |
10432 | 0 | } |
10433 | | |
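The iterators above back the public bpf_object__for_each_map() convenience macro; a short sketch (assumes an opened 'obj'):

    struct bpf_map *m;

    bpf_object__for_each_map(m, obj)
        printf("map: %s (fd=%d)\n", bpf_map__name(m), bpf_map__fd(m));

Note that internal maps (.data, .rodata, .bss, .kconfig) are matched by their dot-prefixed real name in bpf_object__find_map_by_name(), as implemented above.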
10434 | | static int validate_map_op(const struct bpf_map *map, size_t key_sz, |
10435 | | size_t value_sz, bool check_value_sz) |
10436 | 0 | { |
10437 | 0 | if (!map_is_created(map)) /* map is not yet created */ |
10438 | 0 | return -ENOENT; |
10439 | | |
10440 | 0 | if (map->def.key_size != key_sz) { |
10441 | 0 | pr_warn("map '%s': unexpected key size %zu provided, expected %u\n", |
10442 | 0 | map->name, key_sz, map->def.key_size); |
10443 | 0 | return -EINVAL; |
10444 | 0 | } |
10445 | | |
10446 | 0 | if (map->fd < 0) { |
10447 | 0 | pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name); |
10448 | 0 | return -EINVAL; |
10449 | 0 | } |
10450 | | |
10451 | 0 | if (!check_value_sz) |
10452 | 0 | return 0; |
10453 | | |
10454 | 0 | switch (map->def.type) { |
10455 | 0 | case BPF_MAP_TYPE_PERCPU_ARRAY: |
10456 | 0 | case BPF_MAP_TYPE_PERCPU_HASH: |
10457 | 0 | case BPF_MAP_TYPE_LRU_PERCPU_HASH: |
10458 | 0 | case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: { |
10459 | 0 | int num_cpu = libbpf_num_possible_cpus(); |
10460 | 0 | size_t elem_sz = roundup(map->def.value_size, 8); |
10461 | |
10462 | 0 | if (value_sz != num_cpu * elem_sz) { |
10463 | 0 | pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n", |
10464 | 0 | map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz); |
10465 | 0 | return -EINVAL; |
10466 | 0 | } |
10467 | 0 | break; |
10468 | 0 | } |
10469 | 0 | default: |
10470 | 0 | if (map->def.value_size != value_sz) { |
10471 | 0 | pr_warn("map '%s': unexpected value size %zu provided, expected %u\n", |
10472 | 0 | map->name, value_sz, map->def.value_size); |
10473 | 0 | return -EINVAL; |
10474 | 0 | } |
10475 | 0 | break; |
10476 | 0 | } |
10477 | 0 | return 0; |
10478 | 0 | } |
10479 | | |
10480 | | int bpf_map__lookup_elem(const struct bpf_map *map, |
10481 | | const void *key, size_t key_sz, |
10482 | | void *value, size_t value_sz, __u64 flags) |
10483 | 0 | { |
10484 | 0 | int err; |
10485 | |
10486 | 0 | err = validate_map_op(map, key_sz, value_sz, true); |
10487 | 0 | if (err) |
10488 | 0 | return libbpf_err(err); |
10489 | | |
10490 | 0 | return bpf_map_lookup_elem_flags(map->fd, key, value, flags); |
10491 | 0 | } |
10492 | | |
10493 | | int bpf_map__update_elem(const struct bpf_map *map, |
10494 | | const void *key, size_t key_sz, |
10495 | | const void *value, size_t value_sz, __u64 flags) |
10496 | 0 | { |
10497 | 0 | int err; |
10498 | |
10499 | 0 | err = validate_map_op(map, key_sz, value_sz, true); |
10500 | 0 | if (err) |
10501 | 0 | return libbpf_err(err); |
10502 | | |
10503 | 0 | return bpf_map_update_elem(map->fd, key, value, flags); |
10504 | 0 | } |
10505 | | |
10506 | | int bpf_map__delete_elem(const struct bpf_map *map, |
10507 | | const void *key, size_t key_sz, __u64 flags) |
10508 | 0 | { |
10509 | 0 | int err; |
10510 | |
10511 | 0 | err = validate_map_op(map, key_sz, 0, false /* check_value_sz */); |
10512 | 0 | if (err) |
10513 | 0 | return libbpf_err(err); |
10514 | | |
10515 | 0 | return bpf_map_delete_elem_flags(map->fd, key, flags); |
10516 | 0 | } |
10517 | | |
10518 | | int bpf_map__lookup_and_delete_elem(const struct bpf_map *map, |
10519 | | const void *key, size_t key_sz, |
10520 | | void *value, size_t value_sz, __u64 flags) |
10521 | 0 | { |
10522 | 0 | int err; |
10523 | |
10524 | 0 | err = validate_map_op(map, key_sz, value_sz, true); |
10525 | 0 | if (err) |
10526 | 0 | return libbpf_err(err); |
10527 | | |
10528 | 0 | return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags); |
10529 | 0 | } |
10530 | | |
10531 | | int bpf_map__get_next_key(const struct bpf_map *map, |
10532 | | const void *cur_key, void *next_key, size_t key_sz) |
10533 | 0 | { |
10534 | 0 | int err; |
10535 | |
10536 | 0 | err = validate_map_op(map, key_sz, 0, false /* check_value_sz */); |
10537 | 0 | if (err) |
10538 | 0 | return libbpf_err(err); |
10539 | | |
10540 | 0 | return bpf_map_get_next_key(map->fd, cur_key, next_key); |
10541 | 0 | } |
10542 | | |
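A sketch of the size-checked element API above, assuming a created hash map 'map' with __u32 keys and __u64 values; for per-CPU map types, validate_map_op() instead expects a value buffer of libbpf_num_possible_cpus() * roundup(value_size, 8) bytes:

    __u32 key = 1;
    __u64 val = 42;

    if (!bpf_map__update_elem(map, &key, sizeof(key), &val, sizeof(val), BPF_ANY))
        bpf_map__lookup_elem(map, &key, sizeof(key), &val, sizeof(val), 0);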
10543 | | long libbpf_get_error(const void *ptr) |
10544 | 17.0k | { |
10545 | 17.0k | if (!IS_ERR_OR_NULL(ptr)) |
10546 | 5.97k | return 0; |
10547 | | |
10548 | 11.1k | if (IS_ERR(ptr)) |
10549 | 0 | errno = -PTR_ERR(ptr); |
10550 | | |
10551 | | /* If ptr == NULL, then errno should be already set by the failing |
10552 | | * API, because libbpf never returns NULL on success and it now always |
10553 | | * sets errno on error. So no extra errno handling for ptr == NULL |
10554 | | * case. |
10555 | | */ |
10556 | 11.1k | return -errno; |
10557 | 17.0k | } |
10558 | | |
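Since the libbpf 1.0 conventions described in the comment above, pointer-returning APIs return NULL and set errno, so callers can check errno directly instead of calling libbpf_get_error(); a sketch (assumes a loaded 'prog'):

    struct bpf_link *lnk = bpf_program__attach(prog);

    if (!lnk) {
        fprintf(stderr, "attach failed: %d\n", -errno);
        return -errno;
    }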
10559 | | /* Replace link's underlying BPF program with the new one */ |
10560 | | int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog) |
10561 | 0 | { |
10562 | 0 | int ret; |
10563 | 0 | int prog_fd = bpf_program__fd(prog); |
10564 | |
10565 | 0 | if (prog_fd < 0) { |
10566 | 0 | pr_warn("prog '%s': can't use BPF program without FD (was it loaded?)\n", |
10567 | 0 | prog->name); |
10568 | 0 | return libbpf_err(-EINVAL); |
10569 | 0 | } |
10570 | | |
10571 | 0 | ret = bpf_link_update(bpf_link__fd(link), prog_fd, NULL); |
10572 | 0 | return libbpf_err_errno(ret); |
10573 | 0 | } |
10574 | | |
10575 | | /* Release "ownership" of underlying BPF resource (typically, BPF program |
10576 | | * attached to some BPF hook, e.g., tracepoint, kprobe, etc). Disconnected |
10577 | | * link, when destructed through bpf_link__destroy() call, won't attempt to
10578 | | * detach/unregister that BPF resource. This is useful in situations where,
10579 | | * say, attached BPF program has to outlive userspace program that attached it |
10580 | | * in the system. Depending on type of BPF program, though, there might be |
10581 | | * additional steps (like pinning BPF program in BPF FS) necessary to ensure |
10582 | | * exit of userspace program doesn't trigger automatic detachment and clean up |
10583 | | * inside the kernel. |
10584 | | */ |
10585 | | void bpf_link__disconnect(struct bpf_link *link) |
10586 | 0 | { |
10587 | 0 | link->disconnected = true; |
10588 | 0 | } |
10589 | | |
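A sketch of the outlive-userspace pattern the comment above describes: pin the link in BPF FS, then disconnect it so destruction frees memory without detaching (the pin path is illustrative):

    if (!bpf_link__pin(link, "/sys/fs/bpf/my_link")) {
        bpf_link__disconnect(link);
        bpf_link__destroy(link); /* pinned link stays attached in the kernel */
    }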
10590 | | int bpf_link__destroy(struct bpf_link *link) |
10591 | 0 | { |
10592 | 0 | int err = 0; |
10593 | |
10594 | 0 | if (IS_ERR_OR_NULL(link)) |
10595 | 0 | return 0; |
10596 | | |
10597 | 0 | if (!link->disconnected && link->detach) |
10598 | 0 | err = link->detach(link); |
10599 | 0 | if (link->pin_path) |
10600 | 0 | free(link->pin_path); |
10601 | 0 | if (link->dealloc) |
10602 | 0 | link->dealloc(link); |
10603 | 0 | else |
10604 | 0 | free(link); |
10605 | |
10606 | 0 | return libbpf_err(err); |
10607 | 0 | } |
10608 | | |
10609 | | int bpf_link__fd(const struct bpf_link *link) |
10610 | 0 | { |
10611 | 0 | return link->fd; |
10612 | 0 | } |
10613 | | |
10614 | | const char *bpf_link__pin_path(const struct bpf_link *link) |
10615 | 0 | { |
10616 | 0 | return link->pin_path; |
10617 | 0 | } |
10618 | | |
10619 | | static int bpf_link__detach_fd(struct bpf_link *link) |
10620 | 0 | { |
10621 | 0 | return libbpf_err_errno(close(link->fd)); |
10622 | 0 | } |
10623 | | |
10624 | | struct bpf_link *bpf_link__open(const char *path) |
10625 | 0 | { |
10626 | 0 | struct bpf_link *link; |
10627 | 0 | int fd; |
10628 | |
10629 | 0 | fd = bpf_obj_get(path); |
10630 | 0 | if (fd < 0) { |
10631 | 0 | fd = -errno; |
10632 | 0 | pr_warn("failed to open link at %s: %d\n", path, fd); |
10633 | 0 | return libbpf_err_ptr(fd); |
10634 | 0 | } |
10635 | | |
10636 | 0 | link = calloc(1, sizeof(*link)); |
10637 | 0 | if (!link) { |
10638 | 0 | close(fd); |
10639 | 0 | return libbpf_err_ptr(-ENOMEM); |
10640 | 0 | } |
10641 | 0 | link->detach = &bpf_link__detach_fd; |
10642 | 0 | link->fd = fd; |
10643 | |
10644 | 0 | link->pin_path = strdup(path); |
10645 | 0 | if (!link->pin_path) { |
10646 | 0 | bpf_link__destroy(link); |
10647 | 0 | return libbpf_err_ptr(-ENOMEM); |
10648 | 0 | } |
10649 | | |
10650 | 0 | return link; |
10651 | 0 | } |
10652 | | |
10653 | | int bpf_link__detach(struct bpf_link *link) |
10654 | 0 | { |
10655 | 0 | return bpf_link_detach(link->fd) ? -errno : 0; |
10656 | 0 | } |
10657 | | |
10658 | | int bpf_link__pin(struct bpf_link *link, const char *path) |
10659 | 0 | { |
10660 | 0 | int err; |
10661 | |
10662 | 0 | if (link->pin_path) |
10663 | 0 | return libbpf_err(-EBUSY); |
10664 | 0 | err = make_parent_dir(path); |
10665 | 0 | if (err) |
10666 | 0 | return libbpf_err(err); |
10667 | 0 | err = check_path(path); |
10668 | 0 | if (err) |
10669 | 0 | return libbpf_err(err); |
10670 | | |
10671 | 0 | link->pin_path = strdup(path); |
10672 | 0 | if (!link->pin_path) |
10673 | 0 | return libbpf_err(-ENOMEM); |
10674 | | |
10675 | 0 | if (bpf_obj_pin(link->fd, link->pin_path)) { |
10676 | 0 | err = -errno; |
10677 | 0 | zfree(&link->pin_path); |
10678 | 0 | return libbpf_err(err); |
10679 | 0 | } |
10680 | | |
10681 | 0 | pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path); |
10682 | 0 | return 0; |
10683 | 0 | } |
10684 | | |
10685 | | int bpf_link__unpin(struct bpf_link *link) |
10686 | 0 | { |
10687 | 0 | int err; |
10688 | |
10689 | 0 | if (!link->pin_path) |
10690 | 0 | return libbpf_err(-EINVAL); |
10691 | | |
10692 | 0 | err = unlink(link->pin_path); |
10693 | 0 | if (err != 0) |
10694 | 0 | return -errno; |
10695 | | |
10696 | 0 | pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path); |
10697 | 0 | zfree(&link->pin_path); |
10698 | 0 | return 0; |
10699 | 0 | } |
10700 | | |
10701 | | struct bpf_link_perf { |
10702 | | struct bpf_link link; |
10703 | | int perf_event_fd; |
10704 | | /* legacy kprobe support: keep track of probe identifier and type */ |
10705 | | char *legacy_probe_name; |
10706 | | bool legacy_is_kprobe; |
10707 | | bool legacy_is_retprobe; |
10708 | | }; |
10709 | | |
10710 | | static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe); |
10711 | | static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe); |
10712 | | |
10713 | | static int bpf_link_perf_detach(struct bpf_link *link) |
10714 | 0 | { |
10715 | 0 | struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); |
10716 | 0 | int err = 0; |
10717 | |
10718 | 0 | if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0) |
10719 | 0 | err = -errno; |
10720 | |
10721 | 0 | if (perf_link->perf_event_fd != link->fd) |
10722 | 0 | close(perf_link->perf_event_fd); |
10723 | 0 | close(link->fd); |
10724 | | |
10725 | | /* legacy uprobe/kprobe needs to be removed after perf event fd closure */ |
10726 | 0 | if (perf_link->legacy_probe_name) { |
10727 | 0 | if (perf_link->legacy_is_kprobe) { |
10728 | 0 | err = remove_kprobe_event_legacy(perf_link->legacy_probe_name, |
10729 | 0 | perf_link->legacy_is_retprobe); |
10730 | 0 | } else { |
10731 | 0 | err = remove_uprobe_event_legacy(perf_link->legacy_probe_name, |
10732 | 0 | perf_link->legacy_is_retprobe); |
10733 | 0 | } |
10734 | 0 | } |
10735 | |
10736 | 0 | return err; |
10737 | 0 | } |
10738 | | |
10739 | | static void bpf_link_perf_dealloc(struct bpf_link *link) |
10740 | 0 | { |
10741 | 0 | struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); |
10742 | |
10743 | 0 | free(perf_link->legacy_probe_name); |
10744 | 0 | free(perf_link); |
10745 | 0 | } |
10746 | | |
10747 | | struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd, |
10748 | | const struct bpf_perf_event_opts *opts) |
10749 | 0 | { |
10750 | 0 | char errmsg[STRERR_BUFSIZE]; |
10751 | 0 | struct bpf_link_perf *link; |
10752 | 0 | int prog_fd, link_fd = -1, err; |
10753 | 0 | bool force_ioctl_attach; |
10754 | |
10755 | 0 | if (!OPTS_VALID(opts, bpf_perf_event_opts)) |
10756 | 0 | return libbpf_err_ptr(-EINVAL); |
10757 | | |
10758 | 0 | if (pfd < 0) { |
10759 | 0 | pr_warn("prog '%s': invalid perf event FD %d\n", |
10760 | 0 | prog->name, pfd); |
10761 | 0 | return libbpf_err_ptr(-EINVAL); |
10762 | 0 | } |
10763 | 0 | prog_fd = bpf_program__fd(prog); |
10764 | 0 | if (prog_fd < 0) { |
10765 | 0 | pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", |
10766 | 0 | prog->name); |
10767 | 0 | return libbpf_err_ptr(-EINVAL); |
10768 | 0 | } |
10769 | | |
10770 | 0 | link = calloc(1, sizeof(*link)); |
10771 | 0 | if (!link) |
10772 | 0 | return libbpf_err_ptr(-ENOMEM); |
10773 | 0 | link->link.detach = &bpf_link_perf_detach; |
10774 | 0 | link->link.dealloc = &bpf_link_perf_dealloc; |
10775 | 0 | link->perf_event_fd = pfd; |
10776 | |
10777 | 0 | force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false); |
10778 | 0 | if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) { |
10779 | 0 | DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts, |
10780 | 0 | .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0)); |
10781 | |
10782 | 0 | link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts); |
10783 | 0 | if (link_fd < 0) { |
10784 | 0 | err = -errno; |
10785 | 0 | pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n", |
10786 | 0 | prog->name, pfd, |
10787 | 0 | err, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
10788 | 0 | goto err_out; |
10789 | 0 | } |
10790 | 0 | link->link.fd = link_fd; |
10791 | 0 | } else { |
10792 | 0 | if (OPTS_GET(opts, bpf_cookie, 0)) { |
10793 | 0 | pr_warn("prog '%s': user context value is not supported\n", prog->name); |
10794 | 0 | err = -EOPNOTSUPP; |
10795 | 0 | goto err_out; |
10796 | 0 | } |
10797 | | |
10798 | 0 | if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) { |
10799 | 0 | err = -errno; |
10800 | 0 | pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n", |
10801 | 0 | prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
10802 | 0 | if (err == -EPROTO) |
10803 | 0 | pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n", |
10804 | 0 | prog->name, pfd); |
10805 | 0 | goto err_out; |
10806 | 0 | } |
10807 | 0 | link->link.fd = pfd; |
10808 | 0 | } |
10809 | 0 | if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { |
10810 | 0 | err = -errno; |
10811 | 0 | pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n", |
10812 | 0 | prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
10813 | 0 | goto err_out; |
10814 | 0 | } |
10815 | | |
10816 | 0 | return &link->link; |
10817 | 0 | err_out: |
10818 | 0 | if (link_fd >= 0) |
10819 | 0 | close(link_fd); |
10820 | 0 | free(link); |
10821 | 0 | return libbpf_err_ptr(err); |
10822 | 0 | } |
10823 | | |
10824 | | struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd) |
10825 | 0 | { |
10826 | 0 | return bpf_program__attach_perf_event_opts(prog, pfd, NULL); |
10827 | 0 | } |
10828 | | |
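A sketch of feeding this API a software CPU-clock perf event, assuming 'prog' is a loaded SEC("perf_event") program:

    struct perf_event_attr attr = {
        .type = PERF_TYPE_SOFTWARE,
        .size = sizeof(attr),
        .config = PERF_COUNT_SW_CPU_CLOCK,
        .freq = 1,
        .sample_freq = 99, /* 99 Hz sampling */
    };
    int pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
                      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
    struct bpf_link *lnk = pfd >= 0 ?
        bpf_program__attach_perf_event(prog, pfd) : NULL;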
10829 | | /* |
10830 | | * this function is expected to parse an integer in the range of [0, 2^31-1] from
10831 | | * the given file using scanf format string fmt. If the actual parsed value is
10832 | | * negative, the result might be indistinguishable from an error
10833 | | */ |
10834 | | static int parse_uint_from_file(const char *file, const char *fmt) |
10835 | 0 | { |
10836 | 0 | char buf[STRERR_BUFSIZE]; |
10837 | 0 | int err, ret; |
10838 | 0 | FILE *f; |
10839 | |
10840 | 0 | f = fopen(file, "re"); |
10841 | 0 | if (!f) { |
10842 | 0 | err = -errno; |
10843 | 0 | pr_debug("failed to open '%s': %s\n", file, |
10844 | 0 | libbpf_strerror_r(err, buf, sizeof(buf))); |
10845 | 0 | return err; |
10846 | 0 | } |
10847 | 0 | err = fscanf(f, fmt, &ret); |
10848 | 0 | if (err != 1) { |
10849 | 0 | err = err == EOF ? -EIO : -errno; |
10850 | 0 | pr_debug("failed to parse '%s': %s\n", file, |
10851 | 0 | libbpf_strerror_r(err, buf, sizeof(buf))); |
10852 | 0 | fclose(f); |
10853 | 0 | return err; |
10854 | 0 | } |
10855 | 0 | fclose(f); |
10856 | 0 | return ret; |
10857 | 0 | } |
10858 | | |
10859 | | static int determine_kprobe_perf_type(void) |
10860 | 0 | { |
10861 | 0 | const char *file = "/sys/bus/event_source/devices/kprobe/type"; |
10862 | |
10863 | 0 | return parse_uint_from_file(file, "%d\n"); |
10864 | 0 | } |
10865 | | |
10866 | | static int determine_uprobe_perf_type(void) |
10867 | 0 | { |
10868 | 0 | const char *file = "/sys/bus/event_source/devices/uprobe/type"; |
10869 | |
10870 | 0 | return parse_uint_from_file(file, "%d\n"); |
10871 | 0 | } |
10872 | | |
10873 | | static int determine_kprobe_retprobe_bit(void) |
10874 | 0 | { |
10875 | 0 | const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe"; |
10876 | |
10877 | 0 | return parse_uint_from_file(file, "config:%d\n"); |
10878 | 0 | } |
10879 | | |
10880 | | static int determine_uprobe_retprobe_bit(void) |
10881 | 0 | { |
10882 | 0 | const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe"; |
10883 | |
10884 | 0 | return parse_uint_from_file(file, "config:%d\n"); |
10885 | 0 | } |
10886 | | |
10887 | 0 | #define PERF_UPROBE_REF_CTR_OFFSET_BITS 32 |
10888 | 0 | #define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32 |
10889 | | |
10890 | | static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, |
10891 | | uint64_t offset, int pid, size_t ref_ctr_off) |
10892 | 0 | { |
10893 | 0 | const size_t attr_sz = sizeof(struct perf_event_attr); |
10894 | 0 | struct perf_event_attr attr; |
10895 | 0 | char errmsg[STRERR_BUFSIZE]; |
10896 | 0 | int type, pfd; |
10897 | |
10898 | 0 | if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS)) |
10899 | 0 | return -EINVAL; |
10900 | | |
10901 | 0 | memset(&attr, 0, attr_sz); |
10902 | |
10903 | 0 | type = uprobe ? determine_uprobe_perf_type() |
10904 | 0 | : determine_kprobe_perf_type(); |
10905 | 0 | if (type < 0) { |
10906 | 0 | pr_warn("failed to determine %s perf type: %s\n", |
10907 | 0 | uprobe ? "uprobe" : "kprobe", |
10908 | 0 | libbpf_strerror_r(type, errmsg, sizeof(errmsg))); |
10909 | 0 | return type; |
10910 | 0 | } |
10911 | 0 | if (retprobe) { |
10912 | 0 | int bit = uprobe ? determine_uprobe_retprobe_bit() |
10913 | 0 | : determine_kprobe_retprobe_bit(); |
10914 | |
10915 | 0 | if (bit < 0) { |
10916 | 0 | pr_warn("failed to determine %s retprobe bit: %s\n", |
10917 | 0 | uprobe ? "uprobe" : "kprobe", |
10918 | 0 | libbpf_strerror_r(bit, errmsg, sizeof(errmsg))); |
10919 | 0 | return bit; |
10920 | 0 | } |
10921 | 0 | attr.config |= 1 << bit; |
10922 | 0 | } |
10923 | 0 | attr.size = attr_sz; |
10924 | 0 | attr.type = type; |
10925 | 0 | attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT; |
10926 | 0 | attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */ |
10927 | 0 | attr.config2 = offset; /* kprobe_addr or probe_offset */ |
10928 | | |
10929 | | /* pid filter is meaningful only for uprobes */ |
10930 | 0 | pfd = syscall(__NR_perf_event_open, &attr, |
10931 | 0 | pid < 0 ? -1 : pid /* pid */, |
10932 | 0 | pid == -1 ? 0 : -1 /* cpu */, |
10933 | 0 | -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); |
10934 | 0 | return pfd >= 0 ? pfd : -errno; |
10935 | 0 | } |
10936 | | |
10937 | | static int append_to_file(const char *file, const char *fmt, ...) |
10938 | 0 | { |
10939 | 0 | int fd, n, err = 0; |
10940 | 0 | va_list ap; |
10941 | 0 | char buf[1024]; |
10942 | |
10943 | 0 | va_start(ap, fmt); |
10944 | 0 | n = vsnprintf(buf, sizeof(buf), fmt, ap); |
10945 | 0 | va_end(ap); |
10946 | |
10947 | 0 | if (n < 0 || n >= sizeof(buf)) |
10948 | 0 | return -EINVAL; |
10949 | | |
10950 | 0 | fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0); |
10951 | 0 | if (fd < 0) |
10952 | 0 | return -errno; |
10953 | | |
10954 | 0 | if (write(fd, buf, n) < 0) |
10955 | 0 | err = -errno; |
10956 | |
10957 | 0 | close(fd); |
10958 | 0 | return err; |
10959 | 0 | } |
10960 | | |
10961 | 0 | #define DEBUGFS "/sys/kernel/debug/tracing" |
10962 | 0 | #define TRACEFS "/sys/kernel/tracing" |
10963 | | |
10964 | | static bool use_debugfs(void) |
10965 | 0 | { |
10966 | 0 | static int has_debugfs = -1; |
10967 | |
10968 | 0 | if (has_debugfs < 0) |
10969 | 0 | has_debugfs = faccessat(AT_FDCWD, DEBUGFS, F_OK, AT_EACCESS) == 0; |
10970 | |
10971 | 0 | return has_debugfs == 1; |
10972 | 0 | } |
10973 | | |
10974 | | static const char *tracefs_path(void) |
10975 | 0 | { |
10976 | 0 | return use_debugfs() ? DEBUGFS : TRACEFS; |
10977 | 0 | } |
10978 | | |
10979 | | static const char *tracefs_kprobe_events(void) |
10980 | 0 | { |
10981 | 0 | return use_debugfs() ? DEBUGFS"/kprobe_events" : TRACEFS"/kprobe_events"; |
10982 | 0 | } |
10983 | | |
10984 | | static const char *tracefs_uprobe_events(void) |
10985 | 0 | { |
10986 | 0 | return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events"; |
10987 | 0 | } |
10988 | | |
10989 | | static const char *tracefs_available_filter_functions(void) |
10990 | 0 | { |
10991 | 0 | return use_debugfs() ? DEBUGFS"/available_filter_functions" |
10992 | 0 | : TRACEFS"/available_filter_functions"; |
10993 | 0 | } |
10994 | | |
10995 | | static const char *tracefs_available_filter_functions_addrs(void) |
10996 | 0 | { |
10997 | 0 | return use_debugfs() ? DEBUGFS"/available_filter_functions_addrs" |
10998 | 0 | : TRACEFS"/available_filter_functions_addrs"; |
10999 | 0 | } |
11000 | | |
11001 | | static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz, |
11002 | | const char *kfunc_name, size_t offset) |
11003 | 0 | { |
11004 | 0 | static int index = 0; |
11005 | 0 | int i; |
11006 | |
11007 | 0 | snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset, |
11008 | 0 | __sync_fetch_and_add(&index, 1)); |
11009 | | |
11010 | | /* sanitize kfunc_name in the probe name */
11011 | 0 | for (i = 0; buf[i]; i++) { |
11012 | 0 | if (!isalnum(buf[i])) |
11013 | 0 | buf[i] = '_'; |
11014 | 0 | } |
11015 | 0 | } |
11016 | | |
11017 | | static int add_kprobe_event_legacy(const char *probe_name, bool retprobe, |
11018 | | const char *kfunc_name, size_t offset) |
11019 | 0 | { |
11020 | 0 | return append_to_file(tracefs_kprobe_events(), "%c:%s/%s %s+0x%zx", |
11021 | 0 | retprobe ? 'r' : 'p', |
11022 | 0 | retprobe ? "kretprobes" : "kprobes", |
11023 | 0 | probe_name, kfunc_name, offset); |
11024 | 0 | } |
11025 | | |
11026 | | static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe) |
11027 | 0 | { |
11028 | 0 | return append_to_file(tracefs_kprobe_events(), "-:%s/%s", |
11029 | 0 | retprobe ? "kretprobes" : "kprobes", probe_name); |
11030 | 0 | } |
11031 | | |
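For reference, given the format strings above, a retprobe on do_sys_open created by pid 1234 would append a line like the first one below to <tracefs>/kprobe_events, and the '-:' form removes the event again (the pid, index and function name are illustrative):

    r:kretprobes/libbpf_1234_do_sys_open_0x0_0 do_sys_open+0x0
    -:kretprobes/libbpf_1234_do_sys_open_0x0_0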
11032 | | static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe) |
11033 | 0 | { |
11034 | 0 | char file[256]; |
11035 | |
11036 | 0 | snprintf(file, sizeof(file), "%s/events/%s/%s/id", |
11037 | 0 | tracefs_path(), retprobe ? "kretprobes" : "kprobes", probe_name); |
11038 | |
11039 | 0 | return parse_uint_from_file(file, "%d\n"); |
11040 | 0 | } |
11041 | | |
11042 | | static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe, |
11043 | | const char *kfunc_name, size_t offset, int pid) |
11044 | 0 | { |
11045 | 0 | const size_t attr_sz = sizeof(struct perf_event_attr); |
11046 | 0 | struct perf_event_attr attr; |
11047 | 0 | char errmsg[STRERR_BUFSIZE]; |
11048 | 0 | int type, pfd, err; |
11049 | |
11050 | 0 | err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset); |
11051 | 0 | if (err < 0) { |
11052 | 0 | pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n", |
11053 | 0 | kfunc_name, offset, |
11054 | 0 | libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
11055 | 0 | return err; |
11056 | 0 | } |
11057 | 0 | type = determine_kprobe_perf_type_legacy(probe_name, retprobe); |
11058 | 0 | if (type < 0) { |
11059 | 0 | err = type; |
11060 | 0 | pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n", |
11061 | 0 | kfunc_name, offset, |
11062 | 0 | libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
11063 | 0 | goto err_clean_legacy; |
11064 | 0 | } |
11065 | | |
11066 | 0 | memset(&attr, 0, attr_sz); |
11067 | 0 | attr.size = attr_sz; |
11068 | 0 | attr.config = type; |
11069 | 0 | attr.type = PERF_TYPE_TRACEPOINT; |
11070 | |
11071 | 0 | pfd = syscall(__NR_perf_event_open, &attr, |
11072 | 0 | pid < 0 ? -1 : pid, /* pid */ |
11073 | 0 | pid == -1 ? 0 : -1, /* cpu */ |
11074 | 0 | -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); |
11075 | 0 | if (pfd < 0) { |
11076 | 0 | err = -errno; |
11077 | 0 | pr_warn("legacy kprobe perf_event_open() failed: %s\n", |
11078 | 0 | libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
11079 | 0 | goto err_clean_legacy; |
11080 | 0 | } |
11081 | 0 | return pfd; |
11082 | | |
11083 | 0 | err_clean_legacy: |
11084 | | /* Clear the newly added legacy kprobe_event */ |
11085 | 0 | remove_kprobe_event_legacy(probe_name, retprobe); |
11086 | 0 | return err; |
11087 | 0 | } |
11088 | | |
11089 | | static const char *arch_specific_syscall_pfx(void) |
11090 | 0 | { |
11091 | 0 | #if defined(__x86_64__) |
11092 | 0 | return "x64"; |
11093 | | #elif defined(__i386__) |
11094 | | return "ia32"; |
11095 | | #elif defined(__s390x__) |
11096 | | return "s390x"; |
11097 | | #elif defined(__s390__) |
11098 | | return "s390"; |
11099 | | #elif defined(__arm__) |
11100 | | return "arm"; |
11101 | | #elif defined(__aarch64__) |
11102 | | return "arm64"; |
11103 | | #elif defined(__mips__) |
11104 | | return "mips"; |
11105 | | #elif defined(__riscv) |
11106 | | return "riscv"; |
11107 | | #elif defined(__powerpc__) |
11108 | | return "powerpc"; |
11109 | | #elif defined(__powerpc64__) |
11110 | | return "powerpc64"; |
11111 | | #else |
11112 | | return NULL; |
11113 | | #endif |
11114 | 0 | } |
11115 | | |
11116 | | int probe_kern_syscall_wrapper(int token_fd) |
11117 | 0 | { |
11118 | 0 | char syscall_name[64]; |
11119 | 0 | const char *ksys_pfx; |
11120 | |
11121 | 0 | ksys_pfx = arch_specific_syscall_pfx(); |
11122 | 0 | if (!ksys_pfx) |
11123 | 0 | return 0; |
11124 | | |
11125 | 0 | snprintf(syscall_name, sizeof(syscall_name), "__%s_sys_bpf", ksys_pfx); |
11126 | |
11127 | 0 | if (determine_kprobe_perf_type() >= 0) { |
11128 | 0 | int pfd; |
11129 | |
11130 | 0 | pfd = perf_event_open_probe(false, false, syscall_name, 0, getpid(), 0); |
11131 | 0 | if (pfd >= 0) |
11132 | 0 | close(pfd); |
11133 | |
11134 | 0 | return pfd >= 0 ? 1 : 0; |
11135 | 0 | } else { /* legacy mode */ |
11136 | 0 | char probe_name[128]; |
11137 | |
|
11138 | 0 | gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0); |
11139 | 0 | if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0) |
11140 | 0 | return 0; |
11141 | | |
11142 | 0 | (void)remove_kprobe_event_legacy(probe_name, false); |
11143 | 0 | return 1; |
11144 | 0 | } |
11145 | 0 | } |
11146 | | |
11147 | | struct bpf_link * |
11148 | | bpf_program__attach_kprobe_opts(const struct bpf_program *prog, |
11149 | | const char *func_name, |
11150 | | const struct bpf_kprobe_opts *opts) |
11151 | 0 | { |
11152 | 0 | DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); |
11153 | 0 | enum probe_attach_mode attach_mode; |
11154 | 0 | char errmsg[STRERR_BUFSIZE]; |
11155 | 0 | char *legacy_probe = NULL; |
11156 | 0 | struct bpf_link *link; |
11157 | 0 | size_t offset; |
11158 | 0 | bool retprobe, legacy; |
11159 | 0 | int pfd, err; |
11160 | |
11161 | 0 | if (!OPTS_VALID(opts, bpf_kprobe_opts)) |
11162 | 0 | return libbpf_err_ptr(-EINVAL); |
11163 | | |
11164 | 0 | attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT); |
11165 | 0 | retprobe = OPTS_GET(opts, retprobe, false); |
11166 | 0 | offset = OPTS_GET(opts, offset, 0); |
11167 | 0 | pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); |
11168 | |
|
11169 | 0 | legacy = determine_kprobe_perf_type() < 0; |
11170 | 0 | switch (attach_mode) { |
11171 | 0 | case PROBE_ATTACH_MODE_LEGACY: |
11172 | 0 | legacy = true; |
11173 | 0 | pe_opts.force_ioctl_attach = true; |
11174 | 0 | break; |
11175 | 0 | case PROBE_ATTACH_MODE_PERF: |
11176 | 0 | if (legacy) |
11177 | 0 | return libbpf_err_ptr(-ENOTSUP); |
11178 | 0 | pe_opts.force_ioctl_attach = true; |
11179 | 0 | break; |
11180 | 0 | case PROBE_ATTACH_MODE_LINK: |
11181 | 0 | if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK)) |
11182 | 0 | return libbpf_err_ptr(-ENOTSUP); |
11183 | 0 | break; |
11184 | 0 | case PROBE_ATTACH_MODE_DEFAULT: |
11185 | 0 | break; |
11186 | 0 | default: |
11187 | 0 | return libbpf_err_ptr(-EINVAL); |
11188 | 0 | } |
11189 | | |
11190 | 0 | if (!legacy) { |
11191 | 0 | pfd = perf_event_open_probe(false /* uprobe */, retprobe, |
11192 | 0 | func_name, offset, |
11193 | 0 | -1 /* pid */, 0 /* ref_ctr_off */); |
11194 | 0 | } else { |
11195 | 0 | char probe_name[256]; |
11196 | |
11197 | 0 | gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), |
11198 | 0 | func_name, offset); |
11199 | |
11200 | 0 | legacy_probe = strdup(probe_name); |
11201 | 0 | if (!legacy_probe) |
11202 | 0 | return libbpf_err_ptr(-ENOMEM); |
11203 | | |
11204 | 0 | pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name, |
11205 | 0 | offset, -1 /* pid */); |
11206 | 0 | } |
11207 | 0 | if (pfd < 0) { |
11208 | 0 | err = -errno; |
11209 | 0 | pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n", |
11210 | 0 | prog->name, retprobe ? "kretprobe" : "kprobe", |
11211 | 0 | func_name, offset, |
11212 | 0 | libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
11213 | 0 | goto err_out; |
11214 | 0 | } |
11215 | 0 | link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); |
11216 | 0 | err = libbpf_get_error(link); |
11217 | 0 | if (err) { |
11218 | 0 | close(pfd); |
11219 | 0 | pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n", |
11220 | 0 | prog->name, retprobe ? "kretprobe" : "kprobe", |
11221 | 0 | func_name, offset, |
11222 | 0 | libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
11223 | 0 | goto err_clean_legacy; |
11224 | 0 | } |
11225 | 0 | if (legacy) { |
11226 | 0 | struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); |
11227 | |
11228 | 0 | perf_link->legacy_probe_name = legacy_probe; |
11229 | 0 | perf_link->legacy_is_kprobe = true; |
11230 | 0 | perf_link->legacy_is_retprobe = retprobe; |
11231 | 0 | } |
11232 | |
11233 | 0 | return link; |
11234 | | |
11235 | 0 | err_clean_legacy: |
11236 | 0 | if (legacy) |
11237 | 0 | remove_kprobe_event_legacy(legacy_probe, retprobe); |
11238 | 0 | err_out: |
11239 | 0 | free(legacy_probe); |
11240 | 0 | return libbpf_err_ptr(err); |
11241 | 0 | } |
11242 | | |
11243 | | struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog, |
11244 | | bool retprobe, |
11245 | | const char *func_name) |
11246 | 0 | { |
11247 | 0 | DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts, |
11248 | 0 | .retprobe = retprobe, |
11249 | 0 | ); |
11250 | |
11251 | 0 | return bpf_program__attach_kprobe_opts(prog, func_name, &opts); |
11252 | 0 | } |
11253 | | |
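A sketch of the opts variant with an explicit offset and cookie (the function name, offset and cookie value are illustrative; assumes a loaded kprobe program):

    LIBBPF_OPTS(bpf_kprobe_opts, kopts,
        .offset = 0x10,
        .bpf_cookie = 0xdead,
    );
    struct bpf_link *lnk =
        bpf_program__attach_kprobe_opts(prog, "tcp_v4_connect", &kopts);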
11254 | | struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog, |
11255 | | const char *syscall_name, |
11256 | | const struct bpf_ksyscall_opts *opts) |
11257 | 0 | { |
11258 | 0 | LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts); |
11259 | 0 | char func_name[128]; |
11260 | |
11261 | 0 | if (!OPTS_VALID(opts, bpf_ksyscall_opts)) |
11262 | 0 | return libbpf_err_ptr(-EINVAL); |
11263 | | |
11264 | 0 | if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) { |
11265 | | /* arch_specific_syscall_pfx() should never return NULL here |
11266 | | * because it is guarded by kernel_supports(). However, since |
11267 | | * the compiler does not know that, we have an explicit conditional
11268 | | * as well.
11269 | | */ |
11270 | 0 | snprintf(func_name, sizeof(func_name), "__%s_sys_%s", |
11271 | 0 | arch_specific_syscall_pfx() ? : "", syscall_name); |
11272 | 0 | } else { |
11273 | 0 | snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name); |
11274 | 0 | } |
11275 | |
11276 | 0 | kprobe_opts.retprobe = OPTS_GET(opts, retprobe, false); |
11277 | 0 | kprobe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); |
11278 | |
11279 | 0 | return bpf_program__attach_kprobe_opts(prog, func_name, &kprobe_opts); |
11280 | 0 | } |
11281 | | |
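A sketch of the ksyscall variant; as implemented above, libbpf resolves the arch-specific wrapper symbol (e.g. __x64_sys_openat on x86-64) or falls back to the __se_sys_ form (assumes a loaded SEC("ksyscall") program):

    LIBBPF_OPTS(bpf_ksyscall_opts, kso, .retprobe = false);
    struct bpf_link *lnk = bpf_program__attach_ksyscall(prog, "openat", &kso);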
11282 | | /* Adapted from perf/util/string.c */ |
11283 | | bool glob_match(const char *str, const char *pat) |
11284 | 0 | { |
11285 | 0 | while (*str && *pat && *pat != '*') { |
11286 | 0 | if (*pat == '?') { /* Matches any single character */ |
11287 | 0 | str++; |
11288 | 0 | pat++; |
11289 | 0 | continue; |
11290 | 0 | } |
11291 | 0 | if (*str != *pat) |
11292 | 0 | return false; |
11293 | 0 | str++; |
11294 | 0 | pat++; |
11295 | 0 | } |
11296 | | /* Check wild card */ |
11297 | 0 | if (*pat == '*') { |
11298 | 0 | while (*pat == '*') |
11299 | 0 | pat++; |
11300 | 0 | if (!*pat) /* Tail wild card matches all */ |
11301 | 0 | return true; |
11302 | 0 | while (*str) |
11303 | 0 | if (glob_match(str++, pat)) |
11304 | 0 | return true; |
11305 | 0 | } |
11306 | 0 | return !*str && !*pat; |
11307 | 0 | } |
11308 | | |
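glob_match() understands only '*' (any run of characters) and '?' (any single character), e.g.:

    glob_match("tcp_v4_connect", "tcp_*_connect");  /* true */
    glob_match("tcp_v4_connect", "tcp_v?_connect"); /* true */
    glob_match("udp_sendmsg", "tcp_*");             /* false */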
11309 | | struct kprobe_multi_resolve { |
11310 | | const char *pattern; |
11311 | | unsigned long *addrs; |
11312 | | size_t cap; |
11313 | | size_t cnt; |
11314 | | }; |
11315 | | |
11316 | | struct avail_kallsyms_data { |
11317 | | char **syms; |
11318 | | size_t cnt; |
11319 | | struct kprobe_multi_resolve *res; |
11320 | | }; |
11321 | | |
11322 | | static int avail_func_cmp(const void *a, const void *b) |
11323 | 0 | { |
11324 | 0 | return strcmp(*(const char **)a, *(const char **)b); |
11325 | 0 | } |
11326 | | |
11327 | | static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type, |
11328 | | const char *sym_name, void *ctx) |
11329 | 0 | { |
11330 | 0 | struct avail_kallsyms_data *data = ctx; |
11331 | 0 | struct kprobe_multi_resolve *res = data->res; |
11332 | 0 | int err; |
11333 | |
11334 | 0 | if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp)) |
11335 | 0 | return 0; |
11336 | | |
11337 | 0 | err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1); |
11338 | 0 | if (err) |
11339 | 0 | return err; |
11340 | | |
11341 | 0 | res->addrs[res->cnt++] = (unsigned long)sym_addr; |
11342 | 0 | return 0; |
11343 | 0 | } |
11344 | | |
11345 | | static int libbpf_available_kallsyms_parse(struct kprobe_multi_resolve *res) |
11346 | 0 | { |
11347 | 0 | const char *available_functions_file = tracefs_available_filter_functions(); |
11348 | 0 | struct avail_kallsyms_data data; |
11349 | 0 | char sym_name[500]; |
11350 | 0 | FILE *f; |
11351 | 0 | int err = 0, ret, i; |
11352 | 0 | char **syms = NULL; |
11353 | 0 | size_t cap = 0, cnt = 0; |
11354 | |
11355 | 0 | f = fopen(available_functions_file, "re"); |
11356 | 0 | if (!f) { |
11357 | 0 | err = -errno; |
11358 | 0 | pr_warn("failed to open %s: %d\n", available_functions_file, err); |
11359 | 0 | return err; |
11360 | 0 | } |
11361 | | |
11362 | 0 | while (true) { |
11363 | 0 | char *name; |
11364 | |
11365 | 0 | ret = fscanf(f, "%499s%*[^\n]\n", sym_name); |
11366 | 0 | if (ret == EOF && feof(f)) |
11367 | 0 | break; |
11368 | | |
11369 | 0 | if (ret != 1) { |
11370 | 0 | pr_warn("failed to parse available_filter_functions entry: %d\n", ret); |
11371 | 0 | err = -EINVAL; |
11372 | 0 | goto cleanup; |
11373 | 0 | } |
11374 | | |
11375 | 0 | if (!glob_match(sym_name, res->pattern)) |
11376 | 0 | continue; |
11377 | | |
11378 | 0 | err = libbpf_ensure_mem((void **)&syms, &cap, sizeof(*syms), cnt + 1); |
11379 | 0 | if (err) |
11380 | 0 | goto cleanup; |
11381 | | |
11382 | 0 | name = strdup(sym_name); |
11383 | 0 | if (!name) { |
11384 | 0 | err = -errno; |
11385 | 0 | goto cleanup; |
11386 | 0 | } |
11387 | | |
11388 | 0 | syms[cnt++] = name; |
11389 | 0 | } |
11390 | | |
11391 | | /* no entries found, bail out */ |
11392 | 0 | if (cnt == 0) { |
11393 | 0 | err = -ENOENT; |
11394 | 0 | goto cleanup; |
11395 | 0 | } |
11396 | | |
11397 | | /* sort available functions */ |
11398 | 0 | qsort(syms, cnt, sizeof(*syms), avail_func_cmp); |
11399 | |
11400 | 0 | data.syms = syms; |
11401 | 0 | data.res = res; |
11402 | 0 | data.cnt = cnt; |
11403 | 0 | libbpf_kallsyms_parse(avail_kallsyms_cb, &data); |
11404 | |
11405 | 0 | if (res->cnt == 0) |
11406 | 0 | err = -ENOENT; |
11407 | |
11408 | 0 | cleanup: |
11409 | 0 | for (i = 0; i < cnt; i++) |
11410 | 0 | free((char *)syms[i]); |
11411 | 0 | free(syms); |
11412 | |
11413 | 0 | fclose(f); |
11414 | 0 | return err; |
11415 | 0 | } |
11416 | | |
11417 | | static bool has_available_filter_functions_addrs(void) |
11418 | 0 | { |
11419 | 0 | return access(tracefs_available_filter_functions_addrs(), R_OK) != -1; |
11420 | 0 | } |
11421 | | |
11422 | | static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res) |
11423 | 0 | { |
11424 | 0 | const char *available_path = tracefs_available_filter_functions_addrs(); |
11425 | 0 | char sym_name[500]; |
11426 | 0 | FILE *f; |
11427 | 0 | int ret, err = 0; |
11428 | 0 | unsigned long long sym_addr; |
11429 | |
11430 | 0 | f = fopen(available_path, "re"); |
11431 | 0 | if (!f) { |
11432 | 0 | err = -errno; |
11433 | 0 | pr_warn("failed to open %s: %d\n", available_path, err); |
11434 | 0 | return err; |
11435 | 0 | } |
11436 | | |
11437 | 0 | while (true) { |
11438 | 0 | ret = fscanf(f, "%llx %499s%*[^\n]\n", &sym_addr, sym_name); |
11439 | 0 | if (ret == EOF && feof(f)) |
11440 | 0 | break; |
11441 | | |
11442 | 0 | if (ret != 2) { |
11443 | 0 | pr_warn("failed to parse available_filter_functions_addrs entry: %d\n", |
11444 | 0 | ret); |
11445 | 0 | err = -EINVAL; |
11446 | 0 | goto cleanup; |
11447 | 0 | } |
11448 | | |
11449 | 0 | if (!glob_match(sym_name, res->pattern)) |
11450 | 0 | continue; |
11451 | | |
11452 | 0 | err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, |
11453 | 0 | sizeof(*res->addrs), res->cnt + 1); |
11454 | 0 | if (err) |
11455 | 0 | goto cleanup; |
11456 | | |
11457 | 0 | res->addrs[res->cnt++] = (unsigned long)sym_addr; |
11458 | 0 | } |
11459 | | |
11460 | 0 | if (res->cnt == 0) |
11461 | 0 | err = -ENOENT; |
11462 | |
11463 | 0 | cleanup: |
11464 | 0 | fclose(f); |
11465 | 0 | return err; |
11466 | 0 | } |
11467 | | |
11468 | | struct bpf_link * |
11469 | | bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, |
11470 | | const char *pattern, |
11471 | | const struct bpf_kprobe_multi_opts *opts) |
11472 | 0 | { |
11473 | 0 | LIBBPF_OPTS(bpf_link_create_opts, lopts); |
11474 | 0 | struct kprobe_multi_resolve res = { |
11475 | 0 | .pattern = pattern, |
11476 | 0 | }; |
11477 | 0 | enum bpf_attach_type attach_type; |
11478 | 0 | struct bpf_link *link = NULL; |
11479 | 0 | char errmsg[STRERR_BUFSIZE]; |
11480 | 0 | const unsigned long *addrs; |
11481 | 0 | int err, link_fd, prog_fd; |
11482 | 0 | bool retprobe, session; |
11483 | 0 | const __u64 *cookies; |
11484 | 0 | const char **syms; |
11485 | 0 | size_t cnt; |
11486 | |
|
11487 | 0 | if (!OPTS_VALID(opts, bpf_kprobe_multi_opts)) |
11488 | 0 | return libbpf_err_ptr(-EINVAL); |
11489 | | |
11490 | 0 | prog_fd = bpf_program__fd(prog); |
11491 | 0 | if (prog_fd < 0) { |
11492 | 0 | pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", |
11493 | 0 | prog->name); |
11494 | 0 | return libbpf_err_ptr(-EINVAL); |
11495 | 0 | } |
11496 | | |
11497 | 0 | syms = OPTS_GET(opts, syms, false); |
11498 | 0 | addrs = OPTS_GET(opts, addrs, false); |
11499 | 0 | cnt = OPTS_GET(opts, cnt, false); |
11500 | 0 | cookies = OPTS_GET(opts, cookies, false); |
11501 | |
11502 | 0 | if (!pattern && !addrs && !syms) |
11503 | 0 | return libbpf_err_ptr(-EINVAL); |
11504 | 0 | if (pattern && (addrs || syms || cookies || cnt)) |
11505 | 0 | return libbpf_err_ptr(-EINVAL); |
11506 | 0 | if (!pattern && !cnt) |
11507 | 0 | return libbpf_err_ptr(-EINVAL); |
11508 | 0 | if (addrs && syms) |
11509 | 0 | return libbpf_err_ptr(-EINVAL); |
11510 | | |
11511 | 0 | if (pattern) { |
11512 | 0 | if (has_available_filter_functions_addrs()) |
11513 | 0 | err = libbpf_available_kprobes_parse(&res); |
11514 | 0 | else |
11515 | 0 | err = libbpf_available_kallsyms_parse(&res); |
11516 | 0 | if (err) |
11517 | 0 | goto error; |
11518 | 0 | addrs = res.addrs; |
11519 | 0 | cnt = res.cnt; |
11520 | 0 | } |
11521 | | |
11522 | 0 | retprobe = OPTS_GET(opts, retprobe, false); |
11523 | 0 | session = OPTS_GET(opts, session, false); |
11524 | |
11525 | 0 | if (retprobe && session) |
11526 | 0 | return libbpf_err_ptr(-EINVAL); |
11527 | | |
11528 | 0 | attach_type = session ? BPF_TRACE_KPROBE_SESSION : BPF_TRACE_KPROBE_MULTI; |
11529 | |
11530 | 0 | lopts.kprobe_multi.syms = syms; |
11531 | 0 | lopts.kprobe_multi.addrs = addrs; |
11532 | 0 | lopts.kprobe_multi.cookies = cookies; |
11533 | 0 | lopts.kprobe_multi.cnt = cnt; |
11534 | 0 | lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0; |
11535 | |
11536 | 0 | link = calloc(1, sizeof(*link)); |
11537 | 0 | if (!link) { |
11538 | 0 | err = -ENOMEM; |
11539 | 0 | goto error; |
11540 | 0 | } |
11541 | 0 | link->detach = &bpf_link__detach_fd; |
11542 | |
11543 | 0 | link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts); |
11544 | 0 | if (link_fd < 0) { |
11545 | 0 | err = -errno; |
11546 | 0 | pr_warn("prog '%s': failed to attach: %s\n", |
11547 | 0 | prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
11548 | 0 | goto error; |
11549 | 0 | } |
11550 | 0 | link->fd = link_fd; |
11551 | 0 | free(res.addrs); |
11552 | 0 | return link; |
11553 | | |
11554 | 0 | error: |
11555 | 0 | free(link); |
11556 | 0 | free(res.addrs); |
11557 | 0 | return libbpf_err_ptr(err); |
11558 | 0 | } |
11559 | | |
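A sketch of pattern-based multi-kprobe attachment; the pattern is resolved against available_filter_functions(_addrs) as implemented above (assumes a loaded SEC("kprobe.multi") program):

    LIBBPF_OPTS(bpf_kprobe_multi_opts, mopts, .retprobe = false);
    struct bpf_link *lnk =
        bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", &mopts);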
11560 | | static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
11561 | 0 | { |
11562 | 0 | DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts); |
11563 | 0 | unsigned long offset = 0; |
11564 | 0 | const char *func_name; |
11565 | 0 | char *func; |
11566 | 0 | int n; |
11567 | |
11568 | 0 | *link = NULL; |
11569 | | |
11570 | | /* no auto-attach for SEC("kprobe") and SEC("kretprobe") */ |
11571 | 0 | if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0) |
11572 | 0 | return 0; |
11573 | | |
11574 | 0 | opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/"); |
11575 | 0 | if (opts.retprobe) |
11576 | 0 | func_name = prog->sec_name + sizeof("kretprobe/") - 1; |
11577 | 0 | else |
11578 | 0 | func_name = prog->sec_name + sizeof("kprobe/") - 1; |
11579 | |
11580 | 0 | n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset); |
11581 | 0 | if (n < 1) { |
11582 | 0 | pr_warn("kprobe name is invalid: %s\n", func_name); |
11583 | 0 | return -EINVAL; |
11584 | 0 | } |
11585 | 0 | if (opts.retprobe && offset != 0) { |
11586 | 0 | free(func); |
11587 | 0 | pr_warn("kretprobes do not support offset specification\n"); |
11588 | 0 | return -EINVAL; |
11589 | 0 | } |
11590 | | |
11591 | 0 | opts.offset = offset; |
11592 | 0 | *link = bpf_program__attach_kprobe_opts(prog, func, &opts); |
11593 | 0 | free(func); |
11594 | 0 | return libbpf_get_error(*link); |
11595 | 0 | } |
11596 | | |
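The parser above accepts an optional "+<offset>" suffix on the function name, so on the BPF side the declarative forms look like:

    SEC("kprobe/tcp_v4_connect")      /* entry, offset 0 */
    SEC("kprobe/tcp_v4_connect+0x10") /* 0x10 bytes into the function */
    SEC("kretprobe/tcp_v4_connect")   /* return; offsets are rejected */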
11597 | | static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
11598 | 0 | { |
11599 | 0 | LIBBPF_OPTS(bpf_ksyscall_opts, opts); |
11600 | 0 | const char *syscall_name; |
11601 | |
11602 | 0 | *link = NULL; |
11603 | | |
11604 | | /* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */ |
11605 | 0 | if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0) |
11606 | 0 | return 0; |
11607 | | |
11608 | 0 | opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/"); |
11609 | 0 | if (opts.retprobe) |
11610 | 0 | syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1; |
11611 | 0 | else |
11612 | 0 | syscall_name = prog->sec_name + sizeof("ksyscall/") - 1; |
11613 | |
11614 | 0 | *link = bpf_program__attach_ksyscall(prog, syscall_name, &opts); |
11615 | 0 | return *link ? 0 : -errno; |
11616 | 0 | } |
11617 | | |
11618 | | static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
11619 | 0 | { |
11620 | 0 | LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); |
11621 | 0 | const char *spec; |
11622 | 0 | char *pattern; |
11623 | 0 | int n; |
11624 | |
11625 | 0 | *link = NULL; |
11626 | | |
11627 | | /* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */ |
11628 | 0 | if (strcmp(prog->sec_name, "kprobe.multi") == 0 || |
11629 | 0 | strcmp(prog->sec_name, "kretprobe.multi") == 0) |
11630 | 0 | return 0; |
11631 | | |
11632 | 0 | opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/"); |
11633 | 0 | if (opts.retprobe) |
11634 | 0 | spec = prog->sec_name + sizeof("kretprobe.multi/") - 1; |
11635 | 0 | else |
11636 | 0 | spec = prog->sec_name + sizeof("kprobe.multi/") - 1; |
11637 | |
11638 | 0 | n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern); |
11639 | 0 | if (n < 1) { |
11640 | 0 | pr_warn("kprobe multi pattern is invalid: %s\n", spec); |
11641 | 0 | return -EINVAL; |
11642 | 0 | } |
11643 | | |
11644 | 0 | *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts); |
11645 | 0 | free(pattern); |
11646 | 0 | return libbpf_get_error(*link); |
11647 | 0 | } |
11648 | | |
11649 | | static int attach_kprobe_session(const struct bpf_program *prog, long cookie, |
11650 | | struct bpf_link **link) |
11651 | 0 | { |
11652 | 0 | LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .session = true); |
11653 | 0 | const char *spec; |
11654 | 0 | char *pattern; |
11655 | 0 | int n; |
11656 | |
11657 | 0 | *link = NULL; |
11658 | | |
11659 | | /* no auto-attach for SEC("kprobe.session") */ |
11660 | 0 | if (strcmp(prog->sec_name, "kprobe.session") == 0) |
11661 | 0 | return 0; |
11662 | | |
11663 | 0 | spec = prog->sec_name + sizeof("kprobe.session/") - 1; |
11664 | 0 | n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern); |
11665 | 0 | if (n < 1) { |
11666 | 0 | pr_warn("kprobe session pattern is invalid: %s\n", spec); |
11667 | 0 | return -EINVAL; |
11668 | 0 | } |
11669 | | |
11670 | 0 | *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts); |
11671 | 0 | free(pattern); |
11672 | 0 | return *link ? 0 : -errno; |
11673 | 0 | } |
11674 | | |
11675 | | static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
11676 | 0 | { |
11677 | 0 | char *probe_type = NULL, *binary_path = NULL, *func_name = NULL; |
11678 | 0 | LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); |
11679 | 0 | int n, ret = -EINVAL; |
11680 | |
11681 | 0 | *link = NULL; |
11682 | |
11683 | 0 | n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]", |
11684 | 0 | &probe_type, &binary_path, &func_name); |
11685 | 0 | switch (n) { |
11686 | 0 | case 1: |
11687 | | /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */ |
11688 | 0 | ret = 0; |
11689 | 0 | break; |
11690 | 0 | case 3: |
11691 | 0 | opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0; |
11692 | 0 | *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts); |
11693 | 0 | ret = libbpf_get_error(*link); |
11694 | 0 | break; |
11695 | 0 | default: |
11696 | 0 | pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name, |
11697 | 0 | prog->sec_name); |
11698 | 0 | break; |
11699 | 0 | } |
11700 | 0 | free(probe_type); |
11701 | 0 | free(binary_path); |
11702 | 0 | free(func_name); |
11703 | 0 | return ret; |
11704 | 0 | } |
11705 | | |
11706 | | static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz, |
11707 | | const char *binary_path, uint64_t offset) |
11708 | 0 | { |
11709 | 0 | int i; |
11710 | |
11711 | 0 | snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset); |
11712 | | |
11713 | | /* sanitize binary_path in the probe name */ |
11714 | 0 | for (i = 0; buf[i]; i++) { |
11715 | 0 | if (!isalnum(buf[i])) |
11716 | 0 | buf[i] = '_'; |
11717 | 0 | } |
11718 | 0 | } |
11719 | | |
11720 | | static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe, |
11721 | | const char *binary_path, size_t offset) |
11722 | 0 | { |
11723 | 0 | return append_to_file(tracefs_uprobe_events(), "%c:%s/%s %s:0x%zx", |
11724 | 0 | retprobe ? 'r' : 'p', |
11725 | 0 | retprobe ? "uretprobes" : "uprobes", |
11726 | 0 | probe_name, binary_path, offset); |
11727 | 0 | } |
11728 | | |
11729 | | static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe) |
11730 | 0 | { |
11731 | 0 | return append_to_file(tracefs_uprobe_events(), "-:%s/%s", |
11732 | 0 | retprobe ? "uretprobes" : "uprobes", probe_name); |
11733 | 0 | } |
11734 | | |
11735 | | static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe) |
11736 | 0 | { |
11737 | 0 | char file[512]; |
11738 | |
11739 | 0 | snprintf(file, sizeof(file), "%s/events/%s/%s/id", |
11740 | 0 | tracefs_path(), retprobe ? "uretprobes" : "uprobes", probe_name); |
11741 | |
11742 | 0 | return parse_uint_from_file(file, "%d\n"); |
11743 | 0 | } |
11744 | | |
11745 | | static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe, |
11746 | | const char *binary_path, size_t offset, int pid) |
11747 | 0 | { |
11748 | 0 | const size_t attr_sz = sizeof(struct perf_event_attr); |
11749 | 0 | struct perf_event_attr attr; |
11750 | 0 | int type, pfd, err; |
11751 | |
11752 | 0 | err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset); |
11753 | 0 | if (err < 0) { |
11754 | 0 | pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n", |
11755 | 0 | binary_path, (size_t)offset, err); |
11756 | 0 | return err; |
11757 | 0 | } |
11758 | 0 | type = determine_uprobe_perf_type_legacy(probe_name, retprobe); |
11759 | 0 | if (type < 0) { |
11760 | 0 | err = type; |
11761 | 0 | pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n", |
11762 | 0 | binary_path, offset, err); |
11763 | 0 | goto err_clean_legacy; |
11764 | 0 | } |
11765 | | |
11766 | 0 | memset(&attr, 0, attr_sz); |
11767 | 0 | attr.size = attr_sz; |
11768 | 0 | attr.config = type; |
11769 | 0 | attr.type = PERF_TYPE_TRACEPOINT; |
11770 | |
11771 | 0 | pfd = syscall(__NR_perf_event_open, &attr, |
11772 | 0 | pid < 0 ? -1 : pid, /* pid */ |
11773 | 0 | pid == -1 ? 0 : -1, /* cpu */ |
11774 | 0 | -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); |
11775 | 0 | if (pfd < 0) { |
11776 | 0 | err = -errno; |
11777 | 0 | pr_warn("legacy uprobe perf_event_open() failed: %d\n", err); |
11778 | 0 | goto err_clean_legacy; |
11779 | 0 | } |
11780 | 0 | return pfd; |
11781 | | |
11782 | 0 | err_clean_legacy: |
11783 | | /* Clear the newly added legacy uprobe_event */ |
11784 | 0 | remove_uprobe_event_legacy(probe_name, retprobe); |
11785 | 0 | return err; |
11786 | 0 | } |
11787 | | |
11788 | | /* Find offset of function name in archive specified by path. Currently |
11789 | | * supported are .zip files that do not compress their contents, as used on |
11790 | | * Android in the form of APKs, for example. "file_name" is the name of the ELF |
11791 | | * file inside the archive. "func_name" matches symbol name or name@@LIB for |
11792 | | * library functions. |
11793 | | * |
11794 | | * An overview of the APK format specifically is provided here:
11795 | | * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents |
11796 | | */ |
11797 | | static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name, |
11798 | | const char *func_name) |
11799 | 0 | { |
11800 | 0 | struct zip_archive *archive; |
11801 | 0 | struct zip_entry entry; |
11802 | 0 | long ret; |
11803 | 0 | Elf *elf; |
11804 | |
11805 | 0 | archive = zip_archive_open(archive_path); |
11806 | 0 | if (IS_ERR(archive)) { |
11807 | 0 | ret = PTR_ERR(archive); |
11808 | 0 | pr_warn("zip: failed to open %s: %ld\n", archive_path, ret); |
11809 | 0 | return ret; |
11810 | 0 | } |
11811 | | |
11812 | 0 | ret = zip_archive_find_entry(archive, file_name, &entry); |
11813 | 0 | if (ret) { |
11814 | 0 | pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name, |
11815 | 0 | archive_path, ret); |
11816 | 0 | goto out; |
11817 | 0 | } |
11818 | 0 | pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path, |
11819 | 0 | (unsigned long)entry.data_offset); |
11820 | |
11821 | 0 | if (entry.compression) { |
11822 | 0 | pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name, |
11823 | 0 | archive_path); |
11824 | 0 | ret = -LIBBPF_ERRNO__FORMAT; |
11825 | 0 | goto out; |
11826 | 0 | } |
11827 | | |
11828 | 0 | elf = elf_memory((void *)entry.data, entry.data_length); |
11829 | 0 | if (!elf) { |
11830 | 0 | pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path, |
11831 | 0 | elf_errmsg(-1)); |
11832 | 0 | ret = -LIBBPF_ERRNO__LIBELF; |
11833 | 0 | goto out; |
11834 | 0 | } |
11835 | | |
11836 | 0 | ret = elf_find_func_offset(elf, file_name, func_name); |
11837 | 0 | if (ret > 0) { |
11838 | 0 | pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n", |
11839 | 0 | func_name, file_name, archive_path, entry.data_offset, ret, |
11840 | 0 | ret + entry.data_offset); |
11841 | 0 | ret += entry.data_offset; |
11842 | 0 | } |
11843 | 0 | elf_end(elf); |
11844 | |
11845 | 0 | out: |
11846 | 0 | zip_archive_close(archive); |
11847 | 0 | return ret; |
11848 | 0 | } |
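| |
| | /* A usage sketch with hypothetical paths: for an Android APK,
| |  *
| |  *   long off = elf_find_func_offset_from_archive("/data/app/base.apk",
| |  *                                                "lib/arm64-v8a/libapp.so",
| |  *                                                "process_input");
| |  *
| |  * returns the symbol's offset within the stored (uncompressed) ELF member
| |  * plus entry.data_offset, i.e. an offset usable against the .apk itself.
| |  */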
11849 | | |
11850 | | static const char *arch_specific_lib_paths(void) |
11851 | 0 | { |
11852 | | /* |
11853 | | * Based on https://packages.debian.org/sid/libc6. |
11854 | | * |
11855 | | * Assume that the traced program is built for the same architecture |
11856 | | * as libbpf, which should cover the vast majority of cases. |
11857 | | */ |
11858 | 0 | #if defined(__x86_64__) |
11859 | 0 | return "/lib/x86_64-linux-gnu"; |
11860 | | #elif defined(__i386__) |
11861 | | return "/lib/i386-linux-gnu"; |
11862 | | #elif defined(__s390x__) |
11863 | | return "/lib/s390x-linux-gnu"; |
11864 | | #elif defined(__s390__) |
11865 | | return "/lib/s390-linux-gnu"; |
11866 | | #elif defined(__arm__) && defined(__SOFTFP__) |
11867 | | return "/lib/arm-linux-gnueabi"; |
11868 | | #elif defined(__arm__) && !defined(__SOFTFP__) |
11869 | | return "/lib/arm-linux-gnueabihf"; |
11870 | | #elif defined(__aarch64__) |
11871 | | return "/lib/aarch64-linux-gnu"; |
11872 | | #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64 |
11873 | | return "/lib/mips64el-linux-gnuabi64"; |
11874 | | #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32 |
11875 | | return "/lib/mipsel-linux-gnu"; |
11876 | | #elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ |
11877 | | return "/lib/powerpc64le-linux-gnu"; |
11878 | | #elif defined(__sparc__) && defined(__arch64__) |
11879 | | return "/lib/sparc64-linux-gnu"; |
11880 | | #elif defined(__riscv) && __riscv_xlen == 64 |
11881 | | return "/lib/riscv64-linux-gnu"; |
11882 | | #else |
11883 | | return NULL; |
11884 | | #endif |
11885 | 0 | } |
11886 | | |
11887 | | /* Get full path to program/shared library. */ |
11888 | | static int resolve_full_path(const char *file, char *result, size_t result_sz) |
11889 | 0 | { |
11890 | 0 | const char *search_paths[3] = {}; |
11891 | 0 | int i, perm; |
11892 | |
11893 | 0 | if (str_has_sfx(file, ".so") || strstr(file, ".so.")) { |
11894 | 0 | search_paths[0] = getenv("LD_LIBRARY_PATH"); |
11895 | 0 | search_paths[1] = "/usr/lib64:/usr/lib"; |
11896 | 0 | search_paths[2] = arch_specific_lib_paths(); |
11897 | 0 | perm = R_OK; |
11898 | 0 | } else { |
11899 | 0 | search_paths[0] = getenv("PATH"); |
11900 | 0 | search_paths[1] = "/usr/bin:/usr/sbin"; |
11901 | 0 | perm = R_OK | X_OK; |
11902 | 0 | } |
11903 | |
11904 | 0 | for (i = 0; i < ARRAY_SIZE(search_paths); i++) { |
11905 | 0 | const char *s; |
11906 | |
11907 | 0 | if (!search_paths[i]) |
11908 | 0 | continue; |
11909 | 0 | for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) { |
11910 | 0 | char *next_path; |
11911 | 0 | int seg_len; |
11912 | |
11913 | 0 | if (s[0] == ':') |
11914 | 0 | s++; |
11915 | 0 | next_path = strchr(s, ':'); |
11916 | 0 | seg_len = next_path ? next_path - s : strlen(s); |
11917 | 0 | if (!seg_len) |
11918 | 0 | continue; |
11919 | 0 | snprintf(result, result_sz, "%.*s/%s", seg_len, s, file); |
11920 | | /* ensure it has required permissions */ |
11921 | 0 | if (faccessat(AT_FDCWD, result, perm, AT_EACCESS) < 0) |
11922 | 0 | continue; |
11923 | 0 | pr_debug("resolved '%s' to '%s'\n", file, result); |
11924 | 0 | return 0; |
11925 | 0 | } |
11926 | 0 | } |
11927 | 0 | return -ENOENT; |
11928 | 0 | } |
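| |
| | /* For example (result path hypothetical), resolve_full_path("libc.so.6",
| |  * buf, sizeof(buf)) searches LD_LIBRARY_PATH, then "/usr/lib64:/usr/lib",
| |  * then the Debian-style arch directory, and might place
| |  * "/lib/x86_64-linux-gnu/libc.so.6" into buf.
| |  */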
11929 | | |
11930 | | struct bpf_link * |
11931 | | bpf_program__attach_uprobe_multi(const struct bpf_program *prog, |
11932 | | pid_t pid, |
11933 | | const char *path, |
11934 | | const char *func_pattern, |
11935 | | const struct bpf_uprobe_multi_opts *opts) |
11936 | 0 | { |
11937 | 0 | const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL; |
11938 | 0 | LIBBPF_OPTS(bpf_link_create_opts, lopts); |
11939 | 0 | unsigned long *resolved_offsets = NULL; |
11940 | 0 | int err = 0, link_fd, prog_fd; |
11941 | 0 | struct bpf_link *link = NULL; |
11942 | 0 | char errmsg[STRERR_BUFSIZE]; |
11943 | 0 | char full_path[PATH_MAX]; |
11944 | 0 | const __u64 *cookies; |
11945 | 0 | const char **syms; |
11946 | 0 | size_t cnt; |
11947 | |
11948 | 0 | if (!OPTS_VALID(opts, bpf_uprobe_multi_opts)) |
11949 | 0 | return libbpf_err_ptr(-EINVAL); |
11950 | | |
11951 | 0 | prog_fd = bpf_program__fd(prog); |
11952 | 0 | if (prog_fd < 0) { |
11953 | 0 | pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", |
11954 | 0 | prog->name); |
11955 | 0 | return libbpf_err_ptr(-EINVAL); |
11956 | 0 | } |
11957 | | |
11958 | 0 | syms = OPTS_GET(opts, syms, NULL); |
11959 | 0 | offsets = OPTS_GET(opts, offsets, NULL); |
11960 | 0 | ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL); |
11961 | 0 | cookies = OPTS_GET(opts, cookies, NULL); |
11962 | 0 | cnt = OPTS_GET(opts, cnt, 0); |
11963 | | |
11964 | | /* |
11965 | | * The user can specify two mutually exclusive sets of inputs:
11966 | | * |
11967 | | * 1) use only path/func_pattern/pid arguments |
11968 | | * |
11969 | | * 2) use path/pid with allowed combinations of: |
11970 | | * syms/offsets/ref_ctr_offsets/cookies/cnt |
11971 | | * |
11972 | | * - syms and offsets are mutually exclusive |
11973 | | * - ref_ctr_offsets and cookies are optional |
11974 | | * |
11975 | | * Any other usage results in an error.
11976 | | */ |
11977 | |
11978 | 0 | if (!path) |
11979 | 0 | return libbpf_err_ptr(-EINVAL); |
11980 | 0 | if (!func_pattern && cnt == 0) |
11981 | 0 | return libbpf_err_ptr(-EINVAL); |
11982 | | |
11983 | 0 | if (func_pattern) { |
11984 | 0 | if (syms || offsets || ref_ctr_offsets || cookies || cnt) |
11985 | 0 | return libbpf_err_ptr(-EINVAL); |
11986 | 0 | } else { |
11987 | 0 | if (!!syms == !!offsets) |
11988 | 0 | return libbpf_err_ptr(-EINVAL); |
11989 | 0 | } |
11990 | | |
11991 | 0 | if (func_pattern) { |
11992 | 0 | if (!strchr(path, '/')) { |
11993 | 0 | err = resolve_full_path(path, full_path, sizeof(full_path)); |
11994 | 0 | if (err) { |
11995 | 0 | pr_warn("prog '%s': failed to resolve full path for '%s': %d\n", |
11996 | 0 | prog->name, path, err); |
11997 | 0 | return libbpf_err_ptr(err); |
11998 | 0 | } |
11999 | 0 | path = full_path; |
12000 | 0 | } |
12001 | | |
12002 | 0 | err = elf_resolve_pattern_offsets(path, func_pattern, |
12003 | 0 | &resolved_offsets, &cnt); |
12004 | 0 | if (err < 0) |
12005 | 0 | return libbpf_err_ptr(err); |
12006 | 0 | offsets = resolved_offsets; |
12007 | 0 | } else if (syms) { |
12008 | 0 | err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets, STT_FUNC); |
12009 | 0 | if (err < 0) |
12010 | 0 | return libbpf_err_ptr(err); |
12011 | 0 | offsets = resolved_offsets; |
12012 | 0 | } |
12013 | | |
12014 | 0 | lopts.uprobe_multi.path = path; |
12015 | 0 | lopts.uprobe_multi.offsets = offsets; |
12016 | 0 | lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets; |
12017 | 0 | lopts.uprobe_multi.cookies = cookies; |
12018 | 0 | lopts.uprobe_multi.cnt = cnt; |
12019 | 0 | lopts.uprobe_multi.flags = OPTS_GET(opts, retprobe, false) ? BPF_F_UPROBE_MULTI_RETURN : 0; |
12020 | |
12021 | 0 | if (pid == 0) |
12022 | 0 | pid = getpid(); |
12023 | 0 | if (pid > 0) |
12024 | 0 | lopts.uprobe_multi.pid = pid; |
12025 | |
|
12026 | 0 | link = calloc(1, sizeof(*link)); |
12027 | 0 | if (!link) { |
12028 | 0 | err = -ENOMEM; |
12029 | 0 | goto error; |
12030 | 0 | } |
12031 | 0 | link->detach = &bpf_link__detach_fd; |
12032 | |
12033 | 0 | link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts); |
12034 | 0 | if (link_fd < 0) { |
12035 | 0 | err = -errno; |
12036 | 0 | pr_warn("prog '%s': failed to attach multi-uprobe: %s\n", |
12037 | 0 | prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
12038 | 0 | goto error; |
12039 | 0 | } |
12040 | 0 | link->fd = link_fd; |
12041 | 0 | free(resolved_offsets); |
12042 | 0 | return link; |
12043 | | |
12044 | 0 | error: |
12045 | 0 | free(resolved_offsets); |
12046 | 0 | free(link); |
12047 | 0 | return libbpf_err_ptr(err); |
12048 | 0 | } |
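| |
| | /* A minimal sketch (hypothetical path, symbols and cookies) of the
| |  * "syms + cookies" input combination validated above:
| |  *
| |  *   const char *syms[] = { "malloc", "free" };
| |  *   __u64 cookies[] = { 1, 2 };
| |  *   LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
| |  *               .syms = syms, .cookies = cookies, .cnt = 2);
| |  *   struct bpf_link *l = bpf_program__attach_uprobe_multi(prog, -1,
| |  *               "/lib/x86_64-linux-gnu/libc.so.6", NULL, &opts);
| |  */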
12049 | | |
12050 | | LIBBPF_API struct bpf_link * |
12051 | | bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, |
12052 | | const char *binary_path, size_t func_offset, |
12053 | | const struct bpf_uprobe_opts *opts) |
12054 | 0 | { |
12055 | 0 | const char *archive_path = NULL, *archive_sep = NULL; |
12056 | 0 | char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL; |
12057 | 0 | DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); |
12058 | 0 | enum probe_attach_mode attach_mode; |
12059 | 0 | char full_path[PATH_MAX]; |
12060 | 0 | struct bpf_link *link; |
12061 | 0 | size_t ref_ctr_off; |
12062 | 0 | int pfd, err; |
12063 | 0 | bool retprobe, legacy; |
12064 | 0 | const char *func_name; |
12065 | |
12066 | 0 | if (!OPTS_VALID(opts, bpf_uprobe_opts)) |
12067 | 0 | return libbpf_err_ptr(-EINVAL); |
12068 | | |
12069 | 0 | attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT); |
12070 | 0 | retprobe = OPTS_GET(opts, retprobe, false); |
12071 | 0 | ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0); |
12072 | 0 | pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); |
12073 | |
12074 | 0 | if (!binary_path) |
12075 | 0 | return libbpf_err_ptr(-EINVAL); |
12076 | | |
12077 | | /* Check if "binary_path" refers to an archive. */ |
12078 | 0 | archive_sep = strstr(binary_path, "!/"); |
12079 | 0 | if (archive_sep) { |
12080 | 0 | full_path[0] = '\0'; |
12081 | 0 | libbpf_strlcpy(full_path, binary_path, |
12082 | 0 | min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1))); |
12083 | 0 | archive_path = full_path; |
12084 | 0 | binary_path = archive_sep + 2; |
12085 | 0 | } else if (!strchr(binary_path, '/')) { |
12086 | 0 | err = resolve_full_path(binary_path, full_path, sizeof(full_path)); |
12087 | 0 | if (err) { |
12088 | 0 | pr_warn("prog '%s': failed to resolve full path for '%s': %d\n", |
12089 | 0 | prog->name, binary_path, err); |
12090 | 0 | return libbpf_err_ptr(err); |
12091 | 0 | } |
12092 | 0 | binary_path = full_path; |
12093 | 0 | } |
12094 | 0 | func_name = OPTS_GET(opts, func_name, NULL); |
12095 | 0 | if (func_name) { |
12096 | 0 | long sym_off; |
12097 | |
12098 | 0 | if (archive_path) { |
12099 | 0 | sym_off = elf_find_func_offset_from_archive(archive_path, binary_path, |
12100 | 0 | func_name); |
12101 | 0 | binary_path = archive_path; |
12102 | 0 | } else { |
12103 | 0 | sym_off = elf_find_func_offset_from_file(binary_path, func_name); |
12104 | 0 | } |
12105 | 0 | if (sym_off < 0) |
12106 | 0 | return libbpf_err_ptr(sym_off); |
12107 | 0 | func_offset += sym_off; |
12108 | 0 | } |
12109 | | |
12110 | 0 | legacy = determine_uprobe_perf_type() < 0; |
12111 | 0 | switch (attach_mode) { |
12112 | 0 | case PROBE_ATTACH_MODE_LEGACY: |
12113 | 0 | legacy = true; |
12114 | 0 | pe_opts.force_ioctl_attach = true; |
12115 | 0 | break; |
12116 | 0 | case PROBE_ATTACH_MODE_PERF: |
12117 | 0 | if (legacy) |
12118 | 0 | return libbpf_err_ptr(-ENOTSUP); |
12119 | 0 | pe_opts.force_ioctl_attach = true; |
12120 | 0 | break; |
12121 | 0 | case PROBE_ATTACH_MODE_LINK: |
12122 | 0 | if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK)) |
12123 | 0 | return libbpf_err_ptr(-ENOTSUP); |
12124 | 0 | break; |
12125 | 0 | case PROBE_ATTACH_MODE_DEFAULT: |
12126 | 0 | break; |
12127 | 0 | default: |
12128 | 0 | return libbpf_err_ptr(-EINVAL); |
12129 | 0 | } |
12130 | | |
12131 | 0 | if (!legacy) { |
12132 | 0 | pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path, |
12133 | 0 | func_offset, pid, ref_ctr_off); |
12134 | 0 | } else { |
12135 | 0 | char probe_name[PATH_MAX + 64]; |
12136 | |
12137 | 0 | if (ref_ctr_off) |
12138 | 0 | return libbpf_err_ptr(-EINVAL); |
12139 | | |
12140 | 0 | gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name), |
12141 | 0 | binary_path, func_offset); |
12142 | |
12143 | 0 | legacy_probe = strdup(probe_name); |
12144 | 0 | if (!legacy_probe) |
12145 | 0 | return libbpf_err_ptr(-ENOMEM); |
12146 | | |
12147 | 0 | pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe, |
12148 | 0 | binary_path, func_offset, pid); |
12149 | 0 | } |
12150 | 0 | if (pfd < 0) { |
12151 | 0 | err = -errno; |
12152 | 0 | pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n", |
12153 | 0 | prog->name, retprobe ? "uretprobe" : "uprobe", |
12154 | 0 | binary_path, func_offset, |
12155 | 0 | libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
12156 | 0 | goto err_out; |
12157 | 0 | } |
12158 | | |
12159 | 0 | link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); |
12160 | 0 | err = libbpf_get_error(link); |
12161 | 0 | if (err) { |
12162 | 0 | close(pfd); |
12163 | 0 | pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n", |
12164 | 0 | prog->name, retprobe ? "uretprobe" : "uprobe", |
12165 | 0 | binary_path, func_offset, |
12166 | 0 | libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
12167 | 0 | goto err_clean_legacy; |
12168 | 0 | } |
12169 | 0 | if (legacy) { |
12170 | 0 | struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); |
12171 | |
12172 | 0 | perf_link->legacy_probe_name = legacy_probe; |
12173 | 0 | perf_link->legacy_is_kprobe = false; |
12174 | 0 | perf_link->legacy_is_retprobe = retprobe; |
12175 | 0 | } |
12176 | 0 | return link; |
12177 | | |
12178 | 0 | err_clean_legacy: |
12179 | 0 | if (legacy) |
12180 | 0 | remove_uprobe_event_legacy(legacy_probe, retprobe); |
12181 | 0 | err_out: |
12182 | 0 | free(legacy_probe); |
12183 | 0 | return libbpf_err_ptr(err); |
12184 | 0 | } |
12185 | | |
12186 | | /* Format of u[ret]probe section definition supporting auto-attach: |
12187 | | * u[ret]probe/binary:function[+offset] |
12188 | | * |
12189 | | * binary can be an absolute/relative path or a filename; the latter is resolved to a |
12190 | | * full binary path via bpf_program__attach_uprobe_opts. |
12191 | | * |
12192 | | * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be |
12193 | | * specified (and auto-attach is not possible) or the above format is specified for |
12194 | | * auto-attach. |
12195 | | */ |
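| | /* For instance (paths and symbols illustrative only),
| |  * SEC("uprobe//usr/bin/bash:main"), SEC("uretprobe/libc.so.6:malloc") and
| |  * SEC("uprobe//usr/bin/bash:main+0x10") all follow this format and are
| |  * auto-attached.
| |  */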
12196 | | static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
12197 | 0 | { |
12198 | 0 | DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts); |
12199 | 0 | char *probe_type = NULL, *binary_path = NULL, *func_name = NULL, *func_off; |
12200 | 0 | int n, c, ret = -EINVAL; |
12201 | 0 | long offset = 0; |
12202 | |
12203 | 0 | *link = NULL; |
12204 | |
12205 | 0 | n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]", |
12206 | 0 | &probe_type, &binary_path, &func_name); |
12207 | 0 | switch (n) { |
12208 | 0 | case 1: |
12209 | | /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */ |
12210 | 0 | ret = 0; |
12211 | 0 | break; |
12212 | 0 | case 2: |
12213 | 0 | pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n", |
12214 | 0 | prog->name, prog->sec_name); |
12215 | 0 | break; |
12216 | 0 | case 3: |
12217 | | /* check if the user specified `+offset`; if so, it must be the last
12218 | | * part of the string - make sure sscanf read to EOL
12219 | | */ |
12220 | 0 | func_off = strrchr(func_name, '+'); |
12221 | 0 | if (func_off) { |
12222 | 0 | n = sscanf(func_off, "+%li%n", &offset, &c); |
12223 | 0 | if (n == 1 && *(func_off + c) == '\0') |
12224 | 0 | func_off[0] = '\0'; |
12225 | 0 | else |
12226 | 0 | offset = 0; |
12227 | 0 | } |
12228 | 0 | opts.retprobe = strcmp(probe_type, "uretprobe") == 0 || |
12229 | 0 | strcmp(probe_type, "uretprobe.s") == 0; |
12230 | 0 | if (opts.retprobe && offset != 0) { |
12231 | 0 | pr_warn("prog '%s': uretprobes do not support offset specification\n", |
12232 | 0 | prog->name); |
12233 | 0 | break; |
12234 | 0 | } |
12235 | 0 | opts.func_name = func_name; |
12236 | 0 | *link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts); |
12237 | 0 | ret = libbpf_get_error(*link); |
12238 | 0 | break; |
12239 | 0 | default: |
12240 | 0 | pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name, |
12241 | 0 | prog->sec_name); |
12242 | 0 | break; |
12243 | 0 | } |
12244 | 0 | free(probe_type); |
12245 | 0 | free(binary_path); |
12246 | 0 | free(func_name); |
12247 | |
12248 | 0 | return ret; |
12249 | 0 | } |
12250 | | |
12251 | | struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog, |
12252 | | bool retprobe, pid_t pid, |
12253 | | const char *binary_path, |
12254 | | size_t func_offset) |
12255 | 0 | { |
12256 | 0 | DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe); |
12257 | |
12258 | 0 | return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts); |
12259 | 0 | } |
12260 | | |
12261 | | struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog, |
12262 | | pid_t pid, const char *binary_path, |
12263 | | const char *usdt_provider, const char *usdt_name, |
12264 | | const struct bpf_usdt_opts *opts) |
12265 | 0 | { |
12266 | 0 | char resolved_path[512]; |
12267 | 0 | struct bpf_object *obj = prog->obj; |
12268 | 0 | struct bpf_link *link; |
12269 | 0 | __u64 usdt_cookie; |
12270 | 0 | int err; |
12271 | |
12272 | 0 | if (!OPTS_VALID(opts, bpf_usdt_opts))
12273 | 0 | return libbpf_err_ptr(-EINVAL); |
12274 | | |
12275 | 0 | if (bpf_program__fd(prog) < 0) { |
12276 | 0 | pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", |
12277 | 0 | prog->name); |
12278 | 0 | return libbpf_err_ptr(-EINVAL); |
12279 | 0 | } |
12280 | | |
12281 | 0 | if (!binary_path) |
12282 | 0 | return libbpf_err_ptr(-EINVAL); |
12283 | | |
12284 | 0 | if (!strchr(binary_path, '/')) { |
12285 | 0 | err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path)); |
12286 | 0 | if (err) { |
12287 | 0 | pr_warn("prog '%s': failed to resolve full path for '%s': %d\n", |
12288 | 0 | prog->name, binary_path, err); |
12289 | 0 | return libbpf_err_ptr(err); |
12290 | 0 | } |
12291 | 0 | binary_path = resolved_path; |
12292 | 0 | } |
12293 | | |
12294 | | /* USDT manager is instantiated lazily on first USDT attach. It will |
12295 | | * be destroyed together with BPF object in bpf_object__close(). |
12296 | | */ |
12297 | 0 | if (IS_ERR(obj->usdt_man)) |
12298 | 0 | return libbpf_ptr(obj->usdt_man); |
12299 | 0 | if (!obj->usdt_man) { |
12300 | 0 | obj->usdt_man = usdt_manager_new(obj); |
12301 | 0 | if (IS_ERR(obj->usdt_man)) |
12302 | 0 | return libbpf_ptr(obj->usdt_man); |
12303 | 0 | } |
12304 | | |
12305 | 0 | usdt_cookie = OPTS_GET(opts, usdt_cookie, 0); |
12306 | 0 | link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path, |
12307 | 0 | usdt_provider, usdt_name, usdt_cookie); |
12308 | 0 | err = libbpf_get_error(link); |
12309 | 0 | if (err) |
12310 | 0 | return libbpf_err_ptr(err); |
12311 | 0 | return link; |
12312 | 0 | } |
12313 | | |
12314 | | static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
12315 | 0 | { |
12316 | 0 | char *path = NULL, *provider = NULL, *name = NULL; |
12317 | 0 | const char *sec_name; |
12318 | 0 | int n, err; |
12319 | |
12320 | 0 | sec_name = bpf_program__section_name(prog); |
12321 | 0 | if (strcmp(sec_name, "usdt") == 0) { |
12322 | | /* no auto-attach for just SEC("usdt") */ |
12323 | 0 | *link = NULL; |
12324 | 0 | return 0; |
12325 | 0 | } |
12326 | | |
12327 | 0 | n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name); |
12328 | 0 | if (n != 3) { |
12329 | 0 | pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n", |
12330 | 0 | sec_name); |
12331 | 0 | err = -EINVAL; |
12332 | 0 | } else { |
12333 | 0 | *link = bpf_program__attach_usdt(prog, -1 /* any process */, path, |
12334 | 0 | provider, name, NULL); |
12335 | 0 | err = libbpf_get_error(*link); |
12336 | 0 | } |
12337 | 0 | free(path); |
12338 | 0 | free(provider); |
12339 | 0 | free(name); |
12340 | 0 | return err; |
12341 | 0 | } |
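| |
| | /* For example, SEC("usdt/libc.so.6:libc:setjmp") (illustrative) splits into
| |  * path "libc.so.6", provider "libc" and name "setjmp", while a plain
| |  * SEC("usdt") program must be attached manually via
| |  * bpf_program__attach_usdt().
| |  */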
12342 | | |
12343 | | static int determine_tracepoint_id(const char *tp_category, |
12344 | | const char *tp_name) |
12345 | 0 | { |
12346 | 0 | char file[PATH_MAX]; |
12347 | 0 | int ret; |
12348 | |
12349 | 0 | ret = snprintf(file, sizeof(file), "%s/events/%s/%s/id", |
12350 | 0 | tracefs_path(), tp_category, tp_name); |
12351 | 0 | if (ret < 0) |
12352 | 0 | return -errno; |
12353 | 0 | if (ret >= sizeof(file)) { |
12354 | 0 | pr_debug("tracepoint %s/%s path is too long\n", |
12355 | 0 | tp_category, tp_name); |
12356 | 0 | return -E2BIG; |
12357 | 0 | } |
12358 | 0 | return parse_uint_from_file(file, "%d\n"); |
12359 | 0 | } |
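| |
| | /* E.g. for ("sched", "sched_switch") this reads a numeric event ID such as
| |  * "314" (value illustrative) from <tracefs>/events/sched/sched_switch/id.
| |  */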
12360 | | |
12361 | | static int perf_event_open_tracepoint(const char *tp_category, |
12362 | | const char *tp_name) |
12363 | 0 | { |
12364 | 0 | const size_t attr_sz = sizeof(struct perf_event_attr); |
12365 | 0 | struct perf_event_attr attr; |
12366 | 0 | char errmsg[STRERR_BUFSIZE]; |
12367 | 0 | int tp_id, pfd, err; |
12368 | |
12369 | 0 | tp_id = determine_tracepoint_id(tp_category, tp_name); |
12370 | 0 | if (tp_id < 0) { |
12371 | 0 | pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n", |
12372 | 0 | tp_category, tp_name, |
12373 | 0 | libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg))); |
12374 | 0 | return tp_id; |
12375 | 0 | } |
12376 | | |
12377 | 0 | memset(&attr, 0, attr_sz); |
12378 | 0 | attr.type = PERF_TYPE_TRACEPOINT; |
12379 | 0 | attr.size = attr_sz; |
12380 | 0 | attr.config = tp_id; |
12381 | |
12382 | 0 | pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */, |
12383 | 0 | -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); |
12384 | 0 | if (pfd < 0) { |
12385 | 0 | err = -errno; |
12386 | 0 | pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n", |
12387 | 0 | tp_category, tp_name, |
12388 | 0 | libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
12389 | 0 | return err; |
12390 | 0 | } |
12391 | 0 | return pfd; |
12392 | 0 | } |
12393 | | |
12394 | | struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog, |
12395 | | const char *tp_category, |
12396 | | const char *tp_name, |
12397 | | const struct bpf_tracepoint_opts *opts) |
12398 | 0 | { |
12399 | 0 | DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); |
12400 | 0 | char errmsg[STRERR_BUFSIZE]; |
12401 | 0 | struct bpf_link *link; |
12402 | 0 | int pfd, err; |
12403 | |
12404 | 0 | if (!OPTS_VALID(opts, bpf_tracepoint_opts)) |
12405 | 0 | return libbpf_err_ptr(-EINVAL); |
12406 | | |
12407 | 0 | pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); |
12408 | |
12409 | 0 | pfd = perf_event_open_tracepoint(tp_category, tp_name); |
12410 | 0 | if (pfd < 0) { |
12411 | 0 | pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n", |
12412 | 0 | prog->name, tp_category, tp_name, |
12413 | 0 | libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); |
12414 | 0 | return libbpf_err_ptr(pfd); |
12415 | 0 | } |
12416 | 0 | link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); |
12417 | 0 | err = libbpf_get_error(link); |
12418 | 0 | if (err) { |
12419 | 0 | close(pfd); |
12420 | 0 | pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n", |
12421 | 0 | prog->name, tp_category, tp_name, |
12422 | 0 | libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
12423 | 0 | return libbpf_err_ptr(err); |
12424 | 0 | } |
12425 | 0 | return link; |
12426 | 0 | } |
12427 | | |
12428 | | struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog, |
12429 | | const char *tp_category, |
12430 | | const char *tp_name) |
12431 | 0 | { |
12432 | 0 | return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL); |
12433 | 0 | } |
12434 | | |
12435 | | static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
12436 | 0 | { |
12437 | 0 | char *sec_name, *tp_cat, *tp_name; |
12438 | |
12439 | 0 | *link = NULL; |
12440 | | |
12441 | | /* no auto-attach for SEC("tp") or SEC("tracepoint") */ |
12442 | 0 | if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0) |
12443 | 0 | return 0; |
12444 | | |
12445 | 0 | sec_name = strdup(prog->sec_name); |
12446 | 0 | if (!sec_name) |
12447 | 0 | return -ENOMEM; |
12448 | | |
12449 | | /* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */ |
12450 | 0 | if (str_has_pfx(prog->sec_name, "tp/")) |
12451 | 0 | tp_cat = sec_name + sizeof("tp/") - 1; |
12452 | 0 | else |
12453 | 0 | tp_cat = sec_name + sizeof("tracepoint/") - 1; |
12454 | 0 | tp_name = strchr(tp_cat, '/'); |
12455 | 0 | if (!tp_name) { |
12456 | 0 | free(sec_name); |
12457 | 0 | return -EINVAL; |
12458 | 0 | } |
12459 | 0 | *tp_name = '\0'; |
12460 | 0 | tp_name++; |
12461 | |
12462 | 0 | *link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name); |
12463 | 0 | free(sec_name); |
12464 | 0 | return libbpf_get_error(*link); |
12465 | 0 | } |
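| |
| | /* E.g. SEC("tp/sched/sched_switch") is split above into category "sched"
| |  * and name "sched_switch" before attaching.
| |  */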
12466 | | |
12467 | | struct bpf_link * |
12468 | | bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog, |
12469 | | const char *tp_name, |
12470 | | struct bpf_raw_tracepoint_opts *opts) |
12471 | 0 | { |
12472 | 0 | LIBBPF_OPTS(bpf_raw_tp_opts, raw_opts); |
12473 | 0 | char errmsg[STRERR_BUFSIZE]; |
12474 | 0 | struct bpf_link *link; |
12475 | 0 | int prog_fd, pfd; |
12476 | |
12477 | 0 | if (!OPTS_VALID(opts, bpf_raw_tracepoint_opts)) |
12478 | 0 | return libbpf_err_ptr(-EINVAL); |
12479 | | |
12480 | 0 | prog_fd = bpf_program__fd(prog); |
12481 | 0 | if (prog_fd < 0) { |
12482 | 0 | pr_warn("prog '%s': can't attach before loaded\n", prog->name); |
12483 | 0 | return libbpf_err_ptr(-EINVAL); |
12484 | 0 | } |
12485 | | |
12486 | 0 | link = calloc(1, sizeof(*link)); |
12487 | 0 | if (!link) |
12488 | 0 | return libbpf_err_ptr(-ENOMEM); |
12489 | 0 | link->detach = &bpf_link__detach_fd; |
12490 | |
12491 | 0 | raw_opts.tp_name = tp_name; |
12492 | 0 | raw_opts.cookie = OPTS_GET(opts, cookie, 0); |
12493 | 0 | pfd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_opts); |
12494 | 0 | if (pfd < 0) { |
12495 | 0 | pfd = -errno; |
12496 | 0 | free(link); |
12497 | 0 | pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n", |
12498 | 0 | prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); |
12499 | 0 | return libbpf_err_ptr(pfd); |
12500 | 0 | } |
12501 | 0 | link->fd = pfd; |
12502 | 0 | return link; |
12503 | 0 | } |
12504 | | |
12505 | | struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog, |
12506 | | const char *tp_name) |
12507 | 0 | { |
12508 | 0 | return bpf_program__attach_raw_tracepoint_opts(prog, tp_name, NULL); |
12509 | 0 | } |
12510 | | |
12511 | | static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
12512 | 0 | { |
12513 | 0 | static const char *const prefixes[] = { |
12514 | 0 | "raw_tp", |
12515 | 0 | "raw_tracepoint", |
12516 | 0 | "raw_tp.w", |
12517 | 0 | "raw_tracepoint.w", |
12518 | 0 | }; |
12519 | 0 | size_t i; |
12520 | 0 | const char *tp_name = NULL; |
12521 | |
12522 | 0 | *link = NULL; |
12523 | |
12524 | 0 | for (i = 0; i < ARRAY_SIZE(prefixes); i++) { |
12525 | 0 | size_t pfx_len; |
12526 | |
12527 | 0 | if (!str_has_pfx(prog->sec_name, prefixes[i])) |
12528 | 0 | continue; |
12529 | | |
12530 | 0 | pfx_len = strlen(prefixes[i]); |
12531 | | /* no auto-attach case, e.g., bare SEC("raw_tp") */
12532 | 0 | if (prog->sec_name[pfx_len] == '\0') |
12533 | 0 | return 0; |
12534 | | |
12535 | 0 | if (prog->sec_name[pfx_len] != '/') |
12536 | 0 | continue; |
12537 | | |
12538 | 0 | tp_name = prog->sec_name + pfx_len + 1; |
12539 | 0 | break; |
12540 | 0 | } |
12541 | | |
12542 | 0 | if (!tp_name) { |
12543 | 0 | pr_warn("prog '%s': invalid section name '%s'\n", |
12544 | 0 | prog->name, prog->sec_name); |
12545 | 0 | return -EINVAL; |
12546 | 0 | } |
12547 | | |
12548 | 0 | *link = bpf_program__attach_raw_tracepoint(prog, tp_name); |
12549 | 0 | return libbpf_get_error(*link); |
12550 | 0 | } |
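| |
| | /* E.g. SEC("raw_tp/sched_switch") attaches to the sched_switch raw
| |  * tracepoint, while bare SEC("raw_tp") is accepted but not auto-attached.
| |  */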
12551 | | |
12552 | | /* Common logic for all BPF program types that attach to a btf_id */ |
12553 | | static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog, |
12554 | | const struct bpf_trace_opts *opts) |
12555 | 0 | { |
12556 | 0 | LIBBPF_OPTS(bpf_link_create_opts, link_opts); |
12557 | 0 | char errmsg[STRERR_BUFSIZE]; |
12558 | 0 | struct bpf_link *link; |
12559 | 0 | int prog_fd, pfd; |
12560 | |
12561 | 0 | if (!OPTS_VALID(opts, bpf_trace_opts)) |
12562 | 0 | return libbpf_err_ptr(-EINVAL); |
12563 | | |
12564 | 0 | prog_fd = bpf_program__fd(prog); |
12565 | 0 | if (prog_fd < 0) { |
12566 | 0 | pr_warn("prog '%s': can't attach before loaded\n", prog->name); |
12567 | 0 | return libbpf_err_ptr(-EINVAL); |
12568 | 0 | } |
12569 | | |
12570 | 0 | link = calloc(1, sizeof(*link)); |
12571 | 0 | if (!link) |
12572 | 0 | return libbpf_err_ptr(-ENOMEM); |
12573 | 0 | link->detach = &bpf_link__detach_fd; |
12574 | | |
12575 | | /* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */ |
12576 | 0 | link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0); |
12577 | 0 | pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts); |
12578 | 0 | if (pfd < 0) { |
12579 | 0 | pfd = -errno; |
12580 | 0 | free(link); |
12581 | 0 | pr_warn("prog '%s': failed to attach: %s\n", |
12582 | 0 | prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); |
12583 | 0 | return libbpf_err_ptr(pfd); |
12584 | 0 | } |
12585 | 0 | link->fd = pfd; |
12586 | 0 | return link; |
12587 | 0 | } |
12588 | | |
12589 | | struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog) |
12590 | 0 | { |
12591 | 0 | return bpf_program__attach_btf_id(prog, NULL); |
12592 | 0 | } |
12593 | | |
12594 | | struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog, |
12595 | | const struct bpf_trace_opts *opts) |
12596 | 0 | { |
12597 | 0 | return bpf_program__attach_btf_id(prog, opts); |
12598 | 0 | } |
12599 | | |
12600 | | struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog) |
12601 | 0 | { |
12602 | 0 | return bpf_program__attach_btf_id(prog, NULL); |
12603 | 0 | } |
12604 | | |
12605 | | static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
12606 | 0 | { |
12607 | 0 | *link = bpf_program__attach_trace(prog); |
12608 | 0 | return libbpf_get_error(*link); |
12609 | 0 | } |
12610 | | |
12611 | | static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
12612 | 0 | { |
12613 | 0 | *link = bpf_program__attach_lsm(prog); |
12614 | 0 | return libbpf_get_error(*link); |
12615 | 0 | } |
12616 | | |
12617 | | static struct bpf_link * |
12618 | | bpf_program_attach_fd(const struct bpf_program *prog, |
12619 | | int target_fd, const char *target_name, |
12620 | | const struct bpf_link_create_opts *opts) |
12621 | 0 | { |
12622 | 0 | enum bpf_attach_type attach_type; |
12623 | 0 | char errmsg[STRERR_BUFSIZE]; |
12624 | 0 | struct bpf_link *link; |
12625 | 0 | int prog_fd, link_fd; |
12626 | |
12627 | 0 | prog_fd = bpf_program__fd(prog); |
12628 | 0 | if (prog_fd < 0) { |
12629 | 0 | pr_warn("prog '%s': can't attach before loaded\n", prog->name); |
12630 | 0 | return libbpf_err_ptr(-EINVAL); |
12631 | 0 | } |
12632 | | |
12633 | 0 | link = calloc(1, sizeof(*link)); |
12634 | 0 | if (!link) |
12635 | 0 | return libbpf_err_ptr(-ENOMEM); |
12636 | 0 | link->detach = &bpf_link__detach_fd; |
12637 | |
12638 | 0 | attach_type = bpf_program__expected_attach_type(prog); |
12639 | 0 | link_fd = bpf_link_create(prog_fd, target_fd, attach_type, opts); |
12640 | 0 | if (link_fd < 0) { |
12641 | 0 | link_fd = -errno; |
12642 | 0 | free(link); |
12643 | 0 | pr_warn("prog '%s': failed to attach to %s: %s\n", |
12644 | 0 | prog->name, target_name, |
12645 | 0 | libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); |
12646 | 0 | return libbpf_err_ptr(link_fd); |
12647 | 0 | } |
12648 | 0 | link->fd = link_fd; |
12649 | 0 | return link; |
12650 | 0 | } |
12651 | | |
12652 | | struct bpf_link * |
12653 | | bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd) |
12654 | 0 | { |
12655 | 0 | return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", NULL); |
12656 | 0 | } |
12657 | | |
12658 | | struct bpf_link * |
12659 | | bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd) |
12660 | 0 | { |
12661 | 0 | return bpf_program_attach_fd(prog, netns_fd, "netns", NULL); |
12662 | 0 | } |
12663 | | |
12664 | | struct bpf_link * |
12665 | | bpf_program__attach_sockmap(const struct bpf_program *prog, int map_fd) |
12666 | 0 | { |
12667 | 0 | return bpf_program_attach_fd(prog, map_fd, "sockmap", NULL); |
12668 | 0 | } |
12669 | | |
12670 | | struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex) |
12671 | 0 | { |
12672 | | /* target_fd/target_ifindex use the same field in LINK_CREATE */ |
12673 | 0 | return bpf_program_attach_fd(prog, ifindex, "xdp", NULL); |
12674 | 0 | } |
12675 | | |
12676 | | struct bpf_link * |
12677 | | bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex, |
12678 | | const struct bpf_tcx_opts *opts) |
12679 | 0 | { |
12680 | 0 | LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); |
12681 | 0 | __u32 relative_id; |
12682 | 0 | int relative_fd; |
12683 | |
12684 | 0 | if (!OPTS_VALID(opts, bpf_tcx_opts)) |
12685 | 0 | return libbpf_err_ptr(-EINVAL); |
12686 | | |
12687 | 0 | relative_id = OPTS_GET(opts, relative_id, 0); |
12688 | 0 | relative_fd = OPTS_GET(opts, relative_fd, 0); |
12689 | | |
12690 | | /* validate we don't have unexpected combinations of non-zero fields */ |
12691 | 0 | if (!ifindex) { |
12692 | 0 | pr_warn("prog '%s': target netdevice ifindex cannot be zero\n", |
12693 | 0 | prog->name); |
12694 | 0 | return libbpf_err_ptr(-EINVAL); |
12695 | 0 | } |
12696 | 0 | if (relative_fd && relative_id) { |
12697 | 0 | pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n", |
12698 | 0 | prog->name); |
12699 | 0 | return libbpf_err_ptr(-EINVAL); |
12700 | 0 | } |
12701 | | |
12702 | 0 | link_create_opts.tcx.expected_revision = OPTS_GET(opts, expected_revision, 0); |
12703 | 0 | link_create_opts.tcx.relative_fd = relative_fd; |
12704 | 0 | link_create_opts.tcx.relative_id = relative_id; |
12705 | 0 | link_create_opts.flags = OPTS_GET(opts, flags, 0); |
12706 | | |
12707 | | /* target_fd/target_ifindex use the same field in LINK_CREATE */ |
12708 | 0 | return bpf_program_attach_fd(prog, ifindex, "tcx", &link_create_opts); |
12709 | 0 | } |
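| |
| | /* A minimal sketch (ifindex hypothetical):
| |  *
| |  *   LIBBPF_OPTS(bpf_tcx_opts, opts);
| |  *   struct bpf_link *l = bpf_program__attach_tcx(prog, 2, &opts);
| |  *
| |  * relative_fd/relative_id, when set, order the program relative to an
| |  * existing one in the tcx chain; as validated above, they are mutually
| |  * exclusive.
| |  */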
12710 | | |
12711 | | struct bpf_link * |
12712 | | bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex, |
12713 | | const struct bpf_netkit_opts *opts) |
12714 | 0 | { |
12715 | 0 | LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); |
12716 | 0 | __u32 relative_id; |
12717 | 0 | int relative_fd; |
12718 | |
12719 | 0 | if (!OPTS_VALID(opts, bpf_netkit_opts)) |
12720 | 0 | return libbpf_err_ptr(-EINVAL); |
12721 | | |
12722 | 0 | relative_id = OPTS_GET(opts, relative_id, 0); |
12723 | 0 | relative_fd = OPTS_GET(opts, relative_fd, 0); |
12724 | | |
12725 | | /* validate we don't have unexpected combinations of non-zero fields */ |
12726 | 0 | if (!ifindex) { |
12727 | 0 | pr_warn("prog '%s': target netdevice ifindex cannot be zero\n", |
12728 | 0 | prog->name); |
12729 | 0 | return libbpf_err_ptr(-EINVAL); |
12730 | 0 | } |
12731 | 0 | if (relative_fd && relative_id) { |
12732 | 0 | pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n", |
12733 | 0 | prog->name); |
12734 | 0 | return libbpf_err_ptr(-EINVAL); |
12735 | 0 | } |
12736 | | |
12737 | 0 | link_create_opts.netkit.expected_revision = OPTS_GET(opts, expected_revision, 0); |
12738 | 0 | link_create_opts.netkit.relative_fd = relative_fd; |
12739 | 0 | link_create_opts.netkit.relative_id = relative_id; |
12740 | 0 | link_create_opts.flags = OPTS_GET(opts, flags, 0); |
12741 | |
12742 | 0 | return bpf_program_attach_fd(prog, ifindex, "netkit", &link_create_opts); |
12743 | 0 | } |
12744 | | |
12745 | | struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog, |
12746 | | int target_fd, |
12747 | | const char *attach_func_name) |
12748 | 0 | { |
12749 | 0 | int btf_id; |
12750 | |
12751 | 0 | if (!!target_fd != !!attach_func_name) { |
12752 | 0 | pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n", |
12753 | 0 | prog->name); |
12754 | 0 | return libbpf_err_ptr(-EINVAL); |
12755 | 0 | } |
12756 | | |
12757 | 0 | if (prog->type != BPF_PROG_TYPE_EXT) { |
12758 | 0 | pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
12759 | 0 | prog->name); |
12760 | 0 | return libbpf_err_ptr(-EINVAL); |
12761 | 0 | } |
12762 | | |
12763 | 0 | if (target_fd) { |
12764 | 0 | LIBBPF_OPTS(bpf_link_create_opts, target_opts); |
12765 | |
12766 | 0 | btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd); |
12767 | 0 | if (btf_id < 0) |
12768 | 0 | return libbpf_err_ptr(btf_id); |
12769 | | |
12770 | 0 | target_opts.target_btf_id = btf_id; |
12771 | |
12772 | 0 | return bpf_program_attach_fd(prog, target_fd, "freplace", |
12773 | 0 | &target_opts); |
12774 | 0 | } else { |
12775 | | /* no target, so use raw_tracepoint_open for compatibility |
12776 | | * with old kernels |
12777 | | */ |
12778 | 0 | return bpf_program__attach_trace(prog); |
12779 | 0 | } |
12780 | 0 | } |
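| |
| | /* Two call shapes are accepted (names hypothetical):
| |  *   bpf_program__attach_freplace(prog, target_prog_fd, "xdp_main");
| |  * attaches to a specific function of a specific target program, while
| |  *   bpf_program__attach_freplace(prog, 0, NULL);
| |  * reuses the attach target fixed at load time.
| |  */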
12781 | | |
12782 | | struct bpf_link * |
12783 | | bpf_program__attach_iter(const struct bpf_program *prog, |
12784 | | const struct bpf_iter_attach_opts *opts) |
12785 | 0 | { |
12786 | 0 | DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); |
12787 | 0 | char errmsg[STRERR_BUFSIZE]; |
12788 | 0 | struct bpf_link *link; |
12789 | 0 | int prog_fd, link_fd; |
12790 | 0 | __u32 target_fd = 0; |
12791 | |
12792 | 0 | if (!OPTS_VALID(opts, bpf_iter_attach_opts)) |
12793 | 0 | return libbpf_err_ptr(-EINVAL); |
12794 | | |
12795 | 0 | link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0); |
12796 | 0 | link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0); |
12797 | |
12798 | 0 | prog_fd = bpf_program__fd(prog); |
12799 | 0 | if (prog_fd < 0) { |
12800 | 0 | pr_warn("prog '%s': can't attach before loaded\n", prog->name); |
12801 | 0 | return libbpf_err_ptr(-EINVAL); |
12802 | 0 | } |
12803 | | |
12804 | 0 | link = calloc(1, sizeof(*link)); |
12805 | 0 | if (!link) |
12806 | 0 | return libbpf_err_ptr(-ENOMEM); |
12807 | 0 | link->detach = &bpf_link__detach_fd; |
12808 | |
12809 | 0 | link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER, |
12810 | 0 | &link_create_opts); |
12811 | 0 | if (link_fd < 0) { |
12812 | 0 | link_fd = -errno; |
12813 | 0 | free(link); |
12814 | 0 | pr_warn("prog '%s': failed to attach to iterator: %s\n", |
12815 | 0 | prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); |
12816 | 0 | return libbpf_err_ptr(link_fd); |
12817 | 0 | } |
12818 | 0 | link->fd = link_fd; |
12819 | 0 | return link; |
12820 | 0 | } |
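| |
| | /* A sketch of attaching a map iterator (map_fd hypothetical):
| |  *
| |  *   union bpf_iter_link_info linfo = {};
| |  *
| |  *   linfo.map.map_fd = map_fd;
| |  *   LIBBPF_OPTS(bpf_iter_attach_opts, opts,
| |  *               .link_info = &linfo, .link_info_len = sizeof(linfo));
| |  *   struct bpf_link *l = bpf_program__attach_iter(prog, &opts);
| |  */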
12821 | | |
12822 | | static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link) |
12823 | 0 | { |
12824 | 0 | *link = bpf_program__attach_iter(prog, NULL); |
12825 | 0 | return libbpf_get_error(*link); |
12826 | 0 | } |
12827 | | |
12828 | | struct bpf_link *bpf_program__attach_netfilter(const struct bpf_program *prog, |
12829 | | const struct bpf_netfilter_opts *opts) |
12830 | 0 | { |
12831 | 0 | LIBBPF_OPTS(bpf_link_create_opts, lopts); |
12832 | 0 | struct bpf_link *link; |
12833 | 0 | int prog_fd, link_fd; |
12834 | |
12835 | 0 | if (!OPTS_VALID(opts, bpf_netfilter_opts)) |
12836 | 0 | return libbpf_err_ptr(-EINVAL); |
12837 | | |
12838 | 0 | prog_fd = bpf_program__fd(prog); |
12839 | 0 | if (prog_fd < 0) { |
12840 | 0 | pr_warn("prog '%s': can't attach before loaded\n", prog->name); |
12841 | 0 | return libbpf_err_ptr(-EINVAL); |
12842 | 0 | } |
12843 | | |
12844 | 0 | link = calloc(1, sizeof(*link)); |
12845 | 0 | if (!link) |
12846 | 0 | return libbpf_err_ptr(-ENOMEM); |
12847 | | |
12848 | 0 | link->detach = &bpf_link__detach_fd; |
12849 | |
12850 | 0 | lopts.netfilter.pf = OPTS_GET(opts, pf, 0); |
12851 | 0 | lopts.netfilter.hooknum = OPTS_GET(opts, hooknum, 0); |
12852 | 0 | lopts.netfilter.priority = OPTS_GET(opts, priority, 0); |
12853 | 0 | lopts.netfilter.flags = OPTS_GET(opts, flags, 0); |
12854 | |
12855 | 0 | link_fd = bpf_link_create(prog_fd, 0, BPF_NETFILTER, &lopts); |
12856 | 0 | if (link_fd < 0) { |
12857 | 0 | char errmsg[STRERR_BUFSIZE]; |
12858 | |
12859 | 0 | link_fd = -errno; |
12860 | 0 | free(link); |
12861 | 0 | pr_warn("prog '%s': failed to attach to netfilter: %s\n", |
12862 | 0 | prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); |
12863 | 0 | return libbpf_err_ptr(link_fd); |
12864 | 0 | } |
12865 | 0 | link->fd = link_fd; |
12866 | |
12867 | 0 | return link; |
12868 | 0 | } |
12869 | | |
12870 | | struct bpf_link *bpf_program__attach(const struct bpf_program *prog) |
12871 | 0 | { |
12872 | 0 | struct bpf_link *link = NULL; |
12873 | 0 | int err; |
12874 | |
12875 | 0 | if (!prog->sec_def || !prog->sec_def->prog_attach_fn) |
12876 | 0 | return libbpf_err_ptr(-EOPNOTSUPP); |
12877 | | |
12878 | 0 | if (bpf_program__fd(prog) < 0) { |
12879 | 0 | pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", |
12880 | 0 | prog->name); |
12881 | 0 | return libbpf_err_ptr(-EINVAL); |
12882 | 0 | } |
12883 | | |
12884 | 0 | err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link); |
12885 | 0 | if (err) |
12886 | 0 | return libbpf_err_ptr(err); |
12887 | | |
12888 | | /* When calling bpf_program__attach() explicitly, auto-attach support |
12889 | | * is expected to work, so NULL returned link is considered an error. |
12890 | | * This is different for skeleton's attach, see comment in |
12891 | | * bpf_object__attach_skeleton(). |
12892 | | */ |
12893 | 0 | if (!link) |
12894 | 0 | return libbpf_err_ptr(-EOPNOTSUPP); |
12895 | | |
12896 | 0 | return link; |
12897 | 0 | } |
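| |
| | /* A minimal end-to-end sketch (file and program names hypothetical):
| |  *
| |  *   struct bpf_object *obj = bpf_object__open_file("probe.bpf.o", NULL);
| |  *   struct bpf_program *p;
| |  *   struct bpf_link *l;
| |  *
| |  *   if (obj && !bpf_object__load(obj)) {
| |  *           p = bpf_object__find_program_by_name(obj, "handler");
| |  *           l = bpf_program__attach(p);
| |  *   }
| |  *
| |  * In libbpf 1.0 mode a NULL result indicates failure, with errno set.
| |  */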
12898 | | |
12899 | | struct bpf_link_struct_ops { |
12900 | | struct bpf_link link; |
12901 | | int map_fd; |
12902 | | }; |
12903 | | |
12904 | | static int bpf_link__detach_struct_ops(struct bpf_link *link) |
12905 | 0 | { |
12906 | 0 | struct bpf_link_struct_ops *st_link; |
12907 | 0 | __u32 zero = 0; |
12908 | |
12909 | 0 | st_link = container_of(link, struct bpf_link_struct_ops, link); |
12910 | |
12911 | 0 | if (st_link->map_fd < 0) |
12912 | | /* w/o a real link */ |
12913 | 0 | return bpf_map_delete_elem(link->fd, &zero); |
12914 | | |
12915 | 0 | return close(link->fd); |
12916 | 0 | } |
12917 | | |
12918 | | struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map) |
12919 | 0 | { |
12920 | 0 | struct bpf_link_struct_ops *link; |
12921 | 0 | __u32 zero = 0; |
12922 | 0 | int err, fd; |
12923 | |
12924 | 0 | if (!bpf_map__is_struct_ops(map)) { |
12925 | 0 | pr_warn("map '%s': can't attach non-struct_ops map\n", map->name); |
12926 | 0 | return libbpf_err_ptr(-EINVAL); |
12927 | 0 | } |
12928 | | |
12929 | 0 | if (map->fd < 0) { |
12930 | 0 | pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name); |
12931 | 0 | return libbpf_err_ptr(-EINVAL); |
12932 | 0 | } |
12933 | | |
12934 | 0 | link = calloc(1, sizeof(*link)); |
12935 | 0 | if (!link) |
12936 | 0 | return libbpf_err_ptr(-ENOMEM);
12937 | | |
12938 | | /* kern_vdata should be prepared during the loading phase. */ |
12939 | 0 | err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0); |
12940 | | /* It can be EBUSY if the map has been used to create or |
12941 | | * update a link before. We don't allow updating the value of |
12942 | | * a struct_ops once it is set. That ensures that the value |
12943 | | * never changes. So it is safe to skip -EBUSY.
12944 | | */ |
12945 | 0 | if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) { |
12946 | 0 | free(link); |
12947 | 0 | return libbpf_err_ptr(err); |
12948 | 0 | } |
12949 | | |
12950 | 0 | link->link.detach = bpf_link__detach_struct_ops; |
12951 | |
12952 | 0 | if (!(map->def.map_flags & BPF_F_LINK)) { |
12953 | | /* w/o a real link */ |
12954 | 0 | link->link.fd = map->fd; |
12955 | 0 | link->map_fd = -1; |
12956 | 0 | return &link->link; |
12957 | 0 | } |
12958 | | |
12959 | 0 | fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL); |
12960 | 0 | if (fd < 0) { |
12961 | 0 | free(link); |
12962 | 0 | return libbpf_err_ptr(fd); |
12963 | 0 | } |
12964 | | |
12965 | 0 | link->link.fd = fd; |
12966 | 0 | link->map_fd = map->fd; |
12967 | |
12968 | 0 | return &link->link; |
12969 | 0 | } |
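| |
| | /* Typical usage via a skeleton (map name hypothetical):
| |  *
| |  *   struct bpf_link *l = bpf_map__attach_struct_ops(skel->maps.my_ops);
| |  *
| |  * For BPF_F_LINK maps this creates a real BPF_STRUCT_OPS link; otherwise
| |  * the returned link merely wraps the map FD and detaching deletes the
| |  * map element, as seen in bpf_link__detach_struct_ops() above.
| |  */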
12970 | | |
12971 | | /* |
12972 | | * Swap the backing struct_ops map of a link with a new struct_ops map.
12973 | | */ |
12974 | | int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map) |
12975 | 0 | { |
12976 | 0 | struct bpf_link_struct_ops *st_ops_link; |
12977 | 0 | __u32 zero = 0; |
12978 | 0 | int err; |
12979 | |
12980 | 0 | if (!bpf_map__is_struct_ops(map)) |
12981 | 0 | return -EINVAL; |
12982 | | |
12983 | 0 | if (map->fd < 0) { |
12984 | 0 | pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name); |
12985 | 0 | return -EINVAL; |
12986 | 0 | } |
12987 | | |
12988 | 0 | st_ops_link = container_of(link, struct bpf_link_struct_ops, link); |
12989 | | /* Ensure the type of a link is correct */ |
12990 | 0 | if (st_ops_link->map_fd < 0) |
12991 | 0 | return -EINVAL; |
12992 | | |
12993 | 0 | err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0); |
12994 | | /* It can be EBUSY if the map has been used to create or |
12995 | | * update a link before. We don't allow updating the value of |
12996 | | * a struct_ops once it is set. That ensures that the value |
12997 | | * never changes. So it is safe to skip -EBUSY.
12998 | | */ |
12999 | 0 | if (err && err != -EBUSY) |
13000 | 0 | return err; |
13001 | | |
13002 | 0 | err = bpf_link_update(link->fd, map->fd, NULL); |
13003 | 0 | if (err < 0) |
13004 | 0 | return err; |
13005 | | |
13006 | 0 | st_ops_link->map_fd = map->fd; |
13007 | |
13008 | 0 | return 0; |
13009 | 0 | } |
13010 | | |
13011 | | typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr, |
13012 | | void *private_data); |
13013 | | |
13014 | | static enum bpf_perf_event_ret |
13015 | | perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size, |
13016 | | void **copy_mem, size_t *copy_size, |
13017 | | bpf_perf_event_print_t fn, void *private_data) |
13018 | 0 | { |
13019 | 0 | struct perf_event_mmap_page *header = mmap_mem; |
13020 | 0 | __u64 data_head = ring_buffer_read_head(header); |
13021 | 0 | __u64 data_tail = header->data_tail; |
13022 | 0 | void *base = ((__u8 *)header) + page_size; |
13023 | 0 | int ret = LIBBPF_PERF_EVENT_CONT; |
13024 | 0 | struct perf_event_header *ehdr; |
13025 | 0 | size_t ehdr_size; |
13026 | |
13027 | 0 | while (data_head != data_tail) { |
13028 | 0 | ehdr = base + (data_tail & (mmap_size - 1)); |
13029 | 0 | ehdr_size = ehdr->size; |
13030 | |
13031 | 0 | if (((void *)ehdr) + ehdr_size > base + mmap_size) { |
13032 | 0 | void *copy_start = ehdr; |
13033 | 0 | size_t len_first = base + mmap_size - copy_start; |
13034 | 0 | size_t len_secnd = ehdr_size - len_first; |
13035 | |
13036 | 0 | if (*copy_size < ehdr_size) { |
13037 | 0 | free(*copy_mem); |
13038 | 0 | *copy_mem = malloc(ehdr_size); |
13039 | 0 | if (!*copy_mem) { |
13040 | 0 | *copy_size = 0; |
13041 | 0 | ret = LIBBPF_PERF_EVENT_ERROR; |
13042 | 0 | break; |
13043 | 0 | } |
13044 | 0 | *copy_size = ehdr_size; |
13045 | 0 | } |
13046 | | |
13047 | 0 | memcpy(*copy_mem, copy_start, len_first); |
13048 | 0 | memcpy(*copy_mem + len_first, base, len_secnd); |
13049 | 0 | ehdr = *copy_mem; |
13050 | 0 | } |
13051 | | |
13052 | 0 | ret = fn(ehdr, private_data); |
13053 | 0 | data_tail += ehdr_size; |
13054 | 0 | if (ret != LIBBPF_PERF_EVENT_CONT) |
13055 | 0 | break; |
13056 | 0 | } |
13057 | |
13058 | 0 | ring_buffer_write_tail(header, data_tail); |
13059 | 0 | return libbpf_err(ret); |
13060 | 0 | } |
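| |
| | /* Note: mmap_size is a power-of-two byte count, so
| |  * "data_tail & (mmap_size - 1)" is the tail offset modulo the ring size;
| |  * records wrapping past the end of the ring are stitched back together in
| |  * *copy_mem before being passed to the callback.
| |  */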
13061 | | |
13062 | | struct perf_buffer; |
13063 | | |
13064 | | struct perf_buffer_params { |
13065 | | struct perf_event_attr *attr; |
13066 | | /* if event_cb is specified, it takes precedence */
13067 | | perf_buffer_event_fn event_cb; |
13068 | | /* sample_cb and lost_cb are higher-level common-case callbacks */ |
13069 | | perf_buffer_sample_fn sample_cb; |
13070 | | perf_buffer_lost_fn lost_cb; |
13071 | | void *ctx; |
13072 | | int cpu_cnt; |
13073 | | int *cpus; |
13074 | | int *map_keys; |
13075 | | }; |
13076 | | |
13077 | | struct perf_cpu_buf { |
13078 | | struct perf_buffer *pb; |
13079 | | void *base; /* mmap()'ed memory */ |
13080 | | void *buf; /* for reconstructing segmented data */ |
13081 | | size_t buf_size; |
13082 | | int fd; |
13083 | | int cpu; |
13084 | | int map_key; |
13085 | | }; |
13086 | | |
13087 | | struct perf_buffer { |
13088 | | perf_buffer_event_fn event_cb; |
13089 | | perf_buffer_sample_fn sample_cb; |
13090 | | perf_buffer_lost_fn lost_cb; |
13091 | | void *ctx; /* passed into callbacks */ |
13092 | | |
13093 | | size_t page_size; |
13094 | | size_t mmap_size; |
13095 | | struct perf_cpu_buf **cpu_bufs; |
13096 | | struct epoll_event *events; |
13097 | | int cpu_cnt; /* number of allocated CPU buffers */ |
13098 | | int epoll_fd; /* epoll FD used to poll all per-CPU perf event FDs */
13099 | | int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */ |
13100 | | }; |
13101 | | |
13102 | | static void perf_buffer__free_cpu_buf(struct perf_buffer *pb, |
13103 | | struct perf_cpu_buf *cpu_buf) |
13104 | 0 | { |
13105 | 0 | if (!cpu_buf) |
13106 | 0 | return; |
13107 | 0 | if (cpu_buf->base && |
13108 | 0 | munmap(cpu_buf->base, pb->mmap_size + pb->page_size)) |
13109 | 0 | pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu); |
13110 | 0 | if (cpu_buf->fd >= 0) { |
13111 | 0 | ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0); |
13112 | 0 | close(cpu_buf->fd); |
13113 | 0 | } |
13114 | 0 | free(cpu_buf->buf); |
13115 | 0 | free(cpu_buf); |
13116 | 0 | } |
13117 | | |
13118 | | void perf_buffer__free(struct perf_buffer *pb) |
13119 | 0 | { |
13120 | 0 | int i; |
13121 | |
13122 | 0 | if (IS_ERR_OR_NULL(pb)) |
13123 | 0 | return; |
13124 | 0 | if (pb->cpu_bufs) { |
13125 | 0 | for (i = 0; i < pb->cpu_cnt; i++) { |
13126 | 0 | struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; |
13127 | |
13128 | 0 | if (!cpu_buf) |
13129 | 0 | continue; |
13130 | | |
13131 | 0 | bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key); |
13132 | 0 | perf_buffer__free_cpu_buf(pb, cpu_buf); |
13133 | 0 | } |
13134 | 0 | free(pb->cpu_bufs); |
13135 | 0 | } |
13136 | 0 | if (pb->epoll_fd >= 0) |
13137 | 0 | close(pb->epoll_fd); |
13138 | 0 | free(pb->events); |
13139 | 0 | free(pb); |
13140 | 0 | } |
13141 | | |
13142 | | static struct perf_cpu_buf * |
13143 | | perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr, |
13144 | | int cpu, int map_key) |
13145 | 0 | { |
13146 | 0 | struct perf_cpu_buf *cpu_buf; |
13147 | 0 | char msg[STRERR_BUFSIZE]; |
13148 | 0 | int err; |
13149 | |
13150 | 0 | cpu_buf = calloc(1, sizeof(*cpu_buf)); |
13151 | 0 | if (!cpu_buf) |
13152 | 0 | return ERR_PTR(-ENOMEM); |
13153 | | |
13154 | 0 | cpu_buf->pb = pb; |
13155 | 0 | cpu_buf->cpu = cpu; |
13156 | 0 | cpu_buf->map_key = map_key; |
13157 | |
13158 | 0 | cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu, |
13159 | 0 | -1, PERF_FLAG_FD_CLOEXEC); |
13160 | 0 | if (cpu_buf->fd < 0) { |
13161 | 0 | err = -errno; |
13162 | 0 | pr_warn("failed to open perf buffer event on cpu #%d: %s\n", |
13163 | 0 | cpu, libbpf_strerror_r(err, msg, sizeof(msg))); |
13164 | 0 | goto error; |
13165 | 0 | } |
13166 | | |
13167 | 0 | cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size, |
13168 | 0 | PROT_READ | PROT_WRITE, MAP_SHARED, |
13169 | 0 | cpu_buf->fd, 0); |
13170 | 0 | if (cpu_buf->base == MAP_FAILED) { |
13171 | 0 | cpu_buf->base = NULL; |
13172 | 0 | err = -errno; |
13173 | 0 | pr_warn("failed to mmap perf buffer on cpu #%d: %s\n", |
13174 | 0 | cpu, libbpf_strerror_r(err, msg, sizeof(msg))); |
13175 | 0 | goto error; |
13176 | 0 | } |
13177 | | |
13178 | 0 | if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) { |
13179 | 0 | err = -errno; |
13180 | 0 | pr_warn("failed to enable perf buffer event on cpu #%d: %s\n", |
13181 | 0 | cpu, libbpf_strerror_r(err, msg, sizeof(msg))); |
13182 | 0 | goto error; |
13183 | 0 | } |
13184 | | |
13185 | 0 | return cpu_buf; |
13186 | | |
13187 | 0 | error: |
13188 | 0 | perf_buffer__free_cpu_buf(pb, cpu_buf); |
13189 | 0 | return (struct perf_cpu_buf *)ERR_PTR(err); |
13190 | 0 | } |
13191 | | |
13192 | | static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, |
13193 | | struct perf_buffer_params *p); |
13194 | | |
13195 | | struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt, |
13196 | | perf_buffer_sample_fn sample_cb, |
13197 | | perf_buffer_lost_fn lost_cb, |
13198 | | void *ctx, |
13199 | | const struct perf_buffer_opts *opts) |
13200 | 0 | { |
13201 | 0 | const size_t attr_sz = sizeof(struct perf_event_attr); |
13202 | 0 | struct perf_buffer_params p = {}; |
13203 | 0 | struct perf_event_attr attr; |
13204 | 0 | __u32 sample_period; |
13205 | |
13206 | 0 | if (!OPTS_VALID(opts, perf_buffer_opts)) |
13207 | 0 | return libbpf_err_ptr(-EINVAL); |
13208 | | |
13209 | 0 | sample_period = OPTS_GET(opts, sample_period, 1); |
13210 | 0 | if (!sample_period) |
13211 | 0 | sample_period = 1; |
13212 | |
13213 | 0 | memset(&attr, 0, attr_sz); |
13214 | 0 | attr.size = attr_sz; |
13215 | 0 | attr.config = PERF_COUNT_SW_BPF_OUTPUT; |
13216 | 0 | attr.type = PERF_TYPE_SOFTWARE; |
13217 | 0 | attr.sample_type = PERF_SAMPLE_RAW; |
13218 | 0 | attr.sample_period = sample_period; |
13219 | 0 | attr.wakeup_events = sample_period; |
13220 | |
13221 | 0 | p.attr = &attr; |
13222 | 0 | p.sample_cb = sample_cb; |
13223 | 0 | p.lost_cb = lost_cb; |
13224 | 0 | p.ctx = ctx; |
13225 | |
13226 | 0 | return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p)); |
13227 | 0 | } |
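Editor's note: a minimal consumer sketch for perf_buffer__new() above, assuming map_fd refers to a BPF_MAP_TYPE_PERF_EVENT_ARRAY map; function and callback names are illustrative, not part of libbpf.

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* data holds the raw bytes emitted via bpf_perf_event_output() */
	printf("cpu %d: got %u bytes\n", cpu, size);
}

static void handle_lost(void *ctx, int cpu, __u64 cnt)
{
	fprintf(stderr, "cpu %d: lost %llu samples\n", cpu, (unsigned long long)cnt);
}

static int consume_events(int map_fd)
{
	struct perf_buffer *pb;
	int err;

	/* 8 pages of ring buffer per CPU; page_cnt must be a power of two */
	pb = perf_buffer__new(map_fd, 8, handle_sample, handle_lost, NULL, NULL);
	err = libbpf_get_error(pb);
	if (err)
		return err;

	/* callbacks fire from inside perf_buffer__poll() */
	while ((err = perf_buffer__poll(pb, 100 /* ms */)) >= 0)
		;

	perf_buffer__free(pb);
	return err == -EINTR ? 0 : err;
}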
13228 | | |
13229 | | struct perf_buffer *perf_buffer__new_raw(int map_fd, size_t page_cnt, |
13230 | | struct perf_event_attr *attr, |
13231 | | perf_buffer_event_fn event_cb, void *ctx, |
13232 | | const struct perf_buffer_raw_opts *opts) |
13233 | 0 | { |
13234 | 0 | struct perf_buffer_params p = {}; |
13235 | |
13236 | 0 | if (!attr) |
13237 | 0 | return libbpf_err_ptr(-EINVAL); |
13238 | | |
13239 | 0 | if (!OPTS_VALID(opts, perf_buffer_raw_opts)) |
13240 | 0 | return libbpf_err_ptr(-EINVAL); |
13241 | | |
13242 | 0 | p.attr = attr; |
13243 | 0 | p.event_cb = event_cb; |
13244 | 0 | p.ctx = ctx; |
13245 | 0 | p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0); |
13246 | 0 | p.cpus = OPTS_GET(opts, cpus, NULL); |
13247 | 0 | p.map_keys = OPTS_GET(opts, map_keys, NULL); |
13248 | |
13249 | 0 | return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p)); |
13250 | 0 | } |
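Editor's note: a sketch of the raw variant, mirroring the perf_event_attr defaults that perf_buffer__new() sets up while pinning buffers to explicit CPUs and map slots; the CPU/key choices are illustrative assumptions.

#include <linux/perf_event.h>
#include <bpf/libbpf.h>

static enum bpf_perf_event_ret
handle_event(void *ctx, int cpu, struct perf_event_header *event)
{
	/* raw mode: every PERF_RECORD_* is passed through for the caller to parse */
	return LIBBPF_PERF_EVENT_CONT;
}

static struct perf_buffer *open_raw(int map_fd)
{
	struct perf_event_attr attr = {
		.size = sizeof(attr),
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
		.sample_type = PERF_SAMPLE_RAW,
		.sample_period = 1,
		.wakeup_events = 1,
	};
	int cpus[] = { 0, 1 };	/* CPUs to open ring buffers on */
	int keys[] = { 0, 1 };	/* PERF_EVENT_ARRAY slots to fill */
	LIBBPF_OPTS(perf_buffer_raw_opts, opts,
		.cpu_cnt = 2,
		.cpus = cpus,
		.map_keys = keys,
	);

	return perf_buffer__new_raw(map_fd, 8, &attr, handle_event, NULL, &opts);
}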
13251 | | |
13252 | | static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, |
13253 | | struct perf_buffer_params *p) |
13254 | 0 | { |
13255 | 0 | const char *online_cpus_file = "/sys/devices/system/cpu/online"; |
13256 | 0 | struct bpf_map_info map; |
13257 | 0 | char msg[STRERR_BUFSIZE]; |
13258 | 0 | struct perf_buffer *pb; |
13259 | 0 | bool *online = NULL; |
13260 | 0 | __u32 map_info_len; |
13261 | 0 | int err, i, j, n; |
13262 | |
13263 | 0 | if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) { |
13264 | 0 | pr_warn("page count should be a power of two, but is %zu\n",
13265 | 0 | page_cnt); |
13266 | 0 | return ERR_PTR(-EINVAL); |
13267 | 0 | } |
13268 | | |
13269 | | /* best-effort sanity checks */ |
13270 | 0 | memset(&map, 0, sizeof(map)); |
13271 | 0 | map_info_len = sizeof(map); |
13272 | 0 | err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len); |
13273 | 0 | if (err) { |
13274 | 0 | err = -errno; |
13275 | | /* if BPF_OBJ_GET_INFO_BY_FD is supported, will return |
13276 | | * -EBADFD, -EFAULT, or -E2BIG on real error |
13277 | | */ |
13278 | 0 | if (err != -EINVAL) { |
13279 | 0 | pr_warn("failed to get map info for map FD %d: %s\n", |
13280 | 0 | map_fd, libbpf_strerror_r(err, msg, sizeof(msg))); |
13281 | 0 | return ERR_PTR(err); |
13282 | 0 | } |
13283 | 0 | pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n", |
13284 | 0 | map_fd); |
13285 | 0 | } else { |
13286 | 0 | if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { |
13287 | 0 | pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", |
13288 | 0 | map.name); |
13289 | 0 | return ERR_PTR(-EINVAL); |
13290 | 0 | } |
13291 | 0 | } |
13292 | | |
13293 | 0 | pb = calloc(1, sizeof(*pb)); |
13294 | 0 | if (!pb) |
13295 | 0 | return ERR_PTR(-ENOMEM); |
13296 | | |
13297 | 0 | pb->event_cb = p->event_cb; |
13298 | 0 | pb->sample_cb = p->sample_cb; |
13299 | 0 | pb->lost_cb = p->lost_cb; |
13300 | 0 | pb->ctx = p->ctx; |
13301 | |
13302 | 0 | pb->page_size = getpagesize(); |
13303 | 0 | pb->mmap_size = pb->page_size * page_cnt; |
13304 | 0 | pb->map_fd = map_fd; |
13305 | |
13306 | 0 | pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); |
13307 | 0 | if (pb->epoll_fd < 0) { |
13308 | 0 | err = -errno; |
13309 | 0 | pr_warn("failed to create epoll instance: %s\n", |
13310 | 0 | libbpf_strerror_r(err, msg, sizeof(msg))); |
13311 | 0 | goto error; |
13312 | 0 | } |
13313 | | |
13314 | 0 | if (p->cpu_cnt > 0) { |
13315 | 0 | pb->cpu_cnt = p->cpu_cnt; |
13316 | 0 | } else { |
13317 | 0 | pb->cpu_cnt = libbpf_num_possible_cpus(); |
13318 | 0 | if (pb->cpu_cnt < 0) { |
13319 | 0 | err = pb->cpu_cnt; |
13320 | 0 | goto error; |
13321 | 0 | } |
13322 | 0 | if (map.max_entries && map.max_entries < pb->cpu_cnt) |
13323 | 0 | pb->cpu_cnt = map.max_entries; |
13324 | 0 | } |
13325 | | |
13326 | 0 | pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events)); |
13327 | 0 | if (!pb->events) { |
13328 | 0 | err = -ENOMEM; |
13329 | 0 | pr_warn("failed to allocate events: out of memory\n"); |
13330 | 0 | goto error; |
13331 | 0 | } |
13332 | 0 | pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs)); |
13333 | 0 | if (!pb->cpu_bufs) { |
13334 | 0 | err = -ENOMEM; |
13335 | 0 | pr_warn("failed to allocate buffers: out of memory\n"); |
13336 | 0 | goto error; |
13337 | 0 | } |
13338 | | |
13339 | 0 | err = parse_cpu_mask_file(online_cpus_file, &online, &n); |
13340 | 0 | if (err) { |
13341 | 0 | pr_warn("failed to get online CPU mask: %d\n", err); |
13342 | 0 | goto error; |
13343 | 0 | } |
13344 | | |
13345 | 0 | for (i = 0, j = 0; i < pb->cpu_cnt; i++) { |
13346 | 0 | struct perf_cpu_buf *cpu_buf; |
13347 | 0 | int cpu, map_key; |
13348 | |
13349 | 0 | cpu = p->cpu_cnt > 0 ? p->cpus[i] : i; |
13350 | 0 | map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i; |
13351 | | |
13352 | | /* in case user didn't explicitly request particular CPUs to
13353 | | * be attached to, skip offline/not-present CPUs
13354 | | */ |
13355 | 0 | if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu])) |
13356 | 0 | continue; |
13357 | | |
13358 | 0 | cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key); |
13359 | 0 | if (IS_ERR(cpu_buf)) { |
13360 | 0 | err = PTR_ERR(cpu_buf); |
13361 | 0 | goto error; |
13362 | 0 | } |
13363 | | |
13364 | 0 | pb->cpu_bufs[j] = cpu_buf; |
13365 | |
13366 | 0 | err = bpf_map_update_elem(pb->map_fd, &map_key, |
13367 | 0 | &cpu_buf->fd, 0); |
13368 | 0 | if (err) { |
13369 | 0 | err = -errno; |
13370 | 0 | pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n", |
13371 | 0 | cpu, map_key, cpu_buf->fd, |
13372 | 0 | libbpf_strerror_r(err, msg, sizeof(msg))); |
13373 | 0 | goto error; |
13374 | 0 | } |
13375 | | |
13376 | 0 | pb->events[j].events = EPOLLIN; |
13377 | 0 | pb->events[j].data.ptr = cpu_buf; |
13378 | 0 | if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd, |
13379 | 0 | &pb->events[j]) < 0) { |
13380 | 0 | err = -errno; |
13381 | 0 | pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n", |
13382 | 0 | cpu, cpu_buf->fd, |
13383 | 0 | libbpf_strerror_r(err, msg, sizeof(msg))); |
13384 | 0 | goto error; |
13385 | 0 | } |
13386 | 0 | j++; |
13387 | 0 | } |
13388 | 0 | pb->cpu_cnt = j; |
13389 | 0 | free(online); |
13390 | |
13391 | 0 | return pb; |
13392 | | |
13393 | 0 | error: |
13394 | 0 | free(online); |
13395 | 0 | if (pb) |
13396 | 0 | perf_buffer__free(pb); |
13397 | 0 | return ERR_PTR(err); |
13398 | 0 | } |
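Editor's note: since the constructor above rejects any page_cnt that is not a power of two, a caller converting a per-CPU byte budget into a valid page count might round up like this (sketch; the helper name is illustrative):

#include <unistd.h>

static size_t pages_for_bytes(size_t bytes)
{
	size_t page_size = sysconf(_SC_PAGESIZE);
	size_t pages = (bytes + page_size - 1) / page_size;
	size_t pow2 = 1;

	/* round up to the next power of two, as __perf_buffer__new() requires */
	while (pow2 < pages)
		pow2 <<= 1;
	return pow2;
}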
13399 | | |
13400 | | struct perf_sample_raw { |
13401 | | struct perf_event_header header; |
13402 | | uint32_t size; |
13403 | | char data[]; |
13404 | | }; |
13405 | | |
13406 | | struct perf_sample_lost { |
13407 | | struct perf_event_header header; |
13408 | | uint64_t id; |
13409 | | uint64_t lost; |
13410 | | uint64_t sample_id; |
13411 | | }; |
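Editor's note: these two structs are private to libbpf.c; a raw event callback (see perf_buffer__new_raw() above) has to declare an equivalent layout itself to parse sample records, e.g.:

#include <linux/perf_event.h>
#include <bpf/libbpf.h>

struct my_sample_raw {	/* mirrors struct perf_sample_raw above */
	struct perf_event_header header;
	__u32 size;
	char data[];
};

static enum bpf_perf_event_ret
parse_event(void *ctx, int cpu, struct perf_event_header *e)
{
	if (e->type == PERF_RECORD_SAMPLE) {
		struct my_sample_raw *s = (struct my_sample_raw *)e;

		/* s->data holds s->size bytes written by the BPF program */
	}
	return LIBBPF_PERF_EVENT_CONT;
}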
13412 | | |
13413 | | static enum bpf_perf_event_ret |
13414 | | perf_buffer__process_record(struct perf_event_header *e, void *ctx) |
13415 | 0 | { |
13416 | 0 | struct perf_cpu_buf *cpu_buf = ctx; |
13417 | 0 | struct perf_buffer *pb = cpu_buf->pb; |
13418 | 0 | void *data = e; |
13419 | | |
13420 | | /* user wants full control over parsing perf event */ |
13421 | 0 | if (pb->event_cb) |
13422 | 0 | return pb->event_cb(pb->ctx, cpu_buf->cpu, e); |
13423 | | |
13424 | 0 | switch (e->type) { |
13425 | 0 | case PERF_RECORD_SAMPLE: { |
13426 | 0 | struct perf_sample_raw *s = data; |
13427 | |
13428 | 0 | if (pb->sample_cb) |
13429 | 0 | pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size); |
13430 | 0 | break; |
13431 | 0 | } |
13432 | 0 | case PERF_RECORD_LOST: { |
13433 | 0 | struct perf_sample_lost *s = data; |
13434 | |
13435 | 0 | if (pb->lost_cb) |
13436 | 0 | pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost); |
13437 | 0 | break; |
13438 | 0 | } |
13439 | 0 | default: |
13440 | 0 | pr_warn("unknown perf sample type %d\n", e->type); |
13441 | 0 | return LIBBPF_PERF_EVENT_ERROR; |
13442 | 0 | } |
13443 | 0 | return LIBBPF_PERF_EVENT_CONT; |
13444 | 0 | } |
13445 | | |
13446 | | static int perf_buffer__process_records(struct perf_buffer *pb, |
13447 | | struct perf_cpu_buf *cpu_buf) |
13448 | 0 | { |
13449 | 0 | enum bpf_perf_event_ret ret; |
13450 | |
13451 | 0 | ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size, |
13452 | 0 | pb->page_size, &cpu_buf->buf, |
13453 | 0 | &cpu_buf->buf_size, |
13454 | 0 | perf_buffer__process_record, cpu_buf); |
13455 | 0 | if (ret != LIBBPF_PERF_EVENT_CONT) |
13456 | 0 | return ret; |
13457 | 0 | return 0; |
13458 | 0 | } |
13459 | | |
13460 | | int perf_buffer__epoll_fd(const struct perf_buffer *pb) |
13461 | 0 | { |
13462 | 0 | return pb->epoll_fd; |
13463 | 0 | } |
13464 | | |
13465 | | int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms) |
13466 | 0 | { |
13467 | 0 | int i, cnt, err; |
13468 | |
13469 | 0 | cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms); |
13470 | 0 | if (cnt < 0) |
13471 | 0 | return -errno; |
13472 | | |
13473 | 0 | for (i = 0; i < cnt; i++) { |
13474 | 0 | struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr; |
13475 | |
13476 | 0 | err = perf_buffer__process_records(pb, cpu_buf); |
13477 | 0 | if (err) { |
13478 | 0 | pr_warn("error while processing records: %d\n", err); |
13479 | 0 | return libbpf_err(err); |
13480 | 0 | } |
13481 | 0 | } |
13482 | 0 | return cnt; |
13483 | 0 | } |
13484 | | |
13485 | | /* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer |
13486 | | * manager. |
13487 | | */ |
13488 | | size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb) |
13489 | 0 | { |
13490 | 0 | return pb->cpu_cnt; |
13491 | 0 | } |
13492 | | |
13493 | | /* |
13494 | | * Return perf_event FD of a ring buffer in *buf_idx* slot of |
13495 | | * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using |
13496 | | * select()/poll()/epoll() Linux syscalls. |
13497 | | */ |
13498 | | int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx) |
13499 | 0 | { |
13500 | 0 | struct perf_cpu_buf *cpu_buf; |
13501 | |
13502 | 0 | if (buf_idx >= pb->cpu_cnt) |
13503 | 0 | return libbpf_err(-EINVAL); |
13504 | | |
13505 | 0 | cpu_buf = pb->cpu_bufs[buf_idx]; |
13506 | 0 | if (!cpu_buf) |
13507 | 0 | return libbpf_err(-ENOENT); |
13508 | | |
13509 | 0 | return cpu_buf->fd; |
13510 | 0 | } |
13511 | | |
13512 | | int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size) |
13513 | 0 | { |
13514 | 0 | struct perf_cpu_buf *cpu_buf; |
13515 | |
13516 | 0 | if (buf_idx >= pb->cpu_cnt) |
13517 | 0 | return libbpf_err(-EINVAL); |
13518 | | |
13519 | 0 | cpu_buf = pb->cpu_bufs[buf_idx]; |
13520 | 0 | if (!cpu_buf) |
13521 | 0 | return libbpf_err(-ENOENT); |
13522 | | |
13523 | 0 | *buf = cpu_buf->base; |
13524 | 0 | *buf_size = pb->mmap_size; |
13525 | 0 | return 0; |
13526 | 0 | } |
13527 | | |
13528 | | /* |
13529 | | * Consume data from perf ring buffer corresponding to slot *buf_idx* in |
13530 | | * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to |
13531 | | * consume, do nothing and return success. |
13532 | | * Returns: |
13533 | | * - 0 on success; |
13534 | | * - <0 on failure. |
13535 | | */ |
13536 | | int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx) |
13537 | 0 | { |
13538 | 0 | struct perf_cpu_buf *cpu_buf; |
13539 | |
13540 | 0 | if (buf_idx >= pb->cpu_cnt) |
13541 | 0 | return libbpf_err(-EINVAL); |
13542 | | |
13543 | 0 | cpu_buf = pb->cpu_bufs[buf_idx]; |
13544 | 0 | if (!cpu_buf) |
13545 | 0 | return libbpf_err(-ENOENT); |
13546 | | |
13547 | 0 | return perf_buffer__process_records(pb, cpu_buf); |
13548 | 0 | } |
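Editor's note: a sketch of driving consumption from a caller-owned epoll loop, registering each ring's FD via perf_buffer__buffer_fd() and draining ready rings with perf_buffer__consume_buffer(); the epoll bookkeeping is an assumption, not a libbpf pattern.

#include <errno.h>
#include <sys/epoll.h>
#include <bpf/libbpf.h>

static int register_rings(struct perf_buffer *pb, int my_epoll_fd)
{
	size_t i, n = perf_buffer__buffer_cnt(pb);

	for (i = 0; i < n; i++) {
		struct epoll_event ev = {
			.events = EPOLLIN,
			.data.u64 = i,	/* remember the ring's slot index */
		};
		int fd = perf_buffer__buffer_fd(pb, i);

		if (fd < 0)
			return fd;
		if (epoll_ctl(my_epoll_fd, EPOLL_CTL_ADD, fd, &ev) < 0)
			return -errno;
	}
	return 0;
}

/* later, when slot idx reports EPOLLIN: perf_buffer__consume_buffer(pb, idx) */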
13549 | | |
13550 | | int perf_buffer__consume(struct perf_buffer *pb) |
13551 | 0 | { |
13552 | 0 | int i, err; |
13553 | |
13554 | 0 | for (i = 0; i < pb->cpu_cnt; i++) { |
13555 | 0 | struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; |
13556 | |
13557 | 0 | if (!cpu_buf) |
13558 | 0 | continue; |
13559 | | |
13560 | 0 | err = perf_buffer__process_records(pb, cpu_buf); |
13561 | 0 | if (err) { |
13562 | 0 | pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err); |
13563 | 0 | return libbpf_err(err); |
13564 | 0 | } |
13565 | 0 | } |
13566 | 0 | return 0; |
13567 | 0 | } |
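Editor's note: perf_buffer__consume() is handy for a final, non-blocking drain before teardown; a sketch (helper name is illustrative):

#include <stdio.h>
#include <bpf/libbpf.h>

static void drain_and_free(struct perf_buffer *pb)
{
	/* processes whatever is already in the rings, without waiting */
	int err = perf_buffer__consume(pb);

	if (err)
		fprintf(stderr, "final drain failed: %d\n", err);
	perf_buffer__free(pb);
}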
13568 | | |
13569 | | int bpf_program__set_attach_target(struct bpf_program *prog, |
13570 | | int attach_prog_fd, |
13571 | | const char *attach_func_name) |
13572 | 0 | { |
13573 | 0 | int btf_obj_fd = 0, btf_id = 0, err; |
13574 | |
13575 | 0 | if (!prog || attach_prog_fd < 0) |
13576 | 0 | return libbpf_err(-EINVAL); |
13577 | | |
13578 | 0 | if (prog->obj->loaded) |
13579 | 0 | return libbpf_err(-EINVAL); |
13580 | | |
13581 | 0 | if (attach_prog_fd && !attach_func_name) { |
13582 | | /* remember attach_prog_fd and let bpf_program__load() find |
13583 | | * BTF ID during the program load |
13584 | | */ |
13585 | 0 | prog->attach_prog_fd = attach_prog_fd; |
13586 | 0 | return 0; |
13587 | 0 | } |
13588 | | |
13589 | 0 | if (attach_prog_fd) { |
13590 | 0 | btf_id = libbpf_find_prog_btf_id(attach_func_name, |
13591 | 0 | attach_prog_fd); |
13592 | 0 | if (btf_id < 0) |
13593 | 0 | return libbpf_err(btf_id); |
13594 | 0 | } else { |
13595 | 0 | if (!attach_func_name) |
13596 | 0 | return libbpf_err(-EINVAL); |
13597 | | |
13598 | | /* load btf_vmlinux, if not yet */ |
13599 | 0 | err = bpf_object__load_vmlinux_btf(prog->obj, true); |
13600 | 0 | if (err) |
13601 | 0 | return libbpf_err(err); |
13602 | 0 | err = find_kernel_btf_id(prog->obj, attach_func_name, |
13603 | 0 | prog->expected_attach_type, |
13604 | 0 | &btf_obj_fd, &btf_id); |
13605 | 0 | if (err) |
13606 | 0 | return libbpf_err(err); |
13607 | 0 | } |
13608 | | |
13609 | 0 | prog->attach_btf_id = btf_id; |
13610 | 0 | prog->attach_btf_obj_fd = btf_obj_fd; |
13611 | 0 | prog->attach_prog_fd = attach_prog_fd; |
13612 | 0 | return 0; |
13613 | 0 | } |
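Editor's note: a sketch of re-targeting a SEC("fentry") program between bpf_object__open() and bpf_object__load(); the program name "on_connect" and target "tcp_v6_connect" are illustrative assumptions.

#include <errno.h>
#include <bpf/libbpf.h>

static int retarget(struct bpf_object *obj)
{
	struct bpf_program *prog;

	prog = bpf_object__find_program_by_name(obj, "on_connect");
	if (!prog)
		return -ESRCH;

	/* attach_prog_fd == 0: resolve the name against kernel (vmlinux) BTF */
	return bpf_program__set_attach_target(prog, 0, "tcp_v6_connect");
}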
13614 | | |
13615 | | int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz) |
13616 | 0 | { |
13617 | 0 | int err = 0, n, len, start, end = -1; |
13618 | 0 | bool *tmp; |
13619 | |
13620 | 0 | *mask = NULL; |
13621 | 0 | *mask_sz = 0; |
13622 | | |
13623 | | /* Each substring separated by ',' has the format \d+-\d+ or \d+ */
13624 | 0 | while (*s) { |
13625 | 0 | if (*s == ',' || *s == '\n') { |
13626 | 0 | s++; |
13627 | 0 | continue; |
13628 | 0 | } |
13629 | 0 | n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len); |
13630 | 0 | if (n <= 0 || n > 2) { |
13631 | 0 | pr_warn("Failed to get CPU range %s: %d\n", s, n); |
13632 | 0 | err = -EINVAL; |
13633 | 0 | goto cleanup; |
13634 | 0 | } else if (n == 1) { |
13635 | 0 | end = start; |
13636 | 0 | } |
13637 | 0 | if (start < 0 || start > end) { |
13638 | 0 | pr_warn("Invalid CPU range [%d,%d] in %s\n", |
13639 | 0 | start, end, s); |
13640 | 0 | err = -EINVAL; |
13641 | 0 | goto cleanup; |
13642 | 0 | } |
13643 | 0 | tmp = realloc(*mask, end + 1); |
13644 | 0 | if (!tmp) { |
13645 | 0 | err = -ENOMEM; |
13646 | 0 | goto cleanup; |
13647 | 0 | } |
13648 | 0 | *mask = tmp; |
13649 | 0 | memset(tmp + *mask_sz, 0, start - *mask_sz); |
13650 | 0 | memset(tmp + start, 1, end - start + 1); |
13651 | 0 | *mask_sz = end + 1; |
13652 | 0 | s += len; |
13653 | 0 | } |
13654 | 0 | if (!*mask_sz) { |
13655 | 0 | pr_warn("Empty CPU range\n"); |
13656 | 0 | return -EINVAL; |
13657 | 0 | } |
13658 | 0 | return 0; |
13659 | 0 | cleanup: |
13660 | 0 | free(*mask); |
13661 | 0 | *mask = NULL; |
13662 | 0 | return err; |
13663 | 0 | } |
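Editor's note: parse_cpu_mask_str() is internal to libbpf (declared in libbpf_internal.h), not part of the public API; an illustration of the accepted syntax and the resulting mask:

#include <stdbool.h>
#include <stdlib.h>

static void demo_cpu_mask(void)
{
	bool *mask;
	int n;

	/* "0-2,4\n" yields mask = {1, 1, 1, 0, 1} and n = 5 */
	if (parse_cpu_mask_str("0-2,4\n", &mask, &n))
		return;
	/* ... consult mask[cpu] ... */
	free(mask);
}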
13664 | | |
13665 | | int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz) |
13666 | 0 | { |
13667 | 0 | int fd, err = 0, len; |
13668 | 0 | char buf[128]; |
13669 | |
13670 | 0 | fd = open(fcpu, O_RDONLY | O_CLOEXEC); |
13671 | 0 | if (fd < 0) { |
13672 | 0 | err = -errno; |
13673 | 0 | pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err); |
13674 | 0 | return err; |
13675 | 0 | } |
13676 | 0 | len = read(fd, buf, sizeof(buf)); |
13677 | 0 | close(fd); |
13678 | 0 | if (len <= 0) { |
13679 | 0 | err = len ? -errno : -EINVAL; |
13680 | 0 | pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err); |
13681 | 0 | return err; |
13682 | 0 | } |
13683 | 0 | if (len >= sizeof(buf)) { |
13684 | 0 | pr_warn("CPU mask is too big in file %s\n", fcpu); |
13685 | 0 | return -E2BIG; |
13686 | 0 | } |
13687 | 0 | buf[len] = '\0'; |
13688 | |
13689 | 0 | return parse_cpu_mask_str(buf, mask, mask_sz); |
13690 | 0 | } |
13691 | | |
13692 | | int libbpf_num_possible_cpus(void) |
13693 | 0 | { |
13694 | 0 | static const char *fcpu = "/sys/devices/system/cpu/possible"; |
13695 | 0 | static int cpus; |
13696 | 0 | int err, n, i, tmp_cpus; |
13697 | 0 | bool *mask; |
13698 | |
13699 | 0 | tmp_cpus = READ_ONCE(cpus); |
13700 | 0 | if (tmp_cpus > 0) |
13701 | 0 | return tmp_cpus; |
13702 | | |
13703 | 0 | err = parse_cpu_mask_file(fcpu, &mask, &n); |
13704 | 0 | if (err) |
13705 | 0 | return libbpf_err(err); |
13706 | | |
13707 | 0 | tmp_cpus = 0; |
13708 | 0 | for (i = 0; i < n; i++) { |
13709 | 0 | if (mask[i]) |
13710 | 0 | tmp_cpus++; |
13711 | 0 | } |
13712 | 0 | free(mask); |
13713 | |
13714 | 0 | WRITE_ONCE(cpus, tmp_cpus); |
13715 | 0 | return tmp_cpus; |
13716 | 0 | } |
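Editor's note: the canonical use of libbpf_num_possible_cpus() is sizing the value buffer for per-CPU map lookups, where the kernel returns one value slot per possible CPU; a sketch (map FD and key are assumptions):

#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int read_percpu(int percpu_map_fd, __u32 key)
{
	int err, n = libbpf_num_possible_cpus();
	__u64 *vals;

	if (n < 0)
		return n;
	vals = calloc(n, sizeof(*vals));
	if (!vals)
		return -ENOMEM;

	err = bpf_map_lookup_elem(percpu_map_fd, &key, vals);
	/* on success, vals[0..n-1] holds one counter per possible CPU */
	free(vals);
	return err;
}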
13717 | | |
13718 | | static int populate_skeleton_maps(const struct bpf_object *obj, |
13719 | | struct bpf_map_skeleton *maps, |
13720 | | size_t map_cnt, size_t map_skel_sz) |
13721 | 0 | { |
13722 | 0 | int i; |
13723 | |
13724 | 0 | for (i = 0; i < map_cnt; i++) { |
13725 | 0 | struct bpf_map_skeleton *map_skel = (void *)maps + i * map_skel_sz; |
13726 | 0 | struct bpf_map **map = map_skel->map; |
13727 | 0 | const char *name = map_skel->name; |
13728 | 0 | void **mmaped = map_skel->mmaped; |
13729 | |
13730 | 0 | *map = bpf_object__find_map_by_name(obj, name); |
13731 | 0 | if (!*map) { |
13732 | 0 | pr_warn("failed to find skeleton map '%s'\n", name); |
13733 | 0 | return -ESRCH; |
13734 | 0 | } |
13735 | | |
13736 | | /* externs shouldn't be pre-setup from user code */ |
13737 | 0 | if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG) |
13738 | 0 | *mmaped = (*map)->mmaped; |
13739 | 0 | } |
13740 | 0 | return 0; |
13741 | 0 | } |
13742 | | |
13743 | | static int populate_skeleton_progs(const struct bpf_object *obj, |
13744 | | struct bpf_prog_skeleton *progs, |
13745 | | size_t prog_cnt, size_t prog_skel_sz) |
13746 | 0 | { |
13747 | 0 | int i; |
13748 | |
13749 | 0 | for (i = 0; i < prog_cnt; i++) { |
13750 | 0 | struct bpf_prog_skeleton *prog_skel = (void *)progs + i * prog_skel_sz; |
13751 | 0 | struct bpf_program **prog = prog_skel->prog; |
13752 | 0 | const char *name = prog_skel->name; |
13753 | |
13754 | 0 | *prog = bpf_object__find_program_by_name(obj, name); |
13755 | 0 | if (!*prog) { |
13756 | 0 | pr_warn("failed to find skeleton program '%s'\n", name); |
13757 | 0 | return -ESRCH; |
13758 | 0 | } |
13759 | 0 | } |
13760 | 0 | return 0; |
13761 | 0 | } |
13762 | | |
13763 | | int bpf_object__open_skeleton(struct bpf_object_skeleton *s, |
13764 | | const struct bpf_object_open_opts *opts) |
13765 | 0 | { |
13766 | 0 | struct bpf_object *obj; |
13767 | 0 | int err; |
13768 | |
13769 | 0 | obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts); |
13770 | 0 | if (IS_ERR(obj)) { |
13771 | 0 | err = PTR_ERR(obj); |
13772 | 0 | pr_warn("failed to initialize skeleton BPF object '%s': %d\n", s->name, err); |
13773 | 0 | return libbpf_err(err); |
13774 | 0 | } |
13775 | | |
13776 | 0 | *s->obj = obj; |
13777 | 0 | err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz); |
13778 | 0 | if (err) { |
13779 | 0 | pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err); |
13780 | 0 | return libbpf_err(err); |
13781 | 0 | } |
13782 | | |
13783 | 0 | err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz); |
13784 | 0 | if (err) { |
13785 | 0 | pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err); |
13786 | 0 | return libbpf_err(err); |
13787 | 0 | } |
13788 | | |
13789 | 0 | return 0; |
13790 | 0 | } |
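Editor's note: applications normally reach bpf_object__open_skeleton() through bpftool-generated skeleton code rather than calling it directly; the header name, struct name, and rodata field below are all illustrative.

#include "my_prog.skel.h"	/* generated by: bpftool gen skeleton my_prog.bpf.o */

static struct my_prog *open_tuned(void)
{
	struct my_prog *skel;

	skel = my_prog__open();	/* wraps bpf_object__open_skeleton() */
	if (!skel)
		return NULL;

	/* global data may be tweaked between open and load */
	skel->rodata->verbose = true;
	return skel;
}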
13791 | | |
13792 | | int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s) |
13793 | 0 | { |
13794 | 0 | int err, len, var_idx, i; |
13795 | 0 | const char *var_name; |
13796 | 0 | const struct bpf_map *map; |
13797 | 0 | struct btf *btf; |
13798 | 0 | __u32 map_type_id; |
13799 | 0 | const struct btf_type *map_type, *var_type; |
13800 | 0 | const struct bpf_var_skeleton *var_skel; |
13801 | 0 | struct btf_var_secinfo *var; |
13802 | |
13803 | 0 | if (!s->obj) |
13804 | 0 | return libbpf_err(-EINVAL); |
13805 | | |
13806 | 0 | btf = bpf_object__btf(s->obj); |
13807 | 0 | if (!btf) { |
13808 | 0 | pr_warn("subskeletons require BTF at runtime (object %s)\n", |
13809 | 0 | bpf_object__name(s->obj)); |
13810 | 0 | return libbpf_err(-errno); |
13811 | 0 | } |
13812 | | |
13813 | 0 | err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz); |
13814 | 0 | if (err) { |
13815 | 0 | pr_warn("failed to populate subskeleton maps: %d\n", err); |
13816 | 0 | return libbpf_err(err); |
13817 | 0 | } |
13818 | | |
13819 | 0 | err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz); |
13820 | 0 | if (err) { |
13821 | 0 | pr_warn("failed to populate subskeleton progs: %d\n", err);
13822 | 0 | return libbpf_err(err); |
13823 | 0 | } |
13824 | | |
13825 | 0 | for (var_idx = 0; var_idx < s->var_cnt; var_idx++) { |
13826 | 0 | var_skel = (void *)s->vars + var_idx * s->var_skel_sz; |
13827 | 0 | map = *var_skel->map; |
13828 | 0 | map_type_id = bpf_map__btf_value_type_id(map); |
13829 | 0 | map_type = btf__type_by_id(btf, map_type_id); |
13830 | |
13831 | 0 | if (!btf_is_datasec(map_type)) { |
13832 | 0 | pr_warn("type for map '%1$s' is not a datasec: %2$s\n",
13833 | 0 | bpf_map__name(map), |
13834 | 0 | __btf_kind_str(btf_kind(map_type))); |
13835 | 0 | return libbpf_err(-EINVAL); |
13836 | 0 | } |
13837 | | |
13838 | 0 | len = btf_vlen(map_type); |
13839 | 0 | var = btf_var_secinfos(map_type); |
13840 | 0 | for (i = 0; i < len; i++, var++) { |
13841 | 0 | var_type = btf__type_by_id(btf, var->type); |
13842 | 0 | var_name = btf__name_by_offset(btf, var_type->name_off); |
13843 | 0 | if (strcmp(var_name, var_skel->name) == 0) { |
13844 | 0 | *var_skel->addr = map->mmaped + var->offset; |
13845 | 0 | break; |
13846 | 0 | } |
13847 | 0 | } |
13848 | 0 | } |
13849 | 0 | return 0; |
13850 | 0 | } |
13851 | | |
13852 | | void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s) |
13853 | 0 | { |
13854 | 0 | if (!s) |
13855 | 0 | return; |
13856 | 0 | free(s->maps); |
13857 | 0 | free(s->progs); |
13858 | 0 | free(s->vars); |
13859 | 0 | free(s); |
13860 | 0 | } |
13861 | | |
13862 | | int bpf_object__load_skeleton(struct bpf_object_skeleton *s) |
13863 | 0 | { |
13864 | 0 | int i, err; |
13865 | |
13866 | 0 | err = bpf_object__load(*s->obj); |
13867 | 0 | if (err) { |
13868 | 0 | pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err); |
13869 | 0 | return libbpf_err(err); |
13870 | 0 | } |
13871 | | |
13872 | 0 | for (i = 0; i < s->map_cnt; i++) { |
13873 | 0 | struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; |
13874 | 0 | struct bpf_map *map = *map_skel->map; |
13875 | 0 | size_t mmap_sz = bpf_map_mmap_sz(map); |
13876 | 0 | int prot, map_fd = map->fd; |
13877 | 0 | void **mmaped = map_skel->mmaped; |
13878 | |
13879 | 0 | if (!mmaped) |
13880 | 0 | continue; |
13881 | | |
13882 | 0 | if (!(map->def.map_flags & BPF_F_MMAPABLE)) { |
13883 | 0 | *mmaped = NULL; |
13884 | 0 | continue; |
13885 | 0 | } |
13886 | | |
13887 | 0 | if (map->def.type == BPF_MAP_TYPE_ARENA) { |
13888 | 0 | *mmaped = map->mmaped; |
13889 | 0 | continue; |
13890 | 0 | } |
13891 | | |
13892 | 0 | if (map->def.map_flags & BPF_F_RDONLY_PROG) |
13893 | 0 | prot = PROT_READ; |
13894 | 0 | else |
13895 | 0 | prot = PROT_READ | PROT_WRITE; |
13896 | | |
13897 | | /* Remap the anonymous mmap()-ed "map initialization image" as
13898 | | * BPF map-backed mmap()-ed memory, preserving the same memory
13899 | | * address. This causes the kernel to change the process's page
13900 | | * tables to point to a different piece of kernel memory, but
13901 | | * from the userspace point of view the memory address (and its
13902 | | * contents, identical at this point) stays the same. This
13903 | | * mapping is released by bpf_object__close() as part of the
13904 | | * normal clean-up procedure, so we don't need to worry about
13905 | | * it from the skeleton's clean-up perspective.
13906 | | */
13907 | 0 | *mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0); |
13908 | 0 | if (*mmaped == MAP_FAILED) { |
13909 | 0 | err = -errno; |
13910 | 0 | *mmaped = NULL; |
13911 | 0 | pr_warn("failed to re-mmap() map '%s': %d\n", |
13912 | 0 | bpf_map__name(map), err); |
13913 | 0 | return libbpf_err(err); |
13914 | 0 | } |
13915 | 0 | } |
13916 | | |
13917 | 0 | return 0; |
13918 | 0 | } |
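Editor's note: after a successful load, the remapped pointers make global variables directly readable and writable from user space; a fragment (the skeleton type and the event_cnt field are illustrative):

	err = my_prog__load(skel);	/* wraps bpf_object__load_skeleton() */
	if (!err)
		printf("events so far: %llu\n",
		       (unsigned long long)skel->bss->event_cnt);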
13919 | | |
13920 | | int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) |
13921 | 0 | { |
13922 | 0 | int i, err; |
13923 | |
13924 | 0 | for (i = 0; i < s->prog_cnt; i++) { |
13925 | 0 | struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz; |
13926 | 0 | struct bpf_program *prog = *prog_skel->prog; |
13927 | 0 | struct bpf_link **link = prog_skel->link; |
13928 | |
13929 | 0 | if (!prog->autoload || !prog->autoattach) |
13930 | 0 | continue; |
13931 | | |
13932 | | /* auto-attaching not supported for this program */ |
13933 | 0 | if (!prog->sec_def || !prog->sec_def->prog_attach_fn) |
13934 | 0 | continue; |
13935 | | |
13936 | | /* if user already set the link manually, don't attempt auto-attach */ |
13937 | 0 | if (*link) |
13938 | 0 | continue; |
13939 | | |
13940 | 0 | err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link); |
13941 | 0 | if (err) { |
13942 | 0 | pr_warn("prog '%s': failed to auto-attach: %d\n", |
13943 | 0 | bpf_program__name(prog), err); |
13944 | 0 | return libbpf_err(err); |
13945 | 0 | } |
13946 | | |
13947 | | /* It's possible that for some SEC() definitions auto-attach
13948 | | * is supported only in some cases (e.g., when the definition
13949 | | * completely specifies the target information), but not in
13950 | | * others. SEC("uprobe") is one such case: if the user
13951 | | * specified a target binary and function name, such a BPF
13952 | | * program can be auto-attached; if not, that shouldn't cause
13953 | | * the skeleton's attach to fail, and the program should simply
13954 | | * be skipped. attach_fn signals this case by returning 0 (no
13955 | | * error) and setting link to NULL.
13956 | | */
13957 | 0 | } |
13958 | | |
13959 | | |
13960 | 0 | for (i = 0; i < s->map_cnt; i++) { |
13961 | 0 | struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; |
13962 | 0 | struct bpf_map *map = *map_skel->map; |
13963 | 0 | struct bpf_link **link; |
13964 | |
|
13965 | 0 | if (!map->autocreate || !map->autoattach) |
13966 | 0 | continue; |
13967 | | |
13968 | | /* only struct_ops maps can be attached */ |
13969 | 0 | if (!bpf_map__is_struct_ops(map)) |
13970 | 0 | continue; |
13971 | | |
13972 | | /* skeleton was created with an earlier version of bpftool; notify user */
13973 | 0 | if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) { |
13974 | 0 | pr_warn("map '%s': BPF skeleton version is old, skipping map auto-attachment...\n", |
13975 | 0 | bpf_map__name(map)); |
13976 | 0 | continue; |
13977 | 0 | } |
13978 | | |
13979 | 0 | link = map_skel->link; |
13980 | 0 | if (*link) |
13981 | 0 | continue; |
13982 | | |
13983 | 0 | *link = bpf_map__attach_struct_ops(map); |
13984 | 0 | if (!*link) { |
13985 | 0 | err = -errno; |
13986 | 0 | pr_warn("map '%s': failed to auto-attach: %d\n", bpf_map__name(map), err); |
13987 | 0 | return libbpf_err(err); |
13988 | 0 | } |
13989 | 0 | } |
13990 | | |
13991 | 0 | return 0; |
13992 | 0 | } |
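Editor's note: because auto-attach skips programs whose link is already set, callers can mix manual and automatic attachment; a fragment (skeleton, program, and label names are illustrative):

	/* attach one program by hand first; its link is then non-NULL,
	 * so bpf_object__attach_skeleton() will skip it
	 */
	skel->links.on_exec = bpf_program__attach(skel->progs.on_exec);
	if (!skel->links.on_exec)
		goto cleanup;

	err = my_prog__attach(skel);	/* wraps bpf_object__attach_skeleton() */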
13993 | | |
13994 | | void bpf_object__detach_skeleton(struct bpf_object_skeleton *s) |
13995 | 0 | { |
13996 | 0 | int i; |
13997 | |
13998 | 0 | for (i = 0; i < s->prog_cnt; i++) { |
13999 | 0 | struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz; |
14000 | 0 | struct bpf_link **link = prog_skel->link; |
14001 | |
14002 | 0 | bpf_link__destroy(*link); |
14003 | 0 | *link = NULL; |
14004 | 0 | } |
14005 | |
14006 | 0 | if (s->map_skel_sz < sizeof(struct bpf_map_skeleton)) |
14007 | 0 | return; |
14008 | | |
14009 | 0 | for (i = 0; i < s->map_cnt; i++) { |
14010 | 0 | struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; |
14011 | 0 | struct bpf_link **link = map_skel->link; |
14012 | |
14013 | 0 | if (link) { |
14014 | 0 | bpf_link__destroy(*link); |
14015 | 0 | *link = NULL; |
14016 | 0 | } |
14017 | 0 | } |
14018 | 0 | } |
14019 | | |
14020 | | void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s) |
14021 | 0 | { |
14022 | 0 | if (!s) |
14023 | 0 | return; |
14024 | | |
14025 | 0 | bpf_object__detach_skeleton(s); |
14026 | 0 | if (s->obj) |
14027 | 0 | bpf_object__close(*s->obj); |
14028 | 0 | free(s->maps); |
14029 | 0 | free(s->progs); |
14030 | 0 | free(s); |
14031 | 0 | } |