/src/elfutils/libdwfl/core-file.c
Line | Count | Source |
1 | | /* Core file handling. |
2 | | Copyright (C) 2008-2010, 2013, 2015 Red Hat, Inc. |
3 | | Copyright (C) 2021 Mark J. Wielaard <mark@klomp.org> |
4 | | This file is part of elfutils. |
5 | | |
6 | | This file is free software; you can redistribute it and/or modify |
7 | | it under the terms of either |
8 | | |
9 | | * the GNU Lesser General Public License as published by the Free |
10 | | Software Foundation; either version 3 of the License, or (at |
11 | | your option) any later version |
12 | | |
13 | | or |
14 | | |
15 | | * the GNU General Public License as published by the Free |
16 | | Software Foundation; either version 2 of the License, or (at |
17 | | your option) any later version |
18 | | |
19 | | or both in parallel, as here. |
20 | | |
21 | | elfutils is distributed in the hope that it will be useful, but |
22 | | WITHOUT ANY WARRANTY; without even the implied warranty of |
23 | | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
24 | | General Public License for more details. |
25 | | |
26 | | You should have received copies of the GNU General Public License and |
27 | | the GNU Lesser General Public License along with this program. If |
28 | | not, see <http://www.gnu.org/licenses/>. */ |
29 | | |
30 | | #include <config.h> |
31 | | #include "libelfP.h" /* For NOTE_ALIGN. */ |
32 | | #include "libdwflP.h" |
33 | | #include <gelf.h> |
34 | | |
35 | | /* On failure return, we update *NEXT to point back at OFFSET. */ |
36 | | static inline Elf * |
37 | | do_fail (int error, off_t *next, off_t offset) |
38 | 216 | { |
39 | 216 | if (next != NULL) |
40 | 0 | *next = offset; |
41 | | //__libelf_seterrno (error); |
42 | 216 | __libdwfl_seterrno (DWFL_E (LIBELF, error)); |
43 | 216 | return NULL; |
44 | 216 | } |
45 | | |
46 | 216 | #define fail(error) do_fail (error, next, offset) |
47 | | |
48 | | /* This is a prototype of what a new libelf interface might be. |
49 | | This implementation is pessimal for non-mmap cases and should |
50 | | be replaced by more diddling inside libelf internals. */ |
51 | | static Elf * |
52 | | elf_begin_rand (Elf *parent, off_t offset, off_t size, off_t *next) |
53 | 2.77k | { |
54 | 2.77k | if (parent == NULL) |
55 | 0 | return NULL; |
56 | | |
57 | 2.77k | off_t min = (parent->kind == ELF_K_ELF ? |
58 | 2.77k | (parent->class == ELFCLASS32 |
59 | 2.77k | ? sizeof (Elf32_Ehdr) : sizeof (Elf64_Ehdr)) |
60 | 2.77k | : parent->kind == ELF_K_AR ? SARMAG |
61 | 0 | : 0); |
62 | | |
63 | 2.77k | if (unlikely (offset < min) |
64 | 2.56k | || unlikely (offset >= (off_t) parent->maximum_size)) |
65 | 216 | return fail (ELF_E_RANGE); |
66 | | |
67 | | /* For an archive, fetch just the size field |
68 | | from the archive header to override SIZE. */ |
69 | 2.56k | if (parent->kind == ELF_K_AR) |
70 | 0 | { |
71 | | /* File size, in ASCII decimal, right-padded with ASCII spaces. |
72 | | Max 10 characters. Not zero terminated. So make this ar_size |
73 | | array one larger and explicitly zero terminate it. As needed |
74 | | for strtoll. */ |
75 | 0 | #define AR_SIZE_CHARS 10 |
76 | 0 | char ar_size[AR_SIZE_CHARS + 1]; |
77 | 0 | ar_size[AR_SIZE_CHARS] = '\0'; |
78 | |
79 | 0 | if (unlikely (parent->maximum_size - offset < sizeof (struct ar_hdr))) |
80 | 0 | return fail (ELF_E_RANGE); |
81 | | |
82 | 0 | if (parent->map_address != NULL) |
83 | 0 | memcpy (ar_size, parent->map_address + parent->start_offset + offset, |
84 | 0 | AR_SIZE_CHARS); |
85 | 0 | else if (unlikely (pread_retry (parent->fildes, |
86 | 0 | ar_size, AR_SIZE_CHARS, |
87 | 0 | parent->start_offset + offset |
88 | 0 | + offsetof (struct ar_hdr, ar_size)) |
89 | 0 | != AR_SIZE_CHARS)) |
90 | 0 | return fail (ELF_E_READ_ERROR); |
91 | | |
92 | 0 | offset += sizeof (struct ar_hdr); |
93 | |
94 | 0 | char *endp; |
95 | 0 | size = strtoll (ar_size, &endp, 10); |
96 | 0 | if (unlikely (endp == ar_size) |
97 | 0 | || unlikely ((off_t) parent->maximum_size - offset < size)) |
98 | 0 | return fail (ELF_E_INVALID_ARCHIVE); |
99 | 0 | } |
100 | | |
101 | 2.56k | if (unlikely ((off_t) parent->maximum_size - offset < size)) |
102 | 0 | return fail (ELF_E_RANGE); |
103 | | |
104 | | /* Even if we fail at this point, update *NEXT to point past the file. */ |
105 | 2.56k | if (next != NULL) |
106 | 0 | *next = offset + size; |
107 | | |
108 | 2.56k | if (unlikely (offset == 0) |
109 | 0 | && unlikely (size == (off_t) parent->maximum_size)) |
110 | 0 | return elf_clone (parent, parent->cmd); |
111 | | |
112 | | /* Note the image is guaranteed live only as long as PARENT |
113 | | lives. Using elf_memory is quite suboptimal if the whole |
114 | | file is not mmap'd. We really should have something like |
115 | | a generalization of the archive support. */ |
116 | 2.56k | Elf_Data *data = elf_getdata_rawchunk (parent, offset, size, ELF_T_BYTE); |
117 | 2.56k | if (data == NULL) |
118 | 0 | return NULL; |
119 | 2.56k | assert ((off_t) data->d_size == size); |
120 | 2.56k | return elf_memory (data->d_buf, size); |
121 | 2.56k | } |
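
A minimal, hypothetical sketch of the calling convention this prototype defines; CORE, OFFSET and SIZE are assumed inputs and the caller shown here is not part of this file:

    off_t next;
    Elf *embedded = elf_begin_rand (core, offset, size, &next);
    if (embedded == NULL)
      return false;  /* On the range-check failures NEXT points back at OFFSET.  */
    /* On success NEXT is OFFSET + SIZE, i.e. just past this image.  */
    /* ... use EMBEDDED ... */
    elf_end (embedded);
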
122 | | |
123 | | |
124 | | int |
125 | | dwfl_report_core_segments (Dwfl *dwfl, Elf *elf, size_t phnum, GElf_Phdr *notes) |
126 | 15.4k | { |
127 | 15.4k | if (unlikely (dwfl == NULL)) |
128 | 0 | return -1; |
129 | | |
130 | 15.4k | int result = 0; |
131 | | |
132 | 15.4k | if (notes != NULL) |
133 | 15.4k | notes->p_type = PT_NULL; |
134 | | |
135 | 500k | for (size_t ndx = 0; result >= 0 && ndx < phnum; ++ndx) |
136 | 485k | { |
137 | 485k | GElf_Phdr phdr_mem; |
138 | 485k | GElf_Phdr *phdr = gelf_getphdr (elf, ndx, &phdr_mem); |
139 | 485k | if (unlikely (phdr == NULL)) |
140 | 136 | { |
141 | 136 | __libdwfl_seterrno (DWFL_E_LIBELF); |
142 | 136 | return -1; |
143 | 136 | } |
144 | 485k | switch (phdr->p_type) |
145 | 485k | { |
146 | 50.2k | case PT_LOAD: |
147 | 50.2k | result = dwfl_report_segment (dwfl, ndx, phdr, 0, NULL); |
148 | 50.2k | break; |
149 | | |
150 | 13.4k | case PT_NOTE: |
151 | 13.4k | if (notes != NULL) |
152 | 9.43k | { |
153 | 9.43k | *notes = *phdr; |
154 | 9.43k | notes = NULL; |
155 | 9.43k | } |
156 | 13.4k | break; |
157 | 485k | } |
158 | 485k | } |
159 | | |
160 | 15.3k | return result; |
161 | 15.4k | } |
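
A minimal sketch of how a caller drives dwfl_report_core_segments, mirroring what dwfl_core_file_report does further down; DWFL and CORE are assumed to be an existing Dwfl session and an open core Elf handle:

    size_t phnum;
    if (elf_getphdrnum (core, &phnum) != 0)
      return -1;                     /* Cannot read the program headers.  */
    GElf_Phdr notes_phdr;
    int ndx = dwfl_report_core_segments (dwfl, core, phnum, &notes_phdr);
    if (ndx <= 0)
      return ndx;                    /* Error, or nothing was reported.  */
    /* NOTES_PHDR.p_type is PT_NOTE if a note segment was seen, else PT_NULL.  */
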
162 | | |
163 | | /* Never read more than this much without mmap. */ |
164 | 26.3k | #define MAX_EAGER_COST 8192 |
165 | | |
166 | | /* Dwfl_Module_Callback passed to and called by dwfl_segment_report_module |
167 | | to read in a segment as an ELF image directly if possible, or to indicate |
168 | | that an attempt must be made to read in the whole segment right now. */ |
169 | | static bool |
170 | | core_file_read_eagerly (Dwfl_Module *mod, |
171 | | void **userdata __attribute__ ((unused)), |
172 | | const char *name __attribute__ ((unused)), |
173 | | Dwarf_Addr start __attribute__ ((unused)), |
174 | | void **buffer, size_t *buffer_available, |
175 | | GElf_Off cost, GElf_Off worthwhile, |
176 | | GElf_Off whole, |
177 | | GElf_Off contiguous __attribute__ ((unused)), |
178 | | void *arg, Elf **elfp) |
179 | 38.1k | { |
180 | 38.1k | Elf *core = arg; |
181 | | |
182 | | /* The available buffer is often the whole segment when the core file |
183 | | was mmap'd and is used together with dwfl_elf_phdr_memory_callback, |
184 | | which means that if it is complete we can just construct the whole |
185 | | ELF image right now without having to read in anything more. */ |
186 | 38.1k | if (whole <= *buffer_available) |
187 | 2.77k | { |
188 | | /* All there ever was, we already have on hand. */ |
189 | | |
190 | 2.77k | if (core->map_address == NULL) |
191 | 0 | { |
192 | | /* We already malloc'd the buffer. */ |
193 | 0 | *elfp = elf_memory (*buffer, whole); |
194 | 0 | if (unlikely (*elfp == NULL)) |
195 | 0 | return false; |
196 | | |
197 | 0 | (*elfp)->flags |= ELF_F_MALLOCED; |
198 | 0 | *buffer = NULL; |
199 | 0 | *buffer_available = 0; |
200 | 0 | return true; |
201 | 0 | } |
202 | | |
203 | | /* We can use the image inside the core file directly. */ |
204 | 2.77k | *elfp = elf_begin_rand (core, *buffer - core->map_address, whole, NULL); |
205 | 2.77k | *buffer = NULL; |
206 | 2.77k | *buffer_available = 0; |
207 | 2.77k | return *elfp != NULL; |
208 | 2.77k | } |
209 | | |
210 | | /* We don't have the whole file, which means either that the core file |
211 | | wasn't mmap'd and still needs to be read in, or that the segment |
212 | | is truncated. Figure out if this is better than nothing. */ |
213 | | |
214 | 35.3k | if (worthwhile == 0) |
215 | | /* Caller doesn't think so. */ |
216 | 22.1k | return false; |
217 | | |
218 | | /* |
219 | | XXX would like to fall back to partial file via memory |
220 | | when build id find_elf fails |
221 | | also, link_map name may give file name from disk better than partial here |
222 | | requires find_elf hook re-doing the magic to fall back if no file found |
223 | | */ |
224 | | |
225 | 13.1k | if (whole > MAX_EAGER_COST && mod->build_id_len > 0) |
226 | | /* We can't cheaply read the whole file here, so we'd |
227 | | be using a partial file. But there is a build ID that could |
228 | | help us find the whole file, which might be more useful than |
229 | | what we have. We'll just rely on that. */ |
230 | 238 | return false; |
231 | | |
232 | | /* The file is either small (most likely the vdso) or big and incomplete, |
233 | | but we don't have a build-id. */ |
234 | | |
235 | 12.9k | if (core->map_address != NULL) |
236 | | /* It's cheap to get, so get it. */ |
237 | 12.9k | return true; |
238 | | |
239 | | /* Only use it if there isn't too much to be read. */ |
240 | 0 | return cost <= MAX_EAGER_COST; |
241 | 12.9k | } |
242 | | |
243 | | static inline void |
244 | | update_end (GElf_Phdr *pphdr, const GElf_Off align, |
245 | | GElf_Off *pend, GElf_Addr *pend_vaddr) |
246 | 516k | { |
247 | 516k | *pend = (pphdr->p_offset + pphdr->p_filesz + align - 1) & -align; |
248 | 516k | *pend_vaddr = (pphdr->p_vaddr + pphdr->p_memsz + align - 1) & -align; |
249 | 516k | } |
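
The rounding in update_end assumes ALIGN is a power of two: adding ALIGN - 1 and masking with -ALIGN (the two's-complement form of ~(ALIGN - 1)) rounds up to the next multiple. A small illustrative check, not part of the file, using hypothetical values:

    static void
    round_up_example (void)
    {
      const GElf_Off align = 0x1000;
      /* 0x1234 + 0xfff == 0x2233; masking with ~0xfff gives 0x2000.  */
      assert (((0x1234 + align - 1) & -align) == 0x2000);
      /* An already aligned value is unchanged.  */
      assert (((0x2000 + align - 1) & -align) == 0x2000);
    }
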
250 | | |
251 | | /* Use following contiguous segments to get towards SIZE. */ |
252 | | static inline bool |
253 | | do_more (size_t size, GElf_Phdr *pphdr, const GElf_Off align, |
254 | | Elf *elf, GElf_Off start, int *pndx, |
255 | | GElf_Off *pend, GElf_Addr *pend_vaddr) |
256 | 964k | { |
257 | 1.29M | while (*pend <= start || *pend - start < size) |
258 | 555k | { |
259 | 555k | if (pphdr->p_filesz < pphdr->p_memsz) |
260 | | /* This segment is truncated, so no following one helps us. */ |
261 | 203k | return false; |
262 | | |
263 | 351k | if (unlikely (gelf_getphdr (elf, (*pndx)++, pphdr) == NULL)) |
264 | 4.41k | return false; |
265 | | |
266 | 347k | if (pphdr->p_type == PT_LOAD) |
267 | 36.1k | { |
268 | 36.1k | if (pphdr->p_offset > *pend |
269 | 24.0k | || pphdr->p_vaddr > *pend_vaddr) |
270 | | /* It's discontiguous! */ |
271 | 13.7k | return false; |
272 | | |
273 | 22.4k | update_end (pphdr, align, pend, pend_vaddr); |
274 | 22.4k | } |
275 | 347k | } |
276 | 742k | return true; |
277 | 964k | } |
278 | | |
279 | 964k | #define more(size) do_more (size, &phdr, align, elf, start, &ndx, &end, &end_vaddr) |
280 | | |
281 | | bool |
282 | | dwfl_elf_phdr_memory_callback (Dwfl *dwfl, int ndx, |
283 | | void **buffer, size_t *buffer_available, |
284 | | GElf_Addr vaddr, |
285 | | size_t minread, |
286 | | void *arg) |
287 | 608k | { |
288 | 608k | Elf *elf = arg; |
289 | | |
290 | 608k | if (ndx == -1) |
291 | 89.5k | { |
292 | | /* Called for cleanup. */ |
293 | 89.5k | if (elf->map_address == NULL) |
294 | 0 | free (*buffer); |
295 | 89.5k | *buffer = NULL; |
296 | 89.5k | *buffer_available = 0; |
297 | 89.5k | return false; |
298 | 89.5k | } |
299 | | |
300 | 519k | const GElf_Off align = dwfl->segment_align ?: 1; |
301 | 519k | GElf_Phdr phdr; |
302 | | |
303 | 519k | do |
304 | 228M | if (unlikely (gelf_getphdr (elf, ndx++, &phdr) == NULL)) |
305 | 25.4k | return false; |
306 | 228M | while (phdr.p_type != PT_LOAD |
307 | 16.6M | || ((phdr.p_vaddr + phdr.p_memsz + align - 1) & -align) <= vaddr); |
308 | | |
309 | 493k | GElf_Off start = vaddr - phdr.p_vaddr + phdr.p_offset; |
310 | 493k | GElf_Off end; |
311 | 493k | GElf_Addr end_vaddr; |
312 | | |
313 | 493k | update_end (&phdr, align, &end, &end_vaddr); |
314 | | |
315 | | /* We need at least this much. */ |
316 | 493k | if (! more (minread)) |
317 | 184k | return false; |
318 | | |
319 | | /* See how much more we can get of what the caller wants. */ |
320 | 309k | (void) more (*buffer_available); |
321 | | |
322 | | /* If it's already on hand anyway, use as much as there is. */ |
323 | 309k | if (elf->map_address != NULL && start < elf->maximum_size) |
324 | 161k | (void) more (elf->maximum_size - start); |
325 | | |
326 | | /* Make sure we don't look past the end of the actual file, |
327 | | even if the headers tell us to. */ |
328 | 309k | if (unlikely (end > elf->maximum_size)) |
329 | 287k | end = elf->maximum_size; |
330 | | |
331 | | /* If the file is too small, there is nothing at all to get. */ |
332 | 309k | if (unlikely (start >= end)) |
333 | 147k | return false; |
334 | | |
335 | 161k | if (end - start < minread) |
336 | 51.6k | return false; |
337 | | |
338 | 109k | if (elf->map_address != NULL) |
339 | 109k | { |
340 | 109k | void *contents = elf->map_address + elf->start_offset + start; |
341 | 109k | size_t size = end - start; |
342 | | |
343 | 109k | if (minread == 0) /* String mode. */ |
344 | 8.87k | { |
345 | 8.87k | const void *eos = memchr (contents, '\0', size); |
346 | 8.87k | if (unlikely (eos == NULL) || unlikely (eos == contents)) |
347 | 892 | return false; |
348 | 7.98k | size = eos + 1 - contents; |
349 | 7.98k | } |
350 | | |
351 | 108k | if (*buffer == NULL) |
352 | 92.0k | { |
353 | 92.0k | *buffer = contents; |
354 | 92.0k | *buffer_available = size; |
355 | 92.0k | } |
356 | 16.7k | else |
357 | 16.7k | { |
358 | 16.7k | *buffer_available = MIN (size, *buffer_available); |
359 | 16.7k | memcpy (*buffer, contents, *buffer_available); |
360 | 16.7k | } |
361 | 108k | } |
362 | 0 | else |
363 | 0 | { |
364 | 0 | void *into = *buffer; |
365 | 0 | if (*buffer == NULL) |
366 | 0 | { |
367 | 0 | *buffer_available = MIN (minread ?: 512, |
368 | 0 | MAX (4096, MIN (end - start, |
369 | 0 | *buffer_available))); |
370 | 0 | into = malloc (*buffer_available); |
371 | 0 | if (unlikely (into == NULL)) |
372 | 0 | { |
373 | 0 | __libdwfl_seterrno (DWFL_E_NOMEM); |
374 | 0 | return false; |
375 | 0 | } |
376 | 0 | } |
377 | | |
378 | 0 | ssize_t nread = pread_retry (elf->fildes, into, *buffer_available, start); |
379 | 0 | if (nread < (ssize_t) minread) |
380 | 0 | { |
381 | 0 | if (into != *buffer) |
382 | 0 | free (into); |
383 | 0 | if (nread < 0) |
384 | 0 | __libdwfl_seterrno (DWFL_E_ERRNO); |
385 | 0 | return false; |
386 | 0 | } |
387 | | |
388 | 0 | if (minread == 0) /* String mode. */ |
389 | 0 | { |
390 | 0 | const void *eos = memchr (into, '\0', nread); |
391 | 0 | if (unlikely (eos == NULL) || unlikely (eos == into)) |
392 | 0 | { |
393 | 0 | if (*buffer == NULL) |
394 | 0 | free (into); |
395 | 0 | return false; |
396 | 0 | } |
397 | 0 | nread = eos + 1 - into; |
398 | 0 | } |
399 | | |
400 | 0 | if (*buffer == NULL) |
401 | 0 | *buffer = into; |
402 | 0 | *buffer_available = nread; |
403 | 0 | } |
404 | | |
405 | 108k | return true; |
406 | 109k | } |
407 | | |
408 | | /* Free the contents of R_DEBUG_INFO without freeing R_DEBUG_INFO itself. */ |
409 | | |
410 | | static void |
411 | | clear_r_debug_info (struct r_debug_info *r_debug_info) |
412 | 9.54k | { |
413 | 71.7k | while (r_debug_info->module != NULL) |
414 | 62.1k | { |
415 | 62.1k | struct r_debug_info_module *module = r_debug_info->module; |
416 | 62.1k | r_debug_info->module = module->next; |
417 | 62.1k | elf_end (module->elf); |
418 | 62.1k | if (module->fd != -1) |
419 | 6.65k | close (module->fd); |
420 | 62.1k | free (module); |
421 | 62.1k | } |
422 | 9.54k | } |
423 | | |
424 | | bool |
425 | | internal_function |
426 | | __libdwfl_dynamic_vaddr_get (Elf *elf, GElf_Addr *vaddrp) |
427 | 13.0k | { |
428 | 13.0k | size_t phnum; |
429 | 13.0k | if (unlikely (elf_getphdrnum (elf, &phnum) != 0)) |
430 | 0 | return false; |
431 | 82.4k | for (size_t i = 0; i < phnum; ++i) |
432 | 82.4k | { |
433 | 82.4k | GElf_Phdr phdr_mem; |
434 | 82.4k | GElf_Phdr *phdr = gelf_getphdr (elf, i, &phdr_mem); |
435 | 82.4k | if (unlikely (phdr == NULL)) |
436 | 0 | return false; |
437 | 82.4k | if (phdr->p_type == PT_DYNAMIC) |
438 | 13.0k | { |
439 | 13.0k | *vaddrp = phdr->p_vaddr; |
440 | 13.0k | return true; |
441 | 13.0k | } |
442 | 82.4k | } |
443 | 0 | return false; |
444 | 13.0k | } |
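
The PT_DYNAMIC p_vaddr fetched here feeds the load-bias computation in dwfl_core_file_report below, which reports each link_map module at module->l_ld minus this value. A worked example with hypothetical addresses:

    /* If the link_map gives the dynamic section's runtime address as
       l_ld = 0x7f3a12c00e10 and the file's PT_DYNAMIC has p_vaddr 0x200e10,
       the module was loaded at bias 0x7f3a12c00e10 - 0x200e10 = 0x7f3a12a00000,
       which is the base address passed to __libdwfl_report_elf.  */
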
445 | | |
446 | | NEW_VERSION (dwfl_core_file_report, ELFUTILS_0.158) |
447 | | int |
448 | | dwfl_core_file_report (Dwfl *dwfl, Elf *elf, const char *executable) |
449 | 15.9k | { |
450 | 15.9k | size_t phnum; |
451 | 15.9k | if (unlikely (elf_getphdrnum (elf, &phnum) != 0)) |
452 | 445 | { |
453 | 445 | __libdwfl_seterrno (DWFL_E_LIBELF); |
454 | 445 | return -1; |
455 | 445 | } |
456 | | |
457 | 15.9k | bool cleanup_user_core = false; |
458 | 15.4k | if (dwfl->user_core != NULL) |
459 | 0 | free (dwfl->user_core->executable_for_core); |
460 | 15.4k | if (executable == NULL) |
461 | 15.4k | { |
462 | 15.4k | if (dwfl->user_core != NULL) |
463 | 0 | dwfl->user_core->executable_for_core = NULL; |
464 | 15.4k | } |
465 | 0 | else |
466 | 0 | { |
467 | 0 | if (dwfl->user_core == NULL) |
468 | 0 | { |
469 | 0 | cleanup_user_core = true; |
470 | 0 | dwfl->user_core = calloc (1, sizeof (struct Dwfl_User_Core)); |
471 | 0 | if (dwfl->user_core == NULL) |
472 | 0 | { |
473 | 0 | __libdwfl_seterrno (DWFL_E_NOMEM); |
474 | 0 | return -1; |
475 | 0 | } |
476 | 0 | dwfl->user_core->fd = -1; |
477 | 0 | } |
478 | 0 | dwfl->user_core->executable_for_core = strdup (executable); |
479 | 0 | if (dwfl->user_core->executable_for_core == NULL) |
480 | 0 | { |
481 | 0 | if (cleanup_user_core) |
482 | 0 | { |
483 | 0 | free (dwfl->user_core); |
484 | 0 | dwfl->user_core = NULL; |
485 | 0 | } |
486 | 0 | __libdwfl_seterrno (DWFL_E_NOMEM); |
487 | 0 | return -1; |
488 | 0 | } |
489 | 0 | } |
490 | | |
491 | | /* First report each PT_LOAD segment. */ |
492 | 15.4k | GElf_Phdr notes_phdr; |
493 | 15.4k | int ndx = dwfl_report_core_segments (dwfl, elf, phnum, ¬es_phdr); |
494 | 15.4k | if (unlikely (ndx <= 0)) |
495 | 5.94k | { |
496 | 5.94k | if (cleanup_user_core) |
497 | 0 | { |
498 | 0 | free (dwfl->user_core->executable_for_core); |
499 | 0 | free (dwfl->user_core); |
500 | 0 | dwfl->user_core = NULL; |
501 | 0 | } |
502 | 5.94k | return ndx; |
503 | 5.94k | } |
504 | | |
505 | | /* Next, we should follow the chain from DT_DEBUG. */ |
506 | | |
507 | 9.54k | const void *auxv = NULL; |
508 | 9.54k | const void *note_file = NULL; |
509 | 9.54k | size_t auxv_size = 0; |
510 | 9.54k | size_t note_file_size = 0; |
511 | 9.54k | if (likely (notes_phdr.p_type == PT_NOTE)) |
512 | 5.72k | { |
513 | | /* PT_NOTE -> NT_AUXV -> AT_PHDR -> PT_DYNAMIC -> DT_DEBUG */ |
514 | | |
515 | 5.72k | Elf_Data *notes = elf_getdata_rawchunk (elf, |
516 | 5.72k | notes_phdr.p_offset, |
517 | 5.72k | notes_phdr.p_filesz, |
518 | 5.72k | (notes_phdr.p_align == 8 |
519 | 5.72k | ? ELF_T_NHDR8 |
520 | 5.72k | : ELF_T_NHDR)); |
521 | 5.72k | if (likely (notes != NULL)) |
522 | 5.38k | { |
523 | 5.38k | size_t pos = 0; |
524 | 5.38k | GElf_Nhdr nhdr; |
525 | 5.38k | size_t name_pos; |
526 | 5.38k | size_t desc_pos; |
527 | 49.1k | while ((pos = gelf_getnote (notes, pos, &nhdr, |
528 | 49.1k | &name_pos, &desc_pos)) > 0) |
529 | 43.7k | if (nhdr.n_namesz == sizeof "CORE" |
530 | 11.9k | && !memcmp (notes->d_buf + name_pos, "CORE", sizeof "CORE")) |
531 | 6.83k | { |
532 | 6.83k | if (nhdr.n_type == NT_AUXV) |
533 | 3.85k | { |
534 | 3.85k | auxv = notes->d_buf + desc_pos; |
535 | 3.85k | auxv_size = nhdr.n_descsz; |
536 | 3.85k | } |
537 | 6.83k | if (nhdr.n_type == NT_FILE) |
538 | 526 | { |
539 | 526 | note_file = notes->d_buf + desc_pos; |
540 | 526 | note_file_size = nhdr.n_descsz; |
541 | 526 | } |
542 | 6.83k | } |
543 | 5.38k | } |
544 | 5.72k | } |
545 | | |
546 | | /* Now we have NT_AUXV contents. From here on this processing could be |
547 | | used for a live process with auxv read from /proc. */ |
548 | | |
549 | 9.54k | struct r_debug_info r_debug_info; |
550 | 9.54k | memset (&r_debug_info, 0, sizeof r_debug_info); |
551 | 9.54k | int retval = dwfl_link_map_report (dwfl, auxv, auxv_size, |
552 | 9.54k | dwfl_elf_phdr_memory_callback, elf, |
553 | 9.54k | &r_debug_info); |
554 | 9.54k | int listed = retval > 0 ? retval : 0; |
555 | | |
556 | | /* Now sniff segment contents for modules hinted by information gathered |
557 | | from DT_DEBUG. */ |
558 | | |
559 | 9.54k | ndx = 0; |
560 | 9.54k | do |
561 | 468k | { |
562 | 468k | int seg = dwfl_segment_report_module (dwfl, ndx, NULL, executable, |
563 | 468k | &dwfl_elf_phdr_memory_callback, elf, |
564 | 468k | core_file_read_eagerly, elf, |
565 | 468k | elf->maximum_size, |
566 | 468k | note_file, note_file_size, |
567 | 468k | &r_debug_info); |
568 | 468k | if (unlikely (seg < 0)) |
569 | 197 | { |
570 | 197 | clear_r_debug_info (&r_debug_info); |
571 | 197 | return seg; |
572 | 197 | } |
573 | 468k | if (seg > ndx) |
574 | 38.3k | { |
575 | 38.3k | ndx = seg; |
576 | 38.3k | ++listed; |
577 | 38.3k | } |
578 | 429k | else |
579 | 429k | ++ndx; |
580 | 468k | } |
581 | 468k | while (ndx < (int) phnum); |
582 | | |
583 | | /* Now report the modules from dwfl_link_map_report which were not filtered |
584 | | out by dwfl_segment_report_module. */ |
585 | | |
586 | 9.34k | Dwfl_Module **lastmodp = &dwfl->modulelist; |
587 | 15.4k | while (*lastmodp != NULL) |
588 | 6.11k | lastmodp = &(*lastmodp)->next; |
589 | 9.34k | for (struct r_debug_info_module *module = r_debug_info.module; |
590 | 69.7k | module != NULL; module = module->next) |
591 | 60.3k | { |
592 | 60.3k | if (module->elf == NULL) |
593 | 54.3k | continue; |
594 | 6.01k | GElf_Addr file_dynamic_vaddr; |
595 | 6.01k | if (! __libdwfl_dynamic_vaddr_get (module->elf, &file_dynamic_vaddr)) |
596 | 0 | continue; |
597 | 6.01k | Dwfl_Module *mod; |
598 | 6.01k | mod = __libdwfl_report_elf (dwfl, xbasename (module->name), module->name, |
599 | 6.01k | module->fd, module->elf, |
600 | 6.01k | module->l_ld - file_dynamic_vaddr, |
601 | 6.01k | true, true); |
602 | 6.01k | if (mod == NULL) |
603 | 5.92k | continue; |
604 | 96 | ++listed; |
605 | 96 | module->elf = NULL; |
606 | 96 | module->fd = -1; |
607 | | /* Move this module to the end of the list, so that we end |
608 | | up with a list in the same order as the link_map chain. */ |
609 | 96 | if (mod->next != NULL) |
610 | 14 | { |
611 | 14 | if (*lastmodp != mod) |
612 | 14 | { |
613 | 14 | lastmodp = &dwfl->modulelist; |
614 | 30 | while (*lastmodp != mod) |
615 | 16 | lastmodp = &(*lastmodp)->next; |
616 | 14 | } |
617 | 14 | *lastmodp = mod->next; |
618 | 14 | mod->next = NULL; |
619 | 43 | while (*lastmodp != NULL) |
620 | 29 | lastmodp = &(*lastmodp)->next; |
621 | 14 | *lastmodp = mod; |
622 | 14 | } |
623 | 96 | lastmodp = &mod->next; |
624 | 96 | } |
625 | | |
626 | 9.34k | clear_r_debug_info (&r_debug_info); |
627 | | |
628 | | /* We return the number of modules we found if we found any. |
629 | | If we found none, we return -1 rather than 0 when the cause was |
630 | | an error rather than simply nothing being found. */ |
631 | 9.34k | return listed > 0 ? listed : retval; |
632 | 9.54k | } |
633 | | NEW_INTDEF (dwfl_core_file_report) |
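
For reference, a minimal, hypothetical caller of this entry point; error handling is abbreviated, and the callback choice and CORE_PATH are assumptions rather than anything mandated by this file:

    #include <fcntl.h>
    #include <unistd.h>
    #include <libelf.h>
    #include <elfutils/libdwfl.h>

    static char *debuginfo_path;
    static const Dwfl_Callbacks core_callbacks =
      {
        .find_elf = dwfl_build_id_find_elf,
        .find_debuginfo = dwfl_standard_find_debuginfo,
        .debuginfo_path = &debuginfo_path,
      };

    static int
    report_core (const char *core_path)
    {
      elf_version (EV_CURRENT);
      int fd = open (core_path, O_RDONLY);
      if (fd < 0)
        return -1;
      Elf *core = elf_begin (fd, ELF_C_READ_MMAP, NULL);
      Dwfl *dwfl = dwfl_begin (&core_callbacks);
      int nmod = dwfl_core_file_report (dwfl, core, NULL);
      dwfl_report_end (dwfl, NULL, NULL);
      /* ... inspect modules with dwfl_getmodules, or attach for unwinding
         with dwfl_core_file_attach ...  */
      dwfl_end (dwfl);
      elf_end (core);
      close (fd);
      return nmod;
    }
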
634 | | |
635 | | #ifdef SYMBOL_VERSIONING |
636 | | int _compat_without_executable_dwfl_core_file_report (Dwfl *dwfl, Elf *elf); |
637 | | COMPAT_VERSION_NEWPROTO (dwfl_core_file_report, ELFUTILS_0.146, |
638 | | without_executable) |
639 | | |
640 | | int |
641 | | _compat_without_executable_dwfl_core_file_report (Dwfl *dwfl, Elf *elf) |
642 | | { |
643 | | return dwfl_core_file_report (dwfl, elf, NULL); |
644 | | } |
645 | | #endif |