/src/libunwind/src/dwarf/Gparser.c
Line | Count | Source |
1 | | /* libunwind - a platform-independent unwind library |
2 | | Copyright (c) 2003, 2005 Hewlett-Packard Development Company, L.P. |
3 | | Contributed by David Mosberger-Tang <davidm@hpl.hp.com> |
4 | | |
5 | | This file is part of libunwind. |
6 | | |
7 | | Permission is hereby granted, free of charge, to any person obtaining |
8 | | a copy of this software and associated documentation files (the |
9 | | "Software"), to deal in the Software without restriction, including |
10 | | without limitation the rights to use, copy, modify, merge, publish, |
11 | | distribute, sublicense, and/or sell copies of the Software, and to |
12 | | permit persons to whom the Software is furnished to do so, subject to |
13 | | the following conditions: |
14 | | |
15 | | The above copyright notice and this permission notice shall be |
16 | | included in all copies or substantial portions of the Software. |
17 | | |
18 | | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
19 | | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
20 | | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
21 | | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE |
22 | | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION |
23 | | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION |
24 | | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ |
25 | | |
26 | | #include "dwarf_i.h" |
27 | | #include "libunwind_i.h" |
28 | | #include <stddef.h> |
29 | | #include <limits.h> |
30 | | |
31 | 0 | #define alloc_reg_state() (mempool_alloc (&dwarf_reg_state_pool)) |
32 | 0 | #define free_reg_state(rs) (mempool_free (&dwarf_reg_state_pool, rs)) |
33 | | |
34 | 6.33k | #define DWARF_UNW_CACHE_SIZE(log_size) (1 << log_size) |
35 | 257 | #define DWARF_UNW_HASH_SIZE(log_size) (1 << (log_size + 1)) |
36 | | |
37 | | static inline int |
38 | | read_regnum (unw_addr_space_t as, unw_accessors_t *a, unw_word_t *addr, |
39 | | unw_word_t *valp, void *arg) |
40 | 31 | { |
41 | 31 | int ret; |
42 | | |
43 | 31 | if ((ret = dwarf_read_uleb128 (as, a, addr, valp, arg)) < 0) |
44 | 0 | return ret; |
45 | | |
46 | 31 | if (*valp >= DWARF_NUM_PRESERVED_REGS) |
47 | 0 | { |
48 | 0 | Debug (1, "Invalid register number %u\n", (unsigned int) *valp); |
49 | 0 | return -UNW_EBADREG; |
50 | 0 | } |
51 | 31 | return 0; |
52 | 31 | } |
53 | | |
54 | | static inline void |
55 | | set_reg (dwarf_state_record_t *sr, unw_word_t regnum, dwarf_where_t where, |
56 | | unw_word_t val) |
57 | 442 | { |
58 | 442 | sr->rs_current.reg.where[regnum] = where; |
59 | 442 | sr->rs_current.reg.val[regnum] = val; |
60 | 442 | } |
61 | | |
62 | | static inline int |
63 | | push_rstate_stack(dwarf_stackable_reg_state_t **rs_stack) |
64 | 0 | { |
65 | 0 | dwarf_stackable_reg_state_t *old_rs = *rs_stack; |
66 | 0 | if (NULL == (*rs_stack = alloc_reg_state ())) |
67 | 0 | { |
68 | 0 | *rs_stack = old_rs; |
69 | 0 | return -1; |
70 | 0 | } |
71 | 0 | (*rs_stack)->next = old_rs; |
72 | 0 | return 0; |
73 | 0 | } |
74 | | |
75 | | static inline void |
76 | | pop_rstate_stack(dwarf_stackable_reg_state_t **rs_stack) |
77 | 0 | { |
78 | 0 | dwarf_stackable_reg_state_t *old_rs = *rs_stack; |
79 | 0 | *rs_stack = old_rs->next; |
80 | 0 | free_reg_state (old_rs); |
81 | 0 | } |
82 | | |
83 | | static inline void |
84 | | empty_rstate_stack(dwarf_stackable_reg_state_t **rs_stack) |
85 | 30 | { |
86 | 30 | while (*rs_stack) |
87 | 0 | pop_rstate_stack(rs_stack); |
88 | 30 | } |
89 | | |
90 | | #ifdef UNW_TARGET_AARCH64 |
91 | | |
92 | | static void |
93 | | aarch64_negate_ra_sign_state(dwarf_state_record_t *sr); |
94 | | |
95 | | #endif |
96 | | |
97 | | /* Run a CFI program to update the register state. */ |
98 | | static int |
99 | | run_cfi_program (struct dwarf_cursor *c, dwarf_state_record_t *sr, |
100 | | unw_word_t *ip, unw_word_t end_ip, |
101 | | unw_word_t *addr, unw_word_t end_addr, |
102 | | dwarf_stackable_reg_state_t **rs_stack, |
103 | | struct dwarf_cie_info *dci) |
104 | 30 | { |
105 | 30 | unw_addr_space_t as; |
106 | 30 | void *arg; |
107 | | |
108 | 30 | if (c->pi.flags & UNW_PI_FLAG_DEBUG_FRAME) |
109 | 0 | { |
110 | | /* .debug_frame CFI is stored in local address space. */ |
111 | 0 | as = unw_local_addr_space; |
112 | 0 | arg = NULL; |
113 | 0 | } |
114 | 30 | else |
115 | 30 | { |
116 | 30 | as = c->as; |
117 | 30 | arg = c->as_arg; |
118 | 30 | } |
119 | 30 | unw_accessors_t *a = unw_get_accessors_int (as); |
120 | 30 | int ret = 0; |
121 | | |
122 | 252 | while (*ip <= end_ip && *addr < end_addr && ret >= 0) |
123 | 222 | { |
124 | 222 | unw_word_t operand = 0, regnum, val, len; |
125 | 222 | uint8_t u8, op; |
126 | 222 | uint16_t u16; |
127 | 222 | uint32_t u32; |
128 | | |
129 | 222 | if ((ret = dwarf_readu8 (as, a, addr, &op, arg)) < 0) |
130 | 0 | break; |
131 | | |
132 | 222 | if (op & DWARF_CFA_OPCODE_MASK) |
133 | 122 | { |
134 | 122 | operand = op & DWARF_CFA_OPERAND_MASK; |
135 | 122 | op &= ~DWARF_CFA_OPERAND_MASK; |
136 | 122 | } |
137 | 222 | switch ((dwarf_cfa_t) op) |
138 | 222 | { |
139 | 48 | case DW_CFA_advance_loc: |
140 | 48 | *ip += operand * dci->code_align; |
141 | 48 | Debug (15, "CFA_advance_loc to 0x%lx\n", (long) *ip); |
142 | 48 | break; |
143 | | |
144 | 5 | case DW_CFA_advance_loc1: |
145 | 5 | if ((ret = dwarf_readu8 (as, a, addr, &u8, arg)) < 0) |
146 | 0 | break; |
147 | 5 | *ip += u8 * dci->code_align; |
148 | 5 | Debug (15, "CFA_advance_loc1 to 0x%lx\n", (long) *ip); |
149 | 5 | break; |
150 | | |
151 | 7 | case DW_CFA_advance_loc2: |
152 | 7 | if ((ret = dwarf_readu16 (as, a, addr, &u16, arg)) < 0) |
153 | 0 | break; |
154 | 7 | *ip += u16 * dci->code_align; |
155 | 7 | Debug (15, "CFA_advance_loc2 to 0x%lx\n", (long) *ip); |
156 | 7 | break; |
157 | | |
158 | 0 | case DW_CFA_advance_loc4: |
159 | 0 | if ((ret = dwarf_readu32 (as, a, addr, &u32, arg)) < 0) |
160 | 0 | break; |
161 | 0 | *ip += u32 * dci->code_align; |
162 | 0 | Debug (15, "CFA_advance_loc4 to 0x%lx\n", (long) *ip); |
163 | 0 | break; |
164 | | |
165 | 0 | case DW_CFA_MIPS_advance_loc8: |
166 | | #ifdef UNW_TARGET_MIPS |
167 | | { |
168 | | uint64_t u64 = 0; |
169 | | |
170 | | if ((ret = dwarf_readu64 (as, a, addr, &u64, arg)) < 0) |
171 | | break; |
172 | | *ip += u64 * dci->code_align; |
173 | | Debug (15, "CFA_MIPS_advance_loc8\n"); |
174 | | break; |
175 | | } |
176 | | #else |
177 | 0 | Debug (1, "DW_CFA_MIPS_advance_loc8 on non-MIPS target\n"); |
178 | 0 | ret = -UNW_EINVAL; |
179 | 0 | break; |
180 | 0 | #endif |
181 | | |
182 | 74 | case DW_CFA_offset: |
183 | 74 | regnum = operand; |
184 | 74 | if (regnum >= DWARF_NUM_PRESERVED_REGS) |
185 | 0 | { |
186 | 0 | Debug (1, "Invalid register number %u in DW_CFA_offset\n", |
187 | 0 | (unsigned int) regnum); |
188 | 0 | ret = -UNW_EBADREG; |
189 | 0 | break; |
190 | 0 | } |
191 | 74 | if ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0) |
192 | 0 | break; |
193 | 74 | set_reg (sr, regnum, DWARF_WHERE_CFAREL, val * dci->data_align); |
194 | 74 | Debug (15, "CFA_offset r%lu at cfa+0x%lx\n", |
195 | 74 | (long) regnum, (long) (val * dci->data_align)); |
196 | 74 | break; |
197 | | |
198 | 0 | case DW_CFA_offset_extended: |
199 | 0 | if (((ret = read_regnum (as, a, addr, ®num, arg)) < 0) |
200 | 0 | || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0)) |
201 | 0 | break; |
202 | 0 | set_reg (sr, regnum, DWARF_WHERE_CFAREL, val * dci->data_align); |
203 | 0 | Debug (15, "CFA_offset_extended r%lu at cfa+0x%lx\n", |
204 | 0 | (long) regnum, (long) (val * dci->data_align)); |
205 | 0 | break; |
206 | | |
207 | 0 | case DW_CFA_offset_extended_sf: |
208 | 0 | if (((ret = read_regnum (as, a, addr, ®num, arg)) < 0) |
209 | 0 | || ((ret = dwarf_read_sleb128 (as, a, addr, &val, arg)) < 0)) |
210 | 0 | break; |
211 | 0 | set_reg (sr, regnum, DWARF_WHERE_CFAREL, val * dci->data_align); |
212 | 0 | Debug (15, "CFA_offset_extended_sf r%lu at cfa+0x%lx\n", |
213 | 0 | (long) regnum, (long) (val * dci->data_align)); |
214 | 0 | break; |
215 | | |
216 | 0 | case DW_CFA_restore: |
217 | 0 | regnum = operand; |
218 | 0 | if (regnum >= DWARF_NUM_PRESERVED_REGS) |
219 | 0 | { |
220 | 0 | Debug (1, "Invalid register number %u in DW_CFA_restore\n", |
221 | 0 | (unsigned int) regnum); |
222 | 0 | ret = -UNW_EINVAL; |
223 | 0 | break; |
224 | 0 | } |
225 | 0 | sr->rs_current.reg.where[regnum] = sr->rs_initial.reg.where[regnum]; |
226 | 0 | sr->rs_current.reg.val[regnum] = sr->rs_initial.reg.val[regnum]; |
227 | 0 | Debug (15, "CFA_restore r%lu\n", (long) regnum); |
228 | 0 | break; |
229 | | |
230 | 0 | case DW_CFA_restore_extended: |
231 | 0 | if ((ret = dwarf_read_uleb128 (as, a, addr, ®num, arg)) < 0) |
232 | 0 | break; |
233 | 0 | if (regnum >= DWARF_NUM_PRESERVED_REGS) |
234 | 0 | { |
235 | 0 | Debug (1, "Invalid register number %u in " |
236 | 0 | "DW_CFA_restore_extended\n", (unsigned int) regnum); |
237 | 0 | ret = -UNW_EINVAL; |
238 | 0 | break; |
239 | 0 | } |
240 | 0 | sr->rs_current.reg.where[regnum] = sr->rs_initial.reg.where[regnum]; |
241 | 0 | sr->rs_current.reg.val[regnum] = sr->rs_initial.reg.val[regnum]; |
242 | 0 | Debug (15, "CFA_restore_extended r%lu\n", (long) regnum); |
243 | 0 | break; |
244 | | |
245 | 37 | case DW_CFA_nop: |
246 | 37 | break; |
247 | | |
248 | 0 | case DW_CFA_set_loc: |
249 | 0 | if ((ret = dwarf_read_encoded_pointer (as, a, addr, dci->fde_encoding, |
250 | 0 | &c->pi, ip, |
251 | 0 | arg)) < 0) |
252 | 0 | break; |
253 | 0 | Debug (15, "CFA_set_loc to 0x%lx\n", (long) *ip); |
254 | 0 | break; |
255 | | |
256 | 1 | case DW_CFA_undefined: |
257 | 1 | if ((ret = read_regnum (as, a, addr, ®num, arg)) < 0) |
258 | 0 | break; |
259 | 1 | set_reg (sr, regnum, DWARF_WHERE_UNDEF, 0); |
260 | 1 | Debug (15, "CFA_undefined r%lu\n", (long) regnum); |
261 | 1 | break; |
262 | | |
263 | 0 | case DW_CFA_same_value: |
264 | 0 | if ((ret = read_regnum (as, a, addr, ®num, arg)) < 0) |
265 | 0 | break; |
266 | 0 | set_reg (sr, regnum, DWARF_WHERE_SAME, 0); |
267 | 0 | Debug (15, "CFA_same_value r%lu\n", (long) regnum); |
268 | 0 | break; |
269 | | |
270 | 0 | case DW_CFA_register: |
271 | 0 | if (((ret = read_regnum (as, a, addr, ®num, arg)) < 0) |
272 | 0 | || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0)) |
273 | 0 | break; |
274 | 0 | set_reg (sr, regnum, DWARF_WHERE_REG, val); |
275 | 0 | Debug (15, "CFA_register r%lu to r%lu\n", (long) regnum, (long) val); |
276 | 0 | break; |
277 | | |
278 | 0 | case DW_CFA_remember_state: |
279 | 0 | if (push_rstate_stack(rs_stack) < 0) |
280 | 0 | { |
281 | 0 | Debug (1, "Out of memory in DW_CFA_remember_state\n"); |
282 | 0 | ret = -UNW_ENOMEM; |
283 | 0 | break; |
284 | 0 | } |
285 | 0 | (*rs_stack)->state = sr->rs_current; |
286 | 0 | Debug (15, "CFA_remember_state\n"); |
287 | 0 | break; |
288 | | |
289 | 0 | case DW_CFA_restore_state: |
290 | 0 | if (!*rs_stack) |
291 | 0 | { |
292 | 0 | Debug (1, "register-state stack underflow\n"); |
293 | 0 | ret = -UNW_EINVAL; |
294 | 0 | break; |
295 | 0 | } |
296 | 0 | sr->rs_current = (*rs_stack)->state; |
297 | 0 | pop_rstate_stack(rs_stack); |
298 | 0 | Debug (15, "CFA_restore_state\n"); |
299 | 0 | break; |
300 | | |
301 | 17 | case DW_CFA_def_cfa: |
302 | 17 | if (((ret = read_regnum (as, a, addr, ®num, arg)) < 0) |
303 | 17 | || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0)) |
304 | 0 | break; |
305 | 17 | set_reg (sr, DWARF_CFA_REG_COLUMN, DWARF_WHERE_REG, regnum); |
306 | 17 | set_reg (sr, DWARF_CFA_OFF_COLUMN, 0, val); /* NOT factored! */ |
307 | 17 | Debug (15, "CFA_def_cfa r%lu+0x%lx\n", (long) regnum, (long) val); |
308 | 17 | break; |
309 | | |
310 | 0 | case DW_CFA_def_cfa_sf: |
311 | 0 | if (((ret = read_regnum (as, a, addr, ®num, arg)) < 0) |
312 | 0 | || ((ret = dwarf_read_sleb128 (as, a, addr, &val, arg)) < 0)) |
313 | 0 | break; |
314 | 0 | set_reg (sr, DWARF_CFA_REG_COLUMN, DWARF_WHERE_REG, regnum); |
315 | 0 | set_reg (sr, DWARF_CFA_OFF_COLUMN, 0, |
316 | 0 | val * dci->data_align); /* factored! */ |
317 | 0 | Debug (15, "CFA_def_cfa_sf r%lu+0x%lx\n", |
318 | 0 | (long) regnum, (long) (val * dci->data_align)); |
319 | 0 | break; |
320 | | |
321 | 13 | case DW_CFA_def_cfa_register: |
322 | 13 | if ((ret = read_regnum (as, a, addr, ®num, arg)) < 0) |
323 | 0 | break; |
324 | 13 | set_reg (sr, DWARF_CFA_REG_COLUMN, DWARF_WHERE_REG, regnum); |
325 | 13 | Debug (15, "CFA_def_cfa_register r%lu\n", (long) regnum); |
326 | 13 | break; |
327 | | |
328 | 20 | case DW_CFA_def_cfa_offset: |
329 | 20 | if ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0) |
330 | 0 | break; |
331 | 20 | set_reg (sr, DWARF_CFA_OFF_COLUMN, 0, val); /* NOT factored! */ |
332 | 20 | Debug (15, "CFA_def_cfa_offset 0x%lx\n", (long) val); |
333 | 20 | break; |
334 | | |
335 | 0 | case DW_CFA_def_cfa_offset_sf: |
336 | 0 | if ((ret = dwarf_read_sleb128 (as, a, addr, &val, arg)) < 0) |
337 | 0 | break; |
338 | 0 | set_reg (sr, DWARF_CFA_OFF_COLUMN, 0, |
339 | 0 | val * dci->data_align); /* factored! */ |
340 | 0 | Debug (15, "CFA_def_cfa_offset_sf 0x%lx\n", |
341 | 0 | (long) (val * dci->data_align)); |
342 | 0 | break; |
343 | | |
344 | 0 | case DW_CFA_def_cfa_expression: |
345 | | /* Save the address of the DW_FORM_block for later evaluation. */ |
346 | 0 | set_reg (sr, DWARF_CFA_REG_COLUMN, DWARF_WHERE_EXPR, *addr); |
347 | | |
348 | 0 | if ((ret = dwarf_read_uleb128 (as, a, addr, &len, arg)) < 0) |
349 | 0 | break; |
350 | | |
351 | 0 | Debug (15, "CFA_def_cfa_expr @ 0x%lx [%lu bytes]\n", |
352 | 0 | (long) *addr, (long) len); |
353 | 0 | *addr += len; |
354 | 0 | break; |
355 | | |
356 | 0 | case DW_CFA_expression: |
357 | 0 | if ((ret = read_regnum (as, a, addr, ®num, arg)) < 0) |
358 | 0 | break; |
359 | | |
360 | | /* Save the address of the DW_FORM_block for later evaluation. */ |
361 | 0 | set_reg (sr, regnum, DWARF_WHERE_EXPR, *addr); |
362 | | |
363 | 0 | if ((ret = dwarf_read_uleb128 (as, a, addr, &len, arg)) < 0) |
364 | 0 | break; |
365 | | |
366 | 0 | Debug (15, "CFA_expression r%lu @ 0x%lx [%lu bytes]\n", |
367 | 0 | (long) regnum, (long) *addr, (long) len); |
368 | 0 | *addr += len; |
369 | 0 | break; |
370 | | |
371 | 0 | case DW_CFA_val_expression: |
372 | 0 | if ((ret = read_regnum (as, a, addr, ®num, arg)) < 0) |
373 | 0 | break; |
374 | | |
375 | | /* Save the address of the DW_FORM_block for later evaluation. */ |
376 | 0 | set_reg (sr, regnum, DWARF_WHERE_VAL_EXPR, *addr); |
377 | | |
378 | 0 | if ((ret = dwarf_read_uleb128 (as, a, addr, &len, arg)) < 0) |
379 | 0 | break; |
380 | | |
381 | 0 | Debug (15, "CFA_val_expression r%lu @ 0x%lx [%lu bytes]\n", |
382 | 0 | (long) regnum, (long) *addr, (long) len); |
383 | 0 | *addr += len; |
384 | 0 | break; |
385 | | |
386 | 0 | case DW_CFA_GNU_args_size: |
387 | 0 | if ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0) |
388 | 0 | break; |
389 | 0 | sr->args_size = val; |
390 | 0 | Debug (15, "CFA_GNU_args_size %lu\n", (long) val); |
391 | 0 | break; |
392 | | |
393 | 0 | case DW_CFA_GNU_negative_offset_extended: |
394 | | /* A comment in GCC says that this is obsoleted by |
395 | | DW_CFA_offset_extended_sf, but that it's used by older |
396 | | PowerPC code. */ |
397 | 0 | if (((ret = read_regnum (as, a, addr, ®num, arg)) < 0) |
398 | 0 | || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0)) |
399 | 0 | break; |
400 | 0 | set_reg (sr, regnum, DWARF_WHERE_CFAREL, ~(val * dci->data_align) + 1); |
401 | 0 | Debug (15, "CFA_GNU_negative_offset_extended cfa+0x%lx\n", |
402 | 0 | (long) (~(val * dci->data_align) + 1)); |
403 | 0 | break; |
404 | | |
405 | 0 | case DW_CFA_GNU_window_save: |
406 | | #ifdef UNW_TARGET_SPARC |
407 | | /* This is a special CFA to handle all 16 windowed registers |
408 | | on SPARC. */ |
409 | | for (regnum = 16; regnum < 32; ++regnum) |
410 | | set_reg (sr, regnum, DWARF_WHERE_CFAREL, |
411 | | (regnum - 16) * sizeof (unw_word_t)); |
412 | | Debug (15, "CFA_GNU_window_save\n"); |
413 | | break; |
414 | | #elif UNW_TARGET_AARCH64 |
415 | | /* This is a specific opcode on aarch64, DW_CFA_AARCH64_negate_ra_state */ |
416 | | Debug (15, "DW_CFA_AARCH64_negate_ra_state\n"); |
417 | | aarch64_negate_ra_sign_state(sr); |
418 | | break; |
419 | | #else |
420 | | /* FALL THROUGH */ |
421 | 0 | #endif |
422 | 0 | case DW_CFA_lo_user: |
423 | 0 | case DW_CFA_hi_user: |
424 | 0 | Debug (1, "Unexpected CFA opcode 0x%x\n", op); |
425 | 0 | ret = -UNW_EINVAL; |
426 | 0 | break; |
427 | 222 | } |
428 | 222 | } |
429 | | |
430 | 30 | if (ret > 0) |
431 | 0 | ret = 0; |
432 | 30 | return ret; |
433 | 30 | } |
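
For readers unfamiliar with the byte format that run_cfi_program() interprets, here is a standalone sketch (not libunwind code) that decodes a tiny hand-written CFI instruction stream the same way the switch above does. The opcode encodings (DW_CFA_def_cfa = 0x0c, DW_CFA_offset = 0x80|reg, DW_CFA_advance_loc = 0x40|delta) come from the DWARF spec; the code_align/data_align values of 1 and -8 are the ones typically emitted in x86-64 CIEs and are assumed here, not taken from this file.

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    int main (void)
    {
      /* "def_cfa r7+16; offset r16 at 2*data_align; advance_loc 4", hand-encoded.
         ULEB128 operands are single bytes here because every value is < 128. */
      const uint8_t cfi[] = { 0x0c, 7, 16,    /* DW_CFA_def_cfa r7, 16      */
                              0x90, 2,        /* DW_CFA_offset  r16, 2      */
                              0x44 };         /* DW_CFA_advance_loc 4       */
      const long code_align = 1, data_align = -8;   /* assumed CIE values   */
      long ip = 0, cfa_reg = -1, cfa_off = 0, r16_off = 0;

      for (size_t i = 0; i < sizeof cfi; )
        {
          uint8_t op = cfi[i++];
          if ((op & 0xc0) == 0x40)            /* primary opcode: advance_loc */
            ip += (op & 0x3f) * code_align;
          else if ((op & 0xc0) == 0x80)       /* primary opcode: offset      */
            r16_off = cfi[i++] * data_align;  /* register is op & 0x3f (16)  */
          else if (op == 0x0c)                /* extended opcode: def_cfa    */
            { cfa_reg = cfi[i++]; cfa_off = cfi[i++]; }
        }
      printf ("CFA = r%ld+%ld, r16 saved at CFA%+ld, rule valid for ip+0..ip+%ld\n",
              cfa_reg, cfa_off, r16_off, ip);
      return 0;
    }

The real parser additionally snapshots the state produced by the CIE's initial instructions into rs_initial (see setup_fde below) so that DW_CFA_restore can revert individual registers to it.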
434 | | |
435 | | static int |
436 | | fetch_proc_info (struct dwarf_cursor *c, unw_word_t ip) |
437 | 15 | { |
438 | 15 | int ret, dynamic = 1; |
439 | | |
440 | | /* The 'ip' can point either to the previous or next instruction |
441 | | depending on what type of frame we have: normal call or a place |
442 | | to resume execution (e.g. after signal frame). |
443 | | |
444 | | For a normal call frame we need to back up so we point within the |
445 | | call itself; this is important because a) the call might be the |
446 | | very last instruction of the function and the edge of the FDE, |
447 | | and b) so that run_cfi_program() runs locations up to the call |
448 | | but not more. |
449 | | |
450 | | For signal frame, we need to do the exact opposite and look |
451 | | up using the current 'ip' value. That is where execution will |
452 | | continue, and it's important we get this right, as 'ip' could be |
453 | | right at the function entry and hence FDE edge, or at instruction |
454 | | that manipulates CFA (push/pop). */ |
455 | | |
456 | 15 | if (c->use_prev_instr) |
457 | 15 | { |
458 | | #if defined(__arm__) |
459 | | /* On arm, the least bit denotes thumb/arm mode, clear it. */ |
460 | | ip &= ~(unw_word_t)0x1; |
461 | | #endif |
462 | 15 | --ip; |
463 | 15 | } |
464 | | |
465 | 15 | memset (&c->pi, 0, sizeof (c->pi)); |
466 | | |
467 | | /* check dynamic info first --- it overrides everything else */ |
468 | 15 | ret = unwi_find_dynamic_proc_info (c->as, ip, &c->pi, 1, |
469 | 15 | c->as_arg); |
470 | 15 | if (ret == -UNW_ENOINFO) |
471 | 15 | { |
472 | 15 | dynamic = 0; |
473 | 15 | if ((ret = tdep_find_proc_info (c, ip, 1)) < 0) |
474 | 0 | return ret; |
475 | 15 | } |
476 | | |
477 | 15 | if (c->pi.format != UNW_INFO_FORMAT_DYNAMIC |
478 | 15 | && c->pi.format != UNW_INFO_FORMAT_TABLE |
479 | 15 | && c->pi.format != UNW_INFO_FORMAT_REMOTE_TABLE) |
480 | 0 | return -UNW_ENOINFO; |
481 | | |
482 | 15 | c->pi_valid = 1; |
483 | 15 | c->pi_is_dynamic = dynamic; |
484 | | |
485 | | /* Let system/machine-dependent code determine frame-specific attributes. */ |
486 | 15 | if (ret >= 0) |
487 | 15 | tdep_fetch_frame (c, ip, 1); |
488 | | |
489 | 15 | return ret; |
490 | 15 | } |
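
The "--ip" adjustment described in the comment above is easiest to see with a concrete, made-up FDE table: when a call is the last instruction of its function, the pushed return address already lies in the next function's range, so looking up the raw return address would select the wrong FDE. A self-contained illustration with invented address ranges:

    #include <stdio.h>
    #include <stdint.h>

    struct fde_range { uint64_t start, end; const char *fn; };

    static const char *lookup (const struct fde_range *t, int n, uint64_t ip)
    {
      for (int i = 0; i < n; i++)
        if (ip >= t[i].start && ip < t[i].end)
          return t[i].fn;
      return "<none>";
    }

    int main (void)
    {
      static const struct fde_range table[] = {
        { 0x1000, 0x1020, "foo" },       /* foo ends with a call near 0x101b    */
        { 0x1020, 0x1080, "bar" },       /* bar starts right after foo          */
      };
      uint64_t ra = 0x1020;              /* return address pushed by that call  */

      printf ("lookup(ra)     -> %s (wrong frame)\n", lookup (table, 2, ra));
      printf ("lookup(ra - 1) -> %s (the caller we want)\n", lookup (table, 2, ra - 1));
      return 0;
    }

For signal frames the adjustment is skipped, because the saved ip is where execution will resume rather than a return address.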
491 | | |
492 | | static int |
493 | | parse_dynamic (struct dwarf_cursor *c UNUSED, |
494 | | unw_word_t ip UNUSED, |
495 | | dwarf_state_record_t *sr UNUSED) |
496 | 0 | { |
497 | 0 | Debug (1, "Not yet implemented\n"); |
498 | 0 | return -UNW_ENOINFO; |
499 | 0 | } |
500 | | |
501 | | static inline void |
502 | | put_unwind_info (struct dwarf_cursor *c, unw_proc_info_t *pi) |
503 | 15 | { |
504 | 15 | if (c->pi_is_dynamic) |
505 | 0 | unwi_put_dynamic_unwind_info (c->as, pi, c->as_arg); |
506 | 15 | else if (pi->unwind_info && pi->format == UNW_INFO_FORMAT_TABLE) |
507 | 15 | { |
508 | 15 | mempool_free (&dwarf_cie_info_pool, pi->unwind_info); |
509 | 15 | pi->unwind_info = NULL; |
510 | 15 | } |
511 | 15 | c->pi_valid = 0; |
512 | 15 | } |
513 | | |
514 | | static inline int |
515 | | setup_fde (struct dwarf_cursor *c, dwarf_state_record_t *sr) |
516 | 15 | { |
517 | 15 | int i, ret; |
518 | | |
519 | 15 | assert (c->pi_valid); |
520 | | |
521 | 15 | memset (sr, 0, sizeof (*sr)); |
522 | 300 | for (i = 0; i < DWARF_NUM_PRESERVED_REGS + 2; ++i) |
523 | 285 | set_reg (sr, i, DWARF_WHERE_SAME, 0); |
524 | | |
525 | 15 | #if !defined(UNW_TARGET_ARM) && !(defined(UNW_TARGET_MIPS) && _MIPS_SIM == _ABI64) |
526 | | // SP defaults to CFA (but is overridable) |
527 | 15 | set_reg (sr, TDEP_DWARF_SP, DWARF_WHERE_CFA, 0); |
528 | 15 | #endif |
529 | | |
530 | 15 | struct dwarf_cie_info *dci = c->pi.unwind_info; |
531 | 15 | sr->rs_current.ret_addr_column = dci->ret_addr_column; |
532 | 15 | unw_word_t addr = dci->cie_instr_start; |
533 | 15 | unw_word_t curr_ip = 0; |
534 | 15 | dwarf_stackable_reg_state_t *rs_stack = NULL; |
535 | 15 | ret = run_cfi_program (c, sr, &curr_ip, ~(unw_word_t) 0, &addr, |
536 | 15 | dci->cie_instr_end, |
537 | 15 | &rs_stack, dci); |
538 | 15 | empty_rstate_stack(&rs_stack); |
539 | 15 | if (ret < 0) |
540 | 0 | return ret; |
541 | | |
542 | 15 | memcpy (&sr->rs_initial, &sr->rs_current, sizeof (sr->rs_initial)); |
543 | 15 | return 0; |
544 | 15 | } |
545 | | |
546 | | static inline int |
547 | | parse_fde (struct dwarf_cursor *c, unw_word_t ip, dwarf_state_record_t *sr) |
548 | 15 | { |
549 | 15 | int ret; |
550 | 15 | struct dwarf_cie_info *dci = c->pi.unwind_info; |
551 | 15 | unw_word_t addr = dci->fde_instr_start; |
552 | 15 | unw_word_t curr_ip = c->pi.start_ip; |
553 | 15 | dwarf_stackable_reg_state_t *rs_stack = NULL; |
554 | | /* Process up to current `ip` for signal frame and `ip - 1` for normal call frame |
555 | | See `c->use_prev_instr` use in `fetch_proc_info` for details. */ |
556 | 15 | ret = run_cfi_program (c, sr, &curr_ip, ip - c->use_prev_instr, &addr, dci->fde_instr_end, |
557 | 15 | &rs_stack, dci); |
558 | 15 | empty_rstate_stack(&rs_stack); |
559 | 15 | if (ret < 0) |
560 | 0 | return ret; |
561 | | |
562 | 15 | return 0; |
563 | 15 | } |
564 | | |
565 | | HIDDEN int |
566 | | dwarf_flush_rs_cache (struct dwarf_rs_cache *cache) |
567 | 1 | { |
568 | 1 | int i; |
569 | | |
570 | 1 | if (cache->log_size == DWARF_DEFAULT_LOG_UNW_CACHE_SIZE |
571 | 1 | || !cache->hash) { |
572 | 1 | cache->hash = cache->default_hash; |
573 | 1 | cache->buckets = cache->default_buckets; |
574 | 1 | cache->links = cache->default_links; |
575 | 1 | cache->log_size = DWARF_DEFAULT_LOG_UNW_CACHE_SIZE; |
576 | 1 | } else { |
577 | 0 | if (cache->hash && cache->hash != cache->default_hash) |
578 | 0 | mi_munmap(cache->hash, DWARF_UNW_HASH_SIZE(cache->prev_log_size) |
579 | 0 | * sizeof (cache->hash[0])); |
580 | 0 | if (cache->buckets && cache->buckets != cache->default_buckets) |
581 | 0 | mi_munmap(cache->buckets, DWARF_UNW_CACHE_SIZE(cache->prev_log_size) |
582 | 0 | * sizeof (cache->buckets[0])); |
583 | 0 | if (cache->links && cache->links != cache->default_links) |
584 | 0 | mi_munmap(cache->links, DWARF_UNW_CACHE_SIZE(cache->prev_log_size) |
585 | 0 | * sizeof (cache->links[0])); |
586 | 0 | GET_MEMORY(cache->hash, DWARF_UNW_HASH_SIZE(cache->log_size) |
587 | 0 | * sizeof (cache->hash[0])); |
588 | 0 | GET_MEMORY(cache->buckets, DWARF_UNW_CACHE_SIZE(cache->log_size) |
589 | 0 | * sizeof (cache->buckets[0])); |
590 | 0 | GET_MEMORY(cache->links, DWARF_UNW_CACHE_SIZE(cache->log_size) |
591 | 0 | * sizeof (cache->links[0])); |
592 | 0 | if (!cache->hash || !cache->buckets || !cache->links) |
593 | 0 | { |
594 | 0 | Debug (1, "Unable to allocate cache memory"); |
595 | 0 | return -UNW_ENOMEM; |
596 | 0 | } |
597 | 0 | cache->prev_log_size = cache->log_size; |
598 | 0 | } |
599 | | |
600 | 1 | cache->rr_head = 0; |
601 | | |
602 | 129 | for (i = 0; i < DWARF_UNW_CACHE_SIZE(cache->log_size); ++i) |
603 | 128 | { |
604 | 128 | cache->links[i].coll_chain = -1; |
605 | 128 | cache->links[i].ip = 0; |
606 | 128 | cache->links[i].valid = 0; |
607 | 128 | } |
608 | 257 | for (i = 0; i< DWARF_UNW_HASH_SIZE(cache->log_size); ++i) |
609 | 256 | cache->hash[i] = -1; |
610 | | |
611 | 1 | return 0; |
612 | 1 | } |
613 | | |
614 | | static inline struct dwarf_rs_cache * |
615 | | get_rs_cache (unw_addr_space_t as, intrmask_t *saved_maskp) |
616 | 53.9k | { |
617 | 53.9k | struct dwarf_rs_cache *cache = &as->global_cache; |
618 | 53.9k | unw_caching_policy_t caching = as->caching_policy; |
619 | | |
620 | 53.9k | if (caching == UNW_CACHE_NONE) |
621 | 0 | return NULL; |
622 | | |
623 | | #if defined(HAVE___CACHE_PER_THREAD) && HAVE___CACHE_PER_THREAD |
624 | | if (likely (caching == UNW_CACHE_PER_THREAD)) |
625 | | { |
626 | | static _Thread_local struct dwarf_rs_cache tls_cache __attribute__((tls_model("initial-exec"))); |
627 | | Debug (16, "using TLS cache\n"); |
628 | | cache = &tls_cache; |
629 | | } |
630 | | else |
631 | | #else |
632 | 53.9k | if (likely (caching == UNW_CACHE_GLOBAL)) |
633 | 53.9k | #endif |
634 | 53.9k | { |
635 | 53.9k | Debug (16, "acquiring lock\n"); |
636 | 53.9k | lock_acquire (&cache->lock, *saved_maskp); |
637 | 53.9k | } |
638 | | |
639 | 53.9k | if ((atomic_load (&as->cache_generation) != atomic_load (&cache->generation)) |
640 | 53.9k | || !cache->hash) |
641 | 1 | { |
642 | | /* cache_size is only set in the global_cache, copy it over before flushing */ |
643 | 1 | cache->log_size = as->global_cache.log_size; |
644 | 1 | if (dwarf_flush_rs_cache (cache) < 0) |
645 | 0 | return NULL; |
646 | 1 | atomic_store (&cache->generation, atomic_load (&as->cache_generation)); |
647 | 1 | } |
648 | | |
649 | 53.9k | return cache; |
650 | 53.9k | } |
651 | | |
652 | | static inline void |
653 | | put_rs_cache (unw_addr_space_t as, struct dwarf_rs_cache *cache, |
654 | | intrmask_t *saved_maskp) |
655 | 53.9k | { |
656 | 53.9k | assert (as->caching_policy != UNW_CACHE_NONE); |
657 | | |
658 | 53.9k | Debug (16, "unmasking signals/interrupts and releasing lock\n"); |
659 | 53.9k | if (likely (as->caching_policy == UNW_CACHE_GLOBAL)) |
660 | 53.9k | lock_release (&cache->lock, *saved_maskp); |
661 | 53.9k | } |
662 | | |
663 | | static inline unw_hash_index_t CONST_ATTR |
664 | | hash (unw_word_t ip, unsigned short log_size) |
665 | 6.20k | { |
666 | | /* based on ((sqrt(5)-1)/2)*2^64 */ |
667 | 6.20k | # define magic ((unw_word_t) 0x9e3779b97f4a7c16ULL) |
668 | | |
669 | 6.20k | return (unw_hash_index_t) (ip * magic >> ((sizeof(unw_word_t) * 8) - (log_size + 1))); |
670 | 6.20k | } |
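
The magic constant is the golden-ratio fraction of 2^64 (Fibonacci hashing): multiplying by it mixes the low, mostly-structured bits of a code address into the top bits, and the shift keeps just enough of them to index the hash table. A minimal sketch, assuming the default log_size of 7 (which is consistent with the 128-bucket / 256-slot loop counts visible in dwarf_flush_rs_cache above); the sample address is arbitrary:

    #include <stdio.h>
    #include <stdint.h>

    int main (void)
    {
      const uint64_t magic = 0x9e3779b97f4a7c16ULL;  /* ~ ((sqrt(5)-1)/2) * 2^64 */
      const unsigned log_size = 7;                   /* assumed default; 1 << (log_size+1) = 256 hash slots */
      uint64_t ip = 0x00007f0123456789ULL;           /* arbitrary sample return address */

      uint64_t index = (ip * magic) >> (64 - (log_size + 1));
      printf ("hash(0x%016llx) = %llu (range 0..255)\n",
              (unsigned long long) ip, (unsigned long long) index);
      return 0;
    }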
671 | | |
672 | | static inline long |
673 | | cache_match (struct dwarf_rs_cache *cache, unsigned short index, unw_word_t ip) |
674 | 54.0k | { |
675 | 54.0k | return (cache->links[index].valid && (ip == cache->links[index].ip)); |
676 | 54.0k | } |
677 | | |
678 | | static dwarf_reg_state_t * |
679 | | rs_lookup (struct dwarf_rs_cache *cache, struct dwarf_cursor *c) |
680 | 53.9k | { |
681 | 53.9k | unsigned short index; |
682 | 53.9k | unw_word_t ip = c->ip; |
683 | | |
684 | 53.9k | if (c->hint > 0) |
685 | 47.9k | { |
686 | 47.9k | index = c->hint - 1; |
687 | 47.9k | if (cache_match (cache, index, ip)) |
688 | 47.7k | return &cache->buckets[index]; |
689 | 47.9k | } |
690 | | |
691 | 6.19k | for (index = cache->hash[hash (ip, cache->log_size)]; |
692 | 6.19k | index < DWARF_UNW_CACHE_SIZE(cache->log_size); |
693 | 6.19k | index = cache->links[index].coll_chain) |
694 | 6.17k | { |
695 | 6.17k | if (cache_match (cache, index, ip)) |
696 | 6.17k | return &cache->buckets[index]; |
697 | 6.17k | } |
698 | 15 | return NULL; |
699 | 6.19k | } |
700 | | |
701 | | static inline dwarf_reg_state_t * |
702 | | rs_new (struct dwarf_rs_cache *cache, struct dwarf_cursor * c) |
703 | 15 | { |
704 | 15 | unw_hash_index_t index; |
705 | 15 | unsigned short head; |
706 | | |
707 | 15 | head = cache->rr_head; |
708 | 15 | cache->rr_head = (head + 1) & (DWARF_UNW_CACHE_SIZE(cache->log_size) - 1); |
709 | | |
710 | | /* remove the old rs from the hash table (if it's there): */ |
711 | 15 | if (cache->links[head].ip) |
712 | 0 | { |
713 | 0 | unsigned short *pindex; |
714 | 0 | for (pindex = &cache->hash[hash (cache->links[head].ip, cache->log_size)]; |
715 | 0 | *pindex < DWARF_UNW_CACHE_SIZE(cache->log_size); |
716 | 0 | pindex = &cache->links[*pindex].coll_chain) |
717 | 0 | { |
718 | 0 | if (*pindex == head) |
719 | 0 | { |
720 | 0 | *pindex = cache->links[*pindex].coll_chain; |
721 | 0 | break; |
722 | 0 | } |
723 | 0 | } |
724 | 0 | } |
725 | | |
726 | | /* enter new rs in the hash table */ |
727 | 15 | index = hash (c->ip, cache->log_size); |
728 | 15 | cache->links[head].coll_chain = cache->hash[index]; |
729 | 15 | cache->hash[index] = head; |
730 | | |
731 | 15 | cache->links[head].ip = c->ip; |
732 | 15 | cache->links[head].valid = 1; |
733 | 15 | cache->links[head].signal_frame = tdep_cache_frame(c) ? 1 : 0; |
734 | 15 | return cache->buckets + head; |
735 | 15 | } |
736 | | |
737 | | static int |
738 | | create_state_record_for (struct dwarf_cursor *c, dwarf_state_record_t *sr, |
739 | | unw_word_t ip) |
740 | 15 | { |
741 | 15 | int ret; |
742 | 15 | switch (c->pi.format) |
743 | 15 | { |
744 | 15 | case UNW_INFO_FORMAT_TABLE: |
745 | 15 | case UNW_INFO_FORMAT_REMOTE_TABLE: |
746 | 15 | if ((ret = setup_fde(c, sr)) < 0) |
747 | 0 | return ret; |
748 | 15 | ret = parse_fde (c, ip, sr); |
749 | 15 | break; |
750 | | |
751 | 0 | case UNW_INFO_FORMAT_DYNAMIC: |
752 | 0 | ret = parse_dynamic (c, ip, sr); |
753 | 0 | break; |
754 | | |
755 | 0 | default: |
756 | 0 | Debug (1, "Unexpected unwind-info format %d\n", c->pi.format); |
757 | 0 | ret = -UNW_EINVAL; |
758 | 15 | } |
759 | 15 | return ret; |
760 | 15 | } |
761 | | |
762 | | static inline int |
763 | | eval_location_expr (struct dwarf_cursor *c, unw_word_t stack_val, unw_addr_space_t as, |
764 | | unw_accessors_t *a, unw_word_t addr, |
765 | | dwarf_loc_t *locp, void *arg) |
766 | 0 | { |
767 | 0 | int ret, is_register; |
768 | 0 | unw_word_t len, val; |
769 | | |
770 | | /* read the length of the expression: */ |
771 | 0 | if ((ret = dwarf_read_uleb128 (as, a, &addr, &len, arg)) < 0) |
772 | 0 | return ret; |
773 | | |
774 | | /* evaluate the expression: */ |
775 | 0 | if ((ret = dwarf_eval_expr (c, stack_val, &addr, len, &val, &is_register)) < 0) |
776 | 0 | return ret; |
777 | | |
778 | 0 | if (is_register) |
779 | 0 | *locp = DWARF_REG_LOC (c, dwarf_to_unw_regnum (val)); |
780 | 0 | else |
781 | 0 | *locp = DWARF_MEM_LOC (c, val); |
782 | | |
783 | 0 | return 0; |
784 | 0 | } |
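
For orientation, a DWARF location expression is a small stack program; eval_location_expr() hands it to dwarf_eval_expr() and then classifies the result as a register or a memory location. The sketch below hand-decodes only the two-byte expression DW_OP_breg7 +16 ("value of DWARF register 7 plus 16", a memory location at rsp+16 on x86-64); the register value is a stand-in and only this one opcode family is handled, so it is illustrative rather than a substitute for dwarf_eval_expr():

    #include <stdio.h>
    #include <stdint.h>

    int main (void)
    {
      const uint8_t expr[] = { 0x77, 0x10 };   /* DW_OP_breg7, SLEB128(+16) */
      uint64_t reg7 = 0x00007ffffff00000ULL;   /* stand-in for the SP value */

      if (expr[0] >= 0x70 && expr[0] <= 0x7f)  /* DW_OP_breg0 .. DW_OP_breg15 */
        {
          int regnum = expr[0] - 0x70;
          /* one-byte SLEB128: bit 6 is the sign bit */
          int64_t off = (expr[1] & 0x40) ? (int64_t) expr[1] - 0x80 : (int64_t) expr[1];
          uint64_t loc = reg7 + (uint64_t) off;
          printf ("DW_OP_breg%d %+lld -> memory location 0x%llx (is_register = 0)\n",
                  regnum, (long long) off, (unsigned long long) loc);
        }
      return 0;
    }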
785 | | |
786 | | |
787 | | #ifdef UNW_TARGET_AARCH64 |
788 | | #include "libunwind-aarch64.h" |
789 | | |
790 | | static void |
791 | | aarch64_negate_ra_sign_state(dwarf_state_record_t *sr) |
792 | | { |
793 | | unw_word_t ra_sign_state = sr->rs_current.reg.val[UNW_AARCH64_RA_SIGN_STATE]; |
794 | | ra_sign_state ^= 0x1; |
795 | | set_reg(sr, UNW_AARCH64_RA_SIGN_STATE, DWARF_WHERE_SAME, ra_sign_state); |
796 | | } |
797 | | |
798 | | static unw_word_t |
799 | | aarch64_strip_pac_remote(unw_accessors_t *a, unw_addr_space_t as, void *arg, unw_word_t old_ip) |
800 | | { |
801 | | if (a->ptrauth_insn_mask) |
802 | | { |
803 | | unw_word_t ip, insn_mask; |
804 | | |
805 | | insn_mask = a->ptrauth_insn_mask(as, arg); |
806 | | ip = old_ip & (~insn_mask); |
807 | | |
808 | | Debug(15, "stripping pac from address, before: %lx, after: %lx\n", old_ip, ip); |
809 | | return ip; |
810 | | } |
811 | | else |
812 | | { |
813 | | Debug(15, "return address %lx might be signed, but no means to obtain mask\n", old_ip); |
814 | | return old_ip; |
815 | | } |
816 | | } |
817 | | |
818 | | static unw_word_t |
819 | | aarch64_strip_pac_local(unw_word_t in_addr) |
820 | | { |
821 | | unw_word_t out_addr = in_addr; |
822 | | |
823 | | #if defined(__aarch64__) && !defined(UNW_REMOTE_ONLY) |
824 | | // Strip the PAC with XPACLRI instruction |
825 | | register unsigned long long x30 __asm__("x30") = in_addr; |
826 | | __asm__("hint 0x7" : "+r" (x30)); |
827 | | out_addr = x30; |
828 | | #endif |
829 | | |
830 | | return out_addr; |
831 | | } |
832 | | |
833 | | static unw_word_t |
834 | | aarch64_get_ra_sign_state(struct dwarf_reg_state *rs) |
835 | | { |
836 | | return rs->reg.val[UNW_AARCH64_RA_SIGN_STATE]; |
837 | | } |
838 | | |
839 | | #endif |
840 | | |
841 | | static int |
842 | | apply_reg_state (struct dwarf_cursor *c, struct dwarf_reg_state *rs) |
843 | 53.9k | { |
844 | 53.9k | unw_regnum_t regnum; |
845 | 53.9k | unw_word_t addr, cfa, ip; |
846 | 53.9k | unw_word_t prev_ip, prev_cfa; |
847 | 53.9k | unw_addr_space_t as; |
848 | 53.9k | dwarf_loc_t cfa_loc; |
849 | 53.9k | unw_accessors_t *a; |
850 | 53.9k | int i, ret; |
851 | 53.9k | void *arg; |
852 | | |
853 | | /* In the case that we have incorrect CFI, the return address column may be |
854 | | * outside the valid range of data and will read invalid data. Protect |
855 | | * against the errant read and indicate that we have a bad frame. */ |
856 | 53.9k | if (rs->ret_addr_column >= DWARF_NUM_PRESERVED_REGS) { |
857 | 0 | Dprintf ("%s: return address entry %zu is outside of range of CIE", |
858 | 0 | __FUNCTION__, rs->ret_addr_column); |
859 | 0 | return -UNW_EBADFRAME; |
860 | 0 | } |
861 | | |
862 | 53.9k | prev_ip = c->ip; |
863 | 53.9k | prev_cfa = c->cfa; |
864 | | |
865 | 53.9k | as = c->as; |
866 | 53.9k | arg = c->as_arg; |
867 | 53.9k | a = unw_get_accessors_int (as); |
868 | | |
869 | | /* Evaluate the CFA first, because it may be referred to by other |
870 | | expressions. */ |
871 | | |
872 | 53.9k | if (rs->reg.where[DWARF_CFA_REG_COLUMN] == DWARF_WHERE_REG) |
873 | 53.9k | { |
874 | | /* CFA is equal to [reg] + offset: */ |
875 | | |
876 | | /* As a special-case, if the stack-pointer is the CFA and the |
877 | | stack-pointer wasn't saved, popping the CFA implicitly pops |
878 | | the stack-pointer as well. */ |
879 | 53.9k | if ((rs->reg.val[DWARF_CFA_REG_COLUMN] == TDEP_DWARF_SP) |
880 | 53.9k | && (TDEP_DWARF_SP < ARRAY_SIZE(rs->reg.val)) |
881 | 53.9k | && (DWARF_IS_NULL_LOC(c->loc[TDEP_DWARF_SP]))) |
882 | 0 | cfa = c->cfa; |
883 | 53.9k | else |
884 | 53.9k | { |
885 | 53.9k | regnum = dwarf_to_unw_regnum ((unw_regnum_t) rs->reg.val[DWARF_CFA_REG_COLUMN]); |
886 | 53.9k | if ((ret = unw_get_reg (dwarf_to_cursor(c), regnum, &cfa)) < 0) |
887 | 0 | return ret; |
888 | 53.9k | } |
889 | 53.9k | cfa += rs->reg.val[DWARF_CFA_OFF_COLUMN]; |
890 | 53.9k | } |
891 | 0 | else |
892 | 0 | { |
893 | | /* CFA is equal to EXPR: */ |
894 | | |
895 | 0 | assert (rs->reg.where[DWARF_CFA_REG_COLUMN] == DWARF_WHERE_EXPR); |
896 | | |
897 | 0 | addr = rs->reg.val[DWARF_CFA_REG_COLUMN]; |
898 | | /* The dwarf standard doesn't specify an initial value to be pushed on */ |
899 | | /* the stack before DW_CFA_def_cfa_expression evaluation. We push on a */ |
900 | | /* dummy value (0) to keep the eval_location_expr function consistent. */ |
901 | 0 | if ((ret = eval_location_expr (c, 0, as, a, addr, &cfa_loc, arg)) < 0) |
902 | 0 | return ret; |
903 | | /* the returned location better be a memory location... */ |
904 | 0 | if (DWARF_IS_REG_LOC (cfa_loc)) |
905 | 0 | return -UNW_EBADFRAME; |
906 | 0 | cfa = DWARF_GET_LOC (cfa_loc); |
907 | 0 | } |
908 | | |
909 | 53.9k | dwarf_loc_t new_loc[DWARF_NUM_PRESERVED_REGS]; |
910 | 53.9k | memcpy(new_loc, c->loc, sizeof(new_loc)); |
911 | | |
912 | 970k | for (i = 0; i < DWARF_NUM_PRESERVED_REGS; ++i) |
913 | 916k | { |
914 | 916k | switch ((dwarf_where_t) rs->reg.where[i]) |
915 | 916k | { |
916 | 5.99k | case DWARF_WHERE_UNDEF: |
917 | 5.99k | new_loc[i] = DWARF_NULL_LOC; |
918 | 5.99k | break; |
919 | | |
920 | 593k | case DWARF_WHERE_SAME: |
921 | 593k | break; |
922 | | |
923 | 53.9k | case DWARF_WHERE_CFA: |
924 | 53.9k | new_loc[i] = DWARF_VAL_LOC (c, cfa); |
925 | 53.9k | break; |
926 | | |
927 | 263k | case DWARF_WHERE_CFAREL: |
928 | 263k | new_loc[i] = DWARF_MEM_LOC (c, cfa + rs->reg.val[i]); |
929 | 263k | break; |
930 | | |
931 | 0 | case DWARF_WHERE_REG: |
932 | | #ifdef __s390x__ |
933 | | /* GPRs can be saved in FPRs on s390x */ |
934 | | if (unw_is_fpreg (dwarf_to_unw_regnum (rs->reg.val[i]))) |
935 | | { |
936 | | new_loc[i] = DWARF_FPREG_LOC (c, dwarf_to_unw_regnum (rs->reg.val[i])); |
937 | | break; |
938 | | } |
939 | | #endif |
940 | 0 | new_loc[i] = new_loc[rs->reg.val[i]]; |
941 | 0 | break; |
942 | | |
943 | 0 | case DWARF_WHERE_EXPR: |
944 | 0 | addr = rs->reg.val[i]; |
945 | | /* The dwarf standard requires the current CFA to be pushed on the */ |
946 | | /* stack before DW_CFA_expression evaluation. */ |
947 | 0 | if ((ret = eval_location_expr (c, cfa, as, a, addr, new_loc + i, arg)) < 0) |
948 | 0 | return ret; |
949 | 0 | break; |
950 | | |
951 | 0 | case DWARF_WHERE_VAL_EXPR: |
952 | 0 | addr = rs->reg.val[i]; |
953 | | /* The dwarf standard requires the current CFA to be pushed on the */ |
954 | | /* stack before DW_CFA_val_expression evaluation. */ |
955 | 0 | if ((ret = eval_location_expr (c, cfa, as, a, addr, new_loc + i, arg)) < 0) |
956 | 0 | return ret; |
957 | 0 | new_loc[i] = DWARF_VAL_LOC (c, DWARF_GET_LOC (new_loc[i])); |
958 | 0 | break; |
959 | 916k | } |
960 | 916k | } |
961 | | |
962 | 53.9k | memcpy(c->loc, new_loc, sizeof(new_loc)); |
963 | | |
964 | 53.9k | c->cfa = cfa; |
965 | | /* DWARF spec says undefined return address location means end of stack. */ |
966 | 53.9k | if (DWARF_IS_NULL_LOC (c->loc[rs->ret_addr_column])) |
967 | 5.99k | { |
968 | 5.99k | c->ip = 0; |
969 | 5.99k | } |
970 | 47.9k | else |
971 | 47.9k | { |
972 | 47.9k | ret = dwarf_get (c, c->loc[rs->ret_addr_column], &ip); |
973 | 47.9k | if (ret < 0) |
974 | 0 | return ret; |
975 | | #ifdef UNW_TARGET_AARCH64 |
976 | | if (aarch64_get_ra_sign_state(rs)) |
977 | | { |
978 | | if (c->as != unw_local_addr_space) |
979 | | { |
980 | | ip = aarch64_strip_pac_remote(a, as, arg, ip); |
981 | | } |
982 | | else |
983 | | { |
984 | | ip = aarch64_strip_pac_local(ip); |
985 | | } |
986 | | } |
987 | | #endif |
988 | 47.9k | c->ip = ip; |
989 | 47.9k | } |
990 | 53.9k | ret = (c->ip != 0) ? 1 : 0; |
991 | | |
992 | | /* XXX: check for ip to be code_aligned */ |
993 | 53.9k | if (c->ip == prev_ip && c->cfa == prev_cfa) |
994 | 0 | { |
995 | 0 | Dprintf ("%s: ip and cfa unchanged; stopping here (ip=0x%lx)\n", |
996 | 0 | __FUNCTION__, (long) c->ip); |
997 | 0 | return -UNW_EBADFRAME; |
998 | 0 | } |
999 | | |
1000 | 53.9k | if (c->stash_frames) |
1001 | 0 | tdep_stash_frame (c, rs); |
1002 | | |
1003 | 53.9k | return ret; |
1004 | 53.9k | } |
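
The hot path in this function is the one the counts show: CFA = register + offset, callee-saved registers and the return address at small negative offsets from the CFA, and SP restored to the CFA itself. The following standalone sketch applies exactly that rule set to a fabricated two-slot stack image; the offsets are typical x86-64 values and are assumptions, and none of this touches libunwind state:

    #include <stdio.h>
    #include <stdint.h>

    int main (void)
    {
      /* pretend stack image: [sp] = caller's rbp, [sp+8] = return address */
      uint64_t stack[2] = { 0x00007ffddeadbeefULL, 0x0000000000401234ULL };
      uint64_t sp  = (uint64_t) (uintptr_t) &stack[0];

      uint64_t cfa = sp + 16;                               /* DWARF_WHERE_REG rule: CFA = r7 + 16 */
      uint64_t rbp = *(uint64_t *) (uintptr_t) (cfa - 16);  /* DWARF_WHERE_CFAREL, val = -16       */
      uint64_t ip  = *(uint64_t *) (uintptr_t) (cfa - 8);   /* ret_addr_column rule                */
      uint64_t new_sp = cfa;                                /* DWARF_WHERE_CFA: SP pops to the CFA */

      printf ("caller: ip=0x%llx rbp=0x%llx sp=0x%llx\n",
              (unsigned long long) ip, (unsigned long long) rbp,
              (unsigned long long) new_sp);
      return 0;
    }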
1005 | | |
1006 | | /* Find the saved locations. */ |
1007 | | static int |
1008 | | find_reg_state (struct dwarf_cursor *c, dwarf_state_record_t *sr) |
1009 | 53.9k | { |
1010 | 53.9k | dwarf_reg_state_t *rs = NULL; |
1011 | 53.9k | struct dwarf_rs_cache *cache; |
1012 | 53.9k | int ret = 0; |
1013 | 53.9k | intrmask_t saved_mask; |
1014 | | |
1015 | 53.9k | if ((cache = get_rs_cache(c->as, &saved_mask)) && |
1016 | 53.9k | (rs = rs_lookup(cache, c))) |
1017 | 53.9k | { |
1018 | | /* update hint; no locking needed: single-word writes are atomic */ |
1019 | 53.9k | unsigned short index = (unsigned short) (rs - cache->buckets); |
1020 | 53.9k | c->use_prev_instr = ! cache->links[index].signal_frame; |
1021 | 53.9k | memcpy (&sr->rs_current, rs, sizeof (*rs)); |
1022 | 53.9k | } |
1023 | 15 | else |
1024 | 15 | { |
1025 | 15 | ret = fetch_proc_info (c, c->ip); |
1026 | 15 | int next_use_prev_instr = c->use_prev_instr; |
1027 | 15 | if (ret >= 0) |
1028 | 15 | { |
1029 | | /* Update use_prev_instr for the next frame. */ |
1030 | 15 | assert(c->pi.unwind_info); |
1031 | 15 | struct dwarf_cie_info *dci = c->pi.unwind_info; |
1032 | 15 | next_use_prev_instr = ! dci->signal_frame; |
1033 | 15 | ret = create_state_record_for (c, sr, c->ip); |
1034 | 15 | } |
1035 | 15 | put_unwind_info (c, &c->pi); |
1036 | 15 | c->use_prev_instr = next_use_prev_instr; |
1037 | | |
1038 | 15 | if (cache && ret >= 0) |
1039 | 15 | { |
1040 | 15 | rs = rs_new (cache, c); |
1041 | 15 | cache->links[rs - cache->buckets].hint = 0; |
1042 | 15 | memcpy(rs, &sr->rs_current, sizeof(*rs)); |
1043 | 15 | } |
1044 | 15 | } |
1045 | | |
1046 | 53.9k | unsigned short index = -1; |
1047 | 53.9k | if (cache) |
1048 | 53.9k | { |
1049 | 53.9k | if (rs) |
1050 | 53.9k | { |
1051 | 53.9k | index = (unsigned short) (rs - cache->buckets); |
1052 | 53.9k | c->hint = cache->links[index].hint; |
1053 | 53.9k | cache->links[c->prev_rs].hint = index + 1; |
1054 | 53.9k | c->prev_rs = index; |
1055 | 53.9k | } |
1056 | 53.9k | if (ret >= 0) |
1057 | 53.9k | tdep_reuse_frame (c, cache->links[index].signal_frame); |
1058 | 53.9k | put_rs_cache (c->as, cache, &saved_mask); |
1059 | 53.9k | } |
1060 | 53.9k | return ret; |
1061 | 53.9k | } |
1062 | | |
1063 | | /* The function finds the saved locations and applies the register |
1064 | | state as well. */ |
1065 | | HIDDEN int |
1066 | | dwarf_step (struct dwarf_cursor *c) |
1067 | 53.9k | { |
1068 | 53.9k | int ret; |
1069 | 53.9k | dwarf_state_record_t sr; |
1070 | 53.9k | if ((ret = find_reg_state (c, &sr)) < 0) |
1071 | 0 | return ret; |
1072 | 53.9k | return apply_reg_state (c, &sr.rs_current); |
1073 | 53.9k | } |
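
dwarf_step() is what ultimately backs unw_step() for DWARF frames. For context, this is the ordinary public-API unwind loop a client would write; it is standard libunwind usage, shown only to connect the cursor-level calls to the find/apply machinery above:

    #define UNW_LOCAL_ONLY
    #include <libunwind.h>
    #include <stdio.h>

    static void show_backtrace (void)
    {
      unw_context_t uc;
      unw_cursor_t cursor;
      unw_word_t ip, sp;

      unw_getcontext (&uc);
      unw_init_local (&cursor, &uc);
      while (unw_step (&cursor) > 0)    /* returns > 0 until ip == 0 / end of stack */
        {
          unw_get_reg (&cursor, UNW_REG_IP, &ip);
          unw_get_reg (&cursor, UNW_REG_SP, &sp);
          printf ("ip=0x%lx sp=0x%lx\n", (unsigned long) ip, (unsigned long) sp);
        }
    }

    int main (void) { show_backtrace (); return 0; }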
1074 | | |
1075 | | HIDDEN int |
1076 | | dwarf_make_proc_info (struct dwarf_cursor *c) |
1077 | 0 | { |
1078 | | #if 0 |
1079 | | if (c->as->caching_policy == UNW_CACHE_NONE |
1080 | | || get_cached_proc_info (c) < 0) |
1081 | | #endif |
1082 | | /* Need to check if current frame contains |
1083 | | args_size, and set cursor appropriately. Only |
1084 | | needed for unw_resume */ |
1085 | 0 | dwarf_state_record_t sr; |
1086 | 0 | sr.args_size = 0; |
1087 | 0 | int ret; |
1088 | | |
1089 | | /* Look it up the slow way... */ |
1090 | 0 | ret = fetch_proc_info (c, c->ip); |
1091 | 0 | if (ret >= 0) |
1092 | 0 | ret = create_state_record_for (c, &sr, c->ip); |
1093 | 0 | put_unwind_info (c, &c->pi); |
1094 | 0 | if (ret < 0) |
1095 | 0 | return ret; |
1096 | 0 | c->args_size = sr.args_size; |
1097 | | |
1098 | 0 | return 0; |
1099 | 0 | } |
1100 | | |
1101 | | static int |
1102 | | dwarf_reg_states_dynamic_iterate(struct dwarf_cursor *c UNUSED, |
1103 | | unw_reg_states_callback cb UNUSED, |
1104 | | void *token UNUSED) |
1105 | 0 | { |
1106 | 0 | Debug (1, "Not yet implemented\n"); |
1107 | 0 | return -UNW_ENOINFO; |
1108 | 0 | } |
1109 | | |
1110 | | static int |
1111 | | dwarf_reg_states_table_iterate(struct dwarf_cursor *c, |
1112 | | unw_reg_states_callback cb, |
1113 | | void *token) |
1114 | 0 | { |
1115 | 0 | dwarf_state_record_t sr; |
1116 | 0 | int ret = setup_fde(c, &sr); |
1117 | 0 | struct dwarf_cie_info *dci = c->pi.unwind_info; |
1118 | 0 | unw_word_t addr = dci->fde_instr_start; |
1119 | 0 | unw_word_t curr_ip = c->pi.start_ip; |
1120 | 0 | dwarf_stackable_reg_state_t *rs_stack = NULL; |
1121 | 0 | while (ret >= 0 && curr_ip < c->pi.end_ip && addr < dci->fde_instr_end) |
1122 | 0 | { |
1123 | 0 | unw_word_t prev_ip = curr_ip; |
1124 | 0 | ret = run_cfi_program (c, &sr, &curr_ip, prev_ip, &addr, dci->fde_instr_end, |
1125 | 0 | &rs_stack, dci); |
1126 | 0 | if (ret >= 0 && prev_ip < curr_ip) |
1127 | 0 | ret = cb(token, &sr.rs_current, sizeof(sr.rs_current), prev_ip, curr_ip); |
1128 | 0 | } |
1129 | 0 | empty_rstate_stack(&rs_stack); |
1130 | | #if defined(NEED_LAST_IP) |
1131 | | if (ret >= 0 && curr_ip < c->pi.last_ip) |
1132 | | /* report the dead zone after the procedure ends */ |
1133 | | ret = cb(token, &sr.rs_current, sizeof(sr.rs_current), curr_ip, c->pi.last_ip); |
1134 | | #else |
1135 | 0 | if (ret >= 0 && curr_ip < c->pi.end_ip) |
1136 | | /* report for whatever is left before procedure end */ |
1137 | 0 | ret = cb(token, &sr.rs_current, sizeof(sr.rs_current), curr_ip, c->pi.end_ip); |
1138 | 0 | #endif |
1139 | 0 | return ret; |
1140 | 0 | } |
1141 | | |
1142 | | HIDDEN int |
1143 | | dwarf_reg_states_iterate(struct dwarf_cursor *c, |
1144 | | unw_reg_states_callback cb, |
1145 | | void *token) |
1146 | 0 | { |
1147 | 0 | int ret = fetch_proc_info (c, c->ip); |
1148 | 0 | int next_use_prev_instr = c->use_prev_instr; |
1149 | 0 | if (ret >= 0) |
1150 | 0 | { |
1151 | | /* Update use_prev_instr for the next frame. */ |
1152 | 0 | assert(c->pi.unwind_info); |
1153 | 0 | struct dwarf_cie_info *dci = c->pi.unwind_info; |
1154 | 0 | next_use_prev_instr = ! dci->signal_frame; |
1155 | 0 | switch (c->pi.format) |
1156 | 0 | { |
1157 | 0 | case UNW_INFO_FORMAT_TABLE: |
1158 | 0 | case UNW_INFO_FORMAT_REMOTE_TABLE: |
1159 | 0 | ret = dwarf_reg_states_table_iterate(c, cb, token); |
1160 | 0 | break; |
1161 | | |
1162 | 0 | case UNW_INFO_FORMAT_DYNAMIC: |
1163 | 0 | ret = dwarf_reg_states_dynamic_iterate (c, cb, token); |
1164 | 0 | break; |
1165 | | |
1166 | 0 | default: |
1167 | 0 | Debug (1, "Unexpected unwind-info format %d\n", c->pi.format); |
1168 | 0 | ret = -UNW_EINVAL; |
1169 | 0 | } |
1170 | 0 | } |
1171 | 0 | put_unwind_info (c, &c->pi); |
1172 | 0 | c->use_prev_instr = next_use_prev_instr; |
1173 | 0 | return ret; |
1174 | 0 | } |
1175 | | |
1176 | | HIDDEN int |
1177 | | dwarf_apply_reg_state (struct dwarf_cursor *c, struct dwarf_reg_state *rs) |
1178 | 0 | { |
1179 | 0 | return apply_reg_state(c, rs); |
1180 | 0 | } |