/src/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Produce stack trace

#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_

#if defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
#include <ucontext.h>  // for ucontext_t
#endif

#if !defined(_WIN32)
#include <unistd.h>
#endif

#include <cassert>
#include <cstdint>
#include <limits>

#include "absl/base/attributes.h"
#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/debugging/internal/address_is_readable.h"
#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
#include "absl/debugging/stacktrace.h"

using absl::debugging_internal::AddressIsReadable;

#if defined(__linux__) && defined(__i386__)
// Count "push %reg" instructions in VDSO __kernel_vsyscall(),
// preceding "syscall" or "sysenter".
// If __kernel_vsyscall uses frame pointer, answer 0.
//
// kMaxBytes tells how many instruction bytes of __kernel_vsyscall
// to analyze before giving up. Up to kMaxBytes+1 bytes of
// instructions could be accessed.
//
// Here are known __kernel_vsyscall instruction sequences:
//
// SYSENTER (linux-2.6.26/arch/x86/vdso/vdso32/sysenter.S).
// Used on Intel.
//  0xffffe400 <__kernel_vsyscall+0>: push %ecx
//  0xffffe401 <__kernel_vsyscall+1>: push %edx
//  0xffffe402 <__kernel_vsyscall+2>: push %ebp
//  0xffffe403 <__kernel_vsyscall+3>: mov %esp,%ebp
//  0xffffe405 <__kernel_vsyscall+5>: sysenter
//
// SYSCALL (see linux-2.6.26/arch/x86/vdso/vdso32/syscall.S).
// Used on AMD.
//  0xffffe400 <__kernel_vsyscall+0>: push %ebp
//  0xffffe401 <__kernel_vsyscall+1>: mov %ecx,%ebp
//  0xffffe403 <__kernel_vsyscall+3>: syscall
//

// The sequence below is not normally expected in practice; it is listed
// here only for completeness.

// i386 (see linux-2.6.26/arch/x86/vdso/vdso32/int80.S)
//  0xffffe400 <__kernel_vsyscall+0>: int $0x80
//  0xffffe401 <__kernel_vsyscall+1>: ret
//
static const int kMaxBytes = 10;

// We use assert()s instead of DCHECK()s -- this is too low level
// for DCHECK().

static int CountPushInstructions(const unsigned char *const addr) {
  int result = 0;
  for (int i = 0; i < kMaxBytes; ++i) {
    if (addr[i] == 0x89) {
      // "mov reg,reg"
      if (addr[i + 1] == 0xE5) {
        // Found "mov %esp,%ebp".
        return 0;
      }
      ++i;  // Skip register encoding byte.
    } else if (addr[i] == 0x0F &&
               (addr[i + 1] == 0x34 || addr[i + 1] == 0x05)) {
      // Found "sysenter" or "syscall".
      return result;
    } else if ((addr[i] & 0xF0) == 0x50) {
      // Found "push %reg".
      ++result;
    } else if (addr[i] == 0xCD && addr[i + 1] == 0x80) {
      // Found "int $0x80".
      assert(result == 0);
      return 0;
    } else {
      // Unexpected instruction.
      assert(false && "unexpected instruction in __kernel_vsyscall");
      return 0;
    }
  }
  // Unexpected: didn't find SYSENTER or SYSCALL in
  // [__kernel_vsyscall, __kernel_vsyscall + kMaxBytes) interval.
  assert(false && "did not find SYSENTER or SYSCALL in __kernel_vsyscall");
  return 0;
}
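
// Editorial worked example (not from the original source): applying
// CountPushInstructions to the documented sequences above. The Intel
// SYSENTER sequence encodes as 0x51 0x52 0x55 0x89 0xE5 ..., so the scan
// counts three pushes, then hits "mov %esp,%ebp" and returns 0 (the VDSO
// keeps a frame pointer). The AMD SYSCALL sequence encodes as
// 0x55 0x89 0xCD 0x0F 0x05: one "push %ebp" is counted, the ModRM byte of
// "mov %ecx,%ebp" is skipped, and the function returns 1 on "syscall".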
#endif

// Assume stack frames larger than 100,000 bytes are bogus.
static const int kMaxFrameBytes = 100000;
// Stack end to use when we don't know the actual stack end
// (effectively just the end of address space).
constexpr uintptr_t kUnknownStackEnd =
    std::numeric_limits<size_t>::max() - sizeof(void *);

// Returns the stack frame pointer from signal context, 0 if unknown.
// vuc is a ucontext_t *. We use void* to avoid the use
// of ucontext_t on non-POSIX systems.
static uintptr_t GetFP(const void *vuc) {
#if !defined(__linux__)
  static_cast<void>(vuc);  // Avoid an unused argument compiler warning.
#else
  if (vuc != nullptr) {
    auto *uc = reinterpret_cast<const ucontext_t *>(vuc);
#if defined(__i386__)
    const auto bp = uc->uc_mcontext.gregs[REG_EBP];
    const auto sp = uc->uc_mcontext.gregs[REG_ESP];
#elif defined(__x86_64__)
    const auto bp = uc->uc_mcontext.gregs[REG_RBP];
    const auto sp = uc->uc_mcontext.gregs[REG_RSP];
#else
    const uintptr_t bp = 0;
    const uintptr_t sp = 0;
#endif
    // Sanity-check that the base pointer is valid. It's possible that some
    // code in the process is compiled with --copt=-fomit-frame-pointer or
    // --copt=-momit-leaf-frame-pointer.
    //
    // TODO(bcmills): -momit-leaf-frame-pointer is currently the default
    // behavior when building with clang. Talk to the C++ toolchain team about
    // fixing that.
    if (bp >= sp && bp - sp <= kMaxFrameBytes)
      return static_cast<uintptr_t>(bp);

    // If bp isn't a plausible frame pointer, return the stack pointer instead.
    // If we're lucky, it points to the start of a stack frame; otherwise, we'll
    // get one frame of garbage in the stack trace and fail the sanity check on
    // the next iteration.
    return static_cast<uintptr_t>(sp);
  }
#endif
  return 0;
}
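
// Illustrative usage (an editorial sketch, not part of this file): the vuc
// argument typically originates from a SA_SIGINFO signal handler, whose
// third parameter is the ucontext_t that callers pass through untyped:
//
//   void Handler(int signo, siginfo_t *info, void *ucontext) {
//     uintptr_t fp = GetFP(ucontext);  // frame pointer at signal delivery
//     // ... unwind starting from fp ...
//   }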

// Given a pointer to a stack frame, locate and return the calling
// stackframe, or return null if no stackframe can be found. Perform sanity
// checks (the strictness of which is controlled by the boolean parameter
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
template <bool STRICT_UNWINDING, bool WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
static void **NextStackFrame(void **old_fp, const void *uc,
                             size_t stack_low, size_t stack_high) {
  void **new_fp = (void **)*old_fp;

#if defined(__linux__) && defined(__i386__)
  if (WITH_CONTEXT && uc != nullptr) {
    // How many "push %reg" instructions are there at __kernel_vsyscall?
    // This is constant for a given kernel and processor, so compute
    // it only once.
    static int num_push_instructions = -1;  // Sentinel: not computed yet.
    // Initialize with sentinel value: __kernel_rt_sigreturn cannot possibly
    // be there.
    static const unsigned char *kernel_rt_sigreturn_address = nullptr;
    static const unsigned char *kernel_vsyscall_address = nullptr;
    if (num_push_instructions == -1) {
#ifdef ABSL_HAVE_VDSO_SUPPORT
      absl::debugging_internal::VDSOSupport vdso;
      if (vdso.IsPresent()) {
        absl::debugging_internal::VDSOSupport::SymbolInfo
            rt_sigreturn_symbol_info;
        absl::debugging_internal::VDSOSupport::SymbolInfo vsyscall_symbol_info;
        if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.5", STT_FUNC,
                               &rt_sigreturn_symbol_info) ||
            !vdso.LookupSymbol("__kernel_vsyscall", "LINUX_2.5", STT_FUNC,
                               &vsyscall_symbol_info) ||
            rt_sigreturn_symbol_info.address == nullptr ||
            vsyscall_symbol_info.address == nullptr) {
          // Unexpected: 32-bit VDSO is present, yet one of the expected
          // symbols is missing or null.
          assert(false && "VDSO is present, but doesn't have expected symbols");
          num_push_instructions = 0;
        } else {
          kernel_rt_sigreturn_address =
              reinterpret_cast<const unsigned char *>(
                  rt_sigreturn_symbol_info.address);
          kernel_vsyscall_address =
              reinterpret_cast<const unsigned char *>(
                  vsyscall_symbol_info.address);
          num_push_instructions =
              CountPushInstructions(kernel_vsyscall_address);
        }
      } else {
        num_push_instructions = 0;
      }
#else   // ABSL_HAVE_VDSO_SUPPORT
      num_push_instructions = 0;
#endif  // ABSL_HAVE_VDSO_SUPPORT
    }
    if (num_push_instructions != 0 && kernel_rt_sigreturn_address != nullptr &&
        old_fp[1] == kernel_rt_sigreturn_address) {
      const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
      // This kernel does not use a frame pointer in its VDSO code,
      // and so %ebp is not suitable for unwinding.
      void **const reg_ebp =
          reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_EBP]);
      const unsigned char *const reg_eip =
          reinterpret_cast<unsigned char *>(ucv->uc_mcontext.gregs[REG_EIP]);
      if (new_fp == reg_ebp && kernel_vsyscall_address <= reg_eip &&
          reg_eip - kernel_vsyscall_address < kMaxBytes) {
        // We "stepped up" to __kernel_vsyscall, but %ebp is not usable.
        // Restore from 'ucv' instead.
        void **const reg_esp =
            reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_ESP]);
        // Check that the alleged %esp is not null and is reasonably aligned.
        if (reg_esp &&
            ((uintptr_t)reg_esp & (sizeof(reg_esp) - 1)) == 0) {
          // Check that the alleged %esp is actually readable. This is to
          // prevent a "double fault" in case we hit the first fault due to
          // e.g. stack corruption.
          void *const reg_esp2 = reg_esp[num_push_instructions - 1];
          if (AddressIsReadable(reg_esp2)) {
            // The alleged %esp is readable; use it for further unwinding.
            new_fp = reinterpret_cast<void **>(reg_esp2);
          }
        }
      }
    }
  }
#endif

  const uintptr_t old_fp_u = reinterpret_cast<uintptr_t>(old_fp);
  const uintptr_t new_fp_u = reinterpret_cast<uintptr_t>(new_fp);

  // Check that the transition from frame pointer old_fp to frame
  // pointer new_fp isn't clearly bogus. Skip the checks if new_fp
  // matches the signal context, so that we don't skip out early when
  // using an alternate signal stack.
  //
  // TODO(bcmills): The GetFP call should be completely unnecessary when
  // ENABLE_COMBINED_UNWINDER is set (because we should be back in the thread's
  // stack by this point), but it is empirically still needed (e.g. when the
  // stack includes a call to abort). unw_get_reg returns UNW_EBADREG for some
  // frames. Figure out why GetValidFrameAddr and/or libunwind isn't doing what
  // it's supposed to.
  if (STRICT_UNWINDING &&
      (!WITH_CONTEXT || uc == nullptr || new_fp_u != GetFP(uc))) {
    // With the stack growing downwards, an older stack frame must be
    // at a greater address than the current one.
    if (new_fp_u <= old_fp_u) return nullptr;

    // If we get a very large frame size, it may be an indication that we
    // guessed frame pointers incorrectly and now risk a paging fault
    // dereferencing a wrong frame pointer. Or maybe not, because large frames
    // are possible as well. The main stack is assumed to be readable,
    // so we assume the large frame is legit if we know the real stack bounds
    // and are within the stack.
    if (new_fp_u - old_fp_u > kMaxFrameBytes) {
      if (stack_high < kUnknownStackEnd &&
          static_cast<size_t>(getpagesize()) < stack_low) {
        // Stack bounds are known.
        if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) {
          // new_fp_u is not within the known stack.
          return nullptr;
        }
      } else {
        // Stack bounds are unknown; prefer a truncated stack to a possible
        // crash.
        return nullptr;
      }
    }
    if (stack_low < old_fp_u && old_fp_u <= stack_high) {
      // The old BP was in the expected stack region...
      if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) {
        // ... but the new BP is outside of the expected stack region.
        // It is most likely bogus.
        return nullptr;
      }
    } else {
      // We may be here if we are executing in a co-routine with a
      // separate stack. We can't do safety checks in this case.
    }
  } else {
    if (new_fp == nullptr) return nullptr;  // skip AddressIsReadable() below
    // In the non-strict mode, allow discontiguous stack frames
    // (alternate signal stacks, for example).
    if (new_fp == old_fp) return nullptr;
  }

  if (new_fp_u & (sizeof(void *) - 1)) return nullptr;
#ifdef __i386__
  // On 32-bit machines, the stack pointer can be very close to
  // 0xffffffff, so we explicitly check for a pointer into the
  // last two pages in the address space.
  if (new_fp_u >= 0xffffe000) return nullptr;
#endif
#if !defined(_WIN32)
  if (!STRICT_UNWINDING) {
    // Lax sanity checks cause a crash in 32-bit tcmalloc/crash_reason_test
    // on AMD-based machines with VDSO-enabled kernels.
    // Make an extra sanity check to ensure new_fp is readable.
    // Note: NextStackFrame<false>() is only called while the program
    // is already on its last leg, so it's ok to be slow here.

    if (!AddressIsReadable(new_fp)) {
      return nullptr;
    }
  }
#endif
  return new_fp;
}
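
// Editorial note (derived from the caller below, not from the original
// comments): UnwindImpl instantiates NextStackFrame<!IS_STACK_FRAMES,
// IS_WITH_CONTEXT>, so the strict checks above are applied exactly when
// frame sizes are not being measured; when IS_STACK_FRAMES is set, the lax
// path plus the AddressIsReadable() probe is used instead.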

template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
ABSL_ATTRIBUTE_NOINLINE
static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
                      const void *ucp, int *min_dropped_frames) {
  int n = 0;
  void **fp = reinterpret_cast<void **>(__builtin_frame_address(0));

  // Assume that the first page is not stack.
  size_t stack_low = static_cast<size_t>(getpagesize());
  size_t stack_high = kUnknownStackEnd;

  while (fp && n < max_depth) {
    if (*(fp + 1) == reinterpret_cast<void *>(0)) {
      // In 64-bit code, we often see a frame that
      // points to itself and has a return address of 0.
      break;
    }
    void **next_fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
        fp, ucp, stack_low, stack_high);
    if (skip_count > 0) {
      skip_count--;
    } else {
      result[n] = *(fp + 1);
      if (IS_STACK_FRAMES) {
        if (next_fp > fp) {
          sizes[n] = static_cast<int>(
              reinterpret_cast<uintptr_t>(next_fp) -
              reinterpret_cast<uintptr_t>(fp));
        } else {
          // A frame size of 0 is used to indicate an unknown frame size.
          sizes[n] = 0;
        }
      }
      n++;
    }
    fp = next_fp;
  }
  if (min_dropped_frames != nullptr) {
    // Implementation detail: we clamp the maximum number of frames we are
    // willing to count, so as not to spend too much time in the loop below.
    const int kMaxUnwind = 1000;
    int num_dropped_frames = 0;
    for (int j = 0; fp != nullptr && j < kMaxUnwind; j++) {
      if (skip_count > 0) {
        skip_count--;
      } else {
        num_dropped_frames++;
      }
      fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp, stack_low,
                                                             stack_high);
    }
    *min_dropped_frames = num_dropped_frames;
  }
  return n;
}
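
// Illustrative usage (an editorial sketch; the wrapper itself lives in
// stacktrace.cc, not in this file): the public absl::GetStackTrace() entry
// point ultimately dispatches to UnwindImpl above.
//
//   void *frames[64];
//   int depth = absl::GetStackTrace(frames, /*max_depth=*/64,
//                                   /*skip_count=*/1);
//   // frames[0..depth) now holds return addresses of the caller's stack.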

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
bool StackTraceWorksForTest() {
  return true;
}
}  // namespace debugging_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_