/src/abseil-cpp/absl/debugging/stacktrace.cc
Line | Count | Source |
1 | | // Copyright 2017 The Abseil Authors. |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // https://www.apache.org/licenses/LICENSE-2.0 |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | |
15 | | // Produce stack trace. |
16 | | // |
17 | | // There are three different ways we can try to get the stack trace: |
18 | | // |
19 | | // 1) Our hand-coded stack-unwinder. This depends on a certain stack |
20 | | // layout, which is used by gcc (and those systems using a |
21 | | // gcc-compatible ABI) on x86 systems, at least since gcc 2.95. |
22 | | // It uses the frame pointer to do its work. |
23 | | // |
24 | | // 2) The libunwind library. This is still in development, and as a |
25 | | // separate library adds a new dependency, but doesn't need a frame |
26 | | // pointer. It also doesn't call malloc. |
27 | | // |
28 | | // 3) The gdb unwinder -- also the one used by the c++ exception code. |
29 | | // It's obviously well-tested, but has a fatal flaw: it can call |
30 | | // malloc() from the unwinder. This is a problem because we're |
31 | | // trying to use the unwinder to instrument malloc(). |
32 | | // |
33 | | // Note: if you add a new implementation here, make sure it works |
34 | | // correctly when absl::GetStackTrace() is called with max_depth == 0. |
35 | | // Some code may do that. |
36 | | |
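A minimal caller sketch for the entry point described above, assuming only the public header absl/debugging/stacktrace.h; the buffer size here is arbitrary, and the max_depth == 0 call exercises the edge case the note above requires every implementation to handle.

#include "absl/debugging/stacktrace.h"

void ExampleCapture() {
  // Capture up to 16 return addresses, skipping this frame itself.
  void* pcs[16];
  int depth = absl::GetStackTrace(pcs, /*max_depth=*/16, /*skip_count=*/1);

  // Per the note above, max_depth == 0 must also work; it records nothing
  // and returns 0.
  int none = absl::GetStackTrace(pcs, /*max_depth=*/0, /*skip_count=*/0);

  (void)depth;
  (void)none;
}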
37 | | #include "absl/debugging/stacktrace.h" |
38 | | |
39 | | #include <stddef.h> |
40 | | #include <stdint.h> |
41 | | #include <stdlib.h> |
42 | | |
43 | | #include <algorithm> |
44 | | #include <atomic> |
45 | | #include <iterator> |
46 | | |
47 | | #include "absl/base/attributes.h" |
48 | | #include "absl/base/config.h" |
49 | | #include "absl/base/internal/low_level_alloc.h" |
50 | | #include "absl/base/optimization.h" |
51 | | #include "absl/base/port.h" |
52 | | #include "absl/debugging/internal/stacktrace_config.h" |
53 | | |
54 | | #if defined(ABSL_STACKTRACE_INL_HEADER) |
55 | | #include ABSL_STACKTRACE_INL_HEADER |
56 | | #else |
57 | | # error Cannot calculate stack trace: will need to write for your environment |
58 | | |
59 | | # include "absl/debugging/internal/stacktrace_aarch64-inl.inc" |
60 | | # include "absl/debugging/internal/stacktrace_arm-inl.inc" |
61 | | # include "absl/debugging/internal/stacktrace_emscripten-inl.inc" |
62 | | # include "absl/debugging/internal/stacktrace_generic-inl.inc" |
63 | | # include "absl/debugging/internal/stacktrace_powerpc-inl.inc" |
64 | | # include "absl/debugging/internal/stacktrace_riscv-inl.inc" |
65 | | # include "absl/debugging/internal/stacktrace_unimplemented-inl.inc" |
66 | | # include "absl/debugging/internal/stacktrace_win32-inl.inc" |
67 | | # include "absl/debugging/internal/stacktrace_x86-inl.inc" |
68 | | #endif |
69 | | |
70 | | namespace absl { |
71 | | ABSL_NAMESPACE_BEGIN |
72 | | namespace { |
73 | | |
74 | | typedef int (*Unwinder)(void**, int*, int, int, const void*, int*); |
75 | | std::atomic<Unwinder> custom; |
76 | | |
77 | | template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT> |
78 | | ABSL_ATTRIBUTE_ALWAYS_INLINE inline int Unwind(void** result, uintptr_t* frames, |
79 | | int* sizes, size_t max_depth, |
80 | | int skip_count, const void* uc, |
81 | | int* min_dropped_frames, |
82 | 1.09k | bool unwind_with_fixup = true) { |
83 | 1.09k | static constexpr size_t kMinPageSize = 4096; |
84 | | |
85 | | // Allow up to ~half a page, leaving some slack space for local variables etc. |
86 | 1.09k | static constexpr size_t kMaxStackElements = |
87 | 1.09k | (kMinPageSize / 2) / (sizeof(*frames) + sizeof(*sizes)); |
88 | | |
89 | | // Allocate a buffer dynamically, using the signal-safe allocator. |
90 | 1.09k | static constexpr auto allocate = [](size_t num_bytes) -> void* { |
91 | 0 | base_internal::InitSigSafeArena(); |
92 | 0 | return base_internal::LowLevelAlloc::AllocWithArena( |
93 | 0 | num_bytes, base_internal::SigSafeArena()); |
94 | 0 | };
Unexecuted instantiation: stacktrace.cc:absl::(anonymous namespace)::Unwind<true, false>(void**, unsigned long*, int*, unsigned long, int, void const*, int*, bool)::{lambda(unsigned long)#1}::operator()(unsigned long) const
Unexecuted instantiation: stacktrace.cc:absl::(anonymous namespace)::Unwind<true, true>(void**, unsigned long*, int*, unsigned long, int, void const*, int*, bool)::{lambda(unsigned long)#1}::operator()(unsigned long) const
Unexecuted instantiation: stacktrace.cc:absl::(anonymous namespace)::Unwind<false, false>(void**, unsigned long*, int*, unsigned long, int, void const*, int*, bool)::{lambda(unsigned long)#1}::operator()(unsigned long) const
Unexecuted instantiation: stacktrace.cc:absl::(anonymous namespace)::Unwind<false, true>(void**, unsigned long*, int*, unsigned long, int, void const*, int*, bool)::{lambda(unsigned long)#1}::operator()(unsigned long) const
95 | | |
96 | 1.09k | uintptr_t frames_stackbuf[kMaxStackElements]; |
97 | 1.09k | int sizes_stackbuf[kMaxStackElements]; |
98 | | |
99 | | // We only need to free the buffers if we allocated them with the signal-safe |
100 | | // allocator. |
101 | 1.09k | bool must_free_frames = false; |
102 | 1.09k | bool must_free_sizes = false; |
103 | | |
104 | 1.09k | unwind_with_fixup = |
105 | 1.09k | unwind_with_fixup && internal_stacktrace::ShouldFixUpStack(); |
106 | | |
107 | | #ifdef _WIN32 |
108 | | if (unwind_with_fixup) { |
109 | | // TODO(b/434184677): Fixups are flaky and not supported on Windows |
110 | | unwind_with_fixup = false; |
111 | | #ifndef NDEBUG |
112 | | abort(); |
113 | | #endif |
114 | | } |
115 | | #endif |
116 | | |
117 | 1.09k | if (unwind_with_fixup) { |
118 | | // Some implementations of FixUpStack may need to be passed frame |
119 | | // information from Unwind, even if the caller doesn't need that |
120 | | // information. We allocate the necessary buffers for such implementations |
121 | | // here. |
122 | |
123 | 0 | if (frames == nullptr) { |
124 | 0 | if (max_depth <= std::size(frames_stackbuf)) { |
125 | 0 | frames = frames_stackbuf; |
126 | 0 | } else { |
127 | 0 | frames = static_cast<uintptr_t*>(allocate(max_depth * sizeof(*frames))); |
128 | 0 | must_free_frames = true; |
129 | 0 | } |
130 | 0 | } |
131 | |
132 | 0 | if (sizes == nullptr) { |
133 | 0 | if (max_depth <= std::size(sizes_stackbuf)) { |
134 | 0 | sizes = sizes_stackbuf; |
135 | 0 | } else { |
136 | 0 | sizes = static_cast<int*>(allocate(max_depth * sizeof(*sizes))); |
137 | 0 | must_free_sizes = true; |
138 | 0 | } |
139 | 0 | } |
140 | 0 | } |
141 | | |
142 | 1.09k | Unwinder g = custom.load(std::memory_order_acquire); |
143 | 1.09k | size_t size; |
144 | | // Add 1 to skip count for the unwinder function itself |
145 | 1.09k | ++skip_count; |
146 | 1.09k | if (g != nullptr) { |
147 | 0 | size = static_cast<size_t>((*g)(result, sizes, static_cast<int>(max_depth), |
148 | 0 | skip_count, uc, min_dropped_frames)); |
149 | | // Frame pointers aren't returned by existing hooks, so clear them. |
150 | 0 | if (frames != nullptr) { |
151 | 0 | std::fill(frames, frames + size, uintptr_t()); |
152 | 0 | } |
153 | 1.09k | } else { |
154 | 1.09k | size = static_cast<size_t>( |
155 | 1.09k | unwind_with_fixup |
156 | 1.09k | ? UnwindImpl<true, IS_WITH_CONTEXT>( |
157 | 0 | result, frames, sizes, static_cast<int>(max_depth), |
158 | 0 | skip_count, uc, min_dropped_frames) |
159 | 1.09k | : UnwindImpl<IS_STACK_FRAMES, IS_WITH_CONTEXT>( |
160 | 1.09k | result, frames, sizes, static_cast<int>(max_depth), |
161 | 1.09k | skip_count, uc, min_dropped_frames)); |
162 | 1.09k | } |
163 | 1.09k | if (unwind_with_fixup) { |
164 | 0 | internal_stacktrace::FixUpStack(result, frames, sizes, max_depth, size); |
165 | 0 | } |
166 | | |
167 | 1.09k | if (must_free_sizes) { |
168 | 0 | base_internal::LowLevelAlloc::Free(sizes); |
169 | 0 | } |
170 | | |
171 | 1.09k | if (must_free_frames) { |
172 | 0 | base_internal::LowLevelAlloc::Free(frames); |
173 | 0 | } |
174 | | |
175 | 1.09k | ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); |
176 | 1.09k | return static_cast<int>(size); |
177 | 1.09k | }
Unexecuted instantiation: stacktrace.cc:int absl::(anonymous namespace)::Unwind<true, false>(void**, unsigned long*, int*, unsigned long, int, void const*, int*, bool)
Unexecuted instantiation: stacktrace.cc:int absl::(anonymous namespace)::Unwind<true, true>(void**, unsigned long*, int*, unsigned long, int, void const*, int*, bool)
Instantiation stacktrace.cc:int absl::(anonymous namespace)::Unwind<false, false>(void**, unsigned long*, int*, unsigned long, int, void const*, int*, bool): 1.09k executions; per-line counts identical to the annotated body at lines 82-177 above.
Unexecuted instantiation: stacktrace.cc:int absl::(anonymous namespace)::Unwind<false, true>(void**, unsigned long*, int*, unsigned long, int, void const*, int*, bool)
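As a quick check of the scratch-buffer sizing in Unwind() above, a minimal sketch assuming a typical LP64 target (8-byte uintptr_t, 4-byte int); other ABIs yield a different element count.

#include <cstddef>
#include <cstdint>

// Mirrors the constants in Unwind(): roughly half of a 4 KiB minimum page
// is budgeted for the on-stack frames/sizes scratch arrays.
constexpr std::size_t kMinPageSize = 4096;
constexpr std::size_t kMaxStackElements =
    (kMinPageSize / 2) / (sizeof(std::uintptr_t) + sizeof(int));

// On LP64 this works out to 2048 / (8 + 4) = 170, so when FixUpStack needs
// buffers the caller did not supply, depths up to 170 stay on the stack and
// never touch the signal-safe LowLevelAlloc arena.
static_assert(sizeof(std::uintptr_t) != 8 || sizeof(int) != 4 ||
                  kMaxStackElements == 170,
              "expected 170 scratch elements on an LP64 ABI");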
178 | | |
179 | | } // anonymous namespace |
180 | | |
181 | | ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int |
182 | | internal_stacktrace::GetStackFrames(void** result, uintptr_t* frames, |
183 | 0 | int* sizes, int max_depth, int skip_count) { |
184 | 0 | return Unwind<true, false>(result, frames, sizes, |
185 | 0 | static_cast<size_t>(max_depth), skip_count, |
186 | 0 | nullptr, nullptr); |
187 | 0 | } |
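The overload above additionally reports frame pointers; the public absl::GetStackFrames() wrapper in stacktrace.h takes only the pcs and sizes arrays. A sketch of a typical call (buffer sizes are arbitrary):

#include "absl/debugging/stacktrace.h"

void ExampleFrameSizes() {
  void* pcs[32];
  int sizes[32];
  int depth = absl::GetStackFrames(pcs, sizes, /*max_depth=*/32,
                                   /*skip_count=*/1);
  for (int i = 0; i < depth; ++i) {
    // pcs[i] is the return address of frame i; sizes[i] is a best-effort
    // estimate of that frame's stack size, or 0 when it is unknown.
  }
}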
188 | | |
189 | | ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int |
190 | | internal_stacktrace::GetStackFramesWithContext(void** result, uintptr_t* frames, |
191 | | int* sizes, int max_depth, |
192 | | int skip_count, const void* uc, |
193 | 0 | int* min_dropped_frames) { |
194 | 0 | return Unwind<true, true>(result, frames, sizes, |
195 | 0 | static_cast<size_t>(max_depth), skip_count, uc, |
196 | 0 | min_dropped_frames); |
197 | 0 | } |
198 | | |
199 | | ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int |
200 | | internal_stacktrace::GetStackTraceNoFixup(void** result, int max_depth, |
201 | 0 | int skip_count) { |
202 | 0 | return Unwind<false, false>(result, nullptr, nullptr, |
203 | 0 | static_cast<size_t>(max_depth), skip_count, |
204 | 0 | nullptr, nullptr, /*unwind_with_fixup=*/false); |
205 | 0 | } |
206 | | |
207 | | ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackTrace( |
208 | 1.09k | void** result, int max_depth, int skip_count) { |
209 | 1.09k | return Unwind<false, false>(result, nullptr, nullptr, |
210 | 1.09k | static_cast<size_t>(max_depth), skip_count, |
211 | 1.09k | nullptr, nullptr); |
212 | 1.09k | } |
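The counts above show GetStackTrace() as the entry point exercised in this run. A common follow-up is symbolizing the captured addresses; a sketch, assuming absl/debugging/symbolize.h and that absl::InitializeSymbolizer() was called during program startup:

#include <cstdio>

#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"

void DumpCurrentStack() {
  void* pcs[64];
  int depth = absl::GetStackTrace(pcs, /*max_depth=*/64, /*skip_count=*/1);
  for (int i = 0; i < depth; ++i) {
    char symbol[256];
    // Symbolize() returns false when no symbol is known for this address.
    if (absl::Symbolize(pcs[i], symbol, sizeof(symbol))) {
      std::printf("  %p  %s\n", pcs[i], symbol);
    } else {
      std::printf("  %p  (unknown)\n", pcs[i]);
    }
  }
}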
213 | | |
214 | | ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int |
215 | | GetStackTraceWithContext(void** result, int max_depth, int skip_count, |
216 | 0 | const void* uc, int* min_dropped_frames) { |
217 | 0 | return Unwind<false, true>(result, nullptr, nullptr, |
218 | 0 | static_cast<size_t>(max_depth), skip_count, uc, |
219 | 0 | min_dropped_frames); |
220 | 0 | } |
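GetStackTraceWithContext() exists mainly so signal handlers can hand the kernel-provided ucontext to the unwinder and resume the walk at the interrupted frame. A POSIX-only sketch; the choice of SIGSEGV and the handler names are illustrative.

#include <csignal>

#include "absl/debugging/stacktrace.h"

void ExampleSignalHandler(int /*signo*/, siginfo_t* /*info*/, void* uc) {
  void* pcs[64];
  int min_dropped = 0;
  // `uc` is the ucontext_t* for the interrupted thread; passing it lets the
  // unwinder start from the faulting frame rather than from this handler.
  int depth = absl::GetStackTraceWithContext(pcs, /*max_depth=*/64,
                                             /*skip_count=*/0, uc,
                                             &min_dropped);
  (void)depth;
  (void)min_dropped;  // Lower bound on frames that did not fit in `pcs`.
}

void InstallExampleHandler() {
  struct sigaction sa = {};
  sa.sa_sigaction = &ExampleSignalHandler;
  sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
  sigaction(SIGSEGV, &sa, nullptr);
}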
221 | | |
222 | 0 | void SetStackUnwinder(Unwinder w) { |
223 | 0 | custom.store(w, std::memory_order_release); |
224 | 0 | } |
225 | | |
226 | | ABSL_ATTRIBUTE_ALWAYS_INLINE static inline int DefaultStackUnwinderImpl( |
227 | | void** pcs, uintptr_t* frames, int* sizes, int depth, int skip, |
228 | 0 | const void* uc, int* min_dropped_frames) { |
229 | 0 | skip++; // For this function |
230 | 0 | decltype(&UnwindImpl<false, false>) f; |
231 | 0 | if (sizes == nullptr) { |
232 | 0 | if (uc == nullptr) { |
233 | 0 | f = &UnwindImpl<false, false>; |
234 | 0 | } else { |
235 | 0 | f = &UnwindImpl<false, true>; |
236 | 0 | } |
237 | 0 | } else { |
238 | 0 | if (uc == nullptr) { |
239 | 0 | f = &UnwindImpl<true, false>; |
240 | 0 | } else { |
241 | 0 | f = &UnwindImpl<true, true>; |
242 | 0 | } |
243 | 0 | } |
244 | 0 | return (*f)(pcs, frames, sizes, depth, skip, uc, min_dropped_frames); |
245 | 0 | } |
246 | | |
247 | | ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int |
248 | | internal_stacktrace::DefaultStackUnwinder(void** pcs, uintptr_t* frames, |
249 | | int* sizes, int depth, int skip, |
250 | | const void* uc, |
251 | 0 | int* min_dropped_frames) { |
252 | 0 | int n = DefaultStackUnwinderImpl(pcs, frames, sizes, depth, skip, uc, |
253 | 0 | min_dropped_frames); |
254 | 0 | ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); |
255 | 0 | return n; |
256 | 0 | } |
257 | | |
258 | | ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int DefaultStackUnwinder( |
259 | | void** pcs, int* sizes, int depth, int skip, const void* uc, |
260 | 0 | int* min_dropped_frames) { |
261 | 0 | int n = DefaultStackUnwinderImpl(pcs, nullptr, sizes, depth, skip, uc, |
262 | 0 | min_dropped_frames); |
263 | 0 | ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); |
264 | 0 | return n; |
265 | 0 | } |
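A sketch of how the custom-unwinder hook stored in `custom` is typically installed: define a function with the signature SetStackUnwinder() expects (the Unwinder typedef above) and delegate the actual walking to the public DefaultStackUnwinder(). The hook names here are illustrative; hooks must be async-signal-safe and must not allocate.

#include "absl/debugging/stacktrace.h"

// Matches the Unwinder signature used by SetStackUnwinder(): pcs, sizes,
// max_depth, skip_count, uc, min_dropped_frames.
int ExampleUnwinder(void** pcs, int* sizes, int max_depth, int skip_count,
                    const void* uc, int* min_dropped_frames) {
  // Delegate to the default unwinder; a real hook might post-process the
  // result before returning it.
  return absl::DefaultStackUnwinder(pcs, sizes, max_depth, skip_count, uc,
                                    min_dropped_frames);
}

void InstallExampleUnwinder() {
  // After this call, GetStackTrace()/GetStackFrames() route through the
  // hook; note that Unwind() above then zeroes any requested frame-pointer
  // array, since hooks do not report frame pointers.
  absl::SetStackUnwinder(&ExampleUnwinder);
}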
266 | | |
267 | 1.09k | ABSL_ATTRIBUTE_WEAK bool internal_stacktrace::ShouldFixUpStack() { |
268 | 1.09k | return false; |
269 | 1.09k | } |
270 | | |
271 | | // Fixes up the stack trace of the current thread, in the first `depth` frames |
272 | | // of each buffer. The buffers need to be larger than `depth`, to accommodate |
273 | | // any newly inserted elements. `depth` is updated to reflect the new number of |
274 | | // elements valid across all the buffers. (It is therefore recommended that all |
275 | | // buffer sizes be equal.) |
276 | | // |
277 | | // The `frames` and `sizes` parameters denote the bounds of the stack frame |
278 | | // corresponding to each instruction pointer in the `pcs`. |
279 | | // Any elements inside these buffers may be zero or null, in which case that |
280 | | // information is assumed to be absent/unavailable. |
281 | | ABSL_ATTRIBUTE_WEAK void internal_stacktrace::FixUpStack(void**, uintptr_t*, |
282 | | int*, size_t, |
283 | 0 | size_t&) {} |
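These weak definitions are an internal extension point: a binary that links strong definitions of both symbols gets its FixUpStack() called on every captured trace. A conceptual sketch only; the internal_stacktrace namespace is not a supported API, and the exact header declaring these hooks is not shown here.

#include <cstddef>
#include <cstdint>

#include "absl/base/config.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace internal_stacktrace {

// Strong definitions override the ABSL_ATTRIBUTE_WEAK defaults above.
// Returning true makes Unwind() capture frame/size buffers (allocating
// scratch space if the caller passed none) and then call FixUpStack().
bool ShouldFixUpStack() { return true; }

// May insert, remove, or rewrite entries among the first `depth` elements,
// as long as `depth` never exceeds `capacity` and is updated to the new
// element count valid across all three buffers.
void FixUpStack(void** pcs, uintptr_t* frames, int* sizes, size_t capacity,
                size_t& depth) {
  (void)pcs;
  (void)frames;
  (void)sizes;
  (void)capacity;
  (void)depth;  // Illustrative no-op.
}

}  // namespace internal_stacktrace
ABSL_NAMESPACE_END
}  // namespace absl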
284 | | |
285 | | ABSL_NAMESPACE_END |
286 | | } // namespace absl |