Coverage Report

Created: 2025-11-11 07:05

/src/abseil-cpp/absl/debugging/stacktrace.cc
Line | Count | Source
   1 |       | // Copyright 2017 The Abseil Authors.
   2 |       | //
   3 |       | // Licensed under the Apache License, Version 2.0 (the "License");
   4 |       | // you may not use this file except in compliance with the License.
   5 |       | // You may obtain a copy of the License at
   6 |       | //
   7 |       | //      https://www.apache.org/licenses/LICENSE-2.0
   8 |       | //
   9 |       | // Unless required by applicable law or agreed to in writing, software
  10 |       | // distributed under the License is distributed on an "AS IS" BASIS,
  11 |       | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12 |       | // See the License for the specific language governing permissions and
  13 |       | // limitations under the License.
  14 |       |
  15 |       | // Produce stack trace.
  16 |       | //
  17 |       | // There are three different ways we can try to get the stack trace:
  18 |       | //
  19 |       | // 1) Our hand-coded stack-unwinder.  This depends on a certain stack
  20 |       | //    layout, which is used by gcc (and those systems using a
  21 |       | //    gcc-compatible ABI) on x86 systems, at least since gcc 2.95.
  22 |       | //    It uses the frame pointer to do its work.
  23 |       | //
  24 |       | // 2) The libunwind library.  This is still in development, and as a
  25 |       | //    separate library adds a new dependency, but doesn't need a frame
  26 |       | //    pointer.  It also doesn't call malloc.
  27 |       | //
  28 |       | // 3) The gdb unwinder -- also the one used by the c++ exception code.
  29 |       | //    It's obviously well-tested, but has a fatal flaw: it can call
  30 |       | //    malloc() from the unwinder.  This is a problem because we're
  31 |       | //    trying to use the unwinder to instrument malloc().
  32 |       | //
  33 |       | // Note: if you add a new implementation here, make sure it works
  34 |       | // correctly when absl::GetStackTrace() is called with max_depth == 0.
  35 |       | // Some code may do that.
  36 |       |
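The header comment above refers to the public entry point absl::GetStackTrace(result, max_depth, skip_count), whose definition appears later in this file. A minimal usage sketch follows; the helper name LogCurrentStack and the buffer size of 32 are illustrative assumptions, not part of the file under coverage:

    #include "absl/debugging/stacktrace.h"

    // Hypothetical helper: capture up to 32 return addresses of the caller.
    void LogCurrentStack() {
      void* pcs[32];
      // skip_count == 0 means the first recorded entry points into
      // LogCurrentStack itself; a result of 0 frames is legal (see the
      // max_depth == 0 note above).
      int depth = absl::GetStackTrace(pcs, /*max_depth=*/32, /*skip_count=*/0);
      for (int i = 0; i < depth; ++i) {
        // pcs[i] holds a code address; it can be symbolized separately.
      }
    }
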
  37 |       | #include "absl/debugging/stacktrace.h"
  38 |       |
  39 |       | #include <stddef.h>
  40 |       | #include <stdint.h>
  41 |       | #include <stdlib.h>
  42 |       |
  43 |       | #include <algorithm>
  44 |       | #include <atomic>
  45 |       | #include <iterator>
  46 |       | #include <type_traits>
  47 |       |
  48 |       | #include "absl/base/attributes.h"
  49 |       | #include "absl/base/config.h"
  50 |       | #include "absl/base/internal/low_level_alloc.h"
  51 |       | #include "absl/base/optimization.h"
  52 |       | #include "absl/base/port.h"
  53 |       | #include "absl/debugging/internal/stacktrace_config.h"
  54 |       |
  55 |       | #if defined(ABSL_STACKTRACE_INL_HEADER)
  56 |       | #include ABSL_STACKTRACE_INL_HEADER
  57 |       | #else
  58 |       | # error Cannot calculate stack trace: will need to write for your environment
  59 |       |
  60 |       | # include "absl/debugging/internal/stacktrace_aarch64-inl.inc"
  61 |       | # include "absl/debugging/internal/stacktrace_arm-inl.inc"
  62 |       | # include "absl/debugging/internal/stacktrace_emscripten-inl.inc"
  63 |       | # include "absl/debugging/internal/stacktrace_generic-inl.inc"
  64 |       | # include "absl/debugging/internal/stacktrace_powerpc-inl.inc"
  65 |       | # include "absl/debugging/internal/stacktrace_riscv-inl.inc"
  66 |       | # include "absl/debugging/internal/stacktrace_unimplemented-inl.inc"
  67 |       | # include "absl/debugging/internal/stacktrace_win32-inl.inc"
  68 |       | # include "absl/debugging/internal/stacktrace_x86-inl.inc"
  69 |       | #endif
  70 |       |
  71 |       | namespace absl {
  72 |       | ABSL_NAMESPACE_BEGIN
  73 |       | namespace {
  74 |       |
  75 |       | typedef int (*Unwinder)(void**, int*, int, int, const void*, int*);
  76 |       | std::atomic<Unwinder> custom;
  77 |       |
  78 |       | constexpr size_t kMinPageSize = 4096;
  79 |       |
  80 |       | struct FixupBuffer {
  81 |       |   static constexpr size_t kMaxStackElements = 128;  // Can be reduced if needed
  82 |       |   uintptr_t frames[kMaxStackElements];
  83 |       |   int sizes[kMaxStackElements];
  84 |       | };
  85 |       | static_assert(std::is_trivially_default_constructible_v<FixupBuffer>);
  86 |       | static_assert(sizeof(FixupBuffer) < kMinPageSize / 2,
  87 |       |               "buffer size should no larger than a small fraction of a page, "
  88 |       |               "to avoid stack overflows");
  89 |       |
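For context on the static_assert above: on a typical LP64 target the buffer occupies 128 * sizeof(uintptr_t) + 128 * sizeof(int) = 1024 + 512 = 1536 bytes, which stays below the kMinPageSize / 2 = 2048-byte bound that guards against overflowing small stacks (the exact figures are platform-dependent).
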
  90 |       | template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
  91 |       | ABSL_ATTRIBUTE_ALWAYS_INLINE inline int Unwind(
  92 |       |     void** result, uintptr_t* frames, int* sizes, size_t max_depth,
  93 |       |     int skip_count, const void* uc, int* min_dropped_frames,
  94 | 1.16k |     FixupBuffer* fixup_buffer /* if NULL, fixups are skipped */) {
  95 |       |   // Allocate a buffer dynamically, using the signal-safe allocator.
  96 | 1.16k |   static constexpr auto allocate = [](size_t num_bytes) -> void* {
  97 |     0 |     base_internal::InitSigSafeArena();
  98 |     0 |     return base_internal::LowLevelAlloc::AllocWithArena(
  99 |     0 |         num_bytes, base_internal::SigSafeArena());
 100 |     0 |   };
Unexecuted instantiation: stacktrace.cc:absl::(anonymous namespace)::Unwind<true, false>(void**, unsigned long*, int*, unsigned long, int, void const*, int*, absl::(anonymous namespace)::FixupBuffer*)::{lambda(unsigned long)#1}::operator()(unsigned long) const
Unexecuted instantiation: stacktrace.cc:absl::(anonymous namespace)::Unwind<true, true>(void**, unsigned long*, int*, unsigned long, int, void const*, int*, absl::(anonymous namespace)::FixupBuffer*)::{lambda(unsigned long)#1}::operator()(unsigned long) const
Unexecuted instantiation: stacktrace.cc:absl::(anonymous namespace)::Unwind<false, false>(void**, unsigned long*, int*, unsigned long, int, void const*, int*, absl::(anonymous namespace)::FixupBuffer*)::{lambda(unsigned long)#1}::operator()(unsigned long) const
Unexecuted instantiation: stacktrace.cc:absl::(anonymous namespace)::Unwind<false, true>(void**, unsigned long*, int*, unsigned long, int, void const*, int*, absl::(anonymous namespace)::FixupBuffer*)::{lambda(unsigned long)#1}::operator()(unsigned long) const
 101 |       |
 102 |       |   // We only need to free the buffers if we allocated them with the signal-safe
 103 |       |   // allocator.
 104 | 1.16k |   bool must_free_frames = false;
 105 | 1.16k |   bool must_free_sizes = false;
 106 |       |
 107 | 1.16k |   bool unwind_with_fixup =
 108 | 1.16k |       fixup_buffer != nullptr && internal_stacktrace::ShouldFixUpStack();
 109 |       |
 110 |       | #ifdef _WIN32
 111 |       |   if (unwind_with_fixup) {
 112 |       |     // TODO(b/434184677): Fixups are flaky and not supported on Windows
 113 |       |     unwind_with_fixup = false;
 114 |       | #ifndef NDEBUG
 115 |       |     abort();
 116 |       | #endif
 117 |       |   }
 118 |       | #endif
 119 |       |
 120 | 1.16k |   if (unwind_with_fixup) {
 121 |       |     // Some implementations of FixUpStack may need to be passed frame
 122 |       |     // information from Unwind, even if the caller doesn't need that
 123 |       |     // information. We allocate the necessary buffers for such implementations
 124 |       |     // here.
 125 |       |
 126 |     0 |     if (frames == nullptr) {
 127 |     0 |       if (max_depth <= std::size(fixup_buffer->frames)) {
 128 |     0 |         frames = fixup_buffer->frames;
 129 |     0 |       } else {
 130 |     0 |         frames = static_cast<uintptr_t*>(allocate(max_depth * sizeof(*frames)));
 131 |     0 |         must_free_frames = true;
 132 |     0 |       }
 133 |     0 |     }
 134 |       |
 135 |     0 |     if (sizes == nullptr) {
 136 |     0 |       if (max_depth <= std::size(fixup_buffer->sizes)) {
 137 |     0 |         sizes = fixup_buffer->sizes;
 138 |     0 |       } else {
 139 |     0 |         sizes = static_cast<int*>(allocate(max_depth * sizeof(*sizes)));
 140 |     0 |         must_free_sizes = true;
 141 |     0 |       }
 142 |     0 |     }
 143 |     0 |   }
 144 |       |
 145 | 1.16k |   Unwinder g = custom.load(std::memory_order_acquire);
 146 | 1.16k |   size_t size;
 147 |       |   // Add 1 to skip count for the unwinder function itself
 148 | 1.16k |   ++skip_count;
 149 | 1.16k |   if (g != nullptr) {
 150 |     0 |     size = static_cast<size_t>((*g)(result, sizes, static_cast<int>(max_depth),
 151 |     0 |                                     skip_count, uc, min_dropped_frames));
 152 |       |     // Frame pointers aren't returned by existing hooks, so clear them.
 153 |     0 |     if (frames != nullptr) {
 154 |     0 |       std::fill(frames, frames + size, uintptr_t());
 155 |     0 |     }
 156 | 1.16k |   } else {
 157 | 1.16k |     size = static_cast<size_t>(
 158 | 1.16k |         unwind_with_fixup
 159 | 1.16k |             ? UnwindImpl<true, IS_WITH_CONTEXT>(
 160 |     0 |                   result, frames, sizes, static_cast<int>(max_depth),
 161 |     0 |                   skip_count, uc, min_dropped_frames)
 162 | 1.16k |             : UnwindImpl<IS_STACK_FRAMES, IS_WITH_CONTEXT>(
 163 | 1.16k |                   result, frames, sizes, static_cast<int>(max_depth),
 164 | 1.16k |                   skip_count, uc, min_dropped_frames));
 165 | 1.16k |   }
 166 | 1.16k |   if (unwind_with_fixup) {
 167 |     0 |     internal_stacktrace::FixUpStack(result, frames, sizes, max_depth, size);
 168 |     0 |   }
 169 |       |
 170 | 1.16k |   if (must_free_sizes) {
 171 |     0 |     base_internal::LowLevelAlloc::Free(sizes);
 172 |     0 |   }
 173 |       |
 174 | 1.16k |   if (must_free_frames) {
 175 |     0 |     base_internal::LowLevelAlloc::Free(frames);
 176 |     0 |   }
 177 |       |
 178 | 1.16k |   ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
 179 | 1.16k |   return static_cast<int>(size);
 180 | 1.16k | }
Unexecuted instantiation: stacktrace.cc:int absl::(anonymous namespace)::Unwind<true, false>(void**, unsigned long*, int*, unsigned long, int, void const*, int*, absl::(anonymous namespace)::FixupBuffer*)
Unexecuted instantiation: stacktrace.cc:int absl::(anonymous namespace)::Unwind<true, true>(void**, unsigned long*, int*, unsigned long, int, void const*, int*, absl::(anonymous namespace)::FixupBuffer*)
stacktrace.cc:int absl::(anonymous namespace)::Unwind<false, false>(void**, unsigned long*, int*, unsigned long, int, void const*, int*, absl::(anonymous namespace)::FixupBuffer*)
Line | Count | Source
  94 | 1.16k |     FixupBuffer* fixup_buffer /* if NULL, fixups are skipped */) {
  95 |       |   // Allocate a buffer dynamically, using the signal-safe allocator.
  96 | 1.16k |   static constexpr auto allocate = [](size_t num_bytes) -> void* {
  97 | 1.16k |     base_internal::InitSigSafeArena();
  98 | 1.16k |     return base_internal::LowLevelAlloc::AllocWithArena(
  99 | 1.16k |         num_bytes, base_internal::SigSafeArena());
 100 | 1.16k |   };
 101 |       |
 102 |       |   // We only need to free the buffers if we allocated them with the signal-safe
 103 |       |   // allocator.
 104 | 1.16k |   bool must_free_frames = false;
 105 | 1.16k |   bool must_free_sizes = false;
 106 |       |
 107 | 1.16k |   bool unwind_with_fixup =
 108 | 1.16k |       fixup_buffer != nullptr && internal_stacktrace::ShouldFixUpStack();
 109 |       |
 110 |       | #ifdef _WIN32
 111 |       |   if (unwind_with_fixup) {
 112 |       |     // TODO(b/434184677): Fixups are flaky and not supported on Windows
 113 |       |     unwind_with_fixup = false;
 114 |       | #ifndef NDEBUG
 115 |       |     abort();
 116 |       | #endif
 117 |       |   }
 118 |       | #endif
 119 |       |
 120 | 1.16k |   if (unwind_with_fixup) {
 121 |       |     // Some implementations of FixUpStack may need to be passed frame
 122 |       |     // information from Unwind, even if the caller doesn't need that
 123 |       |     // information. We allocate the necessary buffers for such implementations
 124 |       |     // here.
 125 |       |
 126 |     0 |     if (frames == nullptr) {
 127 |     0 |       if (max_depth <= std::size(fixup_buffer->frames)) {
 128 |     0 |         frames = fixup_buffer->frames;
 129 |     0 |       } else {
 130 |     0 |         frames = static_cast<uintptr_t*>(allocate(max_depth * sizeof(*frames)));
 131 |     0 |         must_free_frames = true;
 132 |     0 |       }
 133 |     0 |     }
 134 |       |
 135 |     0 |     if (sizes == nullptr) {
 136 |     0 |       if (max_depth <= std::size(fixup_buffer->sizes)) {
 137 |     0 |         sizes = fixup_buffer->sizes;
 138 |     0 |       } else {
 139 |     0 |         sizes = static_cast<int*>(allocate(max_depth * sizeof(*sizes)));
 140 |     0 |         must_free_sizes = true;
 141 |     0 |       }
 142 |     0 |     }
 143 |     0 |   }
 144 |       |
 145 | 1.16k |   Unwinder g = custom.load(std::memory_order_acquire);
 146 | 1.16k |   size_t size;
 147 |       |   // Add 1 to skip count for the unwinder function itself
 148 | 1.16k |   ++skip_count;
 149 | 1.16k |   if (g != nullptr) {
 150 |     0 |     size = static_cast<size_t>((*g)(result, sizes, static_cast<int>(max_depth),
 151 |     0 |                                     skip_count, uc, min_dropped_frames));
 152 |       |     // Frame pointers aren't returned by existing hooks, so clear them.
 153 |     0 |     if (frames != nullptr) {
 154 |     0 |       std::fill(frames, frames + size, uintptr_t());
 155 |     0 |     }
 156 | 1.16k |   } else {
 157 | 1.16k |     size = static_cast<size_t>(
 158 | 1.16k |         unwind_with_fixup
 159 | 1.16k |             ? UnwindImpl<true, IS_WITH_CONTEXT>(
 160 |     0 |                   result, frames, sizes, static_cast<int>(max_depth),
 161 |     0 |                   skip_count, uc, min_dropped_frames)
 162 | 1.16k |             : UnwindImpl<IS_STACK_FRAMES, IS_WITH_CONTEXT>(
 163 | 1.16k |                   result, frames, sizes, static_cast<int>(max_depth),
 164 | 1.16k |                   skip_count, uc, min_dropped_frames));
 165 | 1.16k |   }
 166 | 1.16k |   if (unwind_with_fixup) {
 167 |     0 |     internal_stacktrace::FixUpStack(result, frames, sizes, max_depth, size);
 168 |     0 |   }
 169 |       |
 170 | 1.16k |   if (must_free_sizes) {
 171 |     0 |     base_internal::LowLevelAlloc::Free(sizes);
 172 |     0 |   }
 173 |       |
 174 | 1.16k |   if (must_free_frames) {
 175 |     0 |     base_internal::LowLevelAlloc::Free(frames);
 176 |     0 |   }
 177 |       |
 178 | 1.16k |   ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
 179 | 1.16k |   return static_cast<int>(size);
 180 | 1.16k | }
Unexecuted instantiation: stacktrace.cc:int absl::(anonymous namespace)::Unwind<false, true>(void**, unsigned long*, int*, unsigned long, int, void const*, int*, absl::(anonymous namespace)::FixupBuffer*)
 181 |       |
 182 |       | }  // anonymous namespace
 183 |       |
 184 |       | ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
 185 |       | internal_stacktrace::GetStackFrames(void** result, uintptr_t* frames,
 186 |     0 |                                     int* sizes, int max_depth, int skip_count) {
 187 |     0 |   FixupBuffer fixup_stack_buf;
 188 |     0 |   return Unwind<true, false>(result, frames, sizes,
 189 |     0 |                              static_cast<size_t>(max_depth), skip_count,
 190 |     0 |                              nullptr, nullptr, &fixup_stack_buf);
 191 |     0 | }
 192 |       |
 193 |       | ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
 194 |       | internal_stacktrace::GetStackFramesWithContext(void** result, uintptr_t* frames,
 195 |       |                                                int* sizes, int max_depth,
 196 |       |                                                int skip_count, const void* uc,
 197 |     0 |                                                int* min_dropped_frames) {
 198 |     0 |   FixupBuffer fixup_stack_buf;
 199 |     0 |   return Unwind<true, true>(result, frames, sizes,
 200 |     0 |                             static_cast<size_t>(max_depth), skip_count, uc,
 201 |     0 |                             min_dropped_frames, &fixup_stack_buf);
 202 |     0 | }
 203 |       |
 204 |       | ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
 205 |       | internal_stacktrace::GetStackTraceNoFixup(void** result, int max_depth,
 206 |     0 |                                           int skip_count) {
 207 |     0 |   return Unwind<false, false>(result, nullptr, nullptr,
 208 |     0 |                               static_cast<size_t>(max_depth), skip_count,
 209 |     0 |                               nullptr, nullptr, nullptr);
 210 |     0 | }
 211 |       |
 212 |       | ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackTrace(
 213 | 1.16k |     void** result, int max_depth, int skip_count) {
 214 | 1.16k |   FixupBuffer fixup_stack_buf;
 215 | 1.16k |   return Unwind<false, false>(result, nullptr, nullptr,
 216 | 1.16k |                               static_cast<size_t>(max_depth), skip_count,
 217 | 1.16k |                               nullptr, nullptr, &fixup_stack_buf);
 218 | 1.16k | }
 219 |       |
 220 |       | ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
 221 |       | GetStackTraceWithContext(void** result, int max_depth, int skip_count,
 222 |     0 |                          const void* uc, int* min_dropped_frames) {
 223 |     0 |   FixupBuffer fixup_stack_buf;
 224 |     0 |   return Unwind<false, true>(result, nullptr, nullptr,
 225 |     0 |                              static_cast<size_t>(max_depth), skip_count, uc,
 226 |     0 |                              min_dropped_frames, &fixup_stack_buf);
 227 |     0 | }
 228 |       |
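GetStackTraceWithContext is not executed in this run. It is typically exercised from a signal handler, where the third sigaction argument supplies the context that lets the unwinder resume from the frame that raised the signal. A minimal sketch under that assumption (the handler name and buffer size are illustrative, not part of the file under coverage):

    #include <csignal>
    #include "absl/debugging/stacktrace.h"

    // Hypothetical SA_SIGINFO handler.
    void CrashHandler(int /*signo*/, siginfo_t* /*info*/, void* ucontext) {
      void* pcs[64];
      int min_dropped_frames = 0;
      // Passing the ucontext lets the unwinder start from the faulting frame.
      int depth = absl::GetStackTraceWithContext(pcs, /*max_depth=*/64,
                                                 /*skip_count=*/0, ucontext,
                                                 &min_dropped_frames);
      (void)depth;  // Record pcs[0..depth) using only async-signal-safe calls.
    }
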
 229 |     0 | void SetStackUnwinder(Unwinder w) {
 230 |     0 |   custom.store(w, std::memory_order_release);
 231 |     0 | }
 232 |       |
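SetStackUnwinder (also unexecuted here) installs the hook that Unwind reads via custom.load(). A minimal sketch of registering a custom unwinder matching the Unwinder typedef above; the function names are illustrative, and this sketch simply forwards to absl::DefaultStackUnwinder:

    #include "absl/debugging/stacktrace.h"

    // Hypothetical unwinder with the int(void**, int*, int, int, const void*,
    // int*) signature expected by absl::SetStackUnwinder.
    int MyUnwinder(void** pcs, int* sizes, int max_depth, int skip_count,
                   const void* uc, int* min_dropped_frames) {
      // A real hook would do its own unwinding; this one just delegates.
      return absl::DefaultStackUnwinder(pcs, sizes, max_depth, skip_count, uc,
                                        min_dropped_frames);
    }

    void InstallCustomUnwinder() {
      absl::SetStackUnwinder(&MyUnwinder);  // Stored with release semantics.
    }
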
 233 |       | ABSL_ATTRIBUTE_ALWAYS_INLINE static inline int DefaultStackUnwinderImpl(
 234 |       |     void** pcs, uintptr_t* frames, int* sizes, int depth, int skip,
 235 |     0 |     const void* uc, int* min_dropped_frames) {
 236 |     0 |   skip++;  // For this function
 237 |     0 |   decltype(&UnwindImpl<false, false>) f;
 238 |     0 |   if (sizes == nullptr) {
 239 |     0 |     if (uc == nullptr) {
 240 |     0 |       f = &UnwindImpl<false, false>;
 241 |     0 |     } else {
 242 |     0 |       f = &UnwindImpl<false, true>;
 243 |     0 |     }
 244 |     0 |   } else {
 245 |     0 |     if (uc == nullptr) {
 246 |     0 |       f = &UnwindImpl<true, false>;
 247 |     0 |     } else {
 248 |     0 |       f = &UnwindImpl<true, true>;
 249 |     0 |     }
 250 |     0 |   }
 251 |     0 |   return (*f)(pcs, frames, sizes, depth, skip, uc, min_dropped_frames);
 252 |     0 | }
 253 |       |
 254 |       | ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
 255 |       | internal_stacktrace::DefaultStackUnwinder(void** pcs, uintptr_t* frames,
 256 |       |                                           int* sizes, int depth, int skip,
 257 |       |                                           const void* uc,
 258 |     0 |                                           int* min_dropped_frames) {
 259 |     0 |   int n = DefaultStackUnwinderImpl(pcs, frames, sizes, depth, skip, uc,
 260 |     0 |                                    min_dropped_frames);
 261 |     0 |   ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
 262 |     0 |   return n;
 263 |     0 | }
 264 |       |
 265 |       | ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int DefaultStackUnwinder(
 266 |       |     void** pcs, int* sizes, int depth, int skip, const void* uc,
 267 |     0 |     int* min_dropped_frames) {
 268 |     0 |   int n = DefaultStackUnwinderImpl(pcs, nullptr, sizes, depth, skip, uc,
 269 |     0 |                                    min_dropped_frames);
 270 |     0 |   ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
 271 |     0 |   return n;
 272 |     0 | }
 273 |       |
 274 | 1.16k | ABSL_ATTRIBUTE_WEAK bool internal_stacktrace::ShouldFixUpStack() {
 275 | 1.16k |   return false;
 276 | 1.16k | }
 277 |       |
 278 |       | // Fixes up the stack trace of the current thread, in the first `depth` frames
 279 |       | // of each buffer. The buffers need to be larger than `depth`, to accommodate
 280 |       | // any newly inserted elements. `depth` is updated to reflect the new number of
 281 |       | // elements valid across all the buffers. (It is therefore recommended that all
 282 |       | // buffer sizes be equal.)
 283 |       | //
 284 |       | // The `frames` and `sizes` parameters denote the bounds of the stack frame
 285 |       | // corresponding to each instruction pointer in the `pcs`.
 286 |       | // Any elements inside these buffers may be zero or null, in which case that
 287 |       | // information is assumed to be absent/unavailable.
 288 |       | ABSL_ATTRIBUTE_WEAK void internal_stacktrace::FixUpStack(void**, uintptr_t*,
 289 |       |                                                          int*, size_t,
 290 |     0 |                                                          size_t&) {}
 291 |       |
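The two ABSL_ATTRIBUTE_WEAK definitions above are default hooks: ShouldFixUpStack() reports whether fixups are wanted and FixUpStack() rewrites a captured trace in place. A build can supply strong definitions with the same signatures to enable fixups. A minimal sketch of such an override, assuming the corresponding declarations from Abseil's stacktrace headers are in scope (this is an internal extension point, shown for illustration only; the body below is a placeholder, not a real fixup):

    #include <cstddef>
    #include <cstdint>
    #include "absl/debugging/stacktrace.h"

    namespace absl {
    ABSL_NAMESPACE_BEGIN
    namespace internal_stacktrace {

    // Strong definition overrides the weak default above, enabling fixups.
    bool ShouldFixUpStack() { return true; }

    // Strong definition overrides the weak no-op. Per the contract documented
    // above, a real implementation may insert or rewrite entries in the first
    // `depth` elements of pcs/frames/sizes and must update `depth` in place.
    void FixUpStack(void** pcs, uintptr_t* frames, int* sizes, size_t max_depth,
                    size_t& depth) {
      (void)pcs; (void)frames; (void)sizes;
      if (depth > max_depth) depth = max_depth;  // Placeholder: keep depth valid.
    }

    }  // namespace internal_stacktrace
    ABSL_NAMESPACE_END
    }  // namespace absl
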
 292 |       | ABSL_NAMESPACE_END
 293 |       | }  // namespace absl