Coverage Report

Created: 2026-04-01 06:29

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/abseil-cpp/absl/base/internal/thread_identity.h
Line
Count
Source
1
// Copyright 2017 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
//
15
// Each active thread has a ThreadIdentity that may represent the thread in
16
// various level interfaces.  ThreadIdentity objects are never deallocated.
17
// When a thread terminates, its ThreadIdentity object may be reused for a
18
// thread created later.
19
20
#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
21
#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
22
23
#ifndef _WIN32
24
#include <pthread.h>
25
// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
26
// supported.
27
#include <unistd.h>
28
#endif
29
30
#include <atomic>
31
#include <cstdint>
32
33
#include "absl/base/config.h"
34
#include "absl/base/internal/per_thread_tls.h"
35
#include "absl/base/optimization.h"
36
37
// Forward declare Gloop class for scheduling.
38
// TODO: b/495759467 - Remove this forward declaration.
39
namespace base {
40
namespace scheduling {
41
class Schedulable;
42
}  // namespace scheduling
43
}  // namespace base
44
45
namespace absl {
46
ABSL_NAMESPACE_BEGIN
47
48
struct SynchLocksHeld;
49
struct SynchWaitParams;
50
51
namespace base_internal {
52
53
class SpinLock;
54
struct ThreadIdentity;
55
56
// Used by the implementation of absl::Mutex and absl::CondVar.
57
struct PerThreadSynch {
58
  // The internal representation of absl::Mutex and absl::CondVar rely
59
  // on the alignment of PerThreadSynch. Both store the address of the
60
  // PerThreadSynch in the high-order bits of their internal state,
61
  // which means the low kLowZeroBits of the address of PerThreadSynch
62
  // must be zero.
63
  static constexpr int kLowZeroBits = 8;
64
  static constexpr int kAlignment = 1 << kLowZeroBits;
65
66
  // Returns the associated ThreadIdentity.
67
  // This can be implemented as a cast because we guarantee
68
  // PerThreadSynch is the first element of ThreadIdentity.
69
0
  ThreadIdentity* thread_identity() {
70
0
    return reinterpret_cast<ThreadIdentity*>(this);
71
0
  }
72
73
  PerThreadSynch* next;  // Circular waiter queue; initialized to 0.
74
  PerThreadSynch* skip;  // If non-zero, all entries in Mutex queue
75
                         // up to and including "skip" have same
76
                         // condition as this, and will be woken later
77
  bool may_skip;         // if false while on mutex queue, a mutex unlocker
78
                         // is using this PerThreadSynch as a terminator.  Its
79
                         // skip field must not be filled in because the loop
80
                         // might then skip over the terminator.
81
  bool wake;             // This thread is to be woken from a Mutex.
82
  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
83
  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
84
  //
85
  // The value of "x->cond_waiter" is meaningless if "x" is not on a
86
  // Mutex waiter list.
87
  bool cond_waiter;
88
  bool maybe_unlocking;  // Valid at head of Mutex waiter queue;
89
                         // true if UnlockSlow could be searching
90
                         // for a waiter to wake.  Used for an optimization
91
                         // in Enqueue().  true is always a valid value.
92
                         // Can be reset to false when the unlocker or any
93
                         // writer releases the lock, or a reader fully
94
                         // releases the lock.  It may not be set to false
95
                         // by a reader that decrements the count to
96
                         // non-zero. protected by mutex spinlock
97
  bool suppress_fatal_errors;  // If true, try to proceed even in the face
98
                               // of broken invariants.  This is used within
99
                               // fatal signal handlers to improve the
100
                               // chances of debug logging information being
101
                               // output successfully.
102
  int priority;                // Priority of thread (updated every so often).
103
104
  // State values:
105
  //   kAvailable: This PerThreadSynch is available.
106
  //   kQueued: This PerThreadSynch is unavailable, it's currently queued on a
107
//            Mutex or CondVar waitlist.
108
  //
109
  // Transitions from kQueued to kAvailable require a release
110
  // barrier. This is needed as a waiter may use "state" to
111
  // independently observe that it's no longer queued.
112
  //
113
  // Transitions from kAvailable to kQueued require no barrier, they
114
  // are externally ordered by the Mutex.
115
  enum State { kAvailable, kQueued };
116
  std::atomic<State> state;
117
118
  // The wait parameters of the current wait.  waitp is null if the
119
  // thread is not waiting. Transitions from null to non-null must
120
  // occur before the enqueue commit point (state = kQueued in
121
  // Enqueue() and CondVarEnqueue()). Transitions from non-null to
122
  // null must occur after the wait is finished (state = kAvailable in
123
  // Mutex::Block() and CondVar::WaitCommon()). This field may be
124
  // changed only by the thread that describes this PerThreadSynch.  A
125
  // special case is Fer(), which calls Enqueue() on another thread,
126
  // but with an identical SynchWaitParams pointer, thus leaving the
127
  // pointer unchanged.
128
  SynchWaitParams* waitp;
129
130
  intptr_t readers;  // Number of readers in mutex.
131
132
  // When priority will next be read (cycles).
133
  int64_t next_priority_read_cycles;
134
135
  // Locks held; used during deadlock detection.
136
  // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
137
  SynchLocksHeld* all_locks;
138
};
139
140
// The instances of this class are allocated in NewThreadIdentity() with an
141
// alignment of PerThreadSynch::kAlignment and never destroyed. Initialization
142
// should happen in OneTimeInitThreadIdentity().
143
//
144
// Instances may be reused by new threads - fields should be reset in
145
// ResetThreadIdentityBetweenReuse().
146
//
147
// NOTE: The layout of fields in this structure is critical, please do not
148
//       add, remove, or modify the field placements without fully auditing the
149
//       layout.
150
struct ThreadIdentity {
151
  // Must be the first member.  The Mutex implementation requires that
152
  // the PerThreadSynch object associated with each thread is
153
  // PerThreadSynch::kAlignment aligned.  We provide this alignment on
154
  // ThreadIdentity itself.
155
  PerThreadSynch per_thread_synch;
156
157
  struct SchedulerState {
158
    std::atomic<base::scheduling::Schedulable*> bound_schedulable{nullptr};
159
    // Storage space for a SpinLock, which is created through a placement new to
160
    // break a dependency cycle.
161
    uint32_t association_lock_word;
162
    std::atomic<int> scheduling_disabled_depth;
163
    int potentially_blocking_depth;
164
    uint32_t schedule_next_state;
165
166
    // When true, current thread is unlocking a mutex and actively waking a
167
    // thread that was previously waiting, but that lock has yet more waiters.
168
    // Used to signal to schedulers that work being woken should get an
169
    // elevated priority.
170
    bool waking_designated_waker;
171
172
0
    inline SpinLock* association_lock() {
173
0
      return reinterpret_cast<SpinLock*>(&association_lock_word);
174
0
    }
175
176
    // TODO: b/495759467 - Migrate all callers.
177
0
    inline base::scheduling::Schedulable* get_bound_schedulable() const {
178
0
      return bound_schedulable.load(std::memory_order_relaxed);
179
0
    }
180
  } scheduler_state;  // Private: Reserved for use in Gloop
181
182
  // For worker threads that may not be doing any interesting user work, this
183
  // tracks the current state of the worker. This is used to handle those
184
  // threads differently e.g. when printing stacktraces.
185
  //
186
  // It should only be written to by the thread itself.
187
  //
188
  // Note that this is different from the mutex idle bit - threads running user
189
  // work can be waiting but still be active.
190
  //
191
  // Note: not all parts of the code-base may maintain this field correctly and
192
  // therefore this field should only be used to improve debugging/monitoring.
193
  //
194
  // Put it here to reuse some of the padding space.
195
  enum class WaitState : uint8_t {
196
    kActive = 0,
197
    kWaitingForWork = 1,
198
  };
199
  std::atomic<WaitState> wait_state;
200
  static_assert(std::atomic<WaitState>::is_always_lock_free);
201
202
  // Add a padding such that scheduler_state is on a different cache line than
203
  // waiter state.  We use padding here, so that the size of the structure does
204
  // not substantially grow due to the added padding.
205
  static constexpr size_t kToBePaddedSize =
206
      sizeof(SchedulerState) + sizeof(std::atomic<WaitState>);
207
  static_assert(ABSL_CACHELINE_SIZE >= kToBePaddedSize);
208
  char padding[ABSL_CACHELINE_SIZE - kToBePaddedSize];
209
210
  // Private: Reserved for absl::synchronization_internal::Waiter.
211
  struct WaiterState {
212
    alignas(void*) char data[256];
213
  } waiter_state;
214
215
  // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
216
  std::atomic<int>* blocked_count_ptr;
217
218
  // The following variables are mostly read/written just by the
219
  // thread itself.  The only exception is that these are read by
220
  // a ticker thread as a hint.
221
  std::atomic<int> ticker;      // Tick counter, incremented once per second.
222
  std::atomic<int> wait_start;  // Ticker value when thread started waiting.
223
  std::atomic<bool> is_idle;    // Has thread become idle yet?
224
225
  // For tracking depth of __cxa_guard_acquire.  This used to recognize heap
226
  // allocations for function static objects.
227
  int static_initialization_depth;
228
229
  ThreadIdentity* next;
230
};
231
232
// Returns the ThreadIdentity object representing the calling thread; guaranteed
233
// to be unique for its lifetime.  The returned object will remain valid for the
234
// program's lifetime; although it may be re-assigned to a subsequent thread.
235
// If one does not exist, return nullptr instead.
236
//
237
// Does not malloc(*), and is async-signal safe.
238
// [*] Technically pthread_setspecific() does malloc on first use; however this
239
// is handled internally within tcmalloc's initialization already. Note that
240
// darwin does *not* use tcmalloc, so this can catch you if using MallocHooks
241
// on Apple platforms. Whatever function is calling your MallocHooks will need
242
// to watch for recursion on Apple platforms.
243
//
244
// New ThreadIdentity objects can be constructed and associated with a thread
245
// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
246
ThreadIdentity* CurrentThreadIdentityIfPresent();
247
248
using ThreadIdentityReclaimerFunction = void (*)(void*);
249
250
// Sets the current thread identity to the given value.  'reclaimer' is a
251
// pointer to the global function for cleaning up instances on thread
252
// destruction.
253
void SetCurrentThreadIdentity(ThreadIdentity* identity,
254
                              ThreadIdentityReclaimerFunction reclaimer);
255
256
// Removes the currently associated ThreadIdentity from the running thread.
257
// This must be called from inside the ThreadIdentityReclaimerFunction, and only
258
// from that function.
259
void ClearCurrentThreadIdentity();
260
261
// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode
262
// index>
263
#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
264
#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set
265
#else
266
#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0
267
#endif
268
269
#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS
270
#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set
271
#else
272
#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1
273
#endif
274
275
#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11
276
#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set
277
#else
278
#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2
279
#endif
280
281
#ifdef ABSL_THREAD_IDENTITY_MODE
282
#error ABSL_THREAD_IDENTITY_MODE cannot be directly set
283
#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
284
#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
285
#elif defined(_WIN32) && !defined(__MINGW32__)
286
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
287
#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
288
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
289
#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
290
    (__GOOGLE_GRTE_VERSION__ >= 20140228L)
291
// Support for async-safe TLS was specifically added in GRTEv4.  It's not
292
// present in the upstream eglibc.
293
// Note:  Current default for production systems.
294
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS
295
#else
296
#define ABSL_THREAD_IDENTITY_MODE \
297
  ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
298
#endif
299
300
#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
301
    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
302
303
#if ABSL_PER_THREAD_TLS
304
ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity*
305
    thread_identity_ptr;
306
#elif defined(ABSL_HAVE_THREAD_LOCAL)
307
ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr;
308
#else
309
#error Thread-local storage not detected on this platform
310
#endif
311
312
// thread_local variables cannot be in headers exposed by DLLs or in certain
313
// build configurations on Apple platforms. However, it is important for
314
// performance reasons in general that `CurrentThreadIdentityIfPresent` be
315
// inlined. In the other cases we opt to have the function not be inlined. Note
316
// that `CurrentThreadIdentityIfPresent` is declared above so we can exclude
317
// this entire inline definition.
318
#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \
319
    !defined(ABSL_CONSUME_DLL)
320
#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1
321
#endif
322
323
#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
324
inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
325
  return thread_identity_ptr;
326
}
327
#endif
328
329
#elif ABSL_THREAD_IDENTITY_MODE != \
330
    ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
331
#error Unknown ABSL_THREAD_IDENTITY_MODE
332
#endif
333
334
}  // namespace base_internal
335
ABSL_NAMESPACE_END
336
}  // namespace absl
337
338
#endif  // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_