Coverage Report

Created: 2026-04-12 06:05

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/abseil-cpp/absl/base/internal/thread_identity.h
Line
Count
Source
1
// Copyright 2017 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
//
15
// Each active thread has a ThreadIdentity that may represent the thread in
16
// various level interfaces.  ThreadIdentity objects are never deallocated.
17
// When a thread terminates, its ThreadIdentity object may be reused for a
18
// thread created later.
19
20
#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
21
#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
22
23
#ifndef _WIN32
24
#include <pthread.h>
25
// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
26
// supported.
27
#include <unistd.h>
28
#endif
29
30
#include <atomic>
31
#include <cstdint>
32
33
#include "absl/base/config.h"
34
#include "absl/base/internal/per_thread_tls.h"
35
#include "absl/base/optimization.h"
36
37
namespace absl {
38
ABSL_NAMESPACE_BEGIN
39
40
struct SynchLocksHeld;
41
struct SynchWaitParams;
42
43
namespace base_internal {
44
45
class SpinLock;
46
struct ThreadIdentity;
47
48
// Used by the implementation of absl::Mutex and absl::CondVar.
49
struct PerThreadSynch {
50
  // The internal representation of absl::Mutex and absl::CondVar rely
51
  // on the alignment of PerThreadSynch. Both store the address of the
52
  // PerThreadSynch in the high-order bits of their internal state,
53
  // which means the low kLowZeroBits of the address of PerThreadSynch
54
  // must be zero.
55
  static constexpr int kLowZeroBits = 8;
56
  static constexpr int kAlignment = 1 << kLowZeroBits;
57
58
  // Returns the associated ThreadIdentity.
59
  // This can be implemented as a cast because we guarantee
60
  // PerThreadSynch is the first element of ThreadIdentity.
61
0
  ThreadIdentity* thread_identity() {
62
0
    return reinterpret_cast<ThreadIdentity*>(this);
63
0
  }
64
65
  PerThreadSynch* next;  // Circular waiter queue; initialized to 0.
66
  PerThreadSynch* skip;  // If non-zero, all entries in Mutex queue
67
                         // up to and including "skip" have same
68
                         // condition as this, and will be woken later
69
  bool may_skip;         // if false while on mutex queue, a mutex unlocker
70
                         // is using this PerThreadSynch as a terminator.  Its
71
                         // skip field must not be filled in because the loop
72
                         // might then skip over the terminator.
73
  bool wake;             // This thread is to be woken from a Mutex.
74
  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
75
  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
76
  //
77
  // The value of "x->cond_waiter" is meaningless if "x" is not on a
78
  // Mutex waiter list.
79
  bool cond_waiter;
80
  bool maybe_unlocking;  // Valid at head of Mutex waiter queue;
81
                         // true if UnlockSlow could be searching
82
                         // for a waiter to wake.  Used for an optimization
83
                         // in Enqueue().  true is always a valid value.
84
                         // Can be reset to false when the unlocker or any
85
                         // writer releases the lock, or a reader fully
86
                         // releases the lock.  It may not be set to false
87
                         // by a reader that decrements the count to
88
                         // non-zero. protected by mutex spinlock
89
  bool suppress_fatal_errors;  // If true, try to proceed even in the face
90
                               // of broken invariants.  This is used within
91
                               // fatal signal handlers to improve the
92
                               // chances of debug logging information being
93
                               // output successfully.
94
  int priority;                // Priority of thread (updated every so often).
95
96
  // State values:
97
  //   kAvailable: This PerThreadSynch is available.
98
  //   kQueued: This PerThreadSynch is unavailable, it's currently queued on a
99
//            Mutex or CondVar waitlist.
100
  //
101
  // Transitions from kQueued to kAvailable require a release
102
  // barrier. This is needed as a waiter may use "state" to
103
  // independently observe that it's no longer queued.
104
  //
105
  // Transitions from kAvailable to kQueued require no barrier, they
106
  // are externally ordered by the Mutex.
107
  enum State { kAvailable, kQueued };
108
  std::atomic<State> state;
109
110
  // The wait parameters of the current wait.  waitp is null if the
111
  // thread is not waiting. Transitions from null to non-null must
112
  // occur before the enqueue commit point (state = kQueued in
113
  // Enqueue() and CondVarEnqueue()). Transitions from non-null to
114
  // null must occur after the wait is finished (state = kAvailable in
115
  // Mutex::Block() and CondVar::WaitCommon()). This field may be
116
  // changed only by the thread that describes this PerThreadSynch.  A
117
  // special case is Fer(), which calls Enqueue() on another thread,
118
  // but with an identical SynchWaitParams pointer, thus leaving the
119
  // pointer unchanged.
120
  SynchWaitParams* waitp;
121
122
  intptr_t readers;  // Number of readers in mutex.
123
124
  // When priority will next be read (cycles).
125
  int64_t next_priority_read_cycles;
126
127
  // Locks held; used during deadlock detection.
128
  // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
129
  SynchLocksHeld* all_locks;
130
};
131
132
// The instances of this class are allocated in NewThreadIdentity() with an
133
// alignment of PerThreadSynch::kAlignment and never destroyed. Initialization
134
// should happen in OneTimeInitThreadIdentity().
135
//
136
// Instances may be reused by new threads - fields should be reset in
137
// ResetThreadIdentityBetweenReuse().
138
//
139
// NOTE: The layout of fields in this structure is critical, please do not
140
//       add, remove, or modify the field placements without fully auditing the
141
//       layout.
142
struct ThreadIdentity {
143
  // Must be the first member.  The Mutex implementation requires that
144
  // the PerThreadSynch object associated with each thread is
145
  // PerThreadSynch::kAlignment aligned.  We provide this alignment on
146
  // ThreadIdentity itself.
147
  PerThreadSynch per_thread_synch;
148
149
  struct SchedulerState {
150
    std::atomic<void*> bound_schedulable{nullptr};
151
    // Storage space for a SpinLock, which is created through a placement new to
152
    // break a dependency cycle.
153
    uint32_t association_lock_word;
154
    std::atomic<int> scheduling_disabled_depth;
155
    int potentially_blocking_depth;
156
    uint32_t schedule_next_state;
157
158
    // When true, current thread is unlocking a mutex and actively waking a
159
    // thread that was previously waiting, but that lock has yet more waiters.
160
    // Used to signal to schedulers that work being woken should get an
161
    // elevated priority.
162
    bool waking_designated_waker;
163
164
0
    inline SpinLock* association_lock() {
165
0
      return reinterpret_cast<SpinLock*>(&association_lock_word);
166
0
    }
167
  } scheduler_state;  // Private: Reserved for use in Gloop
168
169
  // For worker threads that may not be doing any interesting user work, this
170
  // tracks the current state of the worker. This is used to handle those
171
  // threads differently e.g. when printing stacktraces.
172
  //
173
  // It should only be written to by the thread itself.
174
  //
175
  // Note that this is different from the mutex idle bit - threads running user
176
  // work can be waiting but still be active.
177
  //
178
  // Note: not all parts of the code-base may maintain this field correctly and
179
  // therefore this field should only be used to improve debugging/monitoring.
180
  //
181
  // Put it here to reuse some of the padding space.
182
  enum class WaitState : uint8_t {
183
    kActive = 0,
184
    kWaitingForWork = 1,
185
  };
186
  std::atomic<WaitState> wait_state;
187
  static_assert(std::atomic<WaitState>::is_always_lock_free);
188
189
  // Add a padding such that scheduler_state is on a different cache line than
190
  // waiter state.  We use padding here, so that the size of the structure does
191
  // not substantially grow due to the added padding.
192
  static constexpr size_t kToBePaddedSize =
193
      sizeof(SchedulerState) + sizeof(std::atomic<WaitState>);
194
  static_assert(ABSL_CACHELINE_SIZE >= kToBePaddedSize);
195
  char padding[ABSL_CACHELINE_SIZE - kToBePaddedSize];
196
197
  // Private: Reserved for absl::synchronization_internal::Waiter.
198
  struct WaiterState {
199
    alignas(void*) char data[256];
200
  } waiter_state;
201
202
  // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
203
  std::atomic<int>* blocked_count_ptr;
204
205
  // The following variables are mostly read/written just by the
206
  // thread itself.  The only exception is that these are read by
207
  // a ticker thread as a hint.
208
  std::atomic<int> ticker;      // Tick counter, incremented once per second.
209
  std::atomic<int> wait_start;  // Ticker value when thread started waiting.
210
  std::atomic<bool> is_idle;    // Has thread become idle yet?
211
212
  // For tracking depth of __cxa_guard_acquire.  This used to recognize heap
213
  // allocations for function static objects.
214
  int static_initialization_depth;
215
216
  ThreadIdentity* next;
217
};
218
219
// Returns the ThreadIdentity object representing the calling thread; guaranteed
220
// to be unique for its lifetime.  The returned object will remain valid for the
221
// program's lifetime; although it may be re-assigned to a subsequent thread.
222
// If one does not exist, return nullptr instead.
223
//
224
// Does not malloc(*), and is async-signal safe.
225
// [*] Technically pthread_setspecific() does malloc on first use; however this
226
// is handled internally within tcmalloc's initialization already. Note that
227
// darwin does *not* use tcmalloc, so this can catch you if using MallocHooks
228
// on Apple platforms. Whatever function is calling your MallocHooks will need
229
// to watch for recursion on Apple platforms.
230
//
231
// New ThreadIdentity objects can be constructed and associated with a thread
232
// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
233
ThreadIdentity* CurrentThreadIdentityIfPresent();
234
235
using ThreadIdentityReclaimerFunction = void (*)(void*);
236
237
// Sets the current thread identity to the given value.  'reclaimer' is a
238
// pointer to the global function for cleaning up instances on thread
239
// destruction.
240
void SetCurrentThreadIdentity(ThreadIdentity* identity,
241
                              ThreadIdentityReclaimerFunction reclaimer);
242
243
// Removes the currently associated ThreadIdentity from the running thread.
244
// This must be called from inside the ThreadIdentityReclaimerFunction, and only
245
// from that function.
246
void ClearCurrentThreadIdentity();
247
248
// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode
249
// index>
250
#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
251
#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set
252
#else
253
#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0
254
#endif
255
256
#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS
257
#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set
258
#else
259
#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1
260
#endif
261
262
#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11
263
#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set
264
#else
265
#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2
266
#endif
267
268
#ifdef ABSL_THREAD_IDENTITY_MODE
269
#error ABSL_THREAD_IDENTITY_MODE cannot be directly set
270
#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
271
#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
272
#elif defined(_WIN32) && !defined(__MINGW32__)
273
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
274
#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
275
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
276
#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
277
    (__GOOGLE_GRTE_VERSION__ >= 20140228L)
278
// Support for async-safe TLS was specifically added in GRTEv4.  It's not
279
// present in the upstream eglibc.
280
// Note:  Current default for production systems.
281
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS
282
#else
283
#define ABSL_THREAD_IDENTITY_MODE \
284
  ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
285
#endif
286
287
#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
288
    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
289
290
#if ABSL_PER_THREAD_TLS
291
ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity*
292
    thread_identity_ptr;
293
#elif defined(ABSL_HAVE_THREAD_LOCAL)
294
ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr;
295
#else
296
#error Thread-local storage not detected on this platform
297
#endif
298
299
// thread_local variables cannot be in headers exposed by DLLs or in certain
300
// build configurations on Apple platforms. However, it is important for
301
// performance reasons in general that `CurrentThreadIdentityIfPresent` be
302
// inlined. In the other cases we opt to have the function not be inlined. Note
303
// that `CurrentThreadIdentityIfPresent` is declared above so we can exclude
304
// this entire inline definition.
305
#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \
306
    !defined(ABSL_CONSUME_DLL)
307
#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1
308
#endif
309
310
#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
311
inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
312
  return thread_identity_ptr;
313
}
314
#endif
315
316
#elif ABSL_THREAD_IDENTITY_MODE != \
317
    ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
318
#error Unknown ABSL_THREAD_IDENTITY_MODE
319
#endif
320
321
}  // namespace base_internal
322
ABSL_NAMESPACE_END
323
}  // namespace absl
324
325
#endif  // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_