Coverage Report

Created: 2025-12-31 06:30

/src/abseil-cpp/absl/base/internal/spinlock.h
  Line|  Count|Source
     1|       |//
     2|       |// Copyright 2017 The Abseil Authors.
     3|       |//
     4|       |// Licensed under the Apache License, Version 2.0 (the "License");
     5|       |// you may not use this file except in compliance with the License.
     6|       |// You may obtain a copy of the License at
     7|       |//
     8|       |//      https://www.apache.org/licenses/LICENSE-2.0
     9|       |//
    10|       |// Unless required by applicable law or agreed to in writing, software
    11|       |// distributed under the License is distributed on an "AS IS" BASIS,
    12|       |// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13|       |// See the License for the specific language governing permissions and
    14|       |// limitations under the License.
    15|       |//
    16|       |
    17|       |//  Most users requiring mutual exclusion should use Mutex.
    18|       |//  SpinLock is provided for use in two situations:
    19|       |//   - for use by Abseil internal code that Mutex itself depends on
    20|       |//   - for async signal safety (see below)
    21|       |
    22|       |// SpinLock with a SchedulingMode::SCHEDULE_KERNEL_ONLY is async
    23|       |// signal safe. If a spinlock is used within a signal handler, all code that
    24|       |// acquires the lock must ensure that the signal cannot arrive while the
    25|       |// lock is held. Typically, this is done by blocking the signal.
    26|       |//
    27|       |// Threads waiting on a SpinLock may be woken in an arbitrary order.
    28|       |
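As a reading aid, here is a minimal usage sketch of the two supported situations described above. It is not part of the header; the lock names and TouchSharedState are hypothetical, and in the async-signal-safe case blocking the signal while the lock is held remains the caller's responsibility.

    #include "absl/base/attributes.h"
    #include "absl/base/internal/scheduling_mode.h"
    #include "absl/base/internal/spinlock.h"

    // Hypothetical Abseil-internal code. A default-constructed SpinLock is
    // cooperative; SCHEDULE_KERNEL_ONLY produces the async-signal-safe variant.
    ABSL_CONST_INIT static absl::base_internal::SpinLock cooperative_lock;
    ABSL_CONST_INIT static absl::base_internal::SpinLock signal_safe_lock(
        absl::base_internal::SCHEDULE_KERNEL_ONLY);

    void TouchSharedState() {
      // RAII acquisition via SpinLockHolder (declared later in this header).
      absl::base_internal::SpinLockHolder h(cooperative_lock);
      // ... mutate state guarded by cooperative_lock ...
    }
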
    29|       |#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
    30|       |#define ABSL_BASE_INTERNAL_SPINLOCK_H_
    31|       |
    32|       |#include <atomic>
    33|       |#include <cstdint>
    34|       |#include <mutex>
    35|       |#include <type_traits>
    36|       |
    37|       |#include "absl/base/attributes.h"
    38|       |#include "absl/base/config.h"
    39|       |#include "absl/base/const_init.h"
    40|       |#include "absl/base/internal/low_level_scheduling.h"
    41|       |#include "absl/base/internal/raw_logging.h"
    42|       |#include "absl/base/internal/scheduling_mode.h"
    43|       |#include "absl/base/internal/tsan_mutex_interface.h"
    44|       |#include "absl/base/macros.h"
    45|       |#include "absl/base/thread_annotations.h"
    46|       |
    47|       |namespace tcmalloc {
    48|       |namespace tcmalloc_internal {
    49|       |
    50|       |class AllocationGuardSpinLockHolder;
    51|       |class Static;
    52|       |
    53|       |}  // namespace tcmalloc_internal
    54|       |}  // namespace tcmalloc
    55|       |
    56|       |namespace absl {
    57|       |ABSL_NAMESPACE_BEGIN
    58|       |namespace base_internal {
    59|       |
    60|       |class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
    61|       | public:
    62|      0|  constexpr SpinLock() : lockword_(kSpinLockCooperative) { RegisterWithTsan(); }
    63|       |
    64|       |  // Constructors that allow non-cooperative spinlocks to be created for use
    65|       |  // inside thread schedulers.  Normal clients should not use these.
    66|       |  constexpr explicit SpinLock(SchedulingMode mode)
    67|      5|      : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
    68|      5|    RegisterWithTsan();
    69|      5|  }
    70|       |
    71|       |#if ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(_WIN32)
    72|       |  // Constructor to inline users of the default scheduling mode.
    73|       |  //
    74|       |  // This only needs to exist for inliner runs, but doesn't work correctly in
    75|       |  // clang+windows builds, likely due to mangling differences.
    76|       |  ABSL_DEPRECATE_AND_INLINE()
    77|       |  constexpr explicit SpinLock(SchedulingMode mode)
    78|       |      __attribute__((enable_if(mode == SCHEDULE_COOPERATIVE_AND_KERNEL,
    79|       |                               "Cooperative use default constructor")))
    80|      0|      : SpinLock() {}
    81|       |#endif
    82|       |
    83|       |  // Constructor for global SpinLock instances.  See absl/base/const_init.h.
    84|       |  ABSL_DEPRECATE_AND_INLINE()
    85|       |  constexpr SpinLock(absl::ConstInitType, SchedulingMode mode)
    86|      0|      : SpinLock(mode) {}
    87|       |
    88|       |  // For global SpinLock instances prefer trivial destructor when possible.
    89|       |  // Default but non-trivial destructor in some build configurations causes an
    90|       |  // extra static initializer.
    91|       |#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
    92|       |  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
    93|       |#else
    94|       |  ~SpinLock() = default;
    95|       |#endif
    96|       |
    97|       |  // Acquire this SpinLock.
    98|   733k|  inline void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
    99|   733k|    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
   100|   733k|    if (!TryLockImpl()) {
   101|      0|      SlowLock();
   102|      0|    }
   103|   733k|    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
   104|   733k|  }
   105|       |
   106|       |  ABSL_DEPRECATE_AND_INLINE()
   107|      0|  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { return lock(); }
   108|       |
   109|       |  // Try to acquire this SpinLock without blocking and return true if the
   110|       |  // acquisition was successful.  If the lock was not acquired, false is
   111|       |  // returned.  If this SpinLock is free at the time of the call, try_lock will
   112|       |  // return true with high probability.
   113|      0|  [[nodiscard]] inline bool try_lock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
   114|      0|    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
   115|      0|    bool res = TryLockImpl();
   116|      0|    ABSL_TSAN_MUTEX_POST_LOCK(
   117|      0|        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
   118|      0|        0);
   119|      0|    return res;
   120|      0|  }
   121|       |
   122|       |  ABSL_DEPRECATE_AND_INLINE()
   123|      0|  [[nodiscard]] inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
   124|      0|    return try_lock();
   125|      0|  }
   126|       |
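A hedged sketch of the try_lock contract documented above: the [[nodiscard]] result must be branched on, and a successful acquisition must be paired with unlock(). TryFastPath and the fallback comment are hypothetical.

    #include "absl/base/internal/spinlock.h"

    void TryFastPath(absl::base_internal::SpinLock& lock) {
      if (lock.try_lock()) {
        // Acquired without spinning; keep the critical section short.
        // ... touch guarded state ...
        lock.unlock();
      } else {
        // Contended: fall back to deferred work rather than blocking here.
      }
    }
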
   127|       |  // Release this SpinLock, which must be held by the calling thread.
   128|   733k|  inline void unlock() ABSL_UNLOCK_FUNCTION() {
   129|   733k|    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
   130|   733k|    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
   131|   733k|    lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
   132|   733k|                                    std::memory_order_release);
   133|       |
   134|   733k|    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
   135|      0|      SchedulingGuard::EnableRescheduling(true);
   136|      0|    }
   137|   733k|    if ((lock_value & kWaitTimeMask) != 0) {
   138|       |      // Collect contentionz profile info, and speed the wakeup of any waiter.
   139|       |      // The wait_cycles value indicates how long this thread spent waiting
   140|       |      // for the lock.
   141|      0|      SlowUnlock(lock_value);
   142|      0|    }
   143|   733k|    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
   144|   733k|  }
   145|       |
   146|       |  ABSL_DEPRECATE_AND_INLINE()
   147|      0|  inline void Unlock() ABSL_UNLOCK_FUNCTION() { unlock(); }
   148|       |
   149|       |  // Determine if the lock is held.  When the lock is held by the invoking
   150|       |  // thread, true will always be returned. Intended to be used as
   151|       |  // CHECK(lock.IsHeld()).
   152|      0|  [[nodiscard]] inline bool IsHeld() const {
   153|      0|    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
   154|      0|  }
   155|       |
   156|       |  // Return immediately if this thread holds the SpinLock exclusively.
   157|       |  // Otherwise, report an error by crashing with a diagnostic.
   158|      0|  inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() {
   159|      0|    if (!IsHeld()) {
   160|      0|      ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
   161|      0|    }
   162|      0|  }
   163|       |
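Illustration of the intended idiom from the comments above, as a sketch; MutateGuardedState is hypothetical. AssertHeld both crashes with a diagnostic when the precondition fails and, via ABSL_ASSERT_EXCLUSIVE_LOCK, informs static thread-safety analysis.

    #include "absl/base/internal/spinlock.h"

    // Precondition: the caller already holds `lock`.
    void MutateGuardedState(absl::base_internal::SpinLock& lock) {
      lock.AssertHeld();  // fatal diagnostic if the lock is not held
      // ... modify state guarded by `lock` ...
    }
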
   164|       | protected:
   165|       |  // These should not be exported except for testing.
   166|       |
   167|       |  // Store number of cycles between wait_start_time and wait_end_time in a
   168|       |  // lock value.
   169|       |  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
   170|       |                                   int64_t wait_end_time);
   171|       |
   172|       |  // Extract number of wait cycles in a lock value.
   173|       |  static int64_t DecodeWaitCycles(uint32_t lock_value);
   174|       |
   175|       |  // Provide access to protected method above.  Use for testing only.
   176|       |  friend struct SpinLockTest;
   177|       |  friend class tcmalloc::tcmalloc_internal::AllocationGuardSpinLockHolder;
   178|       |  friend class tcmalloc::tcmalloc_internal::Static;
   179|       |
   180|      0|  static int GetAdaptiveSpinCount() {
   181|      0|    return adaptive_spin_count_.load(std::memory_order_relaxed);
   182|      0|  }
   183|      0|  static void SetAdaptiveSpinCount(int count) {
   184|      0|    adaptive_spin_count_.store(count, std::memory_order_relaxed);
   185|      0|  }
   186|       |
   187|       |  static std::atomic<int> adaptive_spin_count_;
   188|       |
   189|       | private:
   190|       |  // lockword_ is used to store the following:
   191|       |  //
   192|       |  // bit[0] encodes whether a lock is being held.
   193|       |  // bit[1] encodes whether a lock uses cooperative scheduling.
   194|       |  // bit[2] encodes whether the current lock holder disabled scheduling when
   195|       |  //        acquiring the lock. Only set when kSpinLockHeld is also set.
   196|       |  // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
   197|       |  //        This is set by the lock holder to indicate how long it waited on
   198|       |  //        the lock before eventually acquiring it. The number of cycles is
   199|       |  //        encoded as a 29-bit unsigned int, or in the case that the current
   200|       |  //        holder did not wait but another waiter is queued, the LSB
   201|       |  //        (kSpinLockSleeper) is set. The implementation does not explicitly
   202|       |  //        track the number of queued waiters beyond this. It must always be
   203|       |  //        assumed that waiters may exist if the current holder was required to
   204|       |  //        queue.
   205|       |  //
   206|       |  // Invariant: if the lock is not held, the value is either 0 or
   207|       |  // kSpinLockCooperative.
   208|       |  static constexpr uint32_t kSpinLockHeld = 1;
   209|       |  static constexpr uint32_t kSpinLockCooperative = 2;
   210|       |  static constexpr uint32_t kSpinLockDisabledScheduling = 4;
   211|       |  static constexpr uint32_t kSpinLockSleeper = 8;
   212|       |  // Includes kSpinLockSleeper.
   213|       |  static constexpr uint32_t kWaitTimeMask =
   214|       |      ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
   215|       |
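To make the bit layout concrete, a small illustration of how a lockword decomposes under these masks. The constants below merely mirror the private ones above so the snippet is self-contained; the value chosen is hypothetical.

    #include <cstdint>

    constexpr uint32_t kHeld = 1, kCoop = 2, kDisabledSched = 4;
    constexpr uint32_t kWaitMask = ~(kHeld | kCoop | kDisabledSched);

    // A held, non-cooperative lock whose holder disabled rescheduling and
    // recorded 0x123 wait-cycle units in bits [3:31].
    constexpr uint32_t word = kHeld | kDisabledSched | (0x123u << 3);
    static_assert((word & kHeld) != 0);                  // bit 0: held
    static_assert((word & kCoop) == 0);                  // bit 1: not cooperative
    static_assert((word & kDisabledSched) != 0);         // bit 2: sched disabled
    static_assert((word & kWaitMask) == (0x123u << 3));  // bits 3..31: wait time
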
   216|       |  // Returns true if the provided scheduling mode is cooperative.
   217|      5|  static constexpr bool IsCooperative(SchedulingMode scheduling_mode) {
   218|      5|    return scheduling_mode == SCHEDULE_COOPERATIVE_AND_KERNEL;
   219|      5|  }
   220|       |
   221|      5|  constexpr void RegisterWithTsan() {
   222|      5|#if ABSL_HAVE_BUILTIN(__builtin_is_constant_evaluated)
   223|      5|    if (!__builtin_is_constant_evaluated()) {
   224|      5|      ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
   225|      5|    }
   226|      5|#endif
   227|      5|  }
   228|       |
   229|      0|  bool IsCooperative() const {
   230|      0|    return lockword_.load(std::memory_order_relaxed) & kSpinLockCooperative;
   231|      0|  }
   232|       |
   233|       |  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
   234|       |  void SlowLock() ABSL_ATTRIBUTE_COLD;
   235|       |  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
   236|       |  uint32_t SpinLoop();
   237|       |
   238|   733k|  inline bool TryLockImpl() {
   239|   733k|    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
   240|   733k|    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
   241|   733k|  }
   242|       |
   243|       |  std::atomic<uint32_t> lockword_;
   244|       |
   245|       |  SpinLock(const SpinLock&) = delete;
   246|       |  SpinLock& operator=(const SpinLock&) = delete;
   247|       |};
   248|       |
   249|       |// Corresponding locker object that arranges to acquire a spinlock for
   250|       |// the duration of a C++ scope.
   251|       |class ABSL_SCOPED_LOCKABLE [[nodiscard]] SpinLockHolder
   252|       |    : public std::lock_guard<SpinLock> {
   253|       | public:
   254|       |  inline explicit SpinLockHolder(
   255|       |      SpinLock& l ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
   256|       |      ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
   257|   731k|      : std::lock_guard<SpinLock>(l) {}
   258|       |  ABSL_DEPRECATE_AND_INLINE()
   259|       |  inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
   260|      0|      : SpinLockHolder(*l) {}
   261|       |
   262|       |  inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() = default;
   263|       |};
   264|       |
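Typical RAII use of SpinLockHolder, sketched under the assumption of Abseil-internal calling code; Increment and counter are hypothetical. A named holder is needed so the guard lives until the end of the scope.

    #include <cstdint>
    #include "absl/base/internal/spinlock.h"

    int64_t counter = 0;  // hypothetical state guarded by `lock`

    void Increment(absl::base_internal::SpinLock& lock) {
      absl::base_internal::SpinLockHolder holder(lock);  // acquires the lock
      ++counter;
    }  // `holder` releases the lock as the scope unwinds
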
   265|       |// Register a hook for profiling support.
   266|       |//
   267|       |// The function pointer registered here will be called whenever a spinlock is
   268|       |// contended.  The callback is given an opaque handle to the contended spinlock
   269|       |// and the number of wait cycles.  This is thread-safe, but only a single
   270|       |// profiler can be registered.  It is an error to call this function multiple
   271|       |// times with different arguments.
   272|       |void RegisterSpinLockProfiler(void (*fn)(const void* lock,
   273|       |                                         int64_t wait_cycles));
   274|       |
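A hedged sketch of registering the contention hook; the callback name and the aggregate counter are hypothetical. Since the hook fires on contended spinlocks, it should stay lock-free and minimal.

    #include <atomic>
    #include <cstdint>

    std::atomic<int64_t> g_total_wait_cycles{0};  // hypothetical aggregate

    void SpinLockContention(const void* lock, int64_t wait_cycles) {
      // Lock-free accounting only: the callback may run in sensitive contexts.
      g_total_wait_cycles.fetch_add(wait_cycles, std::memory_order_relaxed);
    }

    // Registered once at startup, per the single-profiler rule above:
    //   absl::base_internal::RegisterSpinLockProfiler(&SpinLockContention);
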
   275|       |//------------------------------------------------------------------------------
   276|       |// Public interface ends here.
   277|       |//------------------------------------------------------------------------------
   278|       |
   279|       |// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
   280|       |// Otherwise, returns last observed value for lockword_.
   281|       |inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
   282|   733k|                                          uint32_t wait_cycles) {
   283|   733k|  if ((lock_value & kSpinLockHeld) != 0) {
   284|      0|    return lock_value;
   285|      0|  }
   286|       |
   287|   733k|  uint32_t sched_disabled_bit = 0;
   288|   733k|  if ((lock_value & kSpinLockCooperative) == 0) {
   289|       |    // For non-cooperative locks we must make sure we mark ourselves as
   290|       |    // non-reschedulable before we attempt to CompareAndSwap.
   291|   733k|    if (SchedulingGuard::DisableRescheduling()) {
   292|      0|      sched_disabled_bit = kSpinLockDisabledScheduling;
   293|      0|    }
   294|   733k|  }
   295|       |
   296|   733k|  if (!lockword_.compare_exchange_strong(
   297|   733k|          lock_value,
   298|   733k|          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
   299|   733k|          std::memory_order_acquire, std::memory_order_relaxed)) {
   300|      0|    SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
   301|      0|  }
   302|       |
   303|   733k|  return lock_value;
   304|   733k|}
   305|       |
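A generic illustration (not Abseil's actual SlowLock, which is out of line and not shown in this header) of the idiom TryLockInternal relies on: compare_exchange_strong rewrites its first argument with the freshly observed word on failure, so the returned value doubles as the starting state for the next attempt.

    #include <atomic>
    #include <cstdint>

    std::atomic<uint32_t> word{0};
    constexpr uint32_t kHeldBit = 1;

    // Mirrors the TryLockInternal contract: a result with kHeldBit clear means
    // the lock was acquired; otherwise the result is the last observed word.
    uint32_t TryAcquire(uint32_t observed) {
      if ((observed & kHeldBit) == 0) {
        word.compare_exchange_strong(observed, observed | kHeldBit,
                                     std::memory_order_acquire,
                                     std::memory_order_relaxed);
      }
      return observed;  // updated in place if the CAS failed
    }

    void AcquireSlow() {
      uint32_t observed = word.load(std::memory_order_relaxed);
      while ((TryAcquire(observed) & kHeldBit) != 0) {
        // Held or lost a race: spin/back off, then re-observe and retry.
        observed = word.load(std::memory_order_relaxed);
      }
    }
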
   306|       |}  // namespace base_internal
   307|       |ABSL_NAMESPACE_END
   308|       |}  // namespace absl
   309|       |
   310|       |#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_