Coverage Report

Created: 2024-09-23 06:29

/src/abseil-cpp/absl/base/internal/spinlock.h
 Line| Count| Source
    1|      | //
    2|      | // Copyright 2017 The Abseil Authors.
    3|      | //
    4|      | // Licensed under the Apache License, Version 2.0 (the "License");
    5|      | // you may not use this file except in compliance with the License.
    6|      | // You may obtain a copy of the License at
    7|      | //
    8|      | //      https://www.apache.org/licenses/LICENSE-2.0
    9|      | //
   10|      | // Unless required by applicable law or agreed to in writing, software
   11|      | // distributed under the License is distributed on an "AS IS" BASIS,
   12|      | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   13|      | // See the License for the specific language governing permissions and
   14|      | // limitations under the License.
   15|      | //
   16|      |
   17|      | //  Most users requiring mutual exclusion should use Mutex.
   18|      | //  SpinLock is provided for use in two situations:
   19|      | //   - for use by Abseil internal code that Mutex itself depends on
   20|      | //   - for async signal safety (see below)
   21|      |
   22|      | // SpinLock with a base_internal::SchedulingMode::SCHEDULE_KERNEL_ONLY is async
   23|      | // signal safe. If a spinlock is used within a signal handler, all code that
   24|      | // acquires the lock must ensure that the signal cannot arrive while they are
   25|      | // holding the lock. Typically, this is done by blocking the signal.
   26|      | //
   27|      | // Threads waiting on a SpinLock may be woken in an arbitrary order.
   28|      |
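
The comment above describes the async-signal-safe pattern in prose only. Below is a minimal sketch, outside the file under report, of what that pattern can look like in client code; it assumes a hypothetical SIGPROF handler that also acquires the lock, and every name other than SpinLock and SCHEDULE_KERNEL_ONLY is illustrative.

#include <pthread.h>
#include <signal.h>

#include "absl/base/internal/spinlock.h"

// A kernel-only (non-cooperative) SpinLock, as required for signal safety.
static absl::base_internal::SpinLock handler_lock(
    absl::base_internal::SCHEDULE_KERNEL_ONLY);

void UpdateSharedStateFromNormalCode() {
  // Block SIGPROF so the handler cannot run on this thread while the lock is
  // held; otherwise the handler could deadlock trying to acquire it.
  sigset_t block, old;
  sigemptyset(&block);
  sigaddset(&block, SIGPROF);
  pthread_sigmask(SIG_BLOCK, &block, &old);
  handler_lock.Lock();
  // ... mutate state shared with the SIGPROF handler ...
  handler_lock.Unlock();
  pthread_sigmask(SIG_SETMASK, &old, nullptr);
}
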
   29|      | #ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
   30|      | #define ABSL_BASE_INTERNAL_SPINLOCK_H_
   31|      |
   32|      | #include <atomic>
   33|      | #include <cstdint>
   34|      |
   35|      | #include "absl/base/attributes.h"
   36|      | #include "absl/base/const_init.h"
   37|      | #include "absl/base/dynamic_annotations.h"
   38|      | #include "absl/base/internal/low_level_scheduling.h"
   39|      | #include "absl/base/internal/raw_logging.h"
   40|      | #include "absl/base/internal/scheduling_mode.h"
   41|      | #include "absl/base/internal/tsan_mutex_interface.h"
   42|      | #include "absl/base/thread_annotations.h"
   43|      |
   44|      | namespace tcmalloc {
   45|      | namespace tcmalloc_internal {
   46|      |
   47|      | class AllocationGuardSpinLockHolder;
   48|      |
   49|      | }  // namespace tcmalloc_internal
   50|      | }  // namespace tcmalloc
   51|      |
   52|      | namespace absl {
   53|      | ABSL_NAMESPACE_BEGIN
   54|      | namespace base_internal {
   55|      |
   56|      | class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
   57|      |  public:
   58|     0|   SpinLock() : lockword_(kSpinLockCooperative) {
   59|     0|     ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
   60|     0|   }
   61|      |
   62|      |   // Constructors that allow non-cooperative spinlocks to be created for use
   63|      |   // inside thread schedulers.  Normal clients should not use these.
   64|      |   explicit SpinLock(base_internal::SchedulingMode mode);
   65|      |
   66|      |   // Constructor for global SpinLock instances.  See absl/base/const_init.h.
   67|      |   constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
   68|     0|       : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}
   69|      |
   70|      |   // For global SpinLock instances prefer trivial destructor when possible.
   71|      |   // Default but non-trivial destructor in some build configurations causes an
   72|      |   // extra static initializer.
   73|      | #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
   74|      |   ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
   75|      | #else
   76|      |   ~SpinLock() = default;
   77|      | #endif
   78|      |
   79|      |   // Acquire this SpinLock.
   80| 7.58M|   inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
   81| 7.58M|     ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
   82| 7.58M|     if (!TryLockImpl()) {
   83|     0|       SlowLock();
   84|     0|     }
   85| 7.58M|     ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
   86| 7.58M|   }
   87|      |
   88|      |   // Try to acquire this SpinLock without blocking and return true if the
   89|      |   // acquisition was successful.  If the lock was not acquired, false is
   90|      |   // returned.  If this SpinLock is free at the time of the call, TryLock
   91|      |   // will return true with high probability.
   92|      |   ABSL_MUST_USE_RESULT inline bool TryLock()
   93|     0|       ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
   94|     0|     ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
   95|     0|     bool res = TryLockImpl();
   96|     0|     ABSL_TSAN_MUTEX_POST_LOCK(
   97|     0|         this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
   98|     0|         0);
   99|     0|     return res;
  100|     0|   }
  101|      |
  102|      |   // Release this SpinLock, which must be held by the calling thread.
  103| 7.58M|   inline void Unlock() ABSL_UNLOCK_FUNCTION() {
  104| 7.58M|     ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
  105| 7.58M|     uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
  106| 7.58M|     lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
  107| 7.58M|                                     std::memory_order_release);
  108|      |
  109| 7.58M|     if ((lock_value & kSpinLockDisabledScheduling) != 0) {
  110|     0|       base_internal::SchedulingGuard::EnableRescheduling(true);
  111|     0|     }
  112| 7.58M|     if ((lock_value & kWaitTimeMask) != 0) {
  113|      |       // Collect contentionz profile info, and speed the wakeup of any waiter.
  114|      |       // The wait_cycles value indicates how long this thread spent waiting
  115|      |       // for the lock.
  116|     0|       SlowUnlock(lock_value);
  117|     0|     }
  118| 7.58M|     ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
  119| 7.58M|   }
  120|      |
  121|      |   // Determine if the lock is held.  When the lock is held by the invoking
  122|      |   // thread, true will always be returned. Intended to be used as
  123|      |   // CHECK(lock.IsHeld()).
  124|     0|   ABSL_MUST_USE_RESULT inline bool IsHeld() const {
  125|     0|     return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
  126|     0|   }
  127|      |
  128|      |   // Return immediately if this thread holds the SpinLock exclusively.
  129|      |   // Otherwise, report an error by crashing with a diagnostic.
  130|     0|   inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() {
  131|     0|     if (!IsHeld()) {
  132|     0|       ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
  133|     0|     }
  134|     0|   }
  135|      |
  136|      |  protected:
  137|      |   // These should not be exported except for testing.
  138|      |
  139|      |   // Store number of cycles between wait_start_time and wait_end_time in a
  140|      |   // lock value.
  141|      |   static uint32_t EncodeWaitCycles(int64_t wait_start_time,
  142|      |                                    int64_t wait_end_time);
  143|      |
  144|      |   // Extract number of wait cycles in a lock value.
  145|      |   static int64_t DecodeWaitCycles(uint32_t lock_value);
  146|      |
  147|      |   // Provide access to protected method above.  Use for testing only.
  148|      |   friend struct SpinLockTest;
  149|      |   friend class tcmalloc::tcmalloc_internal::AllocationGuardSpinLockHolder;
  150|      |
  151|      |  private:
  152|      |   // lockword_ is used to store the following:
  153|      |   //
  154|      |   // bit[0] encodes whether a lock is being held.
  155|      |   // bit[1] encodes whether a lock uses cooperative scheduling.
  156|      |   // bit[2] encodes whether the current lock holder disabled scheduling when
  157|      |   //        acquiring the lock. Only set when kSpinLockHeld is also set.
  158|      |   // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
  159|      |   //        This is set by the lock holder to indicate how long it waited on
  160|      |   //        the lock before eventually acquiring it. The number of cycles is
  161|      |   //        encoded as a 29-bit unsigned int, or in the case that the current
  162|      |   //        holder did not wait but another waiter is queued, the LSB
  163|      |   //        (kSpinLockSleeper) is set. The implementation does not explicitly
  164|      |   //        track the number of queued waiters beyond this. It must always be
  165|      |   //        assumed that waiters may exist if the current holder was required to
  166|      |   //        queue.
  167|      |   //
  168|      |   // Invariant: if the lock is not held, the value is either 0 or
  169|      |   // kSpinLockCooperative.
  170|      |   static constexpr uint32_t kSpinLockHeld = 1;
  171|      |   static constexpr uint32_t kSpinLockCooperative = 2;
  172|      |   static constexpr uint32_t kSpinLockDisabledScheduling = 4;
  173|      |   static constexpr uint32_t kSpinLockSleeper = 8;
  174|      |   // Includes kSpinLockSleeper.
  175|      |   static constexpr uint32_t kWaitTimeMask =
  176|      |       ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
  177|      |
  178|      |   // Returns true if the provided scheduling mode is cooperative.
  179|      |   static constexpr bool IsCooperative(
  180|     8|       base_internal::SchedulingMode scheduling_mode) {
  181|     8|     return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
  182|     8|   }
  183|      |
  184|     0|   bool IsCooperative() const {
  185|     0|     return lockword_.load(std::memory_order_relaxed) & kSpinLockCooperative;
  186|     0|   }
  187|      |
  188|      |   uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
  189|      |   void SlowLock() ABSL_ATTRIBUTE_COLD;
  190|      |   void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
  191|      |   uint32_t SpinLoop();
  192|      |
  193| 7.58M|   inline bool TryLockImpl() {
  194| 7.58M|     uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
  195| 7.58M|     return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
  196| 7.58M|   }
  197|      |
  198|      |   std::atomic<uint32_t> lockword_;
  199|      |
  200|      |   SpinLock(const SpinLock&) = delete;
  201|      |   SpinLock& operator=(const SpinLock&) = delete;
  202|      | };
  203|      |
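
Before the locker class below, here is a hedged sketch of direct use of the interface above: a const-initialized global via the kConstInit constructor documented in the class, plus Lock/TryLock/Unlock. The names g_stats_lock, g_samples, Record, and TryRecord are illustrative, not part of Abseil.

#include <cstdint>

#include "absl/base/internal/spinlock.h"

ABSL_CONST_INIT static absl::base_internal::SpinLock g_stats_lock(
    absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
static int64_t g_samples = 0;  // guarded by g_stats_lock

void Record() {
  g_stats_lock.Lock();
  ++g_samples;
  g_stats_lock.Unlock();
}

bool TryRecord() {
  // TryLock never blocks; its result must be checked (ABSL_MUST_USE_RESULT).
  if (!g_stats_lock.TryLock()) return false;
  ++g_samples;
  g_stats_lock.Unlock();
  return true;
}
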
  204|      | // Corresponding locker object that arranges to acquire a spinlock for
  205|      | // the duration of a C++ scope.
  206|      | //
  207|      | // TODO(b/176172494): Use only [[nodiscard]] when baseline is raised.
  208|      | // TODO(b/6695610): Remove forward declaration when #ifdef is no longer needed.
  209|      | #if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
  210|      | class [[nodiscard]] SpinLockHolder;
  211|      | #else
  212|      | class ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_TRIVIAL_ABI SpinLockHolder;
  213|      | #endif
  214|      |
  215|      | class ABSL_SCOPED_LOCKABLE SpinLockHolder {
  216|      |  public:
  217|      |   inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
  218| 3.79M|       : lock_(l) {
  219| 3.79M|     l->Lock();
  220| 3.79M|   }
  221| 3.79M|   inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }
  222|      |
  223|      |   SpinLockHolder(const SpinLockHolder&) = delete;
  224|      |   SpinLockHolder& operator=(const SpinLockHolder&) = delete;
  225|      |
  226|      |  private:
  227|      |   SpinLock* lock_;
  228|      | };
  229|      |
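
A short usage sketch for the holder: the constructor acquires the lock and the destructor releases it at the end of the scope, so early returns cannot leak the lock. The global and function names are illustrative, not part of Abseil.

#include "absl/base/internal/spinlock.h"

ABSL_CONST_INIT static absl::base_internal::SpinLock g_queue_lock(
    absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
static int g_queue_depth = 0;  // guarded by g_queue_lock

int IncrementDepth() {
  absl::base_internal::SpinLockHolder h(&g_queue_lock);  // acquires here
  return ++g_queue_depth;
}  // h destroyed: g_queue_lock released
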
  230|      | // Register a hook for profiling support.
  231|      | //
  232|      | // The function pointer registered here will be called whenever a spinlock is
  233|      | // contended.  The callback is given an opaque handle to the contended spinlock
  234|      | // and the number of wait cycles.  This is thread-safe, but only a single
  235|      | // profiler can be registered.  It is an error to call this function multiple
  236|      | // times with different arguments.
  237|      | void RegisterSpinLockProfiler(void (*fn)(const void* lock,
  238|      |                                          int64_t wait_cycles));
  239|      |
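
A sketch of wiring a callback into the hook declared above, assuming a simple cycle counter; MyContentionCounter and InstallSpinLockProfiler are illustrative names. The callback matches the declared signature (opaque lock pointer plus wait time in cycles) and is registered once, e.g. during startup.

#include <atomic>
#include <cstdint>

#include "absl/base/internal/spinlock.h"

static std::atomic<int64_t> g_total_wait_cycles{0};

static void MyContentionCounter(const void* /*lock*/, int64_t wait_cycles) {
  g_total_wait_cycles.fetch_add(wait_cycles, std::memory_order_relaxed);
}

void InstallSpinLockProfiler() {
  absl::base_internal::RegisterSpinLockProfiler(&MyContentionCounter);
}
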
  240|      | //------------------------------------------------------------------------------
  241|      | // Public interface ends here.
  242|      | //------------------------------------------------------------------------------
  243|      |
  244|      | // If (result & kSpinLockHeld) == 0, then *this was successfully locked.
  245|      | // Otherwise, returns last observed value for lockword_.
  246|      | inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
  247| 7.58M|                                           uint32_t wait_cycles) {
  248| 7.58M|   if ((lock_value & kSpinLockHeld) != 0) {
  249|     0|     return lock_value;
  250|     0|   }
  251|      |
  252| 7.58M|   uint32_t sched_disabled_bit = 0;
  253| 7.58M|   if ((lock_value & kSpinLockCooperative) == 0) {
  254|      |     // For non-cooperative locks we must make sure we mark ourselves as
  255|      |     // non-reschedulable before we attempt to CompareAndSwap.
  256| 7.58M|     if (base_internal::SchedulingGuard::DisableRescheduling()) {
  257|     0|       sched_disabled_bit = kSpinLockDisabledScheduling;
  258|     0|     }
  259| 7.58M|   }
  260|      |
  261| 7.58M|   if (!lockword_.compare_exchange_strong(
  262| 7.58M|           lock_value,
  263| 7.58M|           kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
  264| 7.58M|           std::memory_order_acquire, std::memory_order_relaxed)) {
  265|     0|     base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
  266|     0|   }
  267|      |
  268| 7.58M|   return lock_value;
  269| 7.58M| }
  270|      |
  271|      | }  // namespace base_internal
  272|      | ABSL_NAMESPACE_END
  273|      | }  // namespace absl
  274|      |
  275|      | #endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_