Coverage Report

Created: 2025-04-27 06:20

/src/LPM/external.protobuf/include/absl/base/internal/spinlock.h
//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

//  Most users requiring mutual exclusion should use Mutex.
//  SpinLock is provided for use in two situations:
//   - for use by Abseil internal code that Mutex itself depends on
//   - for async signal safety (see below)

// SpinLock with a base_internal::SchedulingMode::SCHEDULE_KERNEL_ONLY is async
// signal safe. If a spinlock is used within a signal handler, all code that
// acquires the lock must ensure that the signal cannot arrive while it is
// holding the lock. Typically, this is done by blocking the signal.
//
// Threads waiting on a SpinLock may be woken in an arbitrary order.

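// Example of the async-signal-safe pattern described above (an illustrative
// sketch; SIGPROF, `g_sample_lock`, and `RecordSample` are hypothetical names,
// not part of this header):
//
//   ABSL_CONST_INIT static absl::base_internal::SpinLock g_sample_lock(
//       absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
//
//   void RecordSample() {                          // normal thread context
//     sigset_t block, prev;
//     sigemptyset(&block);
//     sigaddset(&block, SIGPROF);
//     pthread_sigmask(SIG_BLOCK, &block, &prev);   // signal cannot arrive...
//     {
//       absl::base_internal::SpinLockHolder h(&g_sample_lock);
//       // ...while the lock is held, so the handler cannot deadlock on it.
//     }
//     pthread_sigmask(SIG_SETMASK, &prev, nullptr);
//   }
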
#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
#define ABSL_BASE_INTERNAL_SPINLOCK_H_

#include <atomic>
#include <cstdint>

#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/tsan_mutex_interface.h"
#include "absl/base/thread_annotations.h"

namespace tcmalloc {
namespace tcmalloc_internal {

class AllocationGuardSpinLockHolder;

}  // namespace tcmalloc_internal
}  // namespace tcmalloc

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

class ABSL_LOCKABLE SpinLock {
 public:
  SpinLock() : lockword_(kSpinLockCooperative) {
    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
  }

  // Constructors that allow non-cooperative spinlocks to be created for use
  // inside thread schedulers.  Normal clients should not use these.
  explicit SpinLock(base_internal::SchedulingMode mode);

  // Constructor for global SpinLock instances.  See absl/base/const_init.h.
  constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
      : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}

  // For global SpinLock instances prefer trivial destructor when possible.
  // Default but non-trivial destructor in some build configurations causes an
  // extra static initializer.
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
#else
  ~SpinLock() = default;
#endif

  // Acquire this SpinLock.
  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
    if (!TryLockImpl()) {
      SlowLock();
    }
    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
  }

  // Try to acquire this SpinLock without blocking and return true if the
  // acquisition was successful.  If the lock was not acquired, false is
  // returned.  If this SpinLock is free at the time of the call, TryLock
  // will return true with high probability.
  inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
    bool res = TryLockImpl();
    ABSL_TSAN_MUTEX_POST_LOCK(
        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
        0);
    return res;
  }

  // Release this SpinLock, which must be held by the calling thread.
  inline void Unlock() ABSL_UNLOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
                                    std::memory_order_release);

    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
      base_internal::SchedulingGuard::EnableRescheduling(true);
    }
    if ((lock_value & kWaitTimeMask) != 0) {
      // Collect contentionz profile info, and speed the wakeup of any waiter.
      // The wait_cycles value indicates how long this thread spent waiting
      // for the lock.
      SlowUnlock(lock_value);
    }
    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
  }

  // Determine if the lock is held.  When the lock is held by the invoking
  // thread, true will always be returned. Intended to be used as
  // CHECK(lock.IsHeld()).
  inline bool IsHeld() const {
    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
  }

  // Return immediately if this thread holds the SpinLock exclusively.
  // Otherwise, report an error by crashing with a diagnostic.
  inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() {
    if (!IsHeld()) {
      ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
    }
  }

 protected:
  // These should not be exported except for testing.

  // Store number of cycles between wait_start_time and wait_end_time in a
  // lock value.
  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
                                   int64_t wait_end_time);

  // Extract number of wait cycles in a lock value.
  static int64_t DecodeWaitCycles(uint32_t lock_value);

  // Provide access to protected method above.  Use for testing only.
  friend struct SpinLockTest;
  friend class tcmalloc::tcmalloc_internal::AllocationGuardSpinLockHolder;

 private:
  // lockword_ is used to store the following:
  //
  // bit[0] encodes whether a lock is being held.
  // bit[1] encodes whether a lock uses cooperative scheduling.
  // bit[2] encodes whether the current lock holder disabled scheduling when
  //        acquiring the lock. Only set when kSpinLockHeld is also set.
  // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
  //        This is set by the lock holder to indicate how long it waited on
  //        the lock before eventually acquiring it. The number of cycles is
  //        encoded as a 29-bit unsigned int, or in the case that the current
  //        holder did not wait but another waiter is queued, the LSB
  //        (kSpinLockSleeper) is set. The implementation does not explicitly
  //        track the number of queued waiters beyond this. It must always be
  //        assumed that waiters may exist if the current holder was required to
  //        queue.
  //
  // Invariant: if the lock is not held, the value is either 0 or
  // kSpinLockCooperative.
  static constexpr uint32_t kSpinLockHeld = 1;
  static constexpr uint32_t kSpinLockCooperative = 2;
  static constexpr uint32_t kSpinLockDisabledScheduling = 4;
  static constexpr uint32_t kSpinLockSleeper = 8;
  // Includes kSpinLockSleeper.
  static constexpr uint32_t kWaitTimeMask =
      ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
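  // Worked example (illustrative): kWaitTimeMask == ~0b111 == 0xFFFFFFF8, so
  // the wait-cycle payload occupies bits 3..31.  A lockword_ value of
  // (kSpinLockHeld | kSpinLockCooperative | kSpinLockSleeper) == 0xB therefore
  // means: held, cooperative, scheduling not disabled, and at least one waiter
  // had to queue.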

  // Returns true if the provided scheduling mode is cooperative.
  static constexpr bool IsCooperative(
      base_internal::SchedulingMode scheduling_mode) {
    return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
  }

  bool IsCooperative() const {
    return lockword_.load(std::memory_order_relaxed) & kSpinLockCooperative;
  }

  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
  void SlowLock() ABSL_ATTRIBUTE_COLD;
  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
  uint32_t SpinLoop();

  inline bool TryLockImpl() {
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
  }

  std::atomic<uint32_t> lockword_;

  SpinLock(const SpinLock&) = delete;
  SpinLock& operator=(const SpinLock&) = delete;
};
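
// Example of non-blocking acquisition with TryLock (an illustrative sketch;
// `g_stats_lock` and `g_samples` are hypothetical names):
//
//   if (g_stats_lock.TryLock()) {
//     ++g_samples;                 // critical section
//     g_stats_lock.Unlock();       // must be released by the acquiring thread
//   } else {
//     // Lock is contended: skip this sample instead of spinning.
//   }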

// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
class ABSL_SCOPED_LOCKABLE SpinLockHolder {
 public:
  inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
      : lock_(l) {
    l->Lock();
  }
  inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }

  SpinLockHolder(const SpinLockHolder&) = delete;
  SpinLockHolder& operator=(const SpinLockHolder&) = delete;

 private:
  SpinLock* lock_;
};
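
// Example of scoped locking with SpinLockHolder (an illustrative sketch;
// `g_freelist_lock` and `free_list` are hypothetical names):
//
//   void PushToFreeList(void* ptr) {
//     SpinLockHolder h(&g_freelist_lock);  // Lock() in the constructor
//     free_list.push_back(ptr);
//   }                                      // Unlock() when `h` leaves scope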

// Register a hook for profiling support.
//
// The function pointer registered here will be called whenever a spinlock is
// contended.  The callback is given an opaque handle to the contended spinlock
// and the number of wait cycles.  This is thread-safe, but only a single
// profiler can be registered.  It is an error to call this function multiple
// times with different arguments.
void RegisterSpinLockProfiler(void (*fn)(const void* lock,
                                         int64_t wait_cycles));
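
// Example of registering a contention profiler (an illustrative sketch;
// `MySpinLockProfiler` is a hypothetical callback):
//
//   void MySpinLockProfiler(const void* lock, int64_t wait_cycles) {
//     // Record that `lock` was contended for `wait_cycles` cycles.
//   }
//
//   // Register exactly once, early in the program; calling again with a
//   // different callback is an error (see above).
//   absl::base_internal::RegisterSpinLockProfiler(&MySpinLockProfiler);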

//------------------------------------------------------------------------------
// Public interface ends here.
//------------------------------------------------------------------------------

// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
// Otherwise, returns last observed value for lockword_.
inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
                                          uint32_t wait_cycles) {
  if ((lock_value & kSpinLockHeld) != 0) {
    return lock_value;
  }

  uint32_t sched_disabled_bit = 0;
  if ((lock_value & kSpinLockCooperative) == 0) {
    // For non-cooperative locks we must make sure we mark ourselves as
    // non-reschedulable before we attempt to CompareAndSwap.
    if (base_internal::SchedulingGuard::DisableRescheduling()) {
      sched_disabled_bit = kSpinLockDisabledScheduling;
    }
  }

  if (!lockword_.compare_exchange_strong(
          lock_value,
          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
          std::memory_order_acquire, std::memory_order_relaxed)) {
    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
  }

  return lock_value;
}

}  // namespace base_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_