Coverage Report

Created: 2023-06-07 07:09

/src/LPM/external.protobuf/include/absl/base/internal/spinlock.h
Every instrumented line in this file was reported with an execution count of 0 (no line was covered).
//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

//  Most users requiring mutual exclusion should use Mutex.
//  SpinLock is provided for use in two situations:
//   - for use by Abseil internal code that Mutex itself depends on
//   - for async signal safety (see below)

// SpinLock is async signal safe.  If a spinlock is used within a signal
// handler, all code that acquires the lock must ensure that the signal cannot
// arrive while the lock is held.  Typically, this is done by blocking the
// signal.
//
// Threads waiting on a SpinLock may be woken in an arbitrary order.
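//
// Illustrative sketch (not part of the original header): how a client that
// shares a SpinLock with a signal handler might block the signal before
// acquiring the lock.  The signal number (SIGPROF) and the function name are
// assumptions for illustration only.
//
//   #include <signal.h>
//
//   void UpdateSharedState(absl::base_internal::SpinLock* lock) {
//     sigset_t mask, old_mask;
//     sigemptyset(&mask);
//     sigaddset(&mask, SIGPROF);  // the signal also used by the handler
//     pthread_sigmask(SIG_BLOCK, &mask, &old_mask);
//     lock->Lock();
//     // ... touch state shared with the signal handler ...
//     lock->Unlock();
//     pthread_sigmask(SIG_SETMASK, &old_mask, nullptr);
//   }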

#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
#define ABSL_BASE_INTERNAL_SPINLOCK_H_

#include <atomic>
#include <cstdint>

#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/tsan_mutex_interface.h"
#include "absl/base/thread_annotations.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

class ABSL_LOCKABLE SpinLock {
 public:
  SpinLock() : lockword_(kSpinLockCooperative) {
    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
  }

  // Constructors that allow non-cooperative spinlocks to be created for use
  // inside thread schedulers.  Normal clients should not use these.
  explicit SpinLock(base_internal::SchedulingMode mode);

  // Constructor for global SpinLock instances.  See absl/base/const_init.h.
  constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
      : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}

  // For global SpinLock instances prefer trivial destructor when possible.
  // Default but non-trivial destructor in some build configurations causes an
  // extra static initializer.
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
#else
  ~SpinLock() = default;
#endif

  // Acquire this SpinLock.
  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
    if (!TryLockImpl()) {
      SlowLock();
    }
    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
  }

  // Try to acquire this SpinLock without blocking and return true if the
  // acquisition was successful.  If the lock was not acquired, false is
  // returned.  If this SpinLock is free at the time of the call, TryLock
  // will return true with high probability.
  inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
    bool res = TryLockImpl();
    ABSL_TSAN_MUTEX_POST_LOCK(
        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
        0);
    return res;
  }
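  //
  // Illustrative sketch (an assumption, not part of the original header):
  // typical non-blocking use of TryLock, where the caller falls back to other
  // work instead of spinning.  The function name is hypothetical.
  //
  //   bool TryRecordSample(absl::base_internal::SpinLock* lock, int sample) {
  //     if (!lock->TryLock()) return false;  // contended; caller retries later
  //     // ... update state guarded by *lock using `sample` ...
  //     lock->Unlock();
  //     return true;
  //   }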

  // Release this SpinLock, which must be held by the calling thread.
  inline void Unlock() ABSL_UNLOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
                                    std::memory_order_release);

    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
      base_internal::SchedulingGuard::EnableRescheduling(true);
    }
    if ((lock_value & kWaitTimeMask) != 0) {
      // Collect contentionz profile info, and speed the wakeup of any waiter.
      // The wait_cycles value indicates how long this thread spent waiting
      // for the lock.
      SlowUnlock(lock_value);
    }
    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
  }

  // Determine if the lock is held.  When the lock is held by the invoking
  // thread, true will always be returned.  Intended to be used as
  // CHECK(lock.IsHeld()).
  inline bool IsHeld() const {
    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
  }

  // Return immediately if this thread holds the SpinLock exclusively.
  // Otherwise, report an error by crashing with a diagnostic.
  inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() {
    if (!IsHeld()) {
      ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
    }
  }

 protected:
  // These should not be exported except for testing.

  // Store number of cycles between wait_start_time and wait_end_time in a
  // lock value.
  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
                                   int64_t wait_end_time);

  // Extract number of wait cycles in a lock value.
  static int64_t DecodeWaitCycles(uint32_t lock_value);

  // Provide access to the protected methods above.  Use for testing only.
  friend struct SpinLockTest;

 private:
  // lockword_ is used to store the following:
  //
  // bit[0] encodes whether a lock is being held.
  // bit[1] encodes whether a lock uses cooperative scheduling.
  // bit[2] encodes whether the current lock holder disabled scheduling when
  //        acquiring the lock. Only set when kSpinLockHeld is also set.
  // bit[3:31] encodes the time a lock spent waiting as a 29-bit unsigned int.
  //        This is set by the lock holder to indicate how long it waited on
  //        the lock before eventually acquiring it. The number of cycles is
  //        encoded as a 29-bit unsigned int, or in the case that the current
  //        holder did not wait but another waiter is queued, the LSB
  //        (kSpinLockSleeper) is set. The implementation does not explicitly
  //        track the number of queued waiters beyond this. It must always be
  //        assumed that waiters may exist if the current holder was required to
  //        queue.
  //
  // Invariant: if the lock is not held, the value is either 0 or
  // kSpinLockCooperative.
  static constexpr uint32_t kSpinLockHeld = 1;
  static constexpr uint32_t kSpinLockCooperative = 2;
  static constexpr uint32_t kSpinLockDisabledScheduling = 4;
  static constexpr uint32_t kSpinLockSleeper = 8;
  // Includes kSpinLockSleeper.
  static constexpr uint32_t kWaitTimeMask =
      ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
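  //
  // Worked example of the bit layout (added for illustration; the value 0x40
  // is an arbitrary assumption): a word combining kSpinLockHeld,
  // kSpinLockDisabledScheduling, and some wait cycles decodes as follows.
  //
  //   uint32_t word = kSpinLockHeld | kSpinLockDisabledScheduling | 0x40u;
  //   (word & kSpinLockHeld) != 0;         // true: the lock is held
  //   (word & kSpinLockCooperative) != 0;  // false: non-cooperative lock
  //   (word & kWaitTimeMask) == 0x40u;     // true: encoded wait cycles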

  // Returns true if the provided scheduling mode is cooperative.
  static constexpr bool IsCooperative(
      base_internal::SchedulingMode scheduling_mode) {
    return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
  }

  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
  void SlowLock() ABSL_ATTRIBUTE_COLD;
  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
  uint32_t SpinLoop();

  inline bool TryLockImpl() {
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
  }

  std::atomic<uint32_t> lockword_;

  SpinLock(const SpinLock&) = delete;
  SpinLock& operator=(const SpinLock&) = delete;
};

// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
class ABSL_SCOPED_LOCKABLE SpinLockHolder {
 public:
  inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
      : lock_(l) {
    l->Lock();
  }
  inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }

  SpinLockHolder(const SpinLockHolder&) = delete;
  SpinLockHolder& operator=(const SpinLockHolder&) = delete;

 private:
  SpinLock* lock_;
};
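//
// Illustrative sketch (not from the original header): RAII use of
// SpinLockHolder; the lock is released automatically when the holder goes out
// of scope, including on early return.  The global names are hypothetical.
//
//   absl::base_internal::SpinLock g_counter_lock(
//       absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
//   int g_counter = 0;
//
//   void Increment() {
//     absl::base_internal::SpinLockHolder holder(&g_counter_lock);
//     ++g_counter;
//   }  // g_counter_lock released here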

// Register a hook for profiling support.
//
// The function pointer registered here will be called whenever a spinlock is
// contended.  The callback is given an opaque handle to the contended spinlock
// and the number of wait cycles.  This is thread-safe, but only a single
// profiler can be registered.  It is an error to call this function multiple
// times with different arguments.
void RegisterSpinLockProfiler(void (*fn)(const void* lock,
                                         int64_t wait_cycles));
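//
// Illustrative sketch (an assumption, not part of the original header): a
// minimal contention-profiler hook.  The callback runs on the contended path,
// so it must not acquire the spinlock it is reporting on; the function name
// and recording strategy are hypothetical.
//
//   void MyContentionProfiler(const void* lock, int64_t wait_cycles) {
//     // Record (lock, wait_cycles) in a lock-free structure or counter.
//   }
//
//   // Called once at startup:
//   //   absl::base_internal::RegisterSpinLockProfiler(&MyContentionProfiler);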

//------------------------------------------------------------------------------
// Public interface ends here.
//------------------------------------------------------------------------------

// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
// Otherwise, returns last observed value for lockword_.
inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
                                          uint32_t wait_cycles) {
  if ((lock_value & kSpinLockHeld) != 0) {
    return lock_value;
  }

  uint32_t sched_disabled_bit = 0;
  if ((lock_value & kSpinLockCooperative) == 0) {
    // For non-cooperative locks we must make sure we mark ourselves as
    // non-reschedulable before we attempt to CompareAndSwap.
    if (base_internal::SchedulingGuard::DisableRescheduling()) {
      sched_disabled_bit = kSpinLockDisabledScheduling;
    }
  }

  if (!lockword_.compare_exchange_strong(
          lock_value,
          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
          std::memory_order_acquire, std::memory_order_relaxed)) {
    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
  }

  return lock_value;
}
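//
// Worked example of the CAS path (added for illustration, not in the original
// header): on an uncontended acquire, lock_value is 0 (or
// kSpinLockCooperative), so the compare-exchange attempts 0 -> kSpinLockHeld.
// On success the caller is returned the pre-CAS value 0, and
// (0 & kSpinLockHeld) == 0 signals that the lock was taken.  If another thread
// won the race, compare_exchange_strong refreshes lock_value with the observed
// word (normally with kSpinLockHeld set), and the caller falls back to
// SlowLock().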

}  // namespace base_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_