/src/abseil-cpp/absl/base/internal/spinlock.h
//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

//  Most users requiring mutual exclusion should use Mutex.
//  SpinLock is provided for use in two situations:
//   - for use by Abseil internal code that Mutex itself depends on
//   - for async signal safety (see below)

// A SpinLock constructed with SchedulingMode::SCHEDULE_KERNEL_ONLY is async
// signal safe. If a spinlock is used within a signal handler, any code that
// acquires the lock must ensure that the signal cannot arrive while the lock
// is held. Typically, this is done by blocking the signal.
//
// Threads waiting on a SpinLock may be woken in an arbitrary order.
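//
// Example (an illustrative sketch, not Abseil-provided code): blocking a
// signal around the critical section so the handler cannot interrupt a
// holder. `SIGPROF`, `g_state_lock`, and `UpdateSharedState` are hypothetical
// names used only for illustration.
//
//   #include <signal.h>
//
//   ABSL_CONST_INIT absl::base_internal::SpinLock g_state_lock(
//       absl::base_internal::SCHEDULE_KERNEL_ONLY);
//
//   void UpdateSharedState() {
//     sigset_t mask, old_mask;
//     sigemptyset(&mask);
//     sigaddset(&mask, SIGPROF);
//     pthread_sigmask(SIG_BLOCK, &mask, &old_mask);  // handler cannot fire
//     {
//       absl::base_internal::SpinLockHolder h(g_state_lock);
//       // ...state that the SIGPROF handler also touches...
//     }
//     pthread_sigmask(SIG_SETMASK, &old_mask, nullptr);  // restore the mask
//   }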

#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
#define ABSL_BASE_INTERNAL_SPINLOCK_H_

#include <atomic>
#include <cstdint>
#include <type_traits>

#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/const_init.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/tsan_mutex_interface.h"
#include "absl/base/macros.h"
#include "absl/base/thread_annotations.h"

namespace tcmalloc {
namespace tcmalloc_internal {

class AllocationGuardSpinLockHolder;

}  // namespace tcmalloc_internal
}  // namespace tcmalloc

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
 public:
  constexpr SpinLock() : lockword_(kSpinLockCooperative) { RegisterWithTsan(); }

  // Constructors that allow non-cooperative spinlocks to be created for use
  // inside thread schedulers.  Normal clients should not use these.
  constexpr explicit SpinLock(SchedulingMode mode)
      : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
    RegisterWithTsan();
  }

#if ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(_WIN32)
  // Constructor to inline users of the default scheduling mode.
  //
  // This only needs to exist for inliner runs, but it doesn't work correctly
  // in clang+windows builds, likely due to mangling differences.
  ABSL_DEPRECATE_AND_INLINE()
  constexpr explicit SpinLock(SchedulingMode mode)
      __attribute__((enable_if(mode == SCHEDULE_COOPERATIVE_AND_KERNEL,
                               "Cooperative use default constructor")))
      : SpinLock() {}
#endif

  // Constructor for global SpinLock instances.  See absl/base/const_init.h.
  ABSL_DEPRECATE_AND_INLINE()
  constexpr SpinLock(absl::ConstInitType, SchedulingMode mode)
      : SpinLock(mode) {}

  // For global SpinLock instances, prefer a trivial destructor when possible.
  // A defaulted but non-trivial destructor in some build configurations causes
  // an extra static initializer.
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
#else
  ~SpinLock() = default;
#endif

  // Acquire this SpinLock.
  inline void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
    if (!TryLockImpl()) {
      SlowLock();
    }
    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
  }

  ABSL_DEPRECATE_AND_INLINE()
  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { return lock(); }

  // Try to acquire this SpinLock without blocking and return true if the
  // acquisition was successful.  If the lock was not acquired, false is
  // returned.  If this SpinLock is free at the time of the call, try_lock will
  // return true with high probability.
  [[nodiscard]] inline bool try_lock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
    bool res = TryLockImpl();
    ABSL_TSAN_MUTEX_POST_LOCK(
        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
        0);
    return res;
  }

  ABSL_DEPRECATE_AND_INLINE()
  [[nodiscard]] inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
    return try_lock();
  }
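
  // Example (an illustrative sketch, not Abseil-provided code): an
  // opportunistic fast path that does other work when the lock is busy.
  // `stats_lock`, `FlushStats`, and `BufferStatsLocally` are hypothetical.
  //
  //   if (stats_lock.try_lock()) {
  //     FlushStats();           // lock acquired; do the protected work
  //     stats_lock.unlock();
  //   } else {
  //     BufferStatsLocally();   // lock busy; fall back without blocking
  //   }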

  // Release this SpinLock, which must be held by the calling thread.
  inline void unlock() ABSL_UNLOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
                                    std::memory_order_release);

    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
      SchedulingGuard::EnableRescheduling(true);
    }
    if ((lock_value & kWaitTimeMask) != 0) {
      // Collect contentionz profile info, and speed the wakeup of any waiter.
      // The wait_cycles value indicates how long this thread spent waiting
      // for the lock.
      SlowUnlock(lock_value);
    }
    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
  }

  ABSL_DEPRECATE_AND_INLINE()
  inline void Unlock() ABSL_UNLOCK_FUNCTION() { unlock(); }

  // Determine if the lock is held.  When the lock is held by the invoking
  // thread, true will always be returned. Intended to be used as
  // CHECK(lock.IsHeld()).
  [[nodiscard]] inline bool IsHeld() const {
    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
  }

  // Return immediately if this thread holds the SpinLock exclusively.
  // Otherwise, report an error by crashing with a diagnostic.
  inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() {
    if (!IsHeld()) {
      ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
    }
  }

 protected:
  // These should not be exported except for testing.

  // Store the number of cycles between wait_start_time and wait_end_time in a
  // lock value.
  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
                                   int64_t wait_end_time);

  // Extract the number of wait cycles from a lock value.
  static int64_t DecodeWaitCycles(uint32_t lock_value);

  // Provide access to the protected methods above.  Use for testing only.
  friend struct SpinLockTest;
  friend class tcmalloc::tcmalloc_internal::AllocationGuardSpinLockHolder;

 private:
  // lockword_ is used to store the following:
  //
  // bit[0] encodes whether a lock is being held.
  // bit[1] encodes whether a lock uses cooperative scheduling.
  // bit[2] encodes whether the current lock holder disabled scheduling when
  //        acquiring the lock. Only set when kSpinLockHeld is also set.
  // bits [3:31] encode the time a lock spent waiting, as a 29-bit unsigned
  //        int. This is set by the lock holder to indicate how long it waited
  //        on the lock before eventually acquiring it. If the current holder
  //        did not wait but another waiter is queued, only the LSB of the
  //        field (kSpinLockSleeper) is set. The implementation does not
  //        explicitly track the number of queued waiters beyond this; it must
  //        always be assumed that waiters may exist if the current holder was
  //        required to queue.
  //
  // Invariant: if the lock is not held, the value is either 0 or
  // kSpinLockCooperative.
  static constexpr uint32_t kSpinLockHeld = 1;
  static constexpr uint32_t kSpinLockCooperative = 2;
  static constexpr uint32_t kSpinLockDisabledScheduling = 4;
  static constexpr uint32_t kSpinLockSleeper = 8;
  // Includes kSpinLockSleeper.
  static constexpr uint32_t kWaitTimeMask =
      ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
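
  // Worked example (illustrative only, derived from the constants above): a
  // lockword value of 0xD == kSpinLockHeld | kSpinLockDisabledScheduling |
  // kSpinLockSleeper describes a held, non-cooperative lock whose holder
  // disabled rescheduling to acquire it, and for which at least one waiter
  // may be queued (kSpinLockSleeper) even though the holder itself recorded
  // no wait cycles.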

  // Returns true if the provided scheduling mode is cooperative.
  static constexpr bool IsCooperative(SchedulingMode scheduling_mode) {
    return scheduling_mode == SCHEDULE_COOPERATIVE_AND_KERNEL;
  }

  constexpr void RegisterWithTsan() {
#if ABSL_HAVE_BUILTIN(__builtin_is_constant_evaluated)
    if (!__builtin_is_constant_evaluated()) {
      ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
    }
#endif
  }

  bool IsCooperative() const {
    return lockword_.load(std::memory_order_relaxed) & kSpinLockCooperative;
  }

  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
  void SlowLock() ABSL_ATTRIBUTE_COLD;
  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
  uint32_t SpinLoop();

  inline bool TryLockImpl() {
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
  }

  std::atomic<uint32_t> lockword_;

  SpinLock(const SpinLock&) = delete;
  SpinLock& operator=(const SpinLock&) = delete;
};

// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
class ABSL_SCOPED_LOCKABLE [[nodiscard]] SpinLockHolder {
 public:
  inline explicit SpinLockHolder(
      SpinLock& l ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
      ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
      : lock_(l) {
    l.lock();
  }
  ABSL_DEPRECATE_AND_INLINE()
  inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
      : SpinLockHolder(*l) {}

  inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_.unlock(); }

  SpinLockHolder(const SpinLockHolder&) = delete;
  SpinLockHolder& operator=(const SpinLockHolder&) = delete;

 private:
  SpinLock& lock_;
};
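
// Example (an illustrative sketch, not Abseil-provided code): guarding a
// counter with a scoped holder. The lock is released when `h` leaves scope,
// even on early return. `g_counter_lock` and `Increment` are hypothetical.
//
//   ABSL_CONST_INIT absl::base_internal::SpinLock g_counter_lock(
//       absl::base_internal::SCHEDULE_KERNEL_ONLY);
//   int g_counter = 0;
//
//   void Increment() {
//     absl::base_internal::SpinLockHolder h(g_counter_lock);
//     ++g_counter;
//   }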

// Register a hook for profiling support.
//
// The function pointer registered here will be called whenever a spinlock is
// contended.  The callback is given an opaque handle to the contended spinlock
// and the number of wait cycles.  This is thread-safe, but only a single
// profiler can be registered.  It is an error to call this function multiple
// times with different arguments.
void RegisterSpinLockProfiler(void (*fn)(const void* lock,
                                         int64_t wait_cycles));
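
// Example (an illustrative sketch; `LogContention` is a hypothetical callback,
// not part of Abseil): register a contention logger once, e.g. early in
// main(), before any contention of interest can occur.
//
//   void LogContention(const void* lock, int64_t wait_cycles) {
//     // `lock` is an opaque handle; `wait_cycles` measures time spent
//     // waiting for that lock.
//   }
//
//   absl::base_internal::RegisterSpinLockProfiler(&LogContention);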

//------------------------------------------------------------------------------
// Public interface ends here.
//------------------------------------------------------------------------------

// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
// Otherwise, returns last observed value for lockword_.
inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
                                          uint32_t wait_cycles) {
  if ((lock_value & kSpinLockHeld) != 0) {
    return lock_value;
  }

  uint32_t sched_disabled_bit = 0;
  if ((lock_value & kSpinLockCooperative) == 0) {
    // For non-cooperative locks we must make sure we mark ourselves as
    // non-reschedulable before we attempt to CompareAndSwap.
    if (SchedulingGuard::DisableRescheduling()) {
      sched_disabled_bit = kSpinLockDisabledScheduling;
    }
  }

  if (!lockword_.compare_exchange_strong(
          lock_value,
          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
          std::memory_order_acquire, std::memory_order_relaxed)) {
    SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
  }

  return lock_value;
}

}  // namespace base_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_