/src/abseil-cpp/absl/synchronization/mutex.h
Line | Count | Source |
1 | | // Copyright 2017 The Abseil Authors. |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // https://www.apache.org/licenses/LICENSE-2.0 |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | // |
15 | | // ----------------------------------------------------------------------------- |
16 | | // mutex.h |
17 | | // ----------------------------------------------------------------------------- |
18 | | // |
19 | | // This header file defines a `Mutex` -- a mutually exclusive lock -- and the |
20 | | // most common type of synchronization primitive for facilitating locks on |
21 | | // shared resources. A mutex is used to prevent multiple threads from accessing |
22 | | // and/or writing to a shared resource concurrently. |
23 | | // |
24 | | // Unlike a `std::mutex`, the Abseil `Mutex` provides the following additional |
25 | | // features: |
26 | | // * Conditional predicates intrinsic to the `Mutex` object |
27 | | // * Shared/reader locks, in addition to standard exclusive/writer locks |
28 | | // * Deadlock detection and debug support. |
29 | | // |
30 | | // The following helper classes are also defined within this file: |
31 | | // |
32 | | // MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/ |
33 | | // write access within the current scope. |
34 | | // |
35 | | // ReaderMutexLock |
36 | | // - An RAII wrapper to acquire and release a `Mutex` for shared/read |
37 | | // access within the current scope. |
38 | | // |
39 | | // WriterMutexLock |
40 | | // - Effectively an alias for `MutexLock` above, designed for use in |
41 | | // distinguishing reader and writer locks within code. |
42 | | // |
43 | | // In addition to simple mutex locks, this file also defines ways to perform |
44 | | // locking under certain conditions. |
45 | | // |
46 | | // Condition - (Preferred) Used to wait for a particular predicate that |
47 | | // depends on state protected by the `Mutex` to become true. |
48 | | // CondVar - A lower-level variant of `Condition` that relies on |
49 | | // application code to explicitly signal the `CondVar` when |
50 | | // a condition has been met. |
51 | | // |
52 | | // See below for more information on using `Condition` or `CondVar`. |
53 | | // |
54 | | // Mutexes and mutex behavior can be quite complicated. The information within |
55 | | // this header file is limited, as a result. Please consult the Mutex guide for |
56 | | // more complete information and examples. |
57 | | |
58 | | #ifndef ABSL_SYNCHRONIZATION_MUTEX_H_ |
59 | | #define ABSL_SYNCHRONIZATION_MUTEX_H_ |
60 | | |
61 | | #include <atomic> |
62 | | #include <cstdint> |
63 | | #include <cstring> |
64 | | |
65 | | #include "absl/base/attributes.h" |
66 | | #include "absl/base/config.h" |
67 | | #include "absl/base/const_init.h" |
68 | | #include "absl/base/internal/thread_identity.h" |
69 | | #include "absl/base/internal/tsan_mutex_interface.h" |
70 | | #include "absl/base/macros.h" |
71 | | #include "absl/base/nullability.h" |
72 | | #include "absl/base/thread_annotations.h" |
73 | | #include "absl/meta/type_traits.h" |
74 | | #include "absl/synchronization/internal/kernel_timeout.h" |
75 | | #include "absl/synchronization/internal/per_thread_sem.h" |
76 | | #include "absl/time/time.h" |
77 | | |
78 | | namespace absl { |
79 | | ABSL_NAMESPACE_BEGIN |
80 | | |
81 | | class Condition; |
82 | | struct SynchWaitParams; |
83 | | |
84 | | // ----------------------------------------------------------------------------- |
85 | | // Mutex |
86 | | // ----------------------------------------------------------------------------- |
87 | | // |
88 | | // A `Mutex` is a non-reentrant (aka non-recursive) Mutually Exclusive lock |
89 | | // on some resource, typically a variable or data structure with associated |
90 | | // invariants. Proper usage of mutexes prevents concurrent access by different |
91 | | // threads to the same resource. |
92 | | // |
93 | | // A `Mutex` has two basic operations: `Mutex::lock()` and `Mutex::unlock()`. |
94 | | // The `lock()` operation *acquires* a `Mutex` (in a state known as an |
95 | | // *exclusive* -- or *write* -- lock), and the `unlock()` operation *releases* a |
96 | | // Mutex. During the span of time between the lock() and unlock() operations, |
97 | | // a mutex is said to be *held*. By design, all mutexes support exclusive/write |
98 | | // locks, as this is the most common way to use a mutex. |
99 | | // |
100 | | // Mutex operations are only allowed under certain conditions; otherwise an |
101 | | // operation is "invalid", and disallowed by the API. The conditions concern |
102 | | // both the current state of the mutex and the identity of the threads that |
103 | | // are performing the operations. |
104 | | // |
105 | | // The `Mutex` state machine for basic lock/unlock operations is quite simple: |
106 | | // |
107 | | // | | lock() | unlock() | |
108 | | // |----------------+------------------------+----------| |
109 | | // | Free | Exclusive | invalid | |
110 | | // | Exclusive | blocks, then exclusive | Free | |
111 | | // |
112 | | // The full conditions are as follows. |
113 | | // |
114 | | // * Calls to `unlock()` require that the mutex be held, and must be made in the |
115 | | // same thread that performed the corresponding `lock()` operation which |
116 | | // acquired the mutex; otherwise the call is invalid. |
117 | | // |
118 | | // * The mutex being non-reentrant (or non-recursive) means that a call to |
119 | | // `lock()` or `try_lock()` must not be made in a thread that already holds |
120 | | // the mutex; such a call is invalid. |
121 | | // |
122 | | // * In other words, the state of being "held" has both a temporal component |
123 | | // (from `lock()` until `unlock()`) as well as a thread identity component: |
124 | | // the mutex is held *by a particular thread*. |
125 | | // |
126 | | // An "invalid" operation has undefined behavior. The `Mutex` implementation |
127 | | // is allowed to do anything on an invalid call, including, but not limited to, |
128 | | // crashing with a useful error message, silently succeeding, or corrupting |
129 | | // data structures. In debug mode, the implementation may crash with a useful |
130 | | // error message. |
131 | | // |
132 | | // `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it |
133 | | // is, however, approximately fair over long periods, and starvation-free for |
134 | | // threads at the same priority. |
135 | | // |
136 | | // The lock/unlock primitives are annotated with the lock annotations
137 | | // defined in absl/base/thread_annotations.h. When writing multi-threaded code,
138 | | // you should use lock annotations whenever possible to document your lock |
139 | | // synchronization policy. Besides acting as documentation, these annotations |
140 | | // also help compilers or static analysis tools to identify and warn about |
141 | | // issues that could potentially result in race conditions and deadlocks. |
142 | | // |
143 | | // For more information about the lock annotations, please see |
144 | | // [Thread Safety |
145 | | // Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) in the Clang |
146 | | // documentation. |
147 | | // |
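| | // As a minimal usage sketch (an illustration only; `Counter`, `Increment()`,
| | // `count_`, and `mu_` are hypothetical names, not part of this API):
| | //
| | //   class Counter {
| | //    public:
| | //     void Increment() {
| | //       mu_.lock();
| | //       ++count_;
| | //       mu_.unlock();
| | //     }
| | //
| | //    private:
| | //     absl::Mutex mu_;
| | //     int count_ ABSL_GUARDED_BY(mu_) = 0;
| | //   };
| | //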
148 | | // See also `MutexLock`, below, for scoped `Mutex` acquisition. |
149 | | |
150 | | class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex { |
151 | | public: |
152 | | // Creates a `Mutex` that is not held by anyone. This constructor is |
153 | | // typically used for Mutexes allocated on the heap or the stack. |
154 | | // |
155 | | // To create `Mutex` instances with static storage duration |
156 | | // (e.g. a namespace-scoped or global variable), see |
157 | | // `Mutex::Mutex(absl::kConstInit)` below instead. |
158 | | Mutex(); |
159 | | |
160 | | // Creates a mutex with static storage duration. A global variable |
161 | | // constructed this way avoids the lifetime issues that can occur on program |
162 | | // startup and shutdown. (See absl/base/const_init.h.) |
163 | | // |
164 | | // For Mutexes allocated on the heap and stack, instead use the default |
165 | | // constructor, which can interact more fully with the thread sanitizer. |
166 | | // |
167 | | // Example usage: |
168 | | // namespace foo { |
169 | | // ABSL_CONST_INIT absl::Mutex mu(absl::kConstInit); |
170 | | // } |
171 | | explicit constexpr Mutex(absl::ConstInitType); |
172 | | |
173 | | ~Mutex(); |
174 | | |
175 | | // Mutex::lock() |
176 | | // |
177 | | // Blocks the calling thread, if necessary, until this `Mutex` is free, and |
178 | | // then acquires it exclusively. (This lock is also known as a "write lock.") |
179 | | void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION(); |
180 | | |
181 | | ABSL_DEPRECATE_AND_INLINE() |
182 | 0 | inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { lock(); } |
183 | | |
184 | | // Mutex::unlock() |
185 | | // |
186 | | // Releases this `Mutex` and returns it from the exclusive/write state to the |
187 | | // free state. Calling thread must hold the `Mutex` exclusively. |
188 | | void unlock() ABSL_UNLOCK_FUNCTION(); |
189 | | |
190 | | ABSL_DEPRECATE_AND_INLINE() |
191 | 0 | inline void Unlock() ABSL_UNLOCK_FUNCTION() { unlock(); } |
192 | | |
193 | | // Mutex::try_lock() |
194 | | // |
195 | | // If the mutex can be acquired without blocking, does so exclusively and |
196 | | // returns `true`. Otherwise, returns `false`. Returns `true` with high |
197 | | // probability if the `Mutex` was free. |
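| | //
| | // For example (a sketch):
| | //
| | //   if (mu_.try_lock()) {
| | //     ...                      // critical section; mu_ is held
| | //     mu_.unlock();
| | //   } else {
| | //     ...                      // fall back to work that needs no lock
| | //   }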
198 | | [[nodiscard]] bool try_lock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true); |
199 | | |
200 | | ABSL_DEPRECATE_AND_INLINE() |
201 | 0 | [[nodiscard]] bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) { |
202 | 0 | return try_lock(); |
203 | 0 | } |
204 | | |
205 | | // Mutex::AssertHeld() |
206 | | // |
207 | | // Require that the mutex be held exclusively (write mode) by this thread. |
208 | | // |
209 | | // If the mutex is not currently held by this thread, this function may report |
210 | | // an error (typically by crashing with a diagnostic) or it may do nothing. |
211 | | // This function is intended only as a tool to assist debugging; it doesn't |
212 | | // guarantee correctness. |
213 | | void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK(); |
214 | | |
215 | | // --------------------------------------------------------------------------- |
216 | | // Reader-Writer Locking |
217 | | // --------------------------------------------------------------------------- |
218 | | |
219 | | // A Mutex can also be used as a starvation-free reader-writer lock. |
220 | | // Neither read-locks nor write-locks are reentrant/recursive to avoid |
221 | | // potential client programming errors. |
222 | | // |
223 | | // The Mutex API provides `Writer*()` aliases for the existing `lock()`, |
224 | | // `unlock()` and `try_lock()` methods for use within applications mixing |
225 | | // reader/writer locks. Using `*_shared()` and `Writer*()` operations in this |
226 | | // manner can make locking behavior clearer when mixing read and write modes. |
227 | | // |
228 | | // Introducing reader locks necessarily complicates the `Mutex` state |
229 | | // machine somewhat. The table below illustrates the allowed state transitions |
230 | | // of a mutex in such cases. Note that lock_shared() may block even if the |
231 | | // lock is held in shared mode; this occurs when another thread is blocked on |
232 | | // a call to lock(). |
233 | | // |
234 | | // --------------------------------------------------------------------------- |
235 | | // Operation: lock() unlock() lock_shared() unlock_shared() |
236 | | // --------------------------------------------------------------------------- |
237 | | // State |
238 | | // --------------------------------------------------------------------------- |
239 | | // Free Exclusive invalid Shared(1) invalid |
240 | | // Shared(1) blocks invalid Shared(2) or blocks Free |
241 | | // Shared(n) n>1 blocks invalid Shared(n+1) or blocks Shared(n-1) |
242 | | // Exclusive blocks Free blocks invalid |
243 | | // --------------------------------------------------------------------------- |
244 | | // |
245 | | // In comments below, "shared" refers to a state of Shared(n) for any n > 0. |
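| | //
| | // A brief reader/writer sketch (an illustration only; `config_` and `mu_`
| | // are hypothetical members):
| | //
| | //   std::string ReadName() {
| | //     mu_.lock_shared();             // many readers may hold this at once
| | //     std::string name = config_.name;
| | //     mu_.unlock_shared();
| | //     return name;
| | //   }
| | //
| | //   void SetName(const std::string& name) {
| | //     mu_.lock();                    // writers get exclusive access
| | //     config_.name = name;
| | //     mu_.unlock();
| | //   }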
246 | | |
247 | | // Mutex::lock_shared() |
248 | | // |
249 | | // Blocks the calling thread, if necessary, until this `Mutex` is either free, |
250 | | // or in shared mode, and then acquires a share of it. Note that |
251 | | // `lock_shared()` will block if some other thread has an exclusive/writer |
252 | | // lock on the mutex. |
253 | | void lock_shared() ABSL_SHARED_LOCK_FUNCTION(); |
254 | | |
255 | | ABSL_DEPRECATE_AND_INLINE() |
256 | 0 | void ReaderLock() ABSL_SHARED_LOCK_FUNCTION() { lock_shared(); } |
257 | | |
258 | | // Mutex::unlock_shared() |
259 | | // |
260 | | // Releases a read share of this `Mutex`. `unlock_shared` may return a mutex |
261 | | // to the free state if this thread holds the last reader lock on the mutex. |
262 | | // Note that you cannot call `unlock_shared()` on a mutex held in write mode. |
263 | | void unlock_shared() ABSL_UNLOCK_FUNCTION(); |
264 | | |
265 | | ABSL_DEPRECATE_AND_INLINE() |
266 | 0 | void ReaderUnlock() ABSL_UNLOCK_FUNCTION() { unlock_shared(); } |
267 | | |
268 | | // Mutex::try_lock_shared() |
269 | | // |
270 | | // If the mutex can be acquired without blocking, acquires this mutex for |
271 | | // shared access and returns `true`. Otherwise, returns `false`. Returns |
272 | | // `true` with high probability if the `Mutex` was free or shared. |
273 | | [[nodiscard]] bool try_lock_shared() ABSL_SHARED_TRYLOCK_FUNCTION(true); |
274 | | |
275 | | ABSL_DEPRECATE_AND_INLINE() |
276 | 0 | [[nodiscard]] bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true) { |
277 | 0 | return try_lock_shared(); |
278 | 0 | } |
279 | | |
280 | | // Mutex::AssertReaderHeld() |
281 | | // |
282 | | // Require that the mutex be held at least in shared mode (read mode) by this |
283 | | // thread. |
284 | | // |
285 | | // If the mutex is not currently held by this thread, this function may report |
286 | | // an error (typically by crashing with a diagnostic) or it may do nothing. |
287 | | // This function is intended only as a tool to assist debugging; it doesn't |
288 | | // guarantee correctness. |
289 | | void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK(); |
290 | | |
291 | | // Mutex::WriterLock() |
292 | | // Mutex::WriterUnlock() |
293 | | // Mutex::WriterTryLock() |
294 | | // |
295 | | // Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`. |
296 | | // |
297 | | // These methods may be used (along with the complementary `Reader*()` |
298 | | // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`, |
299 | | // etc.) from reader/writer lock usage. |
300 | | ABSL_DEPRECATE_AND_INLINE() |
301 | 0 | void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { lock(); } |
302 | | |
303 | | ABSL_DEPRECATE_AND_INLINE() |
304 | 0 | void WriterUnlock() ABSL_UNLOCK_FUNCTION() { unlock(); } |
305 | | |
306 | | ABSL_DEPRECATE_AND_INLINE() |
307 | 0 | [[nodiscard]] bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) { |
308 | 0 | return try_lock(); |
309 | 0 | } |
310 | | |
311 | | // --------------------------------------------------------------------------- |
312 | | // Conditional Critical Regions |
313 | | // --------------------------------------------------------------------------- |
314 | | |
315 | | // Conditional usage of a `Mutex` can occur using two distinct paradigms: |
316 | | // |
317 | | // * Use of `Mutex` member functions with `Condition` objects. |
318 | | // * Use of the separate `CondVar` abstraction. |
319 | | // |
320 | | // In general, prefer use of `Condition` and the `Mutex` member functions |
321 | | // listed below over `CondVar`. When there are multiple threads waiting on |
322 | | // distinctly different conditions, however, a battery of `CondVar`s may be |
323 | | // more efficient. This section discusses use of `Condition` objects. |
324 | | // |
325 | | // `Mutex` contains member functions for performing lock operations only under |
326 | | // certain conditions, of class `Condition`. For correctness, the `Condition` |
327 | | // must return a boolean that is a pure function, only of state protected by |
328 | | // the `Mutex`. The condition must be invariant w.r.t. environmental state |
329 | | // such as thread, cpu id, or time, and must be `noexcept`. The condition will |
330 | | // always be invoked with the mutex held in at least read mode, so you should |
331 | | // not block it for long periods or sleep it on a timer. |
332 | | // |
333 | | // Since a condition must not depend directly on the current time, use |
334 | | // `*WithTimeout()` member function variants to make your condition |
335 | | // effectively true after a given duration, or `*WithDeadline()` variants to |
336 | | // make your condition effectively true after a given time. |
337 | | // |
338 | | // The condition function should have no side-effects aside from debug |
339 | | // logging; as a special exception, the function may acquire other mutexes |
340 | | // provided it releases all those that it acquires. (This exception was |
341 | | // required to allow logging.) |
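| | //
| | // As a brief sketch of the pattern (an illustration only; `queue_` and `mu_`
| | // are hypothetical members):
| | //
| | //   // Consumer: block until a producer has pushed something.
| | //   mu_.lock();
| | //   mu_.Await(absl::Condition(
| | //       +[](std::deque<int>* q) { return !q->empty(); }, &queue_));
| | //   int item = queue_.front();
| | //   queue_.pop_front();
| | //   mu_.unlock();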
342 | | |
343 | | // Mutex::Await() |
344 | | // |
345 | | // Unlocks this `Mutex` and blocks until simultaneously both `cond` is `true` |
346 | | // and this `Mutex` can be reacquired, then reacquires this `Mutex` in the |
347 | | // same mode in which it was previously held. If the condition is initially |
348 | | // `true`, `Await()` *may* skip the release/re-acquire step. |
349 | | // |
350 | | // `Await()` requires that this thread holds this `Mutex` in some mode. |
351 | 0 | void Await(const Condition& cond) { |
352 | 0 | AwaitCommon(cond, synchronization_internal::KernelTimeout::Never()); |
353 | 0 | } |
354 | | |
355 | | // Mutex::LockWhen() |
356 | | // Mutex::ReaderLockWhen() |
357 | | // Mutex::WriterLockWhen() |
358 | | // |
359 | | // Blocks until simultaneously both `cond` is `true` and this `Mutex` can |
360 | | // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is |
361 | | // logically equivalent to `*Lock(); Await();` though they may have different |
362 | | // performance characteristics. |
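| | //
| | // For example (a sketch, reusing the hypothetical `queue_`/`mu_` above):
| | //
| | //   mu_.LockWhen(absl::Condition(
| | //       +[](std::deque<int>* q) { return !q->empty(); }, &queue_));
| | //   ...                              // queue_ is non-empty and mu_ is held
| | //   mu_.unlock();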
363 | 0 | void LockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() { |
364 | 0 | LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(), |
365 | 0 | true); |
366 | 0 | } |
367 | | |
368 | 0 | void ReaderLockWhen(const Condition& cond) ABSL_SHARED_LOCK_FUNCTION() { |
369 | 0 | LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(), |
370 | 0 | false); |
371 | 0 | } |
372 | | |
373 | 0 | void WriterLockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() { |
374 | 0 | this->LockWhen(cond); |
375 | 0 | } |
376 | | |
377 | | // --------------------------------------------------------------------------- |
378 | | // Mutex Variants with Timeouts/Deadlines |
379 | | // --------------------------------------------------------------------------- |
380 | | |
381 | | // Mutex::AwaitWithTimeout() |
382 | | // Mutex::AwaitWithDeadline() |
383 | | // |
384 | | // Unlocks this `Mutex` and blocks until simultaneously: |
385 | | // - either `cond` is true or the {timeout has expired, deadline has passed} |
386 | | // and |
387 | | // - this `Mutex` can be reacquired, |
388 | | // then reacquire this `Mutex` in the same mode in which it was previously |
389 | | // held, returning `true` iff `cond` is `true` on return. |
390 | | // |
391 | | // If the condition is initially `true`, the implementation *may* skip the |
392 | | // release/re-acquire step and return immediately. |
393 | | // |
394 | | // Deadlines in the past are equivalent to an immediate deadline. |
395 | | // Negative timeouts are equivalent to a zero timeout. |
396 | | // |
397 | | // This method requires that this thread holds this `Mutex` in some mode. |
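| | //
| | // For example (a sketch, reusing the hypothetical `queue_`/`mu_` above):
| | //
| | //   mu_.lock();
| | //   if (mu_.AwaitWithTimeout(
| | //           absl::Condition(+[](std::deque<int>* q) { return !q->empty(); },
| | //                           &queue_),
| | //           absl::Seconds(5))) {
| | //     ...                            // queue_ is non-empty; mu_ is held
| | //   } else {
| | //     ...                            // timed out; mu_ is held, queue_ may be empty
| | //   }
| | //   mu_.unlock();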
398 | 0 | bool AwaitWithTimeout(const Condition& cond, absl::Duration timeout) { |
399 | 0 | return AwaitCommon(cond, synchronization_internal::KernelTimeout{timeout}); |
400 | 0 | } |
401 | | |
402 | 0 | bool AwaitWithDeadline(const Condition& cond, absl::Time deadline) { |
403 | 0 | return AwaitCommon(cond, synchronization_internal::KernelTimeout{deadline}); |
404 | 0 | } |
405 | | |
406 | | // Mutex::LockWhenWithTimeout() |
407 | | // Mutex::ReaderLockWhenWithTimeout() |
408 | | // Mutex::WriterLockWhenWithTimeout() |
409 | | // |
410 | | // Blocks until simultaneously both: |
411 | | // - either `cond` is `true` or the timeout has expired, and |
412 | | // - this `Mutex` can be acquired, |
413 | | // then atomically acquires this `Mutex`, returning `true` iff `cond` is |
414 | | // `true` on return. |
415 | | // |
416 | | // Negative timeouts are equivalent to a zero timeout. |
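| | //
| | // For example (a sketch, reusing the hypothetical names above). Note that the
| | // `Mutex` is acquired even if the timeout expires; the return value reports
| | // only whether the condition held on return.
| | //
| | //   if (mu_.LockWhenWithTimeout(
| | //           absl::Condition(+[](std::deque<int>* q) { return !q->empty(); },
| | //                           &queue_),
| | //           absl::Milliseconds(100))) {
| | //     ...                            // condition held; mu_ is held
| | //   } else {
| | //     ...                            // timed out; mu_ is still held
| | //   }
| | //   mu_.unlock();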
417 | | bool LockWhenWithTimeout(const Condition& cond, absl::Duration timeout) |
418 | 0 | ABSL_EXCLUSIVE_LOCK_FUNCTION() { |
419 | 0 | return LockWhenCommon( |
420 | 0 | cond, synchronization_internal::KernelTimeout{timeout}, true); |
421 | 0 | } |
422 | | bool ReaderLockWhenWithTimeout(const Condition& cond, absl::Duration timeout) |
423 | 0 | ABSL_SHARED_LOCK_FUNCTION() { |
424 | 0 | return LockWhenCommon( |
425 | 0 | cond, synchronization_internal::KernelTimeout{timeout}, false); |
426 | 0 | } |
427 | | bool WriterLockWhenWithTimeout(const Condition& cond, absl::Duration timeout) |
428 | 0 | ABSL_EXCLUSIVE_LOCK_FUNCTION() { |
429 | 0 | return this->LockWhenWithTimeout(cond, timeout); |
430 | 0 | } |
431 | | |
432 | | // Mutex::LockWhenWithDeadline() |
433 | | // Mutex::ReaderLockWhenWithDeadline() |
434 | | // Mutex::WriterLockWhenWithDeadline() |
435 | | // |
436 | | // Blocks until simultaneously both: |
437 | | // - either `cond` is `true` or the deadline has been passed, and |
438 | | // - this `Mutex` can be acquired, |
439 | | // then atomically acquires this Mutex, returning `true` iff `cond` is `true` |
440 | | // on return. |
441 | | // |
442 | | // Deadlines in the past are equivalent to an immediate deadline. |
443 | | bool LockWhenWithDeadline(const Condition& cond, absl::Time deadline) |
444 | 0 | ABSL_EXCLUSIVE_LOCK_FUNCTION() { |
445 | 0 | return LockWhenCommon( |
446 | 0 | cond, synchronization_internal::KernelTimeout{deadline}, true); |
447 | 0 | } |
448 | | bool ReaderLockWhenWithDeadline(const Condition& cond, absl::Time deadline) |
449 | 0 | ABSL_SHARED_LOCK_FUNCTION() { |
450 | 0 | return LockWhenCommon( |
451 | 0 | cond, synchronization_internal::KernelTimeout{deadline}, false); |
452 | 0 | } |
453 | | bool WriterLockWhenWithDeadline(const Condition& cond, absl::Time deadline) |
454 | 0 | ABSL_EXCLUSIVE_LOCK_FUNCTION() { |
455 | 0 | return this->LockWhenWithDeadline(cond, deadline); |
456 | 0 | } |
457 | | |
458 | | // --------------------------------------------------------------------------- |
459 | | // Debug Support: Invariant Checking, Deadlock Detection, Logging. |
460 | | // --------------------------------------------------------------------------- |
461 | | |
462 | | // Mutex::EnableInvariantDebugging() |
463 | | // |
464 | | // If `invariant`!=null and if invariant debugging has been enabled globally, |
465 | | // cause `(*invariant)(arg)` to be called at moments when the invariant for |
466 | | // this `Mutex` should hold (for example: just after acquire, just before |
467 | | // release). |
468 | | // |
469 | | // The routine `invariant` should have no side-effects since it is not |
470 | | // guaranteed how many times it will be called; it should check the invariant |
471 | | // and crash if it does not hold. Enabling global invariant debugging may |
472 | | // substantially reduce `Mutex` performance; it should be set only for |
473 | | // non-production runs. Optimization options may also disable invariant |
474 | | // checks. |
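| | //
| | // For example (a sketch; `CheckSorted` and `items_` are hypothetical):
| | //
| | //   static void CheckSorted(void* arg) {
| | //     auto* items = static_cast<std::vector<int>*>(arg);
| | //     assert(std::is_sorted(items->begin(), items->end()));
| | //   }
| | //   ...
| | //   mu_.EnableInvariantDebugging(&CheckSorted, &items_);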
475 | | void EnableInvariantDebugging( |
476 | | void (*absl_nullable invariant)(void* absl_nullability_unknown), |
477 | | void* absl_nullability_unknown arg); |
478 | | |
479 | | // Mutex::EnableDebugLog() |
480 | | // |
481 | | // Cause all subsequent uses of this `Mutex` to be logged via |
482 | | // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if no previous |
483 | | // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made. |
484 | | // |
485 | | // Note: This method substantially reduces `Mutex` performance. |
486 | | void EnableDebugLog(const char* absl_nullable name); |
487 | | |
488 | | // Deadlock detection |
489 | | |
490 | | // Mutex::ForgetDeadlockInfo() |
491 | | // |
492 | | // Forget any deadlock-detection information previously gathered |
493 | | // about this `Mutex`. Call this method in debug mode when the lock ordering |
494 | | // of a `Mutex` changes. |
495 | | void ForgetDeadlockInfo(); |
496 | | |
497 | | // Mutex::AssertNotHeld() |
498 | | // |
499 | | // Return immediately if this thread does not hold this `Mutex` in any |
500 | | // mode; otherwise, may report an error (typically by crashing with a |
501 | | // diagnostic), or may return immediately. |
502 | | // |
503 | | // Currently this check is performed only if all of: |
504 | | // - in debug mode |
505 | | // - SetMutexDeadlockDetectionMode() has been set to kReport or kAbort |
506 | | // - number of locks concurrently held by this thread is not large. |
507 | | // are true. |
508 | | void AssertNotHeld() const; |
509 | | |
510 | | // Special cases. |
511 | | |
512 | | // A `MuHow` is a constant that indicates how a lock should be acquired. |
513 | | // Internal implementation detail. Clients should ignore. |
514 | | typedef const struct MuHowS* MuHow; |
515 | | |
516 | | // Mutex::InternalAttemptToUseMutexInFatalSignalHandler() |
517 | | // |
518 | | // Causes the `Mutex` implementation to prepare itself for re-entry caused by |
519 | | // future use of `Mutex` within a fatal signal handler. This method is |
520 | | // intended for use only for last-ditch attempts to log crash information. |
521 | | // It does not guarantee that attempts to use Mutexes within the handler will |
522 | | // not deadlock; it merely makes other faults less likely. |
523 | | // |
524 | | // WARNING: This routine must be invoked from a signal handler, and the |
525 | | // signal handler must either loop forever or terminate the process. |
526 | | // Attempts to return from (or `longjmp` out of) the signal handler once this |
527 | | // call has been made may cause arbitrary program behaviour including |
528 | | // crashes and deadlocks. |
529 | | static void InternalAttemptToUseMutexInFatalSignalHandler(); |
530 | | |
531 | | private: |
532 | | std::atomic<intptr_t> mu_; // The Mutex state. |
533 | | |
534 | | // Post()/Wait() versus associated PerThreadSem; in class for required |
535 | | // friendship with PerThreadSem. |
536 | | static void IncrementSynchSem(Mutex* absl_nonnull mu, |
537 | | base_internal::PerThreadSynch* absl_nonnull w); |
538 | | static bool DecrementSynchSem(Mutex* absl_nonnull mu, |
539 | | base_internal::PerThreadSynch* absl_nonnull w, |
540 | | synchronization_internal::KernelTimeout t); |
541 | | |
542 | | // slow path acquire |
543 | | void LockSlowLoop(SynchWaitParams* absl_nonnull waitp, int flags); |
544 | | // wrappers around LockSlowLoop() |
545 | | bool LockSlowWithDeadline(MuHow absl_nonnull how, |
546 | | const Condition* absl_nullable cond, |
547 | | synchronization_internal::KernelTimeout t, |
548 | | int flags); |
549 | | void LockSlow(MuHow absl_nonnull how, const Condition* absl_nullable cond, |
550 | | int flags) ABSL_ATTRIBUTE_COLD; |
551 | | // slow path release |
552 | | void UnlockSlow(SynchWaitParams* absl_nullable waitp) ABSL_ATTRIBUTE_COLD; |
553 | | // TryLock slow path. |
554 | | bool TryLockSlow(); |
555 | | // ReaderTryLock slow path. |
556 | | bool ReaderTryLockSlow(); |
557 | | // Common code between Await() and AwaitWithTimeout/Deadline() |
558 | | bool AwaitCommon(const Condition& cond, |
559 | | synchronization_internal::KernelTimeout t); |
560 | | bool LockWhenCommon(const Condition& cond, |
561 | | synchronization_internal::KernelTimeout t, bool write); |
562 | | // Attempt to remove thread s from queue. |
563 | | void TryRemove(base_internal::PerThreadSynch* absl_nonnull s); |
564 | | // Block a thread on mutex. |
565 | | void Block(base_internal::PerThreadSynch* absl_nonnull s); |
566 | | // Wake a thread; return successor. |
567 | | base_internal::PerThreadSynch* absl_nullable Wakeup( |
568 | | base_internal::PerThreadSynch* absl_nonnull w); |
569 | | void Dtor(); |
570 | | |
571 | | friend class CondVar; // for access to Trans()/Fer(). |
572 | | void Trans(MuHow absl_nonnull how); // used for CondVar->Mutex transfer |
573 | | void Fer(base_internal::PerThreadSynch* absl_nonnull |
574 | | w); // used for CondVar->Mutex transfer |
575 | | |
576 | | // Catch the error of writing Mutex when intending MutexLock. |
577 | 0 | explicit Mutex(const volatile Mutex* absl_nullable /*ignored*/) {} |
578 | | |
579 | | Mutex(const Mutex&) = delete; |
580 | | Mutex& operator=(const Mutex&) = delete; |
581 | | }; |
582 | | |
583 | | // ----------------------------------------------------------------------------- |
584 | | // Mutex RAII Wrappers |
585 | | // ----------------------------------------------------------------------------- |
586 | | |
587 | | // MutexLock |
588 | | // |
589 | | // `MutexLock` is a helper class, which acquires and releases a `Mutex` via |
590 | | // RAII. |
591 | | // |
592 | | // Example: |
593 | | // |
594 | | // class Foo {
595 | | // public: |
596 | | // Foo::Bar* Baz() { |
597 | | // MutexLock lock(mu_); |
598 | | // ... |
599 | | // return bar; |
600 | | // } |
601 | | // |
602 | | // private: |
603 | | // Mutex mu_; |
604 | | // }; |
605 | | class ABSL_SCOPED_LOCKABLE MutexLock { |
606 | | public: |
607 | | // Constructors |
608 | | |
609 | | // Calls `mu.lock()` and returns when that call returns. That is, `mu` is |
610 | | // guaranteed to be locked when this object is constructed. |
611 | | explicit MutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this)) |
612 | | ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) |
613 | 0 | : mu_(mu) { |
614 | 0 | this->mu_.lock(); |
615 | 0 | } |
616 | | |
617 | | // Calls `mu->lock()` and returns when that call returns. That is, `*mu` is |
618 | | // guaranteed to be locked when this object is constructed. Requires that |
619 | | // `mu` be dereferenceable. |
620 | | [[deprecated("Use the constructor that takes a reference instead")]] |
621 | | ABSL_REFACTOR_INLINE |
622 | | explicit MutexLock(Mutex* absl_nonnull mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) |
623 | 0 | : MutexLock(*mu) {} |
624 | | |
625 | | // Like above, but calls `mu.LockWhen(cond)` instead. That is, in addition to |
626 | | // the above, the condition given by `cond` is also guaranteed to hold when |
627 | | // this object is constructed. |
628 | | explicit MutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this), |
629 | | const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) |
630 | 0 | : mu_(mu) { |
631 | 0 | this->mu_.LockWhen(cond); |
632 | 0 | } |
633 | | |
634 | | [[deprecated("Use the constructor that takes a reference instead")]] |
635 | | ABSL_REFACTOR_INLINE |
636 | | explicit MutexLock(Mutex* absl_nonnull mu, const Condition& cond) |
637 | | ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) |
638 | 0 | : MutexLock(*mu, cond) {} |
639 | | |
640 | | MutexLock(const MutexLock&) = delete; // NOLINT(runtime/mutex) |
641 | | MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex) |
642 | | MutexLock& operator=(const MutexLock&) = delete; |
643 | | MutexLock& operator=(MutexLock&&) = delete; |
644 | | |
645 | 0 | ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_.unlock(); } |
646 | | |
647 | | private: |
648 | | Mutex& mu_; |
649 | | }; |
650 | | |
651 | | // ReaderMutexLock |
652 | | // |
653 | | // The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and |
654 | | // releases a shared lock on a `Mutex` via RAII. |
655 | | class ABSL_SCOPED_LOCKABLE ReaderMutexLock { |
656 | | public: |
657 | | explicit ReaderMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this)) |
658 | | ABSL_SHARED_LOCK_FUNCTION(mu) |
659 | 0 | : mu_(mu) { |
660 | 0 | mu.lock_shared(); |
661 | 0 | } |
662 | | |
663 | | [[deprecated("Use the constructor that takes a reference instead")]] |
664 | | ABSL_REFACTOR_INLINE |
665 | | explicit ReaderMutexLock(Mutex* absl_nonnull mu) ABSL_SHARED_LOCK_FUNCTION(mu) |
666 | 0 | : ReaderMutexLock(*mu) {} |
667 | | |
668 | | explicit ReaderMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this), |
669 | | const Condition& cond) ABSL_SHARED_LOCK_FUNCTION(mu) |
670 | 0 | : mu_(mu) { |
671 | 0 | mu.ReaderLockWhen(cond); |
672 | 0 | } |
673 | | |
674 | | [[deprecated("Use the constructor that takes a reference instead")]] |
675 | | ABSL_REFACTOR_INLINE |
676 | | explicit ReaderMutexLock(Mutex* absl_nonnull mu, const Condition& cond) |
677 | | ABSL_SHARED_LOCK_FUNCTION(mu) |
678 | 0 | : ReaderMutexLock(*mu, cond) {} |
679 | | |
680 | | ReaderMutexLock(const ReaderMutexLock&) = delete; |
681 | | ReaderMutexLock(ReaderMutexLock&&) = delete; |
682 | | ReaderMutexLock& operator=(const ReaderMutexLock&) = delete; |
683 | | ReaderMutexLock& operator=(ReaderMutexLock&&) = delete; |
684 | | |
685 | 0 | ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_.unlock_shared(); } |
686 | | |
687 | | private: |
688 | | Mutex& mu_; |
689 | | }; |
690 | | |
691 | | // WriterMutexLock |
692 | | // |
693 | | // The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and |
694 | | // releases a write (exclusive) lock on a `Mutex` via RAII. |
695 | | class ABSL_SCOPED_LOCKABLE WriterMutexLock { |
696 | | public: |
697 | | explicit WriterMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this)) |
698 | | ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) |
699 | 0 | : mu_(mu) { |
700 | 0 | mu.lock(); |
701 | 0 | } |
702 | | |
703 | | [[deprecated("Use the constructor that takes a reference instead")]] |
704 | | ABSL_REFACTOR_INLINE |
705 | | explicit WriterMutexLock(Mutex* absl_nonnull mu) |
706 | | ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) |
707 | 0 | : WriterMutexLock(*mu) {} |
708 | | |
709 | | explicit WriterMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this), |
710 | | const Condition& cond) |
711 | | ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) |
712 | 0 | : mu_(mu) { |
713 | 0 | mu.WriterLockWhen(cond); |
714 | 0 | } |
715 | | |
716 | | [[deprecated("Use the constructor that takes a reference instead")]] |
717 | | ABSL_REFACTOR_INLINE |
718 | | explicit WriterMutexLock(Mutex* absl_nonnull mu, const Condition& cond) |
719 | | ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) |
720 | 0 | : WriterMutexLock(*mu, cond) {} |
721 | | |
722 | | WriterMutexLock(const WriterMutexLock&) = delete; |
723 | | WriterMutexLock(WriterMutexLock&&) = delete; |
724 | | WriterMutexLock& operator=(const WriterMutexLock&) = delete; |
725 | | WriterMutexLock& operator=(WriterMutexLock&&) = delete; |
726 | | |
727 | 0 | ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_.unlock(); } |
728 | | |
729 | | private: |
730 | | Mutex& mu_; |
731 | | }; |
732 | | |
733 | | // ----------------------------------------------------------------------------- |
734 | | // Condition |
735 | | // ----------------------------------------------------------------------------- |
736 | | // |
737 | | // `Mutex` contains a number of member functions which take a `Condition` as an |
738 | | // argument; clients can wait for conditions to become `true` before attempting |
739 | | // to acquire the mutex. These sections are known as "condition critical" |
740 | | // sections. To use a `Condition`, you simply need to construct it, and use it
741 | | // within an appropriate `Mutex` member function; everything else in the |
742 | | // `Condition` class is an implementation detail. |
743 | | // |
744 | | // A `Condition` is specified as a function pointer which returns a boolean. |
745 | | // `Condition` functions should be pure functions -- their results should depend |
746 | | // only on passed arguments, should not consult any external state (such as |
747 | | // clocks), and should have no side-effects, aside from debug logging. Any |
748 | | // objects that the function may access should be limited to those which are |
749 | | // constant while the mutex is blocked on the condition (e.g. a stack variable), |
750 | | // or objects of state protected explicitly by the mutex. |
751 | | // |
752 | | // No matter which construction is used for `Condition`, the underlying |
753 | | // function pointer / functor / callable must not throw any |
754 | | // exceptions. Correctness of `Mutex` / `Condition` is not guaranteed in |
755 | | // the face of a throwing `Condition`. (When Abseil is allowed to depend |
756 | | // on C++17, these function pointers will be explicitly marked |
757 | | // `noexcept`; until then this requirement cannot be enforced in the |
758 | | // type system.) |
759 | | // |
760 | | // Note: to use a `Condition`, you need only construct it and pass it to a |
761 | | // suitable `Mutex` member function, such as `Mutex::Await()`, or to the
762 | | // constructor of one of the scope guard classes. |
763 | | // |
764 | | // Example using LockWhen/Unlock: |
765 | | // |
766 | | // // assume count_ is not an internal reference count
767 | | // int count_ ABSL_GUARDED_BY(mu_); |
768 | | // Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_); |
769 | | // |
770 | | // mu_.LockWhen(count_is_zero); |
771 | | // // ... |
772 | | // mu_.Unlock(); |
773 | | // |
774 | | // Example using a scope guard: |
775 | | // |
776 | | // { |
777 | | // MutexLock lock(mu_, count_is_zero); |
778 | | // // ... |
779 | | // } |
780 | | // |
781 | | // When multiple threads are waiting on exactly the same condition, make sure |
782 | | // that they are constructed with the same parameters (same pointer to function |
783 | | // + arg, or same pointer to object + method), so that the mutex implementation |
784 | | // can avoid redundantly evaluating the same condition for each thread. |
785 | | class Condition { |
786 | | public: |
787 | | // A Condition that returns the result of "(*func)(arg)" |
788 | | Condition(bool (*absl_nonnull func)(void* absl_nullability_unknown), |
789 | | void* absl_nullability_unknown arg); |
790 | | |
791 | | // Templated version for people who are averse to casts. |
792 | | // |
793 | | // To use a lambda, prepend it with unary plus, which converts the lambda |
794 | | // into a function pointer: |
795 | | // Condition(+[](T* t) { return ...; }, arg). |
796 | | // |
797 | | // Note: lambdas in this case must contain no bound variables. |
798 | | // |
799 | | // See class comment for performance advice. |
800 | | template <typename T> |
801 | | Condition(bool (*absl_nonnull func)(T* absl_nullability_unknown), |
802 | | T* absl_nullability_unknown arg); |
803 | | |
804 | | // Same as above, but allows for cases where `arg` comes from a pointer that |
805 | | // is convertible to the function parameter type `T*` but not an exact match. |
806 | | // |
807 | | // For example, the argument might be `X*` but the function takes `const X*`, |
808 | | // or the argument might be `Derived*` while the function takes `Base*`, and |
809 | | // so on for cases where the argument pointer can be implicitly converted. |
810 | | // |
811 | | // Implementation notes: This constructor overload is required in addition to |
812 | | // the one above to allow deduction of `T` from `arg` for cases such as where |
813 | | // a function template is passed as `func`. Also, the dummy `typename = void` |
814 | | // template parameter exists just to work around a MSVC mangling bug. |
815 | | template <typename T, typename = void> |
816 | | Condition( |
817 | | bool (*absl_nonnull func)(T* absl_nullability_unknown), |
818 | | typename absl::type_identity<T>::type* absl_nullability_unknown |
819 | | arg); |
820 | | |
821 | | // Templated version for invoking a method that returns a `bool`. |
822 | | // |
823 | | // `Condition(object, &Class::Method)` constructs a `Condition` that evaluates |
824 | | // `object->Method()`. |
825 | | // |
826 | | // Implementation Note: `absl::type_identity` is used to allow |
827 | | // methods to come from base classes. A simpler signature like |
828 | | // `Condition(T*, bool (T::*)())` does not suffice. |
829 | | template <typename T> |
830 | | Condition( |
831 | | T* absl_nonnull object, |
832 | | bool (absl::type_identity<T>::type::* absl_nonnull method)()); |
833 | | |
834 | | // Same as above, for const members |
835 | | template <typename T> |
836 | | Condition( |
837 | | const T* absl_nonnull object, |
838 | | bool (absl::type_identity<T>::type::* absl_nonnull method)() |
839 | | const); |
840 | | |
841 | | // A Condition that returns the value of `*cond` |
842 | | explicit Condition(const bool* absl_nonnull cond); |
843 | | |
844 | | // Templated version for invoking a functor that returns a `bool`. |
845 | | // This approach accepts pointers to non-mutable lambdas, `std::function`, |
846 | | // the result of `std::bind` and user-defined functors that define
847 | | // `bool F::operator()() const`. |
848 | | // |
849 | | // Example: |
850 | | // |
851 | | // auto reached = [this, current]() { |
852 | | // mu_.AssertReaderHeld(); // For annotalysis. |
853 | | // return processed_ >= current; |
854 | | // }; |
855 | | // mu_.Await(Condition(&reached)); |
856 | | // |
857 | | // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in |
858 | | // the lambda as it may be called when the mutex is being unlocked from a |
859 | | // scope holding only a reader lock, in which case the assertion would fail
860 | | // and crash the binary.
861 | | |
862 | | // See class comment for performance advice. In particular, if there |
863 | | // might be more than one waiter for the same condition, make sure |
864 | | // that all waiters construct the condition with the same pointers. |
865 | | |
866 | | // Implementation note: The second template parameter ensures that this |
867 | | // constructor doesn't participate in overload resolution if T doesn't have |
868 | | // `bool operator() const`. |
869 | | template <typename T, typename E = decltype(static_cast<bool (T::*)() const>( |
870 | | &T::operator()))> |
871 | | explicit Condition(const T* absl_nonnull obj) |
872 | | : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {} |
873 | | |
874 | | // A Condition that always returns `true`. |
875 | | // kTrue is only useful in a narrow set of circumstances, mostly when |
876 | | // it's passed conditionally. For example: |
877 | | // |
878 | | // mu.LockWhen(some_flag ? kTrue : SomeOtherCondition); |
879 | | // |
880 | | // Note: {LockWhen,Await}With{Deadline,Timeout} methods with kTrue condition |
881 | | // don't return immediately when the timeout happens, they still block until |
882 | | // the Mutex becomes available. The return value of these methods does |
883 | | // not indicate if the timeout was reached; rather it indicates whether or |
884 | | // not the condition is true. |
885 | | ABSL_CONST_INIT static const Condition kTrue; |
886 | | |
887 | | // Evaluates the condition. |
888 | | bool Eval() const; |
889 | | |
890 | | // Returns `true` if the two conditions are guaranteed to return the same |
891 | | // value if evaluated at the same time, `false` if the evaluation *may* return |
892 | | // different results. |
893 | | // |
894 | | // Two `Condition` values are guaranteed equal if both their `func` and `arg` |
895 | | // components are the same. A null pointer is equivalent to a `true` |
896 | | // condition. |
897 | | static bool GuaranteedEqual(const Condition* absl_nullable a, |
898 | | const Condition* absl_nullable b); |
899 | | |
900 | | private: |
901 | | // Sizing an allocation for a method pointer can be subtle. In the Itanium |
902 | | // specifications, a method pointer has a predictable, uniform size. On the |
903 | | // other hand, in the MSVC ABI, method pointer sizes vary based on the
904 | | // inheritance of the class. Specifically, method pointers from classes with |
905 | | // multiple inheritance are bigger than those of classes with single |
906 | | // inheritance. Other variations also exist. |
907 | | |
908 | | #ifndef _MSC_VER |
909 | | // Allocation for a function pointer or method pointer. |
910 | | // The {0} initializer ensures that all unused bytes of this buffer are |
911 | | // always zeroed out. This is necessary, because GuaranteedEqual() compares |
912 | | // all of the bytes, unaware of which bytes are relevant to a given `eval_`. |
913 | | using MethodPtr = bool (Condition::*)(); |
914 | | char callback_[sizeof(MethodPtr)] = {0}; |
915 | | #else |
916 | | // It is well known that the largest MSVC pointer-to-member is 24 bytes. This
917 | | // may be the largest known pointer-to-member of any platform. For this |
918 | | // reason we will allocate 24 bytes for MSVC platform toolchains. |
919 | | char callback_[24] = {0}; |
920 | | #endif |
921 | | |
922 | | // Function with which to evaluate callbacks and/or arguments. |
923 | | bool (*absl_nullable eval_)(const Condition* absl_nonnull) = nullptr; |
924 | | |
925 | | // Either an argument for a function call or an object for a method call. |
926 | | void* absl_nullable arg_ = nullptr; |
927 | | |
928 | | // Various functions eval_ can point to: |
929 | | static bool CallVoidPtrFunction(const Condition* absl_nonnull c); |
930 | | template <typename T> |
931 | | static bool CastAndCallFunction(const Condition* absl_nonnull c); |
932 | | template <typename T, typename ConditionMethodPtr> |
933 | | static bool CastAndCallMethod(const Condition* absl_nonnull c); |
934 | | |
935 | | // Helper methods for storing, validating, and reading callback arguments. |
936 | | template <typename T> |
937 | 0 | inline void StoreCallback(T callback) { |
938 | 0 | static_assert( |
939 | 0 | sizeof(callback) <= sizeof(callback_), |
940 | 0 | "An overlarge pointer was passed as a callback to Condition."); |
941 | 0 | std::memcpy(callback_, &callback, sizeof(callback)); |
942 | 0 | } Unexecuted instantiation: void absl::Condition::StoreCallback<bool (*)(absl::SynchEvent*)>(bool (*)(absl::SynchEvent*)) Unexecuted instantiation: void absl::Condition::StoreCallback<bool (*)(void*)>(bool (*)(void*)) |
943 | | |
944 | | template <typename T> |
945 | 0 | inline void ReadCallback(T* absl_nonnull callback) const { |
946 | 0 | std::memcpy(callback, callback_, sizeof(*callback)); |
947 | 0 | } |
948 | | |
949 | 0 | static bool AlwaysTrue(const Condition* absl_nullable) { return true; } |
950 | | |
951 | | // Used only to create kTrue. |
952 | 0 | constexpr Condition() : eval_(AlwaysTrue), arg_(nullptr) {} |
953 | | }; |
954 | | |
955 | | // ----------------------------------------------------------------------------- |
956 | | // CondVar |
957 | | // ----------------------------------------------------------------------------- |
958 | | // |
959 | | // A condition variable, reflecting state evaluated separately outside of the |
960 | | // `Mutex` object, which can be signaled to wake callers. |
961 | | // This class is not normally needed; use `Mutex` member functions such as |
962 | | // `Mutex::Await()` and intrinsic `Condition` abstractions. In rare cases |
963 | | // with many threads and many conditions, `CondVar` may be faster. |
964 | | // |
965 | | // The implementation may deliver signals to any condition variable at |
966 | | // any time, even when no call to `Signal()` or `SignalAll()` is made; as a |
967 | | // result, upon being awoken, you must check the logical condition you have |
968 | | // been waiting upon. |
969 | | // |
970 | | // Examples: |
971 | | // |
972 | | // Usage for a thread waiting for some condition C protected by mutex mu: |
973 | | // mu.Lock(); |
974 | | // while (!C) { cv->Wait(&mu); } // releases and reacquires mu |
975 | | // // C holds; process data |
976 | | // mu.Unlock(); |
977 | | // |
978 | | // Usage to wake a waiting thread is:
979 | | // mu.Lock(); |
980 | | // // process data, possibly establishing C |
981 | | // if (C) { cv->Signal(); } |
982 | | // mu.Unlock(); |
983 | | // |
984 | | // If C may be useful to more than one waiter, use `SignalAll()` instead of |
985 | | // `Signal()`. |
986 | | // |
987 | | // With this implementation it is efficient to use `Signal()/SignalAll()` inside |
988 | | // the locked region; this usage can make reasoning about your program easier. |
989 | | // |
990 | | class CondVar { |
991 | | public: |
992 | | // A `CondVar` allocated on the heap or on the stack can use this
993 | | // constructor. |
994 | | CondVar(); |
995 | | |
996 | | // CondVar::Wait() |
997 | | // |
998 | | // Atomically releases a `Mutex` and blocks on this condition variable. |
999 | | // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a |
1000 | | // spurious wakeup), then reacquires the `Mutex` and returns. |
1001 | | // |
1002 | | // Requires and ensures that the current thread holds the `Mutex`. |
1003 | 0 | void Wait(Mutex* absl_nonnull mu) { |
1004 | 0 | WaitCommon(mu, synchronization_internal::KernelTimeout::Never()); |
1005 | 0 | } |
1006 | | |
1007 | | // CondVar::WaitWithTimeout() |
1008 | | // |
1009 | | // Atomically releases a `Mutex` and blocks on this condition variable. |
1010 | | // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a |
1011 | | // spurious wakeup), or until the timeout has expired, then reacquires |
1012 | | // the `Mutex` and returns. |
1013 | | // |
1014 | | // Returns true if the timeout has expired without this `CondVar` |
1015 | | // being signalled in any manner. If both the timeout has expired |
1016 | | // and this `CondVar` has been signalled, the implementation is free |
1017 | | // to return `true` or `false`. |
1018 | | // |
1019 | | // Requires and ensures that the current thread holds the `Mutex`. |
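| | //
| | // For example (a sketch; `C` is the waited-for condition, as in the class
| | // comment above):
| | //
| | //   mu.lock();
| | //   while (!C) {
| | //     if (cv->WaitWithTimeout(&mu, absl::Seconds(1))) {
| | //       break;                       // timed out without a signal; C may be false
| | //     }
| | //   }
| | //   mu.unlock();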
1020 | 0 | bool WaitWithTimeout(Mutex* absl_nonnull mu, absl::Duration timeout) { |
1021 | 0 | return WaitCommon(mu, synchronization_internal::KernelTimeout(timeout)); |
1022 | 0 | } |
1023 | | |
1024 | | // CondVar::WaitWithDeadline() |
1025 | | // |
1026 | | // Atomically releases a `Mutex` and blocks on this condition variable. |
1027 | | // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a |
1028 | | // spurious wakeup), or until the deadline has passed, then reacquires |
1029 | | // the `Mutex` and returns. |
1030 | | // |
1031 | | // Deadlines in the past are equivalent to an immediate deadline. |
1032 | | // |
1033 | | // Returns true if the deadline has passed without this `CondVar` |
1034 | | // being signalled in any manner. If both the deadline has passed |
1035 | | // and this `CondVar` has been signalled, the implementation is free |
1036 | | // to return `true` or `false`. |
1037 | | // |
1038 | | // Requires and ensures that the current thread holds the `Mutex`. |
1039 | 0 | bool WaitWithDeadline(Mutex* absl_nonnull mu, absl::Time deadline) { |
1040 | 0 | return WaitCommon(mu, synchronization_internal::KernelTimeout(deadline)); |
1041 | 0 | } |
1042 | | |
1043 | | // CondVar::Signal() |
1044 | | // |
1045 | | // Signal this `CondVar`; wake at least one waiter if one exists. |
1046 | | void Signal(); |
1047 | | |
1048 | | // CondVar::SignalAll() |
1049 | | // |
1050 | | // Signal this `CondVar`; wake all waiters. |
1051 | | void SignalAll(); |
1052 | | |
1053 | | // CondVar::EnableDebugLog() |
1054 | | // |
1055 | | // Causes all subsequent uses of this `CondVar` to be logged via |
1056 | | // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`. |
1057 | | // Note: this method substantially reduces `CondVar` performance. |
1058 | | void EnableDebugLog(const char* absl_nullable name); |
1059 | | |
1060 | | private: |
1061 | | bool WaitCommon(Mutex* absl_nonnull mutex, |
1062 | | synchronization_internal::KernelTimeout t); |
1063 | | void Remove(base_internal::PerThreadSynch* absl_nonnull s); |
1064 | | std::atomic<intptr_t> cv_; // Condition variable state. |
1065 | | CondVar(const CondVar&) = delete; |
1066 | | CondVar& operator=(const CondVar&) = delete; |
1067 | | }; |
1068 | | |
1069 | | // Variants of MutexLock. |
1070 | | // |
1071 | | // If you find yourself using one of these, consider instead using |
1072 | | // Mutex::Unlock() and/or if-statements for clarity. |
1073 | | |
1074 | | // MutexLockMaybe |
1075 | | // |
1076 | | // MutexLockMaybe is like MutexLock, but is a no-op when mu is null. |
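| | //
| | // For example (a sketch; the null `mu` case is the point of the class):
| | //
| | //   void DoWork(absl::Mutex* absl_nullable mu) {
| | //     absl::MutexLockMaybe lock(mu);  // no-op if mu == nullptr
| | //     ...
| | //   }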
1077 | | class ABSL_SCOPED_LOCKABLE MutexLockMaybe { |
1078 | | public: |
1079 | | explicit MutexLockMaybe(Mutex* absl_nullable mu) |
1080 | | ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) |
1081 | 0 | : mu_(mu) { |
1082 | 0 | if (this->mu_ != nullptr) { |
1083 | 0 | this->mu_->lock(); |
1084 | 0 | } |
1085 | 0 | } |
1086 | | |
1087 | | explicit MutexLockMaybe(Mutex* absl_nullable mu, const Condition& cond) |
1088 | | ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) |
1089 | 0 | : mu_(mu) { |
1090 | 0 | if (this->mu_ != nullptr) { |
1091 | 0 | this->mu_->LockWhen(cond); |
1092 | 0 | } |
1093 | 0 | } |
1094 | | |
1095 | 0 | ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() { |
1096 | 0 | if (this->mu_ != nullptr) { |
1097 | 0 | this->mu_->unlock(); |
1098 | 0 | } |
1099 | 0 | } |
1100 | | |
1101 | | private: |
1102 | | Mutex* absl_nullable const mu_; |
1103 | | MutexLockMaybe(const MutexLockMaybe&) = delete; |
1104 | | MutexLockMaybe(MutexLockMaybe&&) = delete; |
1105 | | MutexLockMaybe& operator=(const MutexLockMaybe&) = delete; |
1106 | | MutexLockMaybe& operator=(MutexLockMaybe&&) = delete; |
1107 | | }; |
1108 | | |
1109 | | // ReleasableMutexLock |
1110 | | // |
1111 | | // ReleasableMutexLock is like MutexLock, but permits `Release()` of its |
1112 | | // mutex before destruction. `Release()` may be called at most once. |
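| | //
| | // For example (a sketch; `ready_` and `DoUnlockedWork()` are hypothetical):
| | //
| | //   absl::ReleasableMutexLock lock(mu_);
| | //   if (ready_) {
| | //     lock.Release();                // drop the lock early
| | //     DoUnlockedWork();              // runs without holding mu_
| | //   }
| | //   // If Release() was not called, the destructor unlocks mu_.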
1113 | | class ABSL_SCOPED_LOCKABLE ReleasableMutexLock { |
1114 | | public: |
1115 | | explicit ReleasableMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY( |
1116 | | this)) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) |
1117 | 0 | : mu_(&mu) { |
1118 | 0 | this->mu_->lock(); |
1119 | 0 | } |
1120 | | |
1121 | | [[deprecated("Use the constructor that takes a reference instead")]] |
1122 | | ABSL_REFACTOR_INLINE |
1123 | | explicit ReleasableMutexLock(Mutex* absl_nonnull mu) |
1124 | | ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) |
1125 | 0 | : ReleasableMutexLock(*mu) {} |
1126 | | |
1127 | | explicit ReleasableMutexLock( |
1128 | | Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this), |
1129 | | const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) |
1130 | 0 | : mu_(&mu) { |
1131 | 0 | this->mu_->LockWhen(cond); |
1132 | 0 | } |
1133 | | |
1134 | | [[deprecated("Use the constructor that takes a reference instead")]] |
1135 | | ABSL_REFACTOR_INLINE |
1136 | | explicit ReleasableMutexLock(Mutex* absl_nonnull mu, const Condition& cond) |
1137 | | ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) |
1138 | 0 | : ReleasableMutexLock(*mu, cond) {} |
1139 | | |
1140 | 0 | ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() { |
1141 | 0 | if (this->mu_ != nullptr) { |
1142 | 0 | this->mu_->unlock(); |
1143 | 0 | } |
1144 | 0 | } |
1145 | | |
1146 | | void Release() ABSL_UNLOCK_FUNCTION(); |
1147 | | |
1148 | | private: |
1149 | | Mutex* absl_nullable mu_; |
1150 | | ReleasableMutexLock(const ReleasableMutexLock&) = delete; |
1151 | | ReleasableMutexLock(ReleasableMutexLock&&) = delete; |
1152 | | ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete; |
1153 | | ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete; |
1154 | | }; |
1155 | | |
1156 | 0 | inline Mutex::Mutex() : mu_(0) { |
1157 | 0 | ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); |
1158 | 0 | } |
1159 | | |
1160 | | inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {} |
1161 | | |
1162 | | #if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) |
1163 | | ABSL_ATTRIBUTE_ALWAYS_INLINE |
1164 | 0 | inline Mutex::~Mutex() { Dtor(); } |
1165 | | #endif |
1166 | | |
1167 | | #if defined(NDEBUG) && !defined(ABSL_HAVE_THREAD_SANITIZER) && \ |
1168 | | !defined(ABSL_BUILD_DLL) |
1169 | | // Under NDEBUG and without TSAN, Dtor is defined here as an empty inline
1170 | | // function so it can be optimized away entirely. When building Abseil as a
1171 | | // shared library (ABSL_BUILD_DLL), this inline definition is skipped and an
1172 | | // out-of-line definition is used instead; that keeps the Mutex::Dtor symbol
1173 | | // exported from the DLL for clients built in debug mode that expect the symbol.
1174 | | ABSL_ATTRIBUTE_ALWAYS_INLINE |
1175 | | inline void Mutex::Dtor() {} |
1176 | | #endif |
1177 | | |
1178 | | inline CondVar::CondVar() : cv_(0) {} |
1179 | | |
1180 | | // static |
1181 | | template <typename T, typename ConditionMethodPtr> |
1182 | | bool Condition::CastAndCallMethod(const Condition* absl_nonnull c) { |
1183 | | T* object = static_cast<T*>(c->arg_); |
1184 | | ConditionMethodPtr condition_method_pointer; |
1185 | | c->ReadCallback(&condition_method_pointer); |
1186 | | return (object->*condition_method_pointer)(); |
1187 | | } |
1188 | | |
1189 | | // static |
1190 | | template <typename T> |
1191 | 0 | bool Condition::CastAndCallFunction(const Condition* absl_nonnull c) { |
1192 | 0 | bool (*function)(T*); |
1193 | 0 | c->ReadCallback(&function); |
1194 | 0 | T* argument = static_cast<T*>(c->arg_); |
1195 | 0 | return (*function)(argument); |
1196 | 0 | } |
1197 | | |
1198 | | template <typename T> |
1199 | | inline Condition::Condition( |
1200 | | bool (*absl_nonnull func)(T* absl_nullability_unknown), |
1201 | | T* absl_nullability_unknown arg) |
1202 | 0 | : eval_(&CastAndCallFunction<T>), |
1203 | 0 | arg_(const_cast<void*>(static_cast<const void*>(arg))) { |
1204 | 0 | static_assert(sizeof(&func) <= sizeof(callback_), |
1205 | 0 | "An overlarge function pointer was passed to Condition."); |
1206 | 0 | StoreCallback(func); |
1207 | 0 | } |
1208 | | |
1209 | | template <typename T, typename> |
1210 | | inline Condition::Condition( |
1211 | | bool (*absl_nonnull func)(T* absl_nullability_unknown), |
1212 | | typename absl::type_identity<T>::type* absl_nullability_unknown |
1213 | | arg) |
1214 | | // Just delegate to the overload above. |
1215 | | : Condition(func, arg) {} |
1216 | | |
1217 | | template <typename T> |
1218 | | inline Condition::Condition( |
1219 | | T* absl_nonnull object, |
1220 | | bool (absl::type_identity<T>::type::* absl_nonnull method)()) |
1221 | | : eval_(&CastAndCallMethod<T, decltype(method)>), arg_(object) { |
1222 | | static_assert(sizeof(&method) <= sizeof(callback_), |
1223 | | "An overlarge method pointer was passed to Condition."); |
1224 | | StoreCallback(method); |
1225 | | } |
1226 | | |
1227 | | template <typename T> |
1228 | | inline Condition::Condition( |
1229 | | const T* absl_nonnull object, |
1230 | | bool (absl::type_identity<T>::type::* absl_nonnull method)() |
1231 | | const) |
1232 | | : eval_(&CastAndCallMethod<const T, decltype(method)>), |
1233 | | arg_(reinterpret_cast<void*>(const_cast<T*>(object))) { |
1234 | | StoreCallback(method); |
1235 | | } |
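     | |
     | | // Example (illustrative sketch, not from the Abseil sources): the two kinds of
     | | // `Condition` callbacks handled above. `Queue`, `HasItems`, `queue`, and `mu`
     | | // are hypothetical names.
     | | //
     | | //   struct Queue {
     | | //     std::deque<int> items;
     | | //     bool NonEmpty() const { return !items.empty(); }
     | | //   };
     | | //   bool HasItems(Queue* q) { return !q->items.empty(); }
     | | //
     | | //   // Function-pointer form, dispatched via CastAndCallFunction<Queue>.
     | | //   absl::Condition c1(&HasItems, &queue);
     | | //   // Const-method form, dispatched via CastAndCallMethod.
     | | //   absl::Condition c2(&queue, &Queue::NonEmpty);
     | | //
     | | //   mu.LockWhen(c1);  // Blocks until HasItems(&queue) returns true.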
1236 | | |
1237 | | // Register hooks for profiling support. |
1238 | | // |
1239 | | // The function pointer registered here will be called whenever a mutex is |
1240 | | // contended. The callback is given the cycles for which waiting happened (as |
1241 | | // measured by //absl/base/internal/cycleclock.h, and which may not |
1242 | | // be real "cycle" counts.) |
1243 | | // |
1244 | | // There is no ordering guarantee between when the hook is registered and when |
1245 | | // callbacks will begin. Only a single profiler can be installed in a running |
1246 | | // binary; if this function is called a second time with a different function |
1247 | | // pointer, the value is ignored (and will cause an assertion failure in debug |
1248 | | // mode.) |
1249 | | void RegisterMutexProfiler(void (*absl_nonnull fn)(int64_t wait_cycles)); |
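     | |
     | | // Example (illustrative sketch, not from the Abseil sources;
     | | // `MyContentionCounter` is a hypothetical name):
     | | //
     | | //   void MyContentionCounter(int64_t wait_cycles) {
     | | //     // Export `wait_cycles` to a monitoring system of your choice.
     | | //   }
     | | //
     | | //   absl::RegisterMutexProfiler(&MyContentionCounter);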
1250 | | |
1251 | | // Register a hook for Mutex tracing. |
1252 | | // |
1253 | | // The function pointer registered here will be called whenever a mutex is |
1254 | | // contended. The callback is given an opaque handle to the contended mutex, |
1255 | | // an event name, and the number of wait cycles (as measured by |
1256 | | // //absl/base/internal/cycleclock.h, and which may not be real |
1257 | | // "cycle" counts.) |
1258 | | // |
1259 | | // The only event name currently sent is "slow release". |
1260 | | // |
1261 | | // This has the same ordering and single-use limitations as |
1262 | | // RegisterMutexProfiler() above. |
1263 | | void RegisterMutexTracer(void (*absl_nonnull fn)(const char* absl_nonnull msg, |
1264 | | const void* absl_nonnull obj, |
1265 | | int64_t wait_cycles)); |
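     | |
     | | // Example (illustrative sketch, not from the Abseil sources; `MyMutexTracer`
     | | // is a hypothetical name):
     | | //
     | | //   void MyMutexTracer(const char* msg, const void* obj, int64_t wait_cycles) {
     | | //     // `msg` names the event (currently "slow release"), `obj` identifies the
     | | //     // contended mutex, and `wait_cycles` is the measured wait time.
     | | //   }
     | | //
     | | //   absl::RegisterMutexTracer(&MyMutexTracer);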
1266 | | |
1267 | | // Register a hook for CondVar tracing. |
1268 | | // |
1269 | | // The function pointer registered here will be called on various CondVar
1270 | | // events. The callback is given an opaque handle to the CondVar object and |
1271 | | // a string identifying the event. This is thread-safe, but only a single |
1272 | | // tracer can be registered. |
1273 | | // |
1274 | | // Events that can be sent are "Wait", "Unwait", "Signal wakeup", and |
1275 | | // "SignalAll wakeup". |
1276 | | // |
1277 | | // This has the same ordering and single-use limitations as |
1278 | | // RegisterMutexProfiler() above. |
1279 | | void RegisterCondVarTracer(void (*absl_nonnull fn)( |
1280 | | const char* absl_nonnull msg, const void* absl_nonnull cv)); |
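     | |
     | | // Example (illustrative sketch, not from the Abseil sources; `MyCondVarTracer`
     | | // is a hypothetical name):
     | | //
     | | //   void MyCondVarTracer(const char* msg, const void* cv) {
     | | //     // `msg` is one of the event names listed above; `cv` identifies the
     | | //     // CondVar involved.
     | | //   }
     | | //
     | | //   absl::RegisterCondVarTracer(&MyCondVarTracer);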
1281 | | |
1282 | | // EnableMutexInvariantDebugging() |
1283 | | // |
1284 | | // Enable or disable global support for Mutex invariant debugging. If enabled, |
1285 | | // then invariant predicates can be registered per-Mutex for debug checking. |
1286 | | // See Mutex::EnableInvariantDebugging(). |
1287 | | void EnableMutexInvariantDebugging(bool enabled); |
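     | |
     | | // Example (illustrative sketch, not from the Abseil sources): enabling the
     | | // feature globally and then registering a per-Mutex invariant through
     | | // Mutex::EnableInvariantDebugging(). `Account`, `account`, `account_mu`, and
     | | // `CheckBalance` are hypothetical names.
     | | //
     | | //   void CheckBalance(void* arg) {
     | | //     assert(static_cast<const Account*>(arg)->balance >= 0);
     | | //   }
     | | //
     | | //   absl::EnableMutexInvariantDebugging(true);
     | | //   account_mu.EnableInvariantDebugging(&CheckBalance, &account);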
1288 | | |
1289 | | // When in debug mode, and when the feature has been enabled globally, the |
1290 | | // implementation will keep track of lock ordering and complain (or optionally |
1291 | | // crash) if a cycle is detected in the acquired-before graph. |
1292 | | |
1293 | | // Possible modes of operation for the deadlock detector in debug mode. |
1294 | | enum class OnDeadlockCycle { |
1295 | | kIgnore, // Neither report on nor attempt to track cycles in lock ordering |
1296 | | kReport, // Report lock cycles to stderr when detected |
1297 | | kAbort, // Report lock cycles to stderr when detected, then abort |
1298 | | }; |
1299 | | |
1300 | | // SetMutexDeadlockDetectionMode() |
1301 | | // |
1302 | | // Enable or disable global support for detection of potential deadlocks |
1303 | | // due to Mutex lock ordering inversions. When set to 'kIgnore', tracking of |
1304 | | // lock ordering is disabled. Otherwise, in debug builds, a lock ordering graph |
1305 | | // will be maintained internally, and detected cycles will be reported in |
1306 | | // the manner chosen here. |
1307 | | void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode); |
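     | |
     | | // Example (illustrative sketch, not from the Abseil sources): report and then
     | | // abort when a lock-ordering cycle is detected in debug builds.
     | | //
     | | //   absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);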
1308 | | |
1309 | | ABSL_NAMESPACE_END |
1310 | | } // namespace absl |
1311 | | |
1312 | | // In some build configurations we pass --detect-odr-violations to the |
1313 | | // gold linker. This causes it to flag weak symbol overrides as ODR |
1314 | | // violations. Because ODR only applies to C++ and not C, |
1315 | | // --detect-odr-violations ignores symbols not mangled with C++ names. |
1316 | | // By changing our extension points to be extern "C", we dodge this |
1317 | | // check. |
1318 | | extern "C" { |
1319 | | void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)(); |
1320 | | } // extern "C" |
1321 | | |
1322 | | #endif // ABSL_SYNCHRONIZATION_MUTEX_H_ |