/src/abseil-cpp/absl/synchronization/mutex.h
Line  | Count  | Source  | 
1  |  | // Copyright 2017 The Abseil Authors.  | 
2  |  | //  | 
3  |  | // Licensed under the Apache License, Version 2.0 (the "License");  | 
4  |  | // you may not use this file except in compliance with the License.  | 
5  |  | // You may obtain a copy of the License at  | 
6  |  | //  | 
7  |  | //      https://www.apache.org/licenses/LICENSE-2.0  | 
8  |  | //  | 
9  |  | // Unless required by applicable law or agreed to in writing, software  | 
10  |  | // distributed under the License is distributed on an "AS IS" BASIS,  | 
11  |  | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  | 
12  |  | // See the License for the specific language governing permissions and  | 
13  |  | // limitations under the License.  | 
14  |  | //  | 
15  |  | // -----------------------------------------------------------------------------  | 
16  |  | // mutex.h  | 
17  |  | // -----------------------------------------------------------------------------  | 
18  |  | //  | 
19  |  | // This header file defines a `Mutex` -- a mutually exclusive lock -- the  | 
20  |  | // most common type of synchronization primitive for facilitating locks on  | 
21  |  | // shared resources. A mutex is used to prevent multiple threads from accessing  | 
22  |  | // and/or writing to a shared resource concurrently.  | 
23  |  | //  | 
24  |  | // Unlike a `std::mutex`, the Abseil `Mutex` provides the following additional  | 
25  |  | // features:  | 
26  |  | //   * Conditional predicates intrinsic to the `Mutex` object  | 
27  |  | //   * Shared/reader locks, in addition to standard exclusive/writer locks  | 
28  |  | //   * Deadlock detection and debug support.  | 
29  |  | //  | 
30  |  | // The following helper classes are also defined within this file:  | 
31  |  | //  | 
32  |  | //  MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/  | 
33  |  | //              write access within the current scope.  | 
34  |  | //  | 
35  |  | //  ReaderMutexLock  | 
36  |  | //            - An RAII wrapper to acquire and release a `Mutex` for shared/read  | 
37  |  | //              access within the current scope.  | 
38  |  | //  | 
39  |  | //  WriterMutexLock  | 
40  |  | //            - Effectively an alias for `MutexLock` above, designed for use in  | 
41  |  | //              distinguishing reader and writer locks within code.  | 
42  |  | //  | 
43  |  | // In addition to simple mutex locks, this file also defines ways to perform  | 
44  |  | // locking under certain conditions.  | 
45  |  | //  | 
46  |  | //  Condition - (Preferred) Used to wait for a particular predicate that  | 
47  |  | //              depends on state protected by the `Mutex` to become true.  | 
48  |  | //  CondVar   - A lower-level variant of `Condition` that relies on  | 
49  |  | //              application code to explicitly signal the `CondVar` when  | 
50  |  | //              a condition has been met.  | 
51  |  | //  | 
52  |  | // See below for more information on using `Condition` or `CondVar`.  | 
53  |  | //  | 
54  |  | // Mutexes and mutex behavior can be quite complicated. As a result, the  | 
55  |  | // information within this header file is limited. Please consult the Mutex  | 
56  |  | // guide for more complete information and examples.  | 
57  |  |  | 
58  |  | #ifndef ABSL_SYNCHRONIZATION_MUTEX_H_  | 
59  |  | #define ABSL_SYNCHRONIZATION_MUTEX_H_  | 
60  |  |  | 
61  |  | #include <atomic>  | 
62  |  | #include <cstdint>  | 
63  |  | #include <cstring>  | 
64  |  |  | 
65  |  | #include "absl/base/attributes.h"  | 
66  |  | #include "absl/base/config.h"  | 
67  |  | #include "absl/base/const_init.h"  | 
68  |  | #include "absl/base/internal/thread_identity.h"  | 
69  |  | #include "absl/base/internal/tsan_mutex_interface.h"  | 
70  |  | #include "absl/base/macros.h"  | 
71  |  | #include "absl/base/nullability.h"  | 
72  |  | #include "absl/base/thread_annotations.h"  | 
73  |  | #include "absl/meta/type_traits.h"  | 
74  |  | #include "absl/synchronization/internal/kernel_timeout.h"  | 
75  |  | #include "absl/synchronization/internal/per_thread_sem.h"  | 
76  |  | #include "absl/time/time.h"  | 
77  |  |  | 
78  |  | namespace absl { | 
79  |  | ABSL_NAMESPACE_BEGIN  | 
80  |  |  | 
81  |  | class Condition;  | 
82  |  | struct SynchWaitParams;  | 
83  |  |  | 
84  |  | // -----------------------------------------------------------------------------  | 
85  |  | // Mutex  | 
86  |  | // -----------------------------------------------------------------------------  | 
87  |  | //  | 
88  |  | // A `Mutex` is a non-reentrant (aka non-recursive) Mutually Exclusive lock  | 
89  |  | // on some resource, typically a variable or data structure with associated  | 
90  |  | // invariants. Proper usage of mutexes prevents concurrent access by different  | 
91  |  | // threads to the same resource.  | 
92  |  | //  | 
93  |  | // A `Mutex` has two basic operations: `Mutex::lock()` and `Mutex::unlock()`.  | 
94  |  | // The `lock()` operation *acquires* a `Mutex` (in a state known as an  | 
95  |  | // *exclusive* -- or *write* -- lock), and the `unlock()` operation *releases* a  | 
96  |  | // Mutex. During the span of time between the lock() and unlock() operations,  | 
97  |  | // a mutex is said to be *held*. By design, all mutexes support exclusive/write  | 
98  |  | // locks, as this is the most common way to use a mutex.  | 
99  |  | //  | 
100  |  | // Mutex operations are only allowed under certain conditions; otherwise an  | 
101  |  | // operation is "invalid", and disallowed by the API. The conditions concern  | 
102  |  | // both the current state of the mutex and the identity of the threads that  | 
103  |  | // are performing the operations.  | 
104  |  | //  | 
105  |  | // The `Mutex` state machine for basic lock/unlock operations is quite simple:  | 
106  |  | //  | 
107  |  | // |                | lock()                 | unlock() |  | 
108  |  | // |----------------+------------------------+----------|  | 
109  |  | // | Free           | Exclusive              | invalid  |  | 
110  |  | // | Exclusive      | blocks, then exclusive | Free     |  | 
111  |  | //  | 
112  |  | // The full conditions are as follows.  | 
113  |  | //  | 
114  |  | // * Calls to `unlock()` require that the mutex be held, and must be made in the  | 
115  |  | //   same thread that performed the corresponding `lock()` operation which  | 
116  |  | //   acquired the mutex; otherwise the call is invalid.  | 
117  |  | //  | 
118  |  | // * The mutex being non-reentrant (or non-recursive) means that a call to  | 
119  |  | //   `lock()` or `try_lock()` must not be made in a thread that already holds  | 
120  |  | //   the mutex; such a call is invalid.  | 
121  |  | //  | 
122  |  | // * In other words, the state of being "held" has both a temporal component  | 
123  |  | //   (from `lock()` until `unlock()`) as well as a thread identity component:  | 
124  |  | //   the mutex is held *by a particular thread*.  | 
125  |  | //  | 
126  |  | // An "invalid" operation has undefined behavior. The `Mutex` implementation  | 
127  |  | // is allowed to do anything on an invalid call, including, but not limited to,  | 
128  |  | // crashing with a useful error message, silently succeeding, or corrupting  | 
129  |  | // data structures. In debug mode, the implementation may crash with a useful  | 
130  |  | // error message.  | 
131  |  | //  | 
132  |  | // `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it  | 
133  |  | // is, however, approximately fair over long periods, and starvation-free for  | 
134  |  | // threads at the same priority.  | 
135  |  | //  | 
136  |  | // The lock/unlock primitives are annotated with the lock annotations  | 
137  |  | // defined in absl/base/thread_annotations.h. When writing multi-threaded code,  | 
138  |  | // you should use lock annotations whenever possible to document your lock  | 
139  |  | // synchronization policy. Besides acting as documentation, these annotations  | 
140  |  | // also help compilers or static analysis tools to identify and warn about  | 
141  |  | // issues that could potentially result in race conditions and deadlocks.  | 
142  |  | //  | 
143  |  | // For more information about the lock annotations, please see  | 
144  |  | // [Thread Safety  | 
145  |  | // Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) in the Clang  | 
146  |  | // documentation.  | 
147  |  | //  | 
148  |  | // See also `MutexLock`, below, for scoped `Mutex` acquisition.  | 
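  |  | //  | 
  |  | // A minimal usage sketch (editor's addition; `Counter`, `Increment()`, `Get()`,  | 
  |  | // and `count_` are illustrative names, not part of this header) showing plain  | 
  |  | // `lock()`/`unlock()` together with `ABSL_GUARDED_BY` from  | 
  |  | // absl/base/thread_annotations.h:  | 
  |  | //  | 
  |  | //   class Counter {  | 
  |  | //    public:  | 
  |  | //     void Increment() {  | 
  |  | //       mu_.lock();  | 
  |  | //       ++count_;          // protected by mu_  | 
  |  | //       mu_.unlock();  | 
  |  | //     }  | 
  |  | //     int Get() {  | 
  |  | //       mu_.lock();  | 
  |  | //       int v = count_;  | 
  |  | //       mu_.unlock();  | 
  |  | //       return v;  | 
  |  | //     }  | 
  |  | //    private:  | 
  |  | //     absl::Mutex mu_;  | 
  |  | //     int count_ ABSL_GUARDED_BY(mu_) = 0;  | 
  |  | //   };  | 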
149  |  |  | 
150  |  | class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex { | 
151  |  |  public:  | 
152  |  |   // Creates a `Mutex` that is not held by anyone. This constructor is  | 
153  |  |   // typically used for Mutexes allocated on the heap or the stack.  | 
154  |  |   //  | 
155  |  |   // To create `Mutex` instances with static storage duration  | 
156  |  |   // (e.g. a namespace-scoped or global variable), see  | 
157  |  |   // `Mutex::Mutex(absl::kConstInit)` below instead.  | 
158  |  |   Mutex();  | 
159  |  |  | 
160  |  |   // Creates a mutex with static storage duration.  A global variable  | 
161  |  |   // constructed this way avoids the lifetime issues that can occur on program  | 
162  |  |   // startup and shutdown.  (See absl/base/const_init.h.)  | 
163  |  |   //  | 
164  |  |   // For Mutexes allocated on the heap and stack, instead use the default  | 
165  |  |   // constructor, which can interact more fully with the thread sanitizer.  | 
166  |  |   //  | 
167  |  |   // Example usage:  | 
168  |  |   //   namespace foo { | 
169  |  |   //   ABSL_CONST_INIT absl::Mutex mu(absl::kConstInit);  | 
170  |  |   //   }  | 
171  |  |   explicit constexpr Mutex(absl::ConstInitType);  | 
172  |  |  | 
173  |  |   ~Mutex();  | 
174  |  |  | 
175  |  |   // Mutex::lock()  | 
176  |  |   //  | 
177  |  |   // Blocks the calling thread, if necessary, until this `Mutex` is free, and  | 
178  |  |   // then acquires it exclusively. (This lock is also known as a "write lock.")  | 
179  |  |   void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION();  | 
180  |  |  | 
181  | 0  |   inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { lock(); } | 
182  |  |  | 
183  |  |   // Mutex::unlock()  | 
184  |  |   //  | 
185  |  |   // Releases this `Mutex` and returns it from the exclusive/write state to the  | 
186  |  |   // free state. Calling thread must hold the `Mutex` exclusively.  | 
187  |  |   void unlock() ABSL_UNLOCK_FUNCTION();  | 
188  |  |  | 
189  | 0  |   inline void Unlock() ABSL_UNLOCK_FUNCTION() { unlock(); } | 
190  |  |  | 
191  |  |   // Mutex::try_lock()  | 
192  |  |   //  | 
193  |  |   // If the mutex can be acquired without blocking, does so exclusively and  | 
194  |  |   // returns `true`. Otherwise, returns `false`. Returns `true` with high  | 
195  |  |   // probability if the `Mutex` was free.  | 
196  |  |   [[nodiscard]] bool try_lock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);  | 
197  |  |  | 
198  | 0  |   [[nodiscard]] bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) { | 
199  | 0  |     return try_lock();  | 
200  | 0  |   }  | 
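  |  |   //  | 
  |  |   // A usage sketch (editor's addition; `Flush()` is a hypothetical helper used  | 
  |  |   // only for illustration): do optional work only when the lock can be acquired  | 
  |  |   // without blocking:  | 
  |  |   //  | 
  |  |   //   if (mu_.try_lock()) {  | 
  |  |   //     Flush();  | 
  |  |   //     mu_.unlock();  | 
  |  |   //   }  | 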
201  |  |  | 
202  |  |   // Mutex::AssertHeld()  | 
203  |  |   //  | 
204  |  |   // Require that the mutex be held exclusively (write mode) by this thread.  | 
205  |  |   //  | 
206  |  |   // If the mutex is not currently held by this thread, this function may report  | 
207  |  |   // an error (typically by crashing with a diagnostic) or it may do nothing.  | 
208  |  |   // This function is intended only as a tool to assist debugging; it doesn't  | 
209  |  |   // guarantee correctness.  | 
210  |  |   void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK();  | 
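  |  |   //  | 
  |  |   // A usage sketch (editor's addition; `TallyLocked()` and `total_` are  | 
  |  |   // hypothetical names used only for illustration): a helper whose locking  | 
  |  |   // precondition is both annotated and debug-checked:  | 
  |  |   //  | 
  |  |   //   void TallyLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {  | 
  |  |   //     mu_.AssertHeld();   // debug aid; the annotation states the contract  | 
  |  |   //     ++total_;  | 
  |  |   //   }  | 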
211  |  |  | 
212  |  |   // ---------------------------------------------------------------------------  | 
213  |  |   // Reader-Writer Locking  | 
214  |  |   // ---------------------------------------------------------------------------  | 
215  |  |  | 
216  |  |   // A Mutex can also be used as a starvation-free reader-writer lock.  | 
217  |  |   // Neither read-locks nor write-locks are reentrant/recursive to avoid  | 
218  |  |   // potential client programming errors.  | 
219  |  |   //  | 
220  |  |   // The Mutex API provides `Writer*()` aliases for the existing `lock()`,  | 
221  |  |   // `unlock()` and `try_lock()` methods for use within applications mixing  | 
222  |  |   // reader/writer locks. Using `*_shared()` and `Writer*()` operations in this  | 
223  |  |   // manner can make locking behavior clearer when mixing read and write modes.  | 
224  |  |   //  | 
225  |  |   // Introducing reader locks necessarily complicates the `Mutex` state  | 
226  |  |   // machine somewhat. The table below illustrates the allowed state transitions  | 
227  |  |   // of a mutex in such cases. Note that lock_shared() may block even if the  | 
228  |  |   // lock is held in shared mode; this occurs when another thread is blocked on  | 
229  |  |   // a call to lock().  | 
230  |  |   //  | 
231  |  |   // ---------------------------------------------------------------------------  | 
232  |  |   //     Operation: lock()       unlock()  lock_shared() unlock_shared()  | 
233  |  |   // ---------------------------------------------------------------------------  | 
234  |  |   // State  | 
235  |  |   // ---------------------------------------------------------------------------  | 
236  |  |   // Free           Exclusive    invalid   Shared(1)              invalid  | 
237  |  |   // Shared(1)      blocks       invalid   Shared(2) or blocks    Free  | 
238  |  |   // Shared(n) n>1  blocks       invalid   Shared(n+1) or blocks  Shared(n-1)  | 
239  |  |   // Exclusive      blocks       Free      blocks                 invalid  | 
240  |  |   // ---------------------------------------------------------------------------  | 
241  |  |   //  | 
242  |  |   // In comments below, "shared" refers to a state of Shared(n) for any n > 0.  | 
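  |  |   //  | 
  |  |   // A usage sketch (editor's addition; `Registry`, `Contains()`, `Insert()`,  | 
  |  |   // and `keys_` are hypothetical names; assumes <set> and <string>): mixing  | 
  |  |   // shared and exclusive locking on one `Mutex` via the RAII wrappers defined  | 
  |  |   // later in this file:  | 
  |  |   //  | 
  |  |   //   class Registry {  | 
  |  |   //    public:  | 
  |  |   //     bool Contains(const std::string& key) const {  | 
  |  |   //       absl::ReaderMutexLock lock(&mu_);   // shared/read lock  | 
  |  |   //       return keys_.count(key) > 0;  | 
  |  |   //     }  | 
  |  |   //     void Insert(const std::string& key) {  | 
  |  |   //       absl::WriterMutexLock lock(&mu_);   // exclusive/write lock  | 
  |  |   //       keys_.insert(key);  | 
  |  |   //     }  | 
  |  |   //    private:  | 
  |  |   //     mutable absl::Mutex mu_;  | 
  |  |   //     std::set<std::string> keys_ ABSL_GUARDED_BY(mu_);  | 
  |  |   //   };  | 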
243  |  |  | 
244  |  |   // Mutex::lock_shared()  | 
245  |  |   //  | 
246  |  |   // Blocks the calling thread, if necessary, until this `Mutex` is either free,  | 
247  |  |   // or in shared mode, and then acquires a share of it. Note that  | 
248  |  |   // `lock_shared()` will block if some other thread has an exclusive/writer  | 
249  |  |   // lock on the mutex.  | 
250  |  |   void lock_shared() ABSL_SHARED_LOCK_FUNCTION();  | 
251  |  |  | 
252  | 0  |   void ReaderLock() ABSL_SHARED_LOCK_FUNCTION() { lock_shared(); } | 
253  |  |  | 
254  |  |   // Mutex::unlock_shared()  | 
255  |  |   //  | 
256  |  |   // Releases a read share of this `Mutex`. `unlock_shared` may return a mutex  | 
257  |  |   // to the free state if this thread holds the last reader lock on the mutex.  | 
258  |  |   // Note that you cannot call `unlock_shared()` on a mutex held in write mode.  | 
259  |  |   void unlock_shared() ABSL_UNLOCK_FUNCTION();  | 
260  |  |  | 
261  | 0  |   void ReaderUnlock() ABSL_UNLOCK_FUNCTION() { unlock_shared(); } | 
262  |  |  | 
263  |  |   // Mutex::try_lock_shared()  | 
264  |  |   //  | 
265  |  |   // If the mutex can be acquired without blocking, acquires this mutex for  | 
266  |  |   // shared access and returns `true`. Otherwise, returns `false`. Returns  | 
267  |  |   // `true` with high probability if the `Mutex` was free or shared.  | 
268  |  |   [[nodiscard]] bool try_lock_shared() ABSL_SHARED_TRYLOCK_FUNCTION(true);  | 
269  |  |  | 
270  | 0  |   [[nodiscard]] bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true) { | 
271  | 0  |     return try_lock_shared();  | 
272  | 0  |   }  | 
273  |  |  | 
274  |  |   // Mutex::AssertReaderHeld()  | 
275  |  |   //  | 
276  |  |   // Require that the mutex be held at least in shared mode (read mode) by this  | 
277  |  |   // thread.  | 
278  |  |   //  | 
279  |  |   // If the mutex is not currently held by this thread, this function may report  | 
280  |  |   // an error (typically by crashing with a diagnostic) or it may do nothing.  | 
281  |  |   // This function is intended only as a tool to assist debugging; it doesn't  | 
282  |  |   // guarantee correctness.  | 
283  |  |   void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK();  | 
284  |  |  | 
285  |  |   // Mutex::WriterLock()  | 
286  |  |   // Mutex::WriterUnlock()  | 
287  |  |   // Mutex::WriterTryLock()  | 
288  |  |   //  | 
289  |  |   // Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.  | 
290  |  |   //  | 
291  |  |   // These methods may be used (along with the complementary `Reader*()`  | 
292  |  |   // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,  | 
293  |  |   // etc.) from reader/writer lock usage.  | 
294  | 0  |   void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { lock(); } | 
295  |  |  | 
296  | 0  |   void WriterUnlock() ABSL_UNLOCK_FUNCTION() { unlock(); } | 
297  |  |  | 
298  | 0  |   [[nodiscard]] bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) { | 
299  | 0  |     return try_lock();  | 
300  | 0  |   }  | 
301  |  |  | 
302  |  |   // ---------------------------------------------------------------------------  | 
303  |  |   // Conditional Critical Regions  | 
304  |  |   // ---------------------------------------------------------------------------  | 
305  |  |  | 
306  |  |   // Conditional usage of a `Mutex` can occur using two distinct paradigms:  | 
307  |  |   //  | 
308  |  |   //   * Use of `Mutex` member functions with `Condition` objects.  | 
309  |  |   //   * Use of the separate `CondVar` abstraction.  | 
310  |  |   //  | 
311  |  |   // In general, prefer use of `Condition` and the `Mutex` member functions  | 
312  |  |   // listed below over `CondVar`. When there are multiple threads waiting on  | 
313  |  |   // distinctly different conditions, however, a battery of `CondVar`s may be  | 
314  |  |   // more efficient. This section discusses use of `Condition` objects.  | 
315  |  |   //  | 
316  |  |   // `Mutex` contains member functions that perform lock operations only under  | 
317  |  |   // certain conditions, expressed as objects of class `Condition`. For  | 
318  |  |   // correctness, the `Condition` must be a pure boolean function only of state  | 
319  |  |   // protected by the `Mutex`. The condition must be invariant w.r.t.  | 
320  |  |   // environmental state such as the thread, CPU id, or time, and must be  | 
321  |  |   // `noexcept`. The condition will always be invoked with the mutex held in at  | 
322  |  |   // least read mode, so it should not block for long periods or sleep on a timer.  | 
323  |  |   //  | 
324  |  |   // Since a condition must not depend directly on the current time, use  | 
325  |  |   // `*WithTimeout()` member function variants to make your condition  | 
326  |  |   // effectively true after a given duration, or `*WithDeadline()` variants to  | 
327  |  |   // make your condition effectively true after a given time.  | 
328  |  |   //  | 
329  |  |   // The condition function should have no side-effects aside from debug  | 
330  |  |   // logging; as a special exception, the function may acquire other mutexes  | 
331  |  |   // provided it releases all those that it acquires.  (This exception was  | 
332  |  |   // required to allow logging.)  | 
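  |  |   //  | 
  |  |   // A usage sketch (editor's addition; `queue_` and `PopWhenReady()` are  | 
  |  |   // hypothetical names; assumes <deque>): waiting on protected state with a  | 
  |  |   // capture-free lambda converted to a function pointer via unary `+`:  | 
  |  |   //  | 
  |  |   //   std::deque<int> queue_ ABSL_GUARDED_BY(mu_);  | 
  |  |   //  | 
  |  |   //   int PopWhenReady() {  | 
  |  |   //     absl::MutexLock lock(&mu_);  | 
  |  |   //     mu_.Await(absl::Condition(  | 
  |  |   //         +[](std::deque<int>* q) { return !q->empty(); }, &queue_));  | 
  |  |   //     int v = queue_.front();  | 
  |  |   //     queue_.pop_front();  | 
  |  |   //     return v;  | 
  |  |   //   }  | 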
333  |  |  | 
334  |  |   // Mutex::Await()  | 
335  |  |   //  | 
336  |  |   // Unlocks this `Mutex` and blocks until simultaneously both `cond` is `true`  | 
337  |  |   // and this `Mutex` can be reacquired, then reacquires this `Mutex` in the  | 
338  |  |   // same mode in which it was previously held. If the condition is initially  | 
339  |  |   // `true`, `Await()` *may* skip the release/re-acquire step.  | 
340  |  |   //  | 
341  |  |   // `Await()` requires that this thread holds this `Mutex` in some mode.  | 
342  | 0  |   void Await(const Condition& cond) { | 
343  | 0  |     AwaitCommon(cond, synchronization_internal::KernelTimeout::Never());  | 
344  | 0  |   }  | 
345  |  |  | 
346  |  |   // Mutex::LockWhen()  | 
347  |  |   // Mutex::ReaderLockWhen()  | 
348  |  |   // Mutex::WriterLockWhen()  | 
349  |  |   //  | 
350  |  |   // Blocks until simultaneously both `cond` is `true` and this `Mutex` can  | 
351  |  |   // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is  | 
352  |  |   // logically equivalent to `*Lock(); Await();` though they may have different  | 
353  |  |   // performance characteristics.  | 
354  | 0  |   void LockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() { | 
355  | 0  |     LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(),  | 
356  | 0  |                    true);  | 
357  | 0  |   }  | 
358  |  |  | 
359  | 0  |   void ReaderLockWhen(const Condition& cond) ABSL_SHARED_LOCK_FUNCTION() { | 
360  | 0  |     LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(),  | 
361  | 0  |                    false);  | 
362  | 0  |   }  | 
363  |  |  | 
364  | 0  |   void WriterLockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() { | 
365  | 0  |     this->LockWhen(cond);  | 
366  | 0  |   }  | 
367  |  |  | 
368  |  |   // ---------------------------------------------------------------------------  | 
369  |  |   // Mutex Variants with Timeouts/Deadlines  | 
370  |  |   // ---------------------------------------------------------------------------  | 
371  |  |  | 
372  |  |   // Mutex::AwaitWithTimeout()  | 
373  |  |   // Mutex::AwaitWithDeadline()  | 
374  |  |   //  | 
375  |  |   // Unlocks this `Mutex` and blocks until simultaneously:  | 
376  |  |   //   - either `cond` is true or the {timeout has expired, deadline has passed} | 
377  |  |   //     and  | 
378  |  |   //   - this `Mutex` can be reacquired,  | 
379  |  |   // then reacquire this `Mutex` in the same mode in which it was previously  | 
380  |  |   // held, returning `true` iff `cond` is `true` on return.  | 
381  |  |   //  | 
382  |  |   // If the condition is initially `true`, the implementation *may* skip the  | 
383  |  |   // release/re-acquire step and return immediately.  | 
384  |  |   //  | 
385  |  |   // Deadlines in the past are equivalent to an immediate deadline.  | 
386  |  |   // Negative timeouts are equivalent to a zero timeout.  | 
387  |  |   //  | 
388  |  |   // This method requires that this thread holds this `Mutex` in some mode.  | 
389  | 0  |   bool AwaitWithTimeout(const Condition& cond, absl::Duration timeout) { | 
390  | 0  |     return AwaitCommon(cond, synchronization_internal::KernelTimeout{timeout}); | 
391  | 0  |   }  | 
392  |  |  | 
393  | 0  |   bool AwaitWithDeadline(const Condition& cond, absl::Time deadline) { | 
394  | 0  |     return AwaitCommon(cond, synchronization_internal::KernelTimeout{deadline}); | 
395  | 0  |   }  | 
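  |  |   //  | 
  |  |   // A usage sketch (editor's addition; `work_done_` is a hypothetical `bool`  | 
  |  |   // member guarded by `mu_`): bounding the wait and then checking whether the  | 
  |  |   // condition actually became true:  | 
  |  |   //  | 
  |  |   //   absl::MutexLock lock(&mu_);  | 
  |  |   //   bool done = mu_.AwaitWithTimeout(absl::Condition(&work_done_),  | 
  |  |   //                                    absl::Seconds(2));  | 
  |  |   //   if (!done) {  | 
  |  |   //     // Timed out; the mutex is nonetheless held again at this point.  | 
  |  |   //   }  | 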
396  |  |  | 
397  |  |   // Mutex::LockWhenWithTimeout()  | 
398  |  |   // Mutex::ReaderLockWhenWithTimeout()  | 
399  |  |   // Mutex::WriterLockWhenWithTimeout()  | 
400  |  |   //  | 
401  |  |   // Blocks until simultaneously both:  | 
402  |  |   //   - either `cond` is `true` or the timeout has expired, and  | 
403  |  |   //   - this `Mutex` can be acquired,  | 
404  |  |   // then atomically acquires this `Mutex`, returning `true` iff `cond` is  | 
405  |  |   // `true` on return.  | 
406  |  |   //  | 
407  |  |   // Negative timeouts are equivalent to a zero timeout.  | 
408  |  |   bool LockWhenWithTimeout(const Condition& cond, absl::Duration timeout)  | 
409  | 0  |       ABSL_EXCLUSIVE_LOCK_FUNCTION() { | 
410  | 0  |     return LockWhenCommon(  | 
411  | 0  |         cond, synchronization_internal::KernelTimeout{timeout}, true); | 
412  | 0  |   }  | 
413  |  |   bool ReaderLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)  | 
414  | 0  |       ABSL_SHARED_LOCK_FUNCTION() { | 
415  | 0  |     return LockWhenCommon(  | 
416  | 0  |         cond, synchronization_internal::KernelTimeout{timeout}, false); | 
417  | 0  |   }  | 
418  |  |   bool WriterLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)  | 
419  | 0  |       ABSL_EXCLUSIVE_LOCK_FUNCTION() { | 
420  | 0  |     return this->LockWhenWithTimeout(cond, timeout);  | 
421  | 0  |   }  | 
422  |  |  | 
423  |  |   // Mutex::LockWhenWithDeadline()  | 
424  |  |   // Mutex::ReaderLockWhenWithDeadline()  | 
425  |  |   // Mutex::WriterLockWhenWithDeadline()  | 
426  |  |   //  | 
427  |  |   // Blocks until simultaneously both:  | 
428  |  |   //   - either `cond` is `true` or the deadline has been passed, and  | 
429  |  |   //   - this `Mutex` can be acquired,  | 
430  |  |   // then atomically acquires this Mutex, returning `true` iff `cond` is `true`  | 
431  |  |   // on return.  | 
432  |  |   //  | 
433  |  |   // Deadlines in the past are equivalent to an immediate deadline.  | 
434  |  |   bool LockWhenWithDeadline(const Condition& cond, absl::Time deadline)  | 
435  | 0  |       ABSL_EXCLUSIVE_LOCK_FUNCTION() { | 
436  | 0  |     return LockWhenCommon(  | 
437  | 0  |         cond, synchronization_internal::KernelTimeout{deadline}, true); | 
438  | 0  |   }  | 
439  |  |   bool ReaderLockWhenWithDeadline(const Condition& cond, absl::Time deadline)  | 
440  | 0  |       ABSL_SHARED_LOCK_FUNCTION() { | 
441  | 0  |     return LockWhenCommon(  | 
442  | 0  |         cond, synchronization_internal::KernelTimeout{deadline}, false); | 
443  | 0  |   }  | 
444  |  |   bool WriterLockWhenWithDeadline(const Condition& cond, absl::Time deadline)  | 
445  | 0  |       ABSL_EXCLUSIVE_LOCK_FUNCTION() { | 
446  | 0  |     return this->LockWhenWithDeadline(cond, deadline);  | 
447  | 0  |   }  | 
448  |  |  | 
449  |  |   // ---------------------------------------------------------------------------  | 
450  |  |   // Debug Support: Invariant Checking, Deadlock Detection, Logging.  | 
451  |  |   // ---------------------------------------------------------------------------  | 
452  |  |  | 
453  |  |   // Mutex::EnableInvariantDebugging()  | 
454  |  |   //  | 
455  |  |   // If `invariant`!=null and if invariant debugging has been enabled globally,  | 
456  |  |   // cause `(*invariant)(arg)` to be called at moments when the invariant for  | 
457  |  |   // this `Mutex` should hold (for example: just after acquire, just before  | 
458  |  |   // release).  | 
459  |  |   //  | 
460  |  |   // The routine `invariant` should have no side-effects since it is not  | 
461  |  |   // guaranteed how many times it will be called; it should check the invariant  | 
462  |  |   // and crash if it does not hold. Enabling global invariant debugging may  | 
463  |  |   // substantially reduce `Mutex` performance; it should be set only for  | 
464  |  |   // non-production runs.  Optimization options may also disable invariant  | 
465  |  |   // checks.  | 
466  |  |   void EnableInvariantDebugging(  | 
467  |  |       void (*absl_nullable invariant)(void* absl_nullability_unknown),  | 
468  |  |       void* absl_nullability_unknown arg);  | 
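  |  |   //  | 
  |  |   // A usage sketch (editor's addition; `Account`, `CheckBalanced()`, and the  | 
  |  |   // members shown are hypothetical; assumes <cassert> and that invariant  | 
  |  |   // debugging has been enabled globally):  | 
  |  |   //  | 
  |  |   //   static void CheckBalanced(void* arg) {  | 
  |  |   //     auto* a = static_cast<Account*>(arg);  | 
  |  |   //     assert(a->debits == a->credits);   // crash if the invariant fails  | 
  |  |   //   }  | 
  |  |   //   ...  | 
  |  |   //   account.mu.EnableInvariantDebugging(&CheckBalanced, &account);  | 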
469  |  |  | 
470  |  |   // Mutex::EnableDebugLog()  | 
471  |  |   //  | 
472  |  |   // Cause all subsequent uses of this `Mutex` to be logged via  | 
473  |  |   // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if no previous  | 
474  |  |   // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.  | 
475  |  |   //  | 
476  |  |   // Note: This method substantially reduces `Mutex` performance.  | 
477  |  |   void EnableDebugLog(const char* absl_nullable name);  | 
478  |  |  | 
479  |  |   // Deadlock detection  | 
480  |  |  | 
481  |  |   // Mutex::ForgetDeadlockInfo()  | 
482  |  |   //  | 
483  |  |   // Forget any deadlock-detection information previously gathered  | 
484  |  |   // about this `Mutex`. Call this method in debug mode when the lock ordering  | 
485  |  |   // of a `Mutex` changes.  | 
486  |  |   void ForgetDeadlockInfo();  | 
487  |  |  | 
488  |  |   // Mutex::AssertNotHeld()  | 
489  |  |   //  | 
490  |  |   // Return immediately if this thread does not hold this `Mutex` in any  | 
491  |  |   // mode; otherwise, may report an error (typically by crashing with a  | 
492  |  |   // diagnostic), or may return immediately.  | 
493  |  |   //  | 
494  |  |   // Currently this check is performed only if all of:  | 
495  |  |   //    - in debug mode  | 
496  |  |   //    - SetMutexDeadlockDetectionMode() has been set to kReport or kAbort  | 
497  |  |   //    - number of locks concurrently held by this thread is not large.  | 
498  |  |   // are true.  | 
499  |  |   void AssertNotHeld() const;  | 
500  |  |  | 
501  |  |   // Special cases.  | 
502  |  |  | 
503  |  |   // A `MuHow` is a constant that indicates how a lock should be acquired.  | 
504  |  |   // Internal implementation detail.  Clients should ignore.  | 
505  |  |   typedef const struct MuHowS* MuHow;  | 
506  |  |  | 
507  |  |   // Mutex::InternalAttemptToUseMutexInFatalSignalHandler()  | 
508  |  |   //  | 
509  |  |   // Causes the `Mutex` implementation to prepare itself for re-entry caused by  | 
510  |  |   // future use of `Mutex` within a fatal signal handler. This method is  | 
511  |  |   // intended for use only for last-ditch attempts to log crash information.  | 
512  |  |   // It does not guarantee that attempts to use Mutexes within the handler will  | 
513  |  |   // not deadlock; it merely makes other faults less likely.  | 
514  |  |   //  | 
515  |  |   // WARNING:  This routine must be invoked from a signal handler, and the  | 
516  |  |   // signal handler must either loop forever or terminate the process.  | 
517  |  |   // Attempts to return from (or `longjmp` out of) the signal handler once this  | 
518  |  |   // call has been made may cause arbitrary program behaviour including  | 
519  |  |   // crashes and deadlocks.  | 
520  |  |   static void InternalAttemptToUseMutexInFatalSignalHandler();  | 
521  |  |  | 
522  |  |  private:  | 
523  |  |   std::atomic<intptr_t> mu_;  // The Mutex state.  | 
524  |  |  | 
525  |  |   // Post()/Wait() versus associated PerThreadSem; in class for required  | 
526  |  |   // friendship with PerThreadSem.  | 
527  |  |   static void IncrementSynchSem(Mutex* absl_nonnull mu,  | 
528  |  |                                 base_internal::PerThreadSynch* absl_nonnull w);  | 
529  |  |   static bool DecrementSynchSem(Mutex* absl_nonnull mu,  | 
530  |  |                                 base_internal::PerThreadSynch* absl_nonnull w,  | 
531  |  |                                 synchronization_internal::KernelTimeout t);  | 
532  |  |  | 
533  |  |   // slow path acquire  | 
534  |  |   void LockSlowLoop(SynchWaitParams* absl_nonnull waitp, int flags);  | 
535  |  |   // wrappers around LockSlowLoop()  | 
536  |  |   bool LockSlowWithDeadline(MuHow absl_nonnull how,  | 
537  |  |                             const Condition* absl_nullable cond,  | 
538  |  |                             synchronization_internal::KernelTimeout t,  | 
539  |  |                             int flags);  | 
540  |  |   void LockSlow(MuHow absl_nonnull how, const Condition* absl_nullable cond,  | 
541  |  |                 int flags) ABSL_ATTRIBUTE_COLD;  | 
542  |  |   // slow path release  | 
543  |  |   void UnlockSlow(SynchWaitParams* absl_nullable waitp) ABSL_ATTRIBUTE_COLD;  | 
544  |  |   // TryLock slow path.  | 
545  |  |   bool TryLockSlow();  | 
546  |  |   // ReaderTryLock slow path.  | 
547  |  |   bool ReaderTryLockSlow();  | 
548  |  |   // Common code between Await() and AwaitWithTimeout/Deadline()  | 
549  |  |   bool AwaitCommon(const Condition& cond,  | 
550  |  |                    synchronization_internal::KernelTimeout t);  | 
551  |  |   bool LockWhenCommon(const Condition& cond,  | 
552  |  |                       synchronization_internal::KernelTimeout t, bool write);  | 
553  |  |   // Attempt to remove thread s from queue.  | 
554  |  |   void TryRemove(base_internal::PerThreadSynch* absl_nonnull s);  | 
555  |  |   // Block a thread on mutex.  | 
556  |  |   void Block(base_internal::PerThreadSynch* absl_nonnull s);  | 
557  |  |   // Wake a thread; return successor.  | 
558  |  |   base_internal::PerThreadSynch* absl_nullable Wakeup(  | 
559  |  |       base_internal::PerThreadSynch* absl_nonnull w);  | 
560  |  |   void Dtor();  | 
561  |  |  | 
562  |  |   friend class CondVar;                // for access to Trans()/Fer().  | 
563  |  |   void Trans(MuHow absl_nonnull how);  // used for CondVar->Mutex transfer  | 
564  |  |   void Fer(base_internal::PerThreadSynch* absl_nonnull  | 
565  |  |                w);  // used for CondVar->Mutex transfer  | 
566  |  |  | 
567  |  |   // Catch the error of writing Mutex when intending MutexLock.  | 
568  | 0  |   explicit Mutex(const volatile Mutex* absl_nullable /*ignored*/) {} | 
569  |  |  | 
570  |  |   Mutex(const Mutex&) = delete;  | 
571  |  |   Mutex& operator=(const Mutex&) = delete;  | 
572  |  | };  | 
573  |  |  | 
574  |  | // -----------------------------------------------------------------------------  | 
575  |  | // Mutex RAII Wrappers  | 
576  |  | // -----------------------------------------------------------------------------  | 
577  |  |  | 
578  |  | // MutexLock  | 
579  |  | //  | 
580  |  | // `MutexLock` is a helper class, which acquires and releases a `Mutex` via  | 
581  |  | // RAII.  | 
582  |  | //  | 
583  |  | // Example:  | 
584  |  | //  | 
585  |  | // class Foo { | 
586  |  | //  public:  | 
587  |  | //   Foo::Bar* Baz() { | 
588  |  | //     MutexLock lock(mu_);  | 
589  |  | //     ...  | 
590  |  | //     return bar;  | 
591  |  | //   }  | 
592  |  | //  | 
593  |  | // private:  | 
594  |  | //   Mutex mu_;  | 
595  |  | // };  | 
596  |  | class ABSL_SCOPED_LOCKABLE MutexLock { | 
597  |  |  public:  | 
598  |  |   // Constructors  | 
599  |  |  | 
600  |  |   // Calls `mu.lock()` and returns when that call returns. That is, `mu` is  | 
601  |  |   // guaranteed to be locked when this object is constructed.  | 
602  |  |   explicit MutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))  | 
603  |  |       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)  | 
604  | 32  |       : mu_(mu) { | 
605  | 32  |     this->mu_.lock();  | 
606  | 32  |   }  | 
607  |  |  | 
608  |  |   // Calls `mu->lock()` and returns when that call returns. That is, `*mu` is  | 
609  |  |   // guaranteed to be locked when this object is constructed. Requires that  | 
610  |  |   // `mu` be dereferenceable.  | 
611  |  |   explicit MutexLock(Mutex* absl_nonnull mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)  | 
612  | 0  |       : MutexLock(*mu) {} | 
613  |  |  | 
614  |  |   // Like above, but calls `mu.LockWhen(cond)` instead. That is, in addition to  | 
615  |  |   // the above, the condition given by `cond` is also guaranteed to hold when  | 
616  |  |   // this object is constructed.  | 
617  |  |   explicit MutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this),  | 
618  |  |                      const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)  | 
619  | 0  |       : mu_(mu) { | 
620  | 0  |     this->mu_.LockWhen(cond);  | 
621  | 0  |   }  | 
622  |  |  | 
623  |  |   explicit MutexLock(Mutex* absl_nonnull mu, const Condition& cond)  | 
624  |  |       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)  | 
625  | 0  |       : MutexLock(*mu, cond) {} | 
626  |  |  | 
627  |  |   MutexLock(const MutexLock&) = delete;  // NOLINT(runtime/mutex)  | 
628  |  |   MutexLock(MutexLock&&) = delete;       // NOLINT(runtime/mutex)  | 
629  |  |   MutexLock& operator=(const MutexLock&) = delete;  | 
630  |  |   MutexLock& operator=(MutexLock&&) = delete;  | 
631  |  |  | 
632  | 32  |   ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_.unlock(); } | 
633  |  |  | 
634  |  |  private:  | 
635  |  |   Mutex& mu_;  | 
636  |  | };  | 
637  |  |  | 
638  |  | // ReaderMutexLock  | 
639  |  | //  | 
640  |  | // The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and  | 
641  |  | // releases a shared lock on a `Mutex` via RAII.  | 
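  |  | //  | 
  |  | // A usage sketch (editor's addition; `GetConfig()` and `config_` are  | 
  |  | // hypothetical names used only for illustration):  | 
  |  | //  | 
  |  | //   std::string GetConfig() const {  | 
  |  | //     absl::ReaderMutexLock lock(&mu_);   // multiple readers may hold this  | 
  |  | //     return config_;  | 
  |  | //   }  | 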
642  |  | class ABSL_SCOPED_LOCKABLE ReaderMutexLock { | 
643  |  |  public:  | 
644  |  |   explicit ReaderMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))  | 
645  |  |       ABSL_SHARED_LOCK_FUNCTION(mu)  | 
646  | 4.12M  |       : mu_(mu) { | 
647  | 4.12M  |     mu.lock_shared();  | 
648  | 4.12M  |   }  | 
649  |  |  | 
650  |  |   explicit ReaderMutexLock(Mutex* absl_nonnull mu) ABSL_SHARED_LOCK_FUNCTION(mu)  | 
651  | 0  |       : ReaderMutexLock(*mu) {} | 
652  |  |  | 
653  |  |   explicit ReaderMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this),  | 
654  |  |                            const Condition& cond) ABSL_SHARED_LOCK_FUNCTION(mu)  | 
655  | 0  |       : mu_(mu) { | 
656  | 0  |     mu.ReaderLockWhen(cond);  | 
657  | 0  |   }  | 
658  |  |  | 
659  |  |   explicit ReaderMutexLock(Mutex* absl_nonnull mu, const Condition& cond)  | 
660  |  |       ABSL_SHARED_LOCK_FUNCTION(mu)  | 
661  | 0  |       : ReaderMutexLock(*mu, cond) {} | 
662  |  |  | 
663  |  |   ReaderMutexLock(const ReaderMutexLock&) = delete;  | 
664  |  |   ReaderMutexLock(ReaderMutexLock&&) = delete;  | 
665  |  |   ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;  | 
666  |  |   ReaderMutexLock& operator=(ReaderMutexLock&&) = delete;  | 
667  |  |  | 
668  | 4.12M  |   ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_.unlock_shared(); } | 
669  |  |  | 
670  |  |  private:  | 
671  |  |   Mutex& mu_;  | 
672  |  | };  | 
673  |  |  | 
674  |  | // WriterMutexLock  | 
675  |  | //  | 
676  |  | // The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and  | 
677  |  | // releases a write (exclusive) lock on a `Mutex` via RAII.  | 
678  |  | class ABSL_SCOPED_LOCKABLE WriterMutexLock { | 
679  |  |  public:  | 
680  |  |   explicit WriterMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))  | 
681  |  |       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)  | 
682  | 1  |       : mu_(mu) { | 
683  | 1  |     mu.lock();  | 
684  | 1  |   }  | 
685  |  |  | 
686  |  |   explicit WriterMutexLock(Mutex* absl_nonnull mu)  | 
687  |  |       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)  | 
688  | 0  |       : WriterMutexLock(*mu) {} | 
689  |  |  | 
690  |  |   explicit WriterMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this),  | 
691  |  |                            const Condition& cond)  | 
692  |  |       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)  | 
693  | 0  |       : mu_(mu) { | 
694  | 0  |     mu.WriterLockWhen(cond);  | 
695  | 0  |   }  | 
696  |  |  | 
697  |  |   explicit WriterMutexLock(Mutex* absl_nonnull mu, const Condition& cond)  | 
698  |  |       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)  | 
699  | 0  |       : WriterMutexLock(*mu, cond) {} | 
700  |  |  | 
701  |  |   WriterMutexLock(const WriterMutexLock&) = delete;  | 
702  |  |   WriterMutexLock(WriterMutexLock&&) = delete;  | 
703  |  |   WriterMutexLock& operator=(const WriterMutexLock&) = delete;  | 
704  |  |   WriterMutexLock& operator=(WriterMutexLock&&) = delete;  | 
705  |  |  | 
706  | 1  |   ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_.unlock(); } | 
707  |  |  | 
708  |  |  private:  | 
709  |  |   Mutex& mu_;  | 
710  |  | };  | 
711  |  |  | 
712  |  | // -----------------------------------------------------------------------------  | 
713  |  | // Condition  | 
714  |  | // -----------------------------------------------------------------------------  | 
715  |  | //  | 
716  |  | // `Mutex` contains a number of member functions which take a `Condition` as an  | 
717  |  | // argument; clients can wait for conditions to become `true` before attempting  | 
718  |  | // to acquire the mutex. These sections are known as "condition critical"  | 
719  |  | // sections. To use a `Condition`, you simply need to construct it and use it  | 
720  |  | // within an appropriate `Mutex` member function; everything else in the  | 
721  |  | // `Condition` class is an implementation detail.  | 
722  |  | //  | 
723  |  | // A `Condition` is specified as a function pointer which returns a boolean.  | 
724  |  | // `Condition` functions should be pure functions -- their results should depend  | 
725  |  | // only on passed arguments, should not consult any external state (such as  | 
726  |  | // clocks), and should have no side-effects, aside from debug logging. Any  | 
727  |  | // objects that the function may access should be limited to those which are  | 
728  |  | // constant while a thread is blocked on the condition (e.g. a stack variable),  | 
729  |  | // or objects of state protected explicitly by the mutex.  | 
730  |  | //  | 
731  |  | // No matter which construction is used for `Condition`, the underlying  | 
732  |  | // function pointer / functor / callable must not throw any  | 
733  |  | // exceptions. Correctness of `Mutex` / `Condition` is not guaranteed in  | 
734  |  | // the face of a throwing `Condition`. (When Abseil is allowed to depend  | 
735  |  | // on C++17, these function pointers will be explicitly marked  | 
736  |  | // `noexcept`; until then this requirement cannot be enforced in the  | 
737  |  | // type system.)  | 
738  |  | //  | 
739  |  | // Note: to use a `Condition`, you need only construct it and pass it to a  | 
740  |  | // suitable `Mutex` member function, such as `Mutex::Await()`, or to the  | 
741  |  | // constructor of one of the scope guard classes.  | 
742  |  | //  | 
743  |  | // Example using LockWhen/Unlock:  | 
744  |  | //  | 
745  |  | //   // assume count_ is not an internal reference count  | 
746  |  | //   int count_ ABSL_GUARDED_BY(mu_);  | 
747  |  | //   Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_); | 
748  |  | //  | 
749  |  | //   mu_.LockWhen(count_is_zero);  | 
750  |  | //   // ...  | 
751  |  | //   mu_.Unlock();  | 
752  |  | //  | 
753  |  | // Example using a scope guard:  | 
754  |  | //  | 
755  |  | //   { | 
756  |  | //     MutexLock lock(mu_, count_is_zero);  | 
757  |  | //     // ...  | 
758  |  | //   }  | 
759  |  | //  | 
760  |  | // When multiple threads are waiting on exactly the same condition, make sure  | 
761  |  | // that they are constructed with the same parameters (same pointer to function  | 
762  |  | // + arg, or same pointer to object + method), so that the mutex implementation  | 
763  |  | // can avoid redundantly evaluating the same condition for each thread.  | 
764  |  | class Condition { | 
765  |  |  public:  | 
766  |  |   // A Condition that returns the result of "(*func)(arg)"  | 
767  |  |   Condition(bool (*absl_nonnull func)(void* absl_nullability_unknown),  | 
768  |  |             void* absl_nullability_unknown arg);  | 
769  |  |  | 
770  |  |   // Templated version for people who are averse to casts.  | 
771  |  |   //  | 
772  |  |   // To use a lambda, prepend it with unary plus, which converts the lambda  | 
773  |  |   // into a function pointer:  | 
774  |  |   //     Condition(+[](T* t) { return ...; }, arg). | 
775  |  |   //  | 
776  |  |   // Note: lambdas in this case must contain no bound variables.  | 
777  |  |   //  | 
778  |  |   // See class comment for performance advice.  | 
779  |  |   template <typename T>  | 
780  |  |   Condition(bool (*absl_nonnull func)(T* absl_nullability_unknown),  | 
781  |  |             T* absl_nullability_unknown arg);  | 
782  |  |  | 
783  |  |   // Same as above, but allows for cases where `arg` comes from a pointer that  | 
784  |  |   // is convertible to the function parameter type `T*` but not an exact match.  | 
785  |  |   //  | 
786  |  |   // For example, the argument might be `X*` but the function takes `const X*`,  | 
787  |  |   // or the argument might be `Derived*` while the function takes `Base*`, and  | 
788  |  |   // so on for cases where the argument pointer can be implicitly converted.  | 
789  |  |   //  | 
790  |  |   // Implementation notes: This constructor overload is required in addition to  | 
791  |  |   // the one above to allow deduction of `T` from `arg` for cases such as where  | 
792  |  |   // a function template is passed as `func`. Also, the dummy `typename = void`  | 
793  |  |   // template parameter exists just to work around a MSVC mangling bug.  | 
794  |  |   template <typename T, typename = void>  | 
795  |  |   Condition(  | 
796  |  |       bool (*absl_nonnull func)(T* absl_nullability_unknown),  | 
797  |  |       typename absl::type_identity<T>::type* absl_nullability_unknown  | 
798  |  |           arg);  | 
799  |  |  | 
800  |  |   // Templated version for invoking a method that returns a `bool`.  | 
801  |  |   //  | 
802  |  |   // `Condition(object, &Class::Method)` constructs a `Condition` that evaluates  | 
803  |  |   // `object->Method()`.  | 
804  |  |   //  | 
805  |  |   // Implementation Note: `absl::type_identity` is used to allow  | 
806  |  |   // methods to come from base classes. A simpler signature like  | 
807  |  |   // `Condition(T*, bool (T::*)())` does not suffice.  | 
808  |  |   template <typename T>  | 
809  |  |   Condition(  | 
810  |  |       T* absl_nonnull object,  | 
811  |  |       bool (absl::type_identity<T>::type::* absl_nonnull method)());  | 
812  |  |  | 
813  |  |   // Same as above, for const members  | 
814  |  |   template <typename T>  | 
815  |  |   Condition(  | 
816  |  |       const T* absl_nonnull object,  | 
817  |  |       bool (absl::type_identity<T>::type::* absl_nonnull method)()  | 
818  |  |           const);  | 
819  |  |  | 
820  |  |   // A Condition that returns the value of `*cond`  | 
821  |  |   explicit Condition(const bool* absl_nonnull cond);  | 
822  |  |  | 
823  |  |   // Templated version for invoking a functor that returns a `bool`.  | 
824  |  |   // This approach accepts pointers to non-mutable lambdas, `std::function`,  | 
825  |  |   // the result of `std::bind` and user-defined functors that define  | 
826  |  |   // `bool F::operator()() const`.  | 
827  |  |   //  | 
828  |  |   // Example:  | 
829  |  |   //  | 
830  |  |   //   auto reached = [this, current]() { | 
831  |  |   //     mu_.AssertReaderHeld();                // For annotalysis.  | 
832  |  |   //     return processed_ >= current;  | 
833  |  |   //   };  | 
834  |  |   //   mu_.Await(Condition(&reached));  | 
835  |  |   //  | 
836  |  |   // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in  | 
837  |  |   // the lambda as it may be called when the mutex is being unlocked from a  | 
838  |  |   // scope holding only a reader lock, in which case the assertion would fail  | 
839  |  |   // and crash the binary.  | 
840  |  |  | 
841  |  |   // See class comment for performance advice. In particular, if there  | 
842  |  |   // might be more than one waiter for the same condition, make sure  | 
843  |  |   // that all waiters construct the condition with the same pointers.  | 
844  |  |  | 
845  |  |   // Implementation note: The second template parameter ensures that this  | 
846  |  |   // constructor doesn't participate in overload resolution if T doesn't have  | 
847  |  |   // `bool operator() const`.  | 
848  |  |   template <typename T, typename E = decltype(static_cast<bool (T::*)() const>(  | 
849  |  |                             &T::operator()))>  | 
850  |  |   explicit Condition(const T* absl_nonnull obj)  | 
851  |  |       : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {} | 
852  |  |  | 
853  |  |   // A Condition that always returns `true`.  | 
854  |  |   // kTrue is only useful in a narrow set of circumstances, mostly when  | 
855  |  |   // it's passed conditionally. For example:  | 
856  |  |   //  | 
857  |  |   //   mu.LockWhen(some_flag ? kTrue : SomeOtherCondition);  | 
858  |  |   //  | 
859  |  |   // Note: {LockWhen,Await}With{Deadline,Timeout} methods with kTrue condition | 
860  |  |   // don't return immediately when the timeout happens, they still block until  | 
861  |  |   // the Mutex becomes available. The return value of these methods does  | 
862  |  |   // not indicate if the timeout was reached; rather it indicates whether or  | 
863  |  |   // not the condition is true.  | 
864  |  |   ABSL_CONST_INIT static const Condition kTrue;  | 
865  |  |  | 
866  |  |   // Evaluates the condition.  | 
867  |  |   bool Eval() const;  | 
868  |  |  | 
869  |  |   // Returns `true` if the two conditions are guaranteed to return the same  | 
870  |  |   // value if evaluated at the same time, `false` if the evaluation *may* return  | 
871  |  |   // different results.  | 
872  |  |   //  | 
873  |  |   // Two `Condition` values are guaranteed equal if both their `func` and `arg`  | 
874  |  |   // components are the same. A null pointer is equivalent to a `true`  | 
875  |  |   // condition.  | 
876  |  |   static bool GuaranteedEqual(const Condition* absl_nullable a,  | 
877  |  |                               const Condition* absl_nullable b);  | 
878  |  |  | 
879  |  |  private:  | 
880  |  |   // Sizing an allocation for a method pointer can be subtle. In the Itanium  | 
881  |  |   // specifications, a method pointer has a predictable, uniform size. On the  | 
882  |  |   // other hand, under the MSVC ABI, method pointer sizes vary based on the  | 
883  |  |   // inheritance of the class. Specifically, method pointers from classes with  | 
884  |  |   // multiple inheritance are bigger than those of classes with single  | 
885  |  |   // inheritance. Other variations also exist.  | 
886  |  |  | 
887  |  | #ifndef _MSC_VER  | 
888  |  |   // Allocation for a function pointer or method pointer.  | 
889  |  |   // The {0} initializer ensures that all unused bytes of this buffer are | 
890  |  |   // always zeroed out.  This is necessary, because GuaranteedEqual() compares  | 
891  |  |   // all of the bytes, unaware of which bytes are relevant to a given `eval_`.  | 
892  |  |   using MethodPtr = bool (Condition::*)();  | 
893  |  |   char callback_[sizeof(MethodPtr)] = {0}; | 
894  |  | #else  | 
895  |  |   // It is well known that the largest MSVC pointer-to-member is 24 bytes. This  | 
896  |  |   // may be the largest known pointer-to-member of any platform. For this  | 
897  |  |   // reason we will allocate 24 bytes for MSVC platform toolchains.  | 
898  |  |   char callback_[24] = {0}; | 
899  |  | #endif  | 
900  |  |  | 
901  |  |   // Function with which to evaluate callbacks and/or arguments.  | 
902  |  |   bool (*absl_nullable eval_)(const Condition* absl_nonnull) = nullptr;  | 
903  |  |  | 
904  |  |   // Either an argument for a function call or an object for a method call.  | 
905  |  |   void* absl_nullable arg_ = nullptr;  | 
906  |  |  | 
907  |  |   // Various functions eval_ can point to:  | 
908  |  |   static bool CallVoidPtrFunction(const Condition* absl_nonnull c);  | 
909  |  |   template <typename T>  | 
910  |  |   static bool CastAndCallFunction(const Condition* absl_nonnull c);  | 
911  |  |   template <typename T, typename ConditionMethodPtr>  | 
912  |  |   static bool CastAndCallMethod(const Condition* absl_nonnull c);  | 
913  |  |  | 
914  |  |   // Helper methods for storing, validating, and reading callback arguments.  | 
915  |  |   template <typename T>  | 
916  | 0  |   inline void StoreCallback(T callback) { | 
917  | 0  |     static_assert(  | 
918  | 0  |         sizeof(callback) <= sizeof(callback_),  | 
919  | 0  |         "An overlarge pointer was passed as a callback to Condition.");  | 
920  | 0  |     std::memcpy(callback_, &callback, sizeof(callback));  | 
921  | 0  |   } Unexecuted instantiation: void absl::Condition::StoreCallback<bool (*)(absl::SynchEvent*)>(bool (*)(absl::SynchEvent*)) Unexecuted instantiation: void absl::Condition::StoreCallback<bool (*)(void*)>(bool (*)(void*))  | 
922  |  |  | 
923  |  |   template <typename T>  | 
924  | 0  |   inline void ReadCallback(T* absl_nonnull callback) const { | 
925  | 0  |     std::memcpy(callback, callback_, sizeof(*callback));  | 
926  | 0  |   }  | 
927  |  |  | 
928  | 0  |   static bool AlwaysTrue(const Condition* absl_nullable) { return true; } | 
929  |  |  | 
930  |  |   // Used only to create kTrue.  | 
931  | 0  |   constexpr Condition() : eval_(AlwaysTrue), arg_(nullptr) {} | 
932  |  | };  | 
933  |  |  | 
934  |  | // -----------------------------------------------------------------------------  | 
935  |  | // CondVar  | 
936  |  | // -----------------------------------------------------------------------------  | 
937  |  | //  | 
938  |  | // A condition variable, reflecting state evaluated separately outside of the  | 
939  |  | // `Mutex` object, which can be signaled to wake callers.  | 
940  |  | // This class is not normally needed; use `Mutex` member functions such as  | 
941  |  | // `Mutex::Await()` and intrinsic `Condition` abstractions. In rare cases  | 
942  |  | // with many threads and many conditions, `CondVar` may be faster.  | 
943  |  | //  | 
944  |  | // The implementation may deliver signals to any condition variable at  | 
945  |  | // any time, even when no call to `Signal()` or `SignalAll()` is made; as a  | 
946  |  | // result, upon being awoken, you must check the logical condition you have  | 
947  |  | // been waiting upon.  | 
948  |  | //  | 
949  |  | // Examples:  | 
950  |  | //  | 
951  |  | // Usage for a thread waiting for some condition C protected by mutex mu:  | 
952  |  | //       mu.Lock();  | 
953  |  | //       while (!C) { cv->Wait(&mu); }        // releases and reacquires mu | 
954  |  | //       //  C holds; process data  | 
955  |  | //       mu.Unlock();  | 
956  |  | //  | 
957  |  | // Usage to wake the waiting thread is:  | 
958  |  | //       mu.Lock();  | 
959  |  | //       // process data, possibly establishing C  | 
960  |  | //       if (C) { cv->Signal(); } | 
961  |  | //       mu.Unlock();  | 
962  |  | //  | 
963  |  | // If C may be useful to more than one waiter, use `SignalAll()` instead of  | 
964  |  | // `Signal()`.  | 
965  |  | //  | 
966  |  | // With this implementation it is efficient to use `Signal()/SignalAll()` inside  | 
967  |  | // the locked region; this usage can make reasoning about your program easier.  | 
968  |  | //  | 
969  |  | class CondVar { | 
970  |  |  public:  | 
971  |  |   // A `CondVar` allocated on the heap or on the stack can use this  | 
972  |  |   // constructor.  | 
973  |  |   CondVar();  | 
974  |  |  | 
975  |  |   // CondVar::Wait()  | 
976  |  |   //  | 
977  |  |   // Atomically releases a `Mutex` and blocks on this condition variable.  | 
978  |  |   // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a  | 
979  |  |   // spurious wakeup), then reacquires the `Mutex` and returns.  | 
980  |  |   //  | 
981  |  |   // Requires and ensures that the current thread holds the `Mutex`.  | 
982  | 0  |   void Wait(Mutex* absl_nonnull mu) { | 
983  | 0  |     WaitCommon(mu, synchronization_internal::KernelTimeout::Never());  | 
984  | 0  |   }  | 
985  |  |  | 
986  |  |   // CondVar::WaitWithTimeout()  | 
987  |  |   //  | 
988  |  |   // Atomically releases a `Mutex` and blocks on this condition variable.  | 
989  |  |   // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a  | 
990  |  |   // spurious wakeup), or until the timeout has expired, then reacquires  | 
991  |  |   // the `Mutex` and returns.  | 
992  |  |   //  | 
993  |  |   // Returns true if the timeout has expired without this `CondVar`  | 
994  |  |   // being signalled in any manner. If both the timeout has expired  | 
995  |  |   // and this `CondVar` has been signalled, the implementation is free  | 
996  |  |   // to return `true` or `false`.  | 
997  |  |   //  | 
998  |  |   // Requires and ensures that the current thread holds the `Mutex`.  | 
999  | 0  |   bool WaitWithTimeout(Mutex* absl_nonnull mu, absl::Duration timeout) { | 
1000  | 0  |     return WaitCommon(mu, synchronization_internal::KernelTimeout(timeout));  | 
1001  | 0  |   }  | 
1002  |  |  | 
1003  |  |   // CondVar::WaitWithDeadline()  | 
1004  |  |   //  | 
1005  |  |   // Atomically releases a `Mutex` and blocks on this condition variable.  | 
1006  |  |   // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a  | 
1007  |  |   // spurious wakeup), or until the deadline has passed, then reacquires  | 
1008  |  |   // the `Mutex` and returns.  | 
1009  |  |   //  | 
1010  |  |   // Deadlines in the past are equivalent to an immediate deadline.  | 
1011  |  |   //  | 
1012  |  |   // Returns true if the deadline has passed without this `CondVar`  | 
1013  |  |   // being signalled in any manner. If both the deadline has passed  | 
1014  |  |   // and this `CondVar` has been signalled, the implementation is free  | 
1015  |  |   // to return `true` or `false`.  | 
1016  |  |   //  | 
1017  |  |   // Requires and ensures that the current thread holds the `Mutex`.  | 
1018  | 0  |   bool WaitWithDeadline(Mutex* absl_nonnull mu, absl::Time deadline) { | 
1019  | 0  |     return WaitCommon(mu, synchronization_internal::KernelTimeout(deadline));  | 
1020  | 0  |   }  | 
1021  |  |  | 
1022  |  |   // CondVar::Signal()  | 
1023  |  |   //  | 
1024  |  |   // Signal this `CondVar`; wake at least one waiter if one exists.  | 
1025  |  |   void Signal();  | 
1026  |  |  | 
1027  |  |   // CondVar::SignalAll()  | 
1028  |  |   //  | 
1029  |  |   // Signal this `CondVar`; wake all waiters.  | 
1030  |  |   void SignalAll();  | 
1031  |  |  | 
1032  |  |   // CondVar::EnableDebugLog()  | 
1033  |  |   //  | 
1034  |  |   // Causes all subsequent uses of this `CondVar` to be logged via  | 
1035  |  |   // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != nullptr`.  | 
1036  |  |   // Note: this method substantially reduces `CondVar` performance.  | 
1037  |  |   void EnableDebugLog(const char* absl_nullable name);  | 
1038  |  |  | 
1039  |  |  private:  | 
1040  |  |   bool WaitCommon(Mutex* absl_nonnull mutex,  | 
1041  |  |                   synchronization_internal::KernelTimeout t);  | 
1042  |  |   void Remove(base_internal::PerThreadSynch* absl_nonnull s);  | 
1043  |  |   std::atomic<intptr_t> cv_;  // Condition variable state.  | 
1044  |  |   CondVar(const CondVar&) = delete;  | 
1045  |  |   CondVar& operator=(const CondVar&) = delete;  | 
1046  |  | };  | 
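  |  |  | 
  |  | // The wait/signal pattern described in the comment above `CondVar` can be  | 
  |  | // packaged into a small monitor-style class. The queue below is a hedged,  | 
  |  | // hypothetical sketch (not part of this header), assuming <deque> and this  | 
  |  | // header are available:  | 
  |  | //  | 
  |  | //   class IntQueue {  | 
  |  | //    public:  | 
  |  | //     void Push(int v) {  | 
  |  | //       absl::MutexLock l(&mu_);  | 
  |  | //       items_.push_back(v);  | 
  |  | //       cv_.Signal();                            // C == !items_.empty() now holds  | 
  |  | //     }  | 
  |  | //     int BlockingPop() {  | 
  |  | //       absl::MutexLock l(&mu_);  | 
  |  | //       while (items_.empty()) cv_.Wait(&mu_);   // releases and reacquires mu_  | 
  |  | //       int v = items_.front();  | 
  |  | //       items_.pop_front();  | 
  |  | //       return v;  | 
  |  | //     }  | 
  |  | //    private:  | 
  |  | //     absl::Mutex mu_;  | 
  |  | //     absl::CondVar cv_;  | 
  |  | //     std::deque<int> items_ ABSL_GUARDED_BY(mu_);  | 
  |  | //   };  | 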
1047  |  |  | 
1048  |  | // Variants of MutexLock.  | 
1049  |  | //  | 
1050  |  | // If you find yourself using one of these, consider instead using  | 
1051  |  | // Mutex::Unlock() and/or if-statements for clarity.  | 
1052  |  |  | 
1053  |  | // MutexLockMaybe  | 
1054  |  | //  | 
1055  |  | // MutexLockMaybe is like MutexLock, but is a no-op when mu is null.  | 
1056  |  | class ABSL_SCOPED_LOCKABLE MutexLockMaybe { | 
1057  |  |  public:  | 
1058  |  |   explicit MutexLockMaybe(Mutex* absl_nullable mu)  | 
1059  |  |       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)  | 
1060  | 0  |       : mu_(mu) { | 
1061  | 0  |     if (this->mu_ != nullptr) { | 
1062  | 0  |       this->mu_->lock();  | 
1063  | 0  |     }  | 
1064  | 0  |   }  | 
1065  |  |  | 
1066  |  |   explicit MutexLockMaybe(Mutex* absl_nullable mu, const Condition& cond)  | 
1067  |  |       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)  | 
1068  | 0  |       : mu_(mu) { | 
1069  | 0  |     if (this->mu_ != nullptr) { | 
1070  | 0  |       this->mu_->LockWhen(cond);  | 
1071  | 0  |     }  | 
1072  | 0  |   }  | 
1073  |  |  | 
1074  | 0  |   ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() { | 
1075  | 0  |     if (this->mu_ != nullptr) { | 
1076  | 0  |       this->mu_->unlock();  | 
1077  | 0  |     }  | 
1078  | 0  |   }  | 
1079  |  |  | 
1080  |  |  private:  | 
1081  |  |   Mutex* absl_nullable const mu_;  | 
1082  |  |   MutexLockMaybe(const MutexLockMaybe&) = delete;  | 
1083  |  |   MutexLockMaybe(MutexLockMaybe&&) = delete;  | 
1084  |  |   MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;  | 
1085  |  |   MutexLockMaybe& operator=(MutexLockMaybe&&) = delete;  | 
1086  |  | };  | 
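  |  |  | 
  |  | // For illustration, a hypothetical helper that accepts an optional mutex  | 
  |  | // might use MutexLockMaybe like this (sketch only; `Item::Visit()` is an  | 
  |  | // assumed application type, not part of Abseil):  | 
  |  | //  | 
  |  | //   void VisitIfSafe(absl::Mutex* absl_nullable mu, Item* item) {  | 
  |  | //     absl::MutexLockMaybe l(mu);   // no-op when mu == nullptr  | 
  |  | //     item->Visit();  | 
  |  | //   }  | 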
1087  |  |  | 
1088  |  | // ReleasableMutexLock  | 
1089  |  | //  | 
1090  |  | // ReleasableMutexLock is like MutexLock, but permits `Release()` of its  | 
1091  |  | // mutex before destruction. `Release()` may be called at most once.  | 
1092  |  | class ABSL_SCOPED_LOCKABLE ReleasableMutexLock { | 
1093  |  |  public:  | 
1094  |  |   explicit ReleasableMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(  | 
1095  |  |       this)) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)  | 
1096  | 0  |       : mu_(&mu) { | 
1097  | 0  |     this->mu_->lock();  | 
1098  | 0  |   }  | 
1099  |  |  | 
1100  |  |   explicit ReleasableMutexLock(Mutex* absl_nonnull mu)  | 
1101  |  |       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)  | 
1102  | 0  |       : ReleasableMutexLock(*mu) {} | 
1103  |  |  | 
1104  |  |   explicit ReleasableMutexLock(  | 
1105  |  |       Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this),  | 
1106  |  |       const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)  | 
1107  | 0  |       : mu_(&mu) { | 
1108  | 0  |     this->mu_->LockWhen(cond);  | 
1109  | 0  |   }  | 
1110  |  |  | 
1111  |  |   explicit ReleasableMutexLock(Mutex* absl_nonnull mu, const Condition& cond)  | 
1112  |  |       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)  | 
1113  | 0  |       : ReleasableMutexLock(*mu, cond) {} | 
1114  |  |  | 
1115  | 0  |   ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() { | 
1116  | 0  |     if (this->mu_ != nullptr) { | 
1117  | 0  |       this->mu_->unlock();  | 
1118  | 0  |     }  | 
1119  | 0  |   }  | 
1120  |  |  | 
1121  |  |   void Release() ABSL_UNLOCK_FUNCTION();  | 
1122  |  |  | 
1123  |  |  private:  | 
1124  |  |   Mutex* absl_nullable mu_;  | 
1125  |  |   ReleasableMutexLock(const ReleasableMutexLock&) = delete;  | 
1126  |  |   ReleasableMutexLock(ReleasableMutexLock&&) = delete;  | 
1127  |  |   ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;  | 
1128  |  |   ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete;  | 
1129  |  | };  | 
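  |  |  | 
  |  | // A sketch of the early-release idiom (the notify helper below is  | 
  |  | // hypothetical application code):  | 
  |  | //  | 
  |  | //   void UpdateAndNotify(absl::Mutex* mu, int* guarded_value) {  | 
  |  | //     absl::ReleasableMutexLock l(mu);  | 
  |  | //     ++*guarded_value;  | 
  |  | //     l.Release();          // drop the lock before slow, lock-free work  | 
  |  | //     NotifyObservers();    // assumed application code; runs unlocked  | 
  |  | //   }  | 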
1130  |  |  | 
1131  | 6  | inline Mutex::Mutex() : mu_(0) { | 
1132  | 6  |   ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);  | 
1133  | 6  | }  | 
1134  |  |  | 
1135  |  | inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {} | 
1136  |  |  | 
1137  |  | #if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL)  | 
1138  |  | ABSL_ATTRIBUTE_ALWAYS_INLINE  | 
1139  | 0  | inline Mutex::~Mutex() { Dtor(); } | 
1140  |  | #endif  | 
1141  |  |  | 
1142  |  | #if defined(NDEBUG) && !defined(ABSL_HAVE_THREAD_SANITIZER)  | 
1143  |  | // Use default (empty) destructor in release build for performance reasons.  | 
1144  |  | // We need to mark both Dtor and ~Mutex as always inline for inconsistent  | 
1145  |  | // builds that use both NDEBUG and !NDEBUG with dynamic libraries. In these  | 
1146  |  | // cases we want the empty functions to dissolve entirely rather than being  | 
1147  |  | // exported from dynamic libraries and potentially overriding the non-empty ones.  | 
1148  |  | ABSL_ATTRIBUTE_ALWAYS_INLINE  | 
1149  |  | inline void Mutex::Dtor() {} | 
1150  |  | #endif  | 
1151  |  |  | 
1152  |  | inline CondVar::CondVar() : cv_(0) {} | 
1153  |  |  | 
1154  |  | // static  | 
1155  |  | template <typename T, typename ConditionMethodPtr>  | 
1156  |  | bool Condition::CastAndCallMethod(const Condition* absl_nonnull c) { | 
1157  |  |   T* object = static_cast<T*>(c->arg_);  | 
1158  |  |   ConditionMethodPtr condition_method_pointer;  | 
1159  |  |   c->ReadCallback(&condition_method_pointer);  | 
1160  |  |   return (object->*condition_method_pointer)();  | 
1161  |  | }  | 
1162  |  |  | 
1163  |  | // static  | 
1164  |  | template <typename T>  | 
1165  | 0  | bool Condition::CastAndCallFunction(const Condition* absl_nonnull c) { | 
1166  | 0  |   bool (*function)(T*);  | 
1167  | 0  |   c->ReadCallback(&function);  | 
1168  | 0  |   T* argument = static_cast<T*>(c->arg_);  | 
1169  | 0  |   return (*function)(argument);  | 
1170  | 0  | }  | 
1171  |  |  | 
1172  |  | template <typename T>  | 
1173  |  | inline Condition::Condition(  | 
1174  |  |     bool (*absl_nonnull func)(T* absl_nullability_unknown),  | 
1175  |  |     T* absl_nullability_unknown arg)  | 
1176  | 0  |     : eval_(&CastAndCallFunction<T>),  | 
1177  | 0  |       arg_(const_cast<void*>(static_cast<const void*>(arg))) { | 
1178  | 0  |   static_assert(sizeof(&func) <= sizeof(callback_),  | 
1179  | 0  |                 "An overlarge function pointer was passed to Condition.");  | 
1180  | 0  |   StoreCallback(func);  | 
1181  | 0  | }  | 
1182  |  |  | 
1183  |  | template <typename T, typename>  | 
1184  |  | inline Condition::Condition(  | 
1185  |  |     bool (*absl_nonnull func)(T* absl_nullability_unknown),  | 
1186  |  |     typename absl::type_identity<T>::type* absl_nullability_unknown  | 
1187  |  |         arg)  | 
1188  |  |     // Just delegate to the overload above.  | 
1189  |  |     : Condition(func, arg) {} | 
1190  |  |  | 
1191  |  | template <typename T>  | 
1192  |  | inline Condition::Condition(  | 
1193  |  |     T* absl_nonnull object,  | 
1194  |  |     bool (absl::type_identity<T>::type::* absl_nonnull method)())  | 
1195  |  |     : eval_(&CastAndCallMethod<T, decltype(method)>), arg_(object) { | 
1196  |  |   static_assert(sizeof(&method) <= sizeof(callback_),  | 
1197  |  |                 "An overlarge method pointer was passed to Condition.");  | 
1198  |  |   StoreCallback(method);  | 
1199  |  | }  | 
1200  |  |  | 
1201  |  | template <typename T>  | 
1202  |  | inline Condition::Condition(  | 
1203  |  |     const T* absl_nonnull object,  | 
1204  |  |     bool (absl::type_identity<T>::type::* absl_nonnull method)()  | 
1205  |  |         const)  | 
1206  |  |     : eval_(&CastAndCallMethod<const T, decltype(method)>),  | 
1207  |  |       arg_(reinterpret_cast<void*>(const_cast<T*>(object))) { | 
1208  |  |   StoreCallback(method);  | 
1209  |  | }  | 
1210  |  |  | 
1211  |  | // Register hooks for profiling support.  | 
1212  |  | //  | 
1213  |  | // The function pointer registered here will be called whenever a mutex is  | 
1214  |  | // contended.  The callback is given the cycles for which waiting happened (as  | 
1215  |  | // measured by //absl/base/internal/cycleclock.h, and which may not  | 
1216  |  | // be real "cycle" counts).  | 
1217  |  | //  | 
1218  |  | // There is no ordering guarantee between when the hook is registered and when  | 
1219  |  | // callbacks will begin.  Only a single profiler can be installed in a running  | 
1220  |  | // binary; if this function is called a second time with a different function  | 
1221  |  | // pointer, the value is ignored (and will cause an assertion failure in debug  | 
1222  |  | // mode).  | 
1223  |  | void RegisterMutexProfiler(void (*absl_nonnull fn)(int64_t wait_cycles));  | 
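  |  |  | 
  |  | // A minimal registration sketch; the metrics-export call is hypothetical:  | 
  |  | //  | 
  |  | //   void MutexContentionHook(int64_t wait_cycles) {  | 
  |  | //     ExportContentionSample(wait_cycles);  // e.g. feed a latency histogram  | 
  |  | //   }  | 
  |  | //  | 
  |  | //   int main(int argc, char** argv) {  | 
  |  | //     absl::RegisterMutexProfiler(&MutexContentionHook);  // once, at startup  | 
  |  | //     ...  | 
  |  | //   }  | 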
1224  |  |  | 
1225  |  | // Register a hook for Mutex tracing.  | 
1226  |  | //  | 
1227  |  | // The function pointer registered here will be called whenever a mutex is  | 
1228  |  | // contended.  The callback is given an opaque handle to the contended mutex,  | 
1229  |  | // an event name, and the number of wait cycles (as measured by  | 
1230  |  | // //absl/base/internal/cycleclock.h, and which may not be real  | 
1231  |  | // "cycle" counts.)  | 
1232  |  | //  | 
1233  |  | // The only event name currently sent is "slow release".  | 
1234  |  | //  | 
1235  |  | // This has the same ordering and single-use limitations as  | 
1236  |  | // RegisterMutexProfiler() above.  | 
1237  |  | void RegisterMutexTracer(void (*absl_nonnull fn)(const char* absl_nonnull msg,  | 
1238  |  |                                                  const void* absl_nonnull obj,  | 
1239  |  |                                                  int64_t wait_cycles));  | 
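  |  |  | 
  |  | // A tracer registration might look like the following sketch; where the  | 
  |  | // samples are written is up to the application:  | 
  |  | //  | 
  |  | //   void MutexTraceHook(const char* msg, const void* obj, int64_t wait_cycles) {  | 
  |  | //     std::fprintf(stderr, "mutex %p: %s (%lld cycles)\n", obj, msg,  | 
  |  | //                  static_cast<long long>(wait_cycles));  | 
  |  | //   }  | 
  |  | //   ...  | 
  |  | //   absl::RegisterMutexTracer(&MutexTraceHook);  | 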
1240  |  |  | 
1241  |  | // Register a hook for CondVar tracing.  | 
1242  |  | //  | 
1243  |  | // The function pointer registered here will be called on various CondVar  | 
1244  |  | // events.  The callback is given an opaque handle to the CondVar object and  | 
1245  |  | // a string identifying the event.  This is thread-safe, but only a single  | 
1246  |  | // tracer can be registered.  | 
1247  |  | //  | 
1248  |  | // Events that can be sent are "Wait", "Unwait", "Signal wakeup", and  | 
1249  |  | // "SignalAll wakeup".  | 
1250  |  | //  | 
1251  |  | // This has the same ordering and single-use limitations as  | 
1252  |  | // RegisterMutexProfiler() above.  | 
1253  |  | void RegisterCondVarTracer(void (*absl_nonnull fn)(  | 
1254  |  |     const char* absl_nonnull msg, const void* absl_nonnull cv));  | 
1255  |  |  | 
1256  |  | // EnableMutexInvariantDebugging()  | 
1257  |  | //  | 
1258  |  | // Enable or disable global support for Mutex invariant debugging.  If enabled,  | 
1259  |  | // then invariant predicates can be registered per-Mutex for debug checking.  | 
1260  |  | // See Mutex::EnableInvariantDebugging().  | 
1261  |  | void EnableMutexInvariantDebugging(bool enabled);  | 
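  |  |  | 
  |  | // A sketch of invariant debugging: the `Counter` type and its invariant are  | 
  |  | // hypothetical, and the per-Mutex registration uses  | 
  |  | // `Mutex::EnableInvariantDebugging()` documented earlier in this file:  | 
  |  | //  | 
  |  | //   struct Counter {  | 
  |  | //     absl::Mutex mu;  | 
  |  | //     int value = 0;     // invariant: value >= 0  | 
  |  | //   };  | 
  |  | //   void CheckCounter(void* arg) {  | 
  |  | //     assert(static_cast<Counter*>(arg)->value >= 0);  | 
  |  | //   }  | 
  |  | //   ...  | 
  |  | //   absl::EnableMutexInvariantDebugging(true);  | 
  |  | //   counter.mu.EnableInvariantDebugging(&CheckCounter, &counter);  | 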
1262  |  |  | 
1263  |  | // When in debug mode, and when the feature has been enabled globally, the  | 
1264  |  | // implementation will keep track of lock ordering and complain (or optionally  | 
1265  |  | // crash) if a cycle is detected in the acquired-before graph.  | 
1266  |  |  | 
1267  |  | // Possible modes of operation for the deadlock detector in debug mode.  | 
1268  |  | enum class OnDeadlockCycle { | 
1269  |  |   kIgnore,  // Neither report on nor attempt to track cycles in lock ordering  | 
1270  |  |   kReport,  // Report lock cycles to stderr when detected  | 
1271  |  |   kAbort,   // Report lock cycles to stderr when detected, then abort  | 
1272  |  | };  | 
1273  |  |  | 
1274  |  | // SetMutexDeadlockDetectionMode()  | 
1275  |  | //  | 
1276  |  | // Enable or disable global support for detection of potential deadlocks  | 
1277  |  | // due to Mutex lock ordering inversions.  When set to 'kIgnore', tracking of  | 
1278  |  | // lock ordering is disabled.  Otherwise, in debug builds, a lock ordering graph  | 
1279  |  | // will be maintained internally, and detected cycles will be reported in  | 
1280  |  | // the manner chosen here.  | 
1281  |  | void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode);  | 
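  |  |  | 
  |  | // For example, a test binary might choose to abort on any detected cycle:  | 
  |  | //  | 
  |  | //   absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);  | 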
1282  |  |  | 
1283  |  | ABSL_NAMESPACE_END  | 
1284  |  | }  // namespace absl  | 
1285  |  |  | 
1286  |  | // In some build configurations we pass --detect-odr-violations to the  | 
1287  |  | // gold linker.  This causes it to flag weak symbol overrides as ODR  | 
1288  |  | // violations.  Because ODR only applies to C++ and not C,  | 
1289  |  | // --detect-odr-violations ignores symbols not mangled with C++ names.  | 
1290  |  | // By changing our extension points to be extern "C", we dodge this  | 
1291  |  | // check.  | 
1292  |  | extern "C" { | 
1293  |  | void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();  | 
1294  |  | }  // extern "C"  | 
1295  |  |  | 
1296  |  | #endif  // ABSL_SYNCHRONIZATION_MUTEX_H_  |