Coverage Report

Created: 2025-08-02 06:33

/src/abseil-cpp/absl/synchronization/mutex.h
Line
Count
Source (jump to first uncovered line)
1
// Copyright 2017 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
//
15
// -----------------------------------------------------------------------------
16
// mutex.h
17
// -----------------------------------------------------------------------------
18
//
19
// This header file defines a `Mutex` -- a mutually exclusive lock -- and the
20
// most common type of synchronization primitive for facilitating locks on
21
// shared resources. A mutex is used to prevent multiple threads from accessing
22
// and/or writing to a shared resource concurrently.
23
//
24
// Unlike a `std::mutex`, the Abseil `Mutex` provides the following additional
25
// features:
26
//   * Conditional predicates intrinsic to the `Mutex` object
27
//   * Shared/reader locks, in addition to standard exclusive/writer locks
28
//   * Deadlock detection and debug support.
29
//
30
// The following helper classes are also defined within this file:
31
//
32
//  MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/
33
//              write access within the current scope.
34
//
35
//  ReaderMutexLock
36
//            - An RAII wrapper to acquire and release a `Mutex` for shared/read
37
//              access within the current scope.
38
//
39
//  WriterMutexLock
40
//            - Effectively an alias for `MutexLock` above, designed for use in
41
//              distinguishing reader and writer locks within code.
42
//
43
// In addition to simple mutex locks, this file also defines ways to perform
44
// locking under certain conditions.
45
//
46
//  Condition - (Preferred) Used to wait for a particular predicate that
47
//              depends on state protected by the `Mutex` to become true.
48
//  CondVar   - A lower-level variant of `Condition` that relies on
49
//              application code to explicitly signal the `CondVar` when
50
//              a condition has been met.
51
//
52
// See below for more information on using `Condition` or `CondVar`.
53
//
54
// Mutexes and mutex behavior can be quite complicated. The information within
55
// this header file is limited, as a result. Please consult the Mutex guide for
56
// more complete information and examples.
57
58
#ifndef ABSL_SYNCHRONIZATION_MUTEX_H_
59
#define ABSL_SYNCHRONIZATION_MUTEX_H_
60
61
#include <atomic>
62
#include <cstdint>
63
#include <cstring>
64
#include <iterator>
65
#include <string>
66
67
#include "absl/base/attributes.h"
68
#include "absl/base/const_init.h"
69
#include "absl/base/internal/identity.h"
70
#include "absl/base/internal/low_level_alloc.h"
71
#include "absl/base/internal/thread_identity.h"
72
#include "absl/base/internal/tsan_mutex_interface.h"
73
#include "absl/base/nullability.h"
74
#include "absl/base/port.h"
75
#include "absl/base/thread_annotations.h"
76
#include "absl/synchronization/internal/kernel_timeout.h"
77
#include "absl/synchronization/internal/per_thread_sem.h"
78
#include "absl/time/time.h"
79
80
namespace absl {
81
ABSL_NAMESPACE_BEGIN
82
83
class Condition;
84
struct SynchWaitParams;
85
86
// -----------------------------------------------------------------------------
87
// Mutex
88
// -----------------------------------------------------------------------------
89
//
90
// A `Mutex` is a non-reentrant (aka non-recursive) Mutually Exclusive lock
91
// on some resource, typically a variable or data structure with associated
92
// invariants. Proper usage of mutexes prevents concurrent access by different
93
// threads to the same resource.
94
//
95
// A `Mutex` has two basic operations: `Mutex::lock()` and `Mutex::unlock()`.
96
// The `lock()` operation *acquires* a `Mutex` (in a state known as an
97
// *exclusive* -- or *write* -- lock), and the `unlock()` operation *releases* a
98
// Mutex. During the span of time between the lock() and unlock() operations,
99
// a mutex is said to be *held*. By design, all mutexes support exclusive/write
100
// locks, as this is the most common way to use a mutex.
101
//
102
// Mutex operations are only allowed under certain conditions; otherwise an
103
// operation is "invalid", and disallowed by the API. The conditions concern
104
// both the current state of the mutex and the identity of the threads that
105
// are performing the operations.
106
//
107
// The `Mutex` state machine for basic lock/unlock operations is quite simple:
108
//
109
// |                | lock()                 | unlock() |
110
// |----------------+------------------------+----------|
111
// | Free           | Exclusive              | invalid  |
112
// | Exclusive      | blocks, then exclusive | Free     |
113
//
114
// The full conditions are as follows.
115
//
116
// * Calls to `unlock()` require that the mutex be held, and must be made in the
117
//   same thread that performed the corresponding `lock()` operation which
118
//   acquired the mutex; otherwise the call is invalid.
119
//
120
// * The mutex being non-reentrant (or non-recursive) means that a call to
121
//   `lock()` or `try_lock()` must not be made in a thread that already holds
122
//   the mutex; such a call is invalid.
123
//
124
// * In other words, the state of being "held" has both a temporal component
125
//   (from `lock()` until `unlock()`) as well as a thread identity component:
126
//   the mutex is held *by a particular thread*.
127
//
128
// An "invalid" operation has undefined behavior. The `Mutex` implementation
129
// is allowed to do anything on an invalid call, including, but not limited to,
130
// crashing with a useful error message, silently succeeding, or corrupting
131
// data structures. In debug mode, the implementation may crash with a useful
132
// error message.
133
//
134
// `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it
135
// is, however, approximately fair over long periods, and starvation-free for
136
// threads at the same priority.
137
//
138
// The lock/unlock primitives are now annotated with lock annotations
139
// defined in (base/thread_annotations.h). When writing multi-threaded code,
140
// you should use lock annotations whenever possible to document your lock
141
// synchronization policy. Besides acting as documentation, these annotations
142
// also help compilers or static analysis tools to identify and warn about
143
// issues that could potentially result in race conditions and deadlocks.
144
//
145
// For more information about the lock annotations, please see
146
// [Thread Safety
147
// Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) in the Clang
148
// documentation.
149
//
150
// See also `MutexLock`, below, for scoped `Mutex` acquisition.
151
152
class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
153
 public:
154
  // Creates a `Mutex` that is not held by anyone. This constructor is
155
  // typically used for Mutexes allocated on the heap or the stack.
156
  //
157
  // To create `Mutex` instances with static storage duration
158
  // (e.g. a namespace-scoped or global variable), see
159
  // `Mutex::Mutex(absl::kConstInit)` below instead.
160
  Mutex();
161
162
  // Creates a mutex with static storage duration.  A global variable
163
  // constructed this way avoids the lifetime issues that can occur on program
164
  // startup and shutdown.  (See absl/base/const_init.h.)
165
  //
166
  // For Mutexes allocated on the heap and stack, instead use the default
167
  // constructor, which can interact more fully with the thread sanitizer.
168
  //
169
  // Example usage:
170
  //   namespace foo {
171
  //   ABSL_CONST_INIT absl::Mutex mu(absl::kConstInit);
172
  //   }
173
  explicit constexpr Mutex(absl::ConstInitType);
174
175
  ~Mutex();
176
177
  // Mutex::lock()
178
  //
179
  // Blocks the calling thread, if necessary, until this `Mutex` is free, and
180
  // then acquires it exclusively. (This lock is also known as a "write lock.")
181
  void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION();
182
183
0
  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { lock(); }
184
185
  // Mutex::unlock()
186
  //
187
  // Releases this `Mutex` and returns it from the exclusive/write state to the
188
  // free state. Calling thread must hold the `Mutex` exclusively.
189
  void unlock() ABSL_UNLOCK_FUNCTION();
190
191
0
  inline void Unlock() ABSL_UNLOCK_FUNCTION() { unlock(); }
192
193
  // Mutex::try_lock()
194
  //
195
  // If the mutex can be acquired without blocking, does so exclusively and
196
  // returns `true`. Otherwise, returns `false`. Returns `true` with high
197
  // probability if the `Mutex` was free.
198
  [[nodiscard]] bool try_lock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
199
200
0
  [[nodiscard]] bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
201
0
    return try_lock();
202
0
  }
203
204
  // Mutex::AssertHeld()
205
  //
206
  // Require that the mutex be held exclusively (write mode) by this thread.
207
  //
208
  // If the mutex is not currently held by this thread, this function may report
209
  // an error (typically by crashing with a diagnostic) or it may do nothing.
210
  // This function is intended only as a tool to assist debugging; it doesn't
211
  // guarantee correctness.
212
  void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK();
213
214
  // ---------------------------------------------------------------------------
215
  // Reader-Writer Locking
216
  // ---------------------------------------------------------------------------
217
218
  // A Mutex can also be used as a starvation-free reader-writer lock.
219
  // Neither read-locks nor write-locks are reentrant/recursive to avoid
220
  // potential client programming errors.
221
  //
222
  // The Mutex API provides `Writer*()` aliases for the existing `lock()`,
223
  // `unlock()` and `try_lock()` methods for use within applications mixing
224
  // reader/writer locks. Using `*_shared()` and `Writer*()` operations in this
225
  // manner can make locking behavior clearer when mixing read and write modes.
226
  //
227
  // Introducing reader locks necessarily complicates the `Mutex` state
228
  // machine somewhat. The table below illustrates the allowed state transitions
229
  // of a mutex in such cases. Note that lock_shared() may block even if the
230
  // lock is held in shared mode; this occurs when another thread is blocked on
231
  // a call to lock().
232
  //
233
  // ---------------------------------------------------------------------------
234
  //     Operation: lock()       unlock()  lock_shared() unlock_shared()
235
  // ---------------------------------------------------------------------------
236
  // State
237
  // ---------------------------------------------------------------------------
238
  // Free           Exclusive    invalid   Shared(1)              invalid
239
  // Shared(1)      blocks       invalid   Shared(2) or blocks    Free
240
  // Shared(n) n>1  blocks       invalid   Shared(n+1) or blocks  Shared(n-1)
241
  // Exclusive      blocks       Free      blocks                 invalid
242
  // ---------------------------------------------------------------------------
243
  //
244
  // In comments below, "shared" refers to a state of Shared(n) for any n > 0.
245
246
  // Mutex::lock_shared()
247
  //
248
  // Blocks the calling thread, if necessary, until this `Mutex` is either free,
249
  // or in shared mode, and then acquires a share of it. Note that
250
  // `lock_shared()` will block if some other thread has an exclusive/writer
251
  // lock on the mutex.
252
  void lock_shared() ABSL_SHARED_LOCK_FUNCTION();
253
254
1.02M
  void ReaderLock() ABSL_SHARED_LOCK_FUNCTION() { lock_shared(); }
255
256
  // Mutex::unlock_shared()
257
  //
258
  // Releases a read share of this `Mutex`. `unlock_shared` may return a mutex
259
  // to the free state if this thread holds the last reader lock on the mutex.
260
  // Note that you cannot call `unlock_shared()` on a mutex held in write mode.
261
  void unlock_shared() ABSL_UNLOCK_FUNCTION();
262
263
0
  void ReaderUnlock() ABSL_UNLOCK_FUNCTION() { unlock_shared(); }
264
265
  // Mutex::try_lock_shared()
266
  //
267
  // If the mutex can be acquired without blocking, acquires this mutex for
268
  // shared access and returns `true`. Otherwise, returns `false`. Returns
269
  // `true` with high probability if the `Mutex` was free or shared.
270
  [[nodiscard]] bool try_lock_shared() ABSL_SHARED_TRYLOCK_FUNCTION(true);
271
272
0
  [[nodiscard]] bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true) {
273
0
    return try_lock_shared();
274
0
  }
275
276
  // Mutex::AssertReaderHeld()
277
  //
278
  // Require that the mutex be held at least in shared mode (read mode) by this
279
  // thread.
280
  //
281
  // If the mutex is not currently held by this thread, this function may report
282
  // an error (typically by crashing with a diagnostic) or it may do nothing.
283
  // This function is intended only as a tool to assist debugging; it doesn't
284
  // guarantee correctness.
285
  void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK();
286
287
  // Mutex::WriterLock()
288
  // Mutex::WriterUnlock()
289
  // Mutex::WriterTryLock()
290
  //
291
  // Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.
292
  //
293
  // These methods may be used (along with the complementary `Reader*()`
294
  // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
295
  // etc.) from reader/writer lock usage.
296
1
  void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->lock(); }
297
298
1
  void WriterUnlock() ABSL_UNLOCK_FUNCTION() { this->unlock(); }
299
300
0
  [[nodiscard]] bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
301
0
    return this->try_lock();
302
0
  }
303
304
  // ---------------------------------------------------------------------------
305
  // Conditional Critical Regions
306
  // ---------------------------------------------------------------------------
307
308
  // Conditional usage of a `Mutex` can occur using two distinct paradigms:
309
  //
310
  //   * Use of `Mutex` member functions with `Condition` objects.
311
  //   * Use of the separate `CondVar` abstraction.
312
  //
313
  // In general, prefer use of `Condition` and the `Mutex` member functions
314
  // listed below over `CondVar`. When there are multiple threads waiting on
315
  // distinctly different conditions, however, a battery of `CondVar`s may be
316
  // more efficient. This section discusses use of `Condition` objects.
317
  //
318
  // `Mutex` contains member functions for performing lock operations only under
319
  // certain conditions, of class `Condition`. For correctness, the `Condition`
320
  // must return a boolean that is a pure function, only of state protected by
321
  // the `Mutex`. The condition must be invariant w.r.t. environmental state
322
  // such as thread, cpu id, or time, and must be `noexcept`. The condition will
323
  // always be invoked with the mutex held in at least read mode, so you should
324
  // not block it for long periods or sleep it on a timer.
325
  //
326
  // Since a condition must not depend directly on the current time, use
327
  // `*WithTimeout()` member function variants to make your condition
328
  // effectively true after a given duration, or `*WithDeadline()` variants to
329
  // make your condition effectively true after a given time.
330
  //
331
  // The condition function should have no side-effects aside from debug
332
  // logging; as a special exception, the function may acquire other mutexes
333
  // provided it releases all those that it acquires.  (This exception was
334
  // required to allow logging.)
335
336
  // Mutex::Await()
337
  //
338
  // Unlocks this `Mutex` and blocks until simultaneously both `cond` is `true`
339
  // and this `Mutex` can be reacquired, then reacquires this `Mutex` in the
340
  // same mode in which it was previously held. If the condition is initially
341
  // `true`, `Await()` *may* skip the release/re-acquire step.
342
  //
343
  // `Await()` requires that this thread holds this `Mutex` in some mode.
344
0
  void Await(const Condition& cond) {
345
0
    AwaitCommon(cond, synchronization_internal::KernelTimeout::Never());
346
0
  }
347
348
  // Mutex::LockWhen()
349
  // Mutex::ReaderLockWhen()
350
  // Mutex::WriterLockWhen()
351
  //
352
  // Blocks until simultaneously both `cond` is `true` and this `Mutex` can
353
  // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
354
  // logically equivalent to `*Lock(); Await();` though they may have different
355
  // performance characteristics.
356
0
  void LockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
357
0
    LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(),
358
0
                   true);
359
0
  }
360
361
0
  void ReaderLockWhen(const Condition& cond) ABSL_SHARED_LOCK_FUNCTION() {
362
0
    LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(),
363
0
                   false);
364
0
  }
365
366
0
  void WriterLockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
367
0
    this->LockWhen(cond);
368
0
  }
369
370
  // ---------------------------------------------------------------------------
371
  // Mutex Variants with Timeouts/Deadlines
372
  // ---------------------------------------------------------------------------
373
374
  // Mutex::AwaitWithTimeout()
375
  // Mutex::AwaitWithDeadline()
376
  //
377
  // Unlocks this `Mutex` and blocks until simultaneously:
378
  //   - either `cond` is true or the {timeout has expired, deadline has passed}
379
  //     and
380
  //   - this `Mutex` can be reacquired,
381
  // then reacquire this `Mutex` in the same mode in which it was previously
382
  // held, returning `true` iff `cond` is `true` on return.
383
  //
384
  // If the condition is initially `true`, the implementation *may* skip the
385
  // release/re-acquire step and return immediately.
386
  //
387
  // Deadlines in the past are equivalent to an immediate deadline.
388
  // Negative timeouts are equivalent to a zero timeout.
389
  //
390
  // This method requires that this thread holds this `Mutex` in some mode.
391
0
  bool AwaitWithTimeout(const Condition& cond, absl::Duration timeout) {
392
0
    return AwaitCommon(cond, synchronization_internal::KernelTimeout{timeout});
393
0
  }
394
395
0
  bool AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
396
0
    return AwaitCommon(cond, synchronization_internal::KernelTimeout{deadline});
397
0
  }
398
399
  // Mutex::LockWhenWithTimeout()
400
  // Mutex::ReaderLockWhenWithTimeout()
401
  // Mutex::WriterLockWhenWithTimeout()
402
  //
403
  // Blocks until simultaneously both:
404
  //   - either `cond` is `true` or the timeout has expired, and
405
  //   - this `Mutex` can be acquired,
406
  // then atomically acquires this `Mutex`, returning `true` iff `cond` is
407
  // `true` on return.
408
  //
409
  // Negative timeouts are equivalent to a zero timeout.
410
  bool LockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
411
0
      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
412
0
    return LockWhenCommon(
413
0
        cond, synchronization_internal::KernelTimeout{timeout}, true);
414
0
  }
415
  bool ReaderLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
416
0
      ABSL_SHARED_LOCK_FUNCTION() {
417
0
    return LockWhenCommon(
418
0
        cond, synchronization_internal::KernelTimeout{timeout}, false);
419
0
  }
420
  bool WriterLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
421
0
      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
422
0
    return this->LockWhenWithTimeout(cond, timeout);
423
0
  }
424
425
  // Mutex::LockWhenWithDeadline()
426
  // Mutex::ReaderLockWhenWithDeadline()
427
  // Mutex::WriterLockWhenWithDeadline()
428
  //
429
  // Blocks until simultaneously both:
430
  //   - either `cond` is `true` or the deadline has been passed, and
431
  //   - this `Mutex` can be acquired,
432
  // then atomically acquires this Mutex, returning `true` iff `cond` is `true`
433
  // on return.
434
  //
435
  // Deadlines in the past are equivalent to an immediate deadline.
436
  bool LockWhenWithDeadline(const Condition& cond, absl::Time deadline)
437
0
      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
438
0
    return LockWhenCommon(
439
0
        cond, synchronization_internal::KernelTimeout{deadline}, true);
440
0
  }
441
  bool ReaderLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
442
0
      ABSL_SHARED_LOCK_FUNCTION() {
443
0
    return LockWhenCommon(
444
0
        cond, synchronization_internal::KernelTimeout{deadline}, false);
445
0
  }
446
  bool WriterLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
447
0
      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
448
0
    return this->LockWhenWithDeadline(cond, deadline);
449
0
  }
450
451
  // ---------------------------------------------------------------------------
452
  // Debug Support: Invariant Checking, Deadlock Detection, Logging.
453
  // ---------------------------------------------------------------------------
454
455
  // Mutex::EnableInvariantDebugging()
456
  //
457
  // If `invariant`!=null and if invariant debugging has been enabled globally,
458
  // cause `(*invariant)(arg)` to be called at moments when the invariant for
459
  // this `Mutex` should hold (for example: just after acquire, just before
460
  // release).
461
  //
462
  // The routine `invariant` should have no side-effects since it is not
463
  // guaranteed how many times it will be called; it should check the invariant
464
  // and crash if it does not hold. Enabling global invariant debugging may
465
  // substantially reduce `Mutex` performance; it should be set only for
466
  // non-production runs.  Optimization options may also disable invariant
467
  // checks.
468
  void EnableInvariantDebugging(
469
      void (*absl_nullable invariant)(void* absl_nullability_unknown),
470
      void* absl_nullability_unknown arg);
471
472
  // Mutex::EnableDebugLog()
473
  //
474
  // Cause all subsequent uses of this `Mutex` to be logged via
475
  // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if no previous
476
  // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
477
  //
478
  // Note: This method substantially reduces `Mutex` performance.
479
  void EnableDebugLog(const char* absl_nullable name);
480
481
  // Deadlock detection
482
483
  // Mutex::ForgetDeadlockInfo()
484
  //
485
  // Forget any deadlock-detection information previously gathered
486
  // about this `Mutex`. Call this method in debug mode when the lock ordering
487
  // of a `Mutex` changes.
488
  void ForgetDeadlockInfo();
489
490
  // Mutex::AssertNotHeld()
491
  //
492
  // Return immediately if this thread does not hold this `Mutex` in any
493
  // mode; otherwise, may report an error (typically by crashing with a
494
  // diagnostic), or may return immediately.
495
  //
496
  // Currently this check is performed only if all of:
497
  //    - in debug mode
498
  //    - SetMutexDeadlockDetectionMode() has been set to kReport or kAbort
499
  //    - number of locks concurrently held by this thread is not large.
500
  // are true.
501
  void AssertNotHeld() const;
502
503
  // Special cases.
504
505
  // A `MuHow` is a constant that indicates how a lock should be acquired.
506
  // Internal implementation detail.  Clients should ignore.
507
  typedef const struct MuHowS* MuHow;
508
509
  // Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
510
  //
511
  // Causes the `Mutex` implementation to prepare itself for re-entry caused by
512
  // future use of `Mutex` within a fatal signal handler. This method is
513
  // intended for use only for last-ditch attempts to log crash information.
514
  // It does not guarantee that attempts to use Mutexes within the handler will
515
  // not deadlock; it merely makes other faults less likely.
516
  //
517
  // WARNING:  This routine must be invoked from a signal handler, and the
518
  // signal handler must either loop forever or terminate the process.
519
  // Attempts to return from (or `longjmp` out of) the signal handler once this
520
  // call has been made may cause arbitrary program behaviour including
521
  // crashes and deadlocks.
522
  static void InternalAttemptToUseMutexInFatalSignalHandler();
523
524
 private:
525
  std::atomic<intptr_t> mu_;  // The Mutex state.
526
527
  // Post()/Wait() versus associated PerThreadSem; in class for required
528
  // friendship with PerThreadSem.
529
  static void IncrementSynchSem(Mutex* absl_nonnull mu,
530
                                base_internal::PerThreadSynch* absl_nonnull w);
531
  static bool DecrementSynchSem(Mutex* absl_nonnull mu,
532
                                base_internal::PerThreadSynch* absl_nonnull w,
533
                                synchronization_internal::KernelTimeout t);
534
535
  // slow path acquire
536
  void LockSlowLoop(SynchWaitParams* absl_nonnull waitp, int flags);
537
  // wrappers around LockSlowLoop()
538
  bool LockSlowWithDeadline(MuHow absl_nonnull how,
539
                            const Condition* absl_nullable cond,
540
                            synchronization_internal::KernelTimeout t,
541
                            int flags);
542
  void LockSlow(MuHow absl_nonnull how, const Condition* absl_nullable cond,
543
                int flags) ABSL_ATTRIBUTE_COLD;
544
  // slow path release
545
  void UnlockSlow(SynchWaitParams* absl_nullable waitp) ABSL_ATTRIBUTE_COLD;
546
  // TryLock slow path.
547
  bool TryLockSlow();
548
  // ReaderTryLock slow path.
549
  bool ReaderTryLockSlow();
550
  // Common code between Await() and AwaitWithTimeout/Deadline()
551
  bool AwaitCommon(const Condition& cond,
552
                   synchronization_internal::KernelTimeout t);
553
  bool LockWhenCommon(const Condition& cond,
554
                      synchronization_internal::KernelTimeout t, bool write);
555
  // Attempt to remove thread s from queue.
556
  void TryRemove(base_internal::PerThreadSynch* absl_nonnull s);
557
  // Block a thread on mutex.
558
  void Block(base_internal::PerThreadSynch* absl_nonnull s);
559
  // Wake a thread; return successor.
560
  base_internal::PerThreadSynch* absl_nullable Wakeup(
561
      base_internal::PerThreadSynch* absl_nonnull w);
562
  void Dtor();
563
564
  friend class CondVar;   // for access to Trans()/Fer().
565
  void Trans(MuHow absl_nonnull how);  // used for CondVar->Mutex transfer
566
  void Fer(base_internal::PerThreadSynch* absl_nonnull
567
           w);  // used for CondVar->Mutex transfer
568
569
  // Catch the error of writing Mutex when intending MutexLock.
570
0
  explicit Mutex(const volatile Mutex* absl_nullable /*ignored*/) {}
571
572
  Mutex(const Mutex&) = delete;
573
  Mutex& operator=(const Mutex&) = delete;
574
};
575
576
// -----------------------------------------------------------------------------
577
// Mutex RAII Wrappers
578
// -----------------------------------------------------------------------------
579
580
// MutexLock
581
//
582
// `MutexLock` is a helper class, which acquires and releases a `Mutex` via
583
// RAII.
584
//
585
// Example:
586
//
587
// Class Foo {
588
//  public:
589
//   Foo::Bar* Baz() {
590
//     MutexLock lock(mu_);
591
//     ...
592
//     return bar;
593
//   }
594
//
595
// private:
596
//   Mutex mu_;
597
// };
598
class ABSL_SCOPED_LOCKABLE MutexLock {
599
 public:
600
  // Constructors
601
602
  // Calls `mu->lock()` and returns when that call returns. That is, `*mu` is
603
  // guaranteed to be locked when this object is constructed. Requires that
604
  // `mu` be dereferenceable.
605
32
  explicit MutexLock(Mutex& mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
606
32
    this->mu_.lock();
607
32
  }
608
609
  explicit MutexLock(Mutex* absl_nonnull mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
610
0
      : MutexLock(*mu) {}
611
612
  // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
613
  // the above, the condition given by `cond` is also guaranteed to hold when
614
  // this object is constructed.
615
  explicit MutexLock(Mutex* absl_nonnull mu, const Condition& cond)
616
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
617
0
      : mu_(*mu) {
618
0
    this->mu_.LockWhen(cond);
619
0
  }
620
621
  MutexLock(const MutexLock&) = delete;  // NOLINT(runtime/mutex)
622
  MutexLock(MutexLock&&) = delete;       // NOLINT(runtime/mutex)
623
  MutexLock& operator=(const MutexLock&) = delete;
624
  MutexLock& operator=(MutexLock&&) = delete;
625
626
32
  ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_.unlock(); }
627
628
 private:
629
  Mutex& mu_;
630
};
631
632
// ReaderMutexLock
633
//
634
// The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and
635
// releases a shared lock on a `Mutex` via RAII.
636
class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
637
 public:
638
1.02M
  explicit ReaderMutexLock(Mutex& mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
639
1.02M
    mu.ReaderLock();
640
1.02M
  }
641
642
  explicit ReaderMutexLock(Mutex* absl_nonnull mu) ABSL_SHARED_LOCK_FUNCTION(mu)
643
0
      : ReaderMutexLock(*mu) {}
644
645
  explicit ReaderMutexLock(Mutex& mu, const Condition& cond)
646
      ABSL_SHARED_LOCK_FUNCTION(mu)
647
0
      : mu_(mu) {
648
0
    mu.ReaderLockWhen(cond);
649
0
  }
650
651
  explicit ReaderMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
652
      ABSL_SHARED_LOCK_FUNCTION(mu)
653
0
      : ReaderMutexLock(*mu, cond) {}
654
655
  ReaderMutexLock(const ReaderMutexLock&) = delete;
656
  ReaderMutexLock(ReaderMutexLock&&) = delete;
657
  ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
658
  ReaderMutexLock& operator=(ReaderMutexLock&&) = delete;
659
660
1.02M
  ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_.unlock_shared(); }
661
662
 private:
663
  Mutex& mu_;
664
};
665
666
// WriterMutexLock
667
//
668
// The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and
669
// releases a write (exclusive) lock on a `Mutex` via RAII.
670
class ABSL_SCOPED_LOCKABLE WriterMutexLock {
671
 public:
672
  explicit WriterMutexLock(Mutex& mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
673
1
      : mu_(mu) {
674
1
    mu.WriterLock();
675
1
  }
676
677
  explicit WriterMutexLock(Mutex* absl_nonnull mu)
678
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
679
0
      : WriterMutexLock(*mu) {}
680
681
  explicit WriterMutexLock(Mutex& mu, const Condition& cond)
682
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
683
0
      : mu_(mu) {
684
0
    mu.WriterLockWhen(cond);
685
0
  }
686
687
  explicit WriterMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
688
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
689
0
      : WriterMutexLock(*mu, cond) {}
690
691
  WriterMutexLock(const WriterMutexLock&) = delete;
692
  WriterMutexLock(WriterMutexLock&&) = delete;
693
  WriterMutexLock& operator=(const WriterMutexLock&) = delete;
694
  WriterMutexLock& operator=(WriterMutexLock&&) = delete;
695
696
1
  ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_.WriterUnlock(); }
697
698
 private:
699
  Mutex& mu_;
700
};
701
702
// -----------------------------------------------------------------------------
703
// Condition
704
// -----------------------------------------------------------------------------
705
//
706
// `Mutex` contains a number of member functions which take a `Condition` as an
707
// argument; clients can wait for conditions to become `true` before attempting
708
// to acquire the mutex. These sections are known as "condition critical"
709
// sections. To use a `Condition`, you simply need to construct it, and use
710
// within an appropriate `Mutex` member function; everything else in the
711
// `Condition` class is an implementation detail.
712
//
713
// A `Condition` is specified as a function pointer which returns a boolean.
714
// `Condition` functions should be pure functions -- their results should depend
715
// only on passed arguments, should not consult any external state (such as
716
// clocks), and should have no side-effects, aside from debug logging. Any
717
// objects that the function may access should be limited to those which are
718
// constant while the mutex is blocked on the condition (e.g. a stack variable),
719
// or objects of state protected explicitly by the mutex.
720
//
721
// No matter which construction is used for `Condition`, the underlying
722
// function pointer / functor / callable must not throw any
723
// exceptions. Correctness of `Mutex` / `Condition` is not guaranteed in
724
// the face of a throwing `Condition`. (When Abseil is allowed to depend
725
// on C++17, these function pointers will be explicitly marked
726
// `noexcept`; until then this requirement cannot be enforced in the
727
// type system.)
728
//
729
// Note: to use a `Condition`, you need only construct it and pass it to a
730
// suitable `Mutex' member function, such as `Mutex::Await()`, or to the
731
// constructor of one of the scope guard classes.
732
//
733
// Example using LockWhen/Unlock:
734
//
735
//   // assume count_ is not internal reference count
736
//   int count_ ABSL_GUARDED_BY(mu_);
737
//   Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_);
738
//
739
//   mu_.LockWhen(count_is_zero);
740
//   // ...
741
//   mu_.Unlock();
742
//
743
// Example using a scope guard:
744
//
745
//   {
746
//     MutexLock lock(mu_, count_is_zero);
747
//     // ...
748
//   }
749
//
750
// When multiple threads are waiting on exactly the same condition, make sure
751
// that they are constructed with the same parameters (same pointer to function
752
// + arg, or same pointer to object + method), so that the mutex implementation
753
// can avoid redundantly evaluating the same condition for each thread.
754
class Condition {
755
 public:
756
  // A Condition that returns the result of "(*func)(arg)"
757
  Condition(bool (*absl_nonnull func)(void* absl_nullability_unknown),
758
            void* absl_nullability_unknown arg);
759
760
  // Templated version for people who are averse to casts.
761
  //
762
  // To use a lambda, prepend it with unary plus, which converts the lambda
763
  // into a function pointer:
764
  //     Condition(+[](T* t) { return ...; }, arg).
765
  //
766
  // Note: lambdas in this case must contain no bound variables.
767
  //
768
  // See class comment for performance advice.
769
  template <typename T>
770
  Condition(bool (*absl_nonnull func)(T* absl_nullability_unknown),
771
            T* absl_nullability_unknown arg);
772
773
  // Same as above, but allows for cases where `arg` comes from a pointer that
774
  // is convertible to the function parameter type `T*` but not an exact match.
775
  //
776
  // For example, the argument might be `X*` but the function takes `const X*`,
777
  // or the argument might be `Derived*` while the function takes `Base*`, and
778
  // so on for cases where the argument pointer can be implicitly converted.
779
  //
780
  // Implementation notes: This constructor overload is required in addition to
781
  // the one above to allow deduction of `T` from `arg` for cases such as where
782
  // a function template is passed as `func`. Also, the dummy `typename = void`
783
  // template parameter exists just to work around a MSVC mangling bug.
784
  template <typename T, typename = void>
785
  Condition(
786
      bool (*absl_nonnull func)(T* absl_nullability_unknown),
787
      typename absl::internal::type_identity<T>::type* absl_nullability_unknown
788
      arg);
789
790
  // Templated version for invoking a method that returns a `bool`.
791
  //
792
  // `Condition(object, &Class::Method)` constructs a `Condition` that evaluates
793
  // `object->Method()`.
794
  //
795
  // Implementation Note: `absl::internal::type_identity` is used to allow
796
  // methods to come from base classes. A simpler signature like
797
  // `Condition(T*, bool (T::*)())` does not suffice.
798
  template <typename T>
799
  Condition(
800
      T* absl_nonnull object,
801
      bool (absl::internal::type_identity<T>::type::* absl_nonnull method)());
802
803
  // Same as above, for const members
804
  template <typename T>
805
  Condition(
806
      const T* absl_nonnull object,
807
      bool (absl::internal::type_identity<T>::type::* absl_nonnull method)()
808
          const);
809
810
  // A Condition that returns the value of `*cond`
811
  explicit Condition(const bool* absl_nonnull cond);
812
813
  // Templated version for invoking a functor that returns a `bool`.
814
  // This approach accepts pointers to non-mutable lambdas, `std::function`,
815
  // the result of` std::bind` and user-defined functors that define
816
  // `bool F::operator()() const`.
817
  //
818
  // Example:
819
  //
820
  //   auto reached = [this, current]() {
821
  //     mu_.AssertReaderHeld();                // For annotalysis.
822
  //     return processed_ >= current;
823
  //   };
824
  //   mu_.Await(Condition(&reached));
825
  //
826
  // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
827
  // the lambda as it may be called when the mutex is being unlocked from a
828
  // scope holding only a reader lock, which will make the assertion not
829
  // fulfilled and crash the binary.
830
831
  // See class comment for performance advice. In particular, if there
832
  // might be more than one waiter for the same condition, make sure
833
  // that all waiters construct the condition with the same pointers.
834
835
  // Implementation note: The second template parameter ensures that this
836
  // constructor doesn't participate in overload resolution if T doesn't have
837
  // `bool operator() const`.
838
  template <typename T, typename E = decltype(static_cast<bool (T::*)() const>(
839
                            &T::operator()))>
840
  explicit Condition(const T* absl_nonnull obj)
841
      : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
842
843
  // A Condition that always returns `true`.
844
  // kTrue is only useful in a narrow set of circumstances, mostly when
845
  // it's passed conditionally. For example:
846
  //
847
  //   mu.LockWhen(some_flag ? kTrue : SomeOtherCondition);
848
  //
849
  // Note: {LockWhen,Await}With{Deadline,Timeout} methods with kTrue condition
850
  // don't return immediately when the timeout happens, they still block until
851
  // the Mutex becomes available. The return value of these methods does
852
  // not indicate if the timeout was reached; rather it indicates whether or
853
  // not the condition is true.
854
  ABSL_CONST_INIT static const Condition kTrue;
855
856
  // Evaluates the condition.
857
  bool Eval() const;
858
859
  // Returns `true` if the two conditions are guaranteed to return the same
860
  // value if evaluated at the same time, `false` if the evaluation *may* return
861
  // different results.
862
  //
863
  // Two `Condition` values are guaranteed equal if both their `func` and `arg`
864
  // components are the same. A null pointer is equivalent to a `true`
865
  // condition.
866
  static bool GuaranteedEqual(const Condition* absl_nullable a,
867
                              const Condition* absl_nullable b);
868
869
 private:
870
  // Sizing an allocation for a method pointer can be subtle. In the Itanium
871
  // specifications, a method pointer has a predictable, uniform size. On the
872
  // other hand, MSVC ABI, method pointer sizes vary based on the
873
  // inheritance of the class. Specifically, method pointers from classes with
874
  // multiple inheritance are bigger than those of classes with single
875
  // inheritance. Other variations also exist.
876
877
#ifndef _MSC_VER
878
  // Allocation for a function pointer or method pointer.
879
  // The {0} initializer ensures that all unused bytes of this buffer are
880
  // always zeroed out.  This is necessary, because GuaranteedEqual() compares
881
  // all of the bytes, unaware of which bytes are relevant to a given `eval_`.
882
  using MethodPtr = bool (Condition::*)();
883
  char callback_[sizeof(MethodPtr)] = {0};
884
#else
885
  // It is well known that the larget MSVC pointer-to-member is 24 bytes. This
886
  // may be the largest known pointer-to-member of any platform. For this
887
  // reason we will allocate 24 bytes for MSVC platform toolchains.
888
  char callback_[24] = {0};
889
#endif
890
891
  // Function with which to evaluate callbacks and/or arguments.
892
  bool (*absl_nullable eval_)(const Condition* absl_nonnull) = nullptr;
893
894
  // Either an argument for a function call or an object for a method call.
895
  void* absl_nullable arg_ = nullptr;
896
897
  // Various functions eval_ can point to:
898
  static bool CallVoidPtrFunction(const Condition* absl_nonnull c);
899
  template <typename T>
900
  static bool CastAndCallFunction(const Condition* absl_nonnull c);
901
  template <typename T, typename ConditionMethodPtr>
902
  static bool CastAndCallMethod(const Condition* absl_nonnull c);
903
904
  // Helper methods for storing, validating, and reading callback arguments.
905
  template <typename T>
906
0
  inline void StoreCallback(T callback) {
907
0
    static_assert(
908
0
        sizeof(callback) <= sizeof(callback_),
909
0
        "An overlarge pointer was passed as a callback to Condition.");
910
0
    std::memcpy(callback_, &callback, sizeof(callback));
911
0
  }
Unexecuted instantiation: void absl::Condition::StoreCallback<bool (*)(absl::SynchEvent*)>(bool (*)(absl::SynchEvent*))
Unexecuted instantiation: void absl::Condition::StoreCallback<bool (*)(void*)>(bool (*)(void*))
912
913
  template <typename T>
914
0
  inline void ReadCallback(T* absl_nonnull callback) const {
915
0
    std::memcpy(callback, callback_, sizeof(*callback));
916
0
  }
917
918
0
  static bool AlwaysTrue(const Condition* absl_nullable) { return true; }
919
920
  // Used only to create kTrue.
921
0
  constexpr Condition() : eval_(AlwaysTrue), arg_(nullptr) {}
922
};
923
924
// -----------------------------------------------------------------------------
925
// CondVar
926
// -----------------------------------------------------------------------------
927
//
928
// A condition variable, reflecting state evaluated separately outside of the
929
// `Mutex` object, which can be signaled to wake callers.
930
// This class is not normally needed; use `Mutex` member functions such as
931
// `Mutex::Await()` and intrinsic `Condition` abstractions. In rare cases
932
// with many threads and many conditions, `CondVar` may be faster.
933
//
934
// The implementation may deliver signals to any condition variable at
935
// any time, even when no call to `Signal()` or `SignalAll()` is made; as a
936
// result, upon being awoken, you must check the logical condition you have
937
// been waiting upon.
938
//
939
// Examples:
940
//
941
// Usage for a thread waiting for some condition C protected by mutex mu:
942
//       mu.Lock();
943
//       while (!C) { cv->Wait(&mu); }        // releases and reacquires mu
944
//       //  C holds; process data
945
//       mu.Unlock();
946
//
947
// Usage to wake T is:
948
//       mu.Lock();
949
//       // process data, possibly establishing C
950
//       if (C) { cv->Signal(); }
951
//       mu.Unlock();
952
//
953
// If C may be useful to more than one waiter, use `SignalAll()` instead of
954
// `Signal()`.
955
//
956
// With this implementation it is efficient to use `Signal()/SignalAll()` inside
957
// the locked region; this usage can make reasoning about your program easier.
958
//
959
class CondVar {
960
 public:
961
  // A `CondVar` allocated on the heap or on the stack can use the this
962
  // constructor.
963
  CondVar();
964
965
  // CondVar::Wait()
966
  //
967
  // Atomically releases a `Mutex` and blocks on this condition variable.
968
  // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
969
  // spurious wakeup), then reacquires the `Mutex` and returns.
970
  //
971
  // Requires and ensures that the current thread holds the `Mutex`.
972
0
  void Wait(Mutex* absl_nonnull mu) {
973
0
    WaitCommon(mu, synchronization_internal::KernelTimeout::Never());
974
0
  }
975
976
  // CondVar::WaitWithTimeout()
977
  //
978
  // Atomically releases a `Mutex` and blocks on this condition variable.
979
  // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
980
  // spurious wakeup), or until the timeout has expired, then reacquires
981
  // the `Mutex` and returns.
982
  //
983
  // Returns true if the timeout has expired without this `CondVar`
984
  // being signalled in any manner. If both the timeout has expired
985
  // and this `CondVar` has been signalled, the implementation is free
986
  // to return `true` or `false`.
987
  //
988
  // Requires and ensures that the current thread holds the `Mutex`.
989
0
  bool WaitWithTimeout(Mutex* absl_nonnull mu, absl::Duration timeout) {
990
0
    return WaitCommon(mu, synchronization_internal::KernelTimeout(timeout));
991
0
  }
992
993
  // CondVar::WaitWithDeadline()
994
  //
995
  // Atomically releases a `Mutex` and blocks on this condition variable.
996
  // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
997
  // spurious wakeup), or until the deadline has passed, then reacquires
998
  // the `Mutex` and returns.
999
  //
1000
  // Deadlines in the past are equivalent to an immediate deadline.
1001
  //
1002
  // Returns true if the deadline has passed without this `CondVar`
1003
  // being signalled in any manner. If both the deadline has passed
1004
  // and this `CondVar` has been signalled, the implementation is free
1005
  // to return `true` or `false`.
1006
  //
1007
  // Requires and ensures that the current thread holds the `Mutex`.
1008
0
  bool WaitWithDeadline(Mutex* absl_nonnull mu, absl::Time deadline) {
1009
0
    return WaitCommon(mu, synchronization_internal::KernelTimeout(deadline));
1010
0
  }
1011
1012
  // CondVar::Signal()
1013
  //
1014
  // Signal this `CondVar`; wake at least one waiter if one exists.
1015
  void Signal();
1016
1017
  // CondVar::SignalAll()
1018
  //
1019
  // Signal this `CondVar`; wake all waiters.
1020
  void SignalAll();
1021
1022
  // CondVar::EnableDebugLog()
1023
  //
1024
  // Causes all subsequent uses of this `CondVar` to be logged via
1025
  // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
1026
  // Note: this method substantially reduces `CondVar` performance.
1027
  void EnableDebugLog(const char* absl_nullable name);
1028
1029
 private:
1030
  bool WaitCommon(Mutex* absl_nonnull mutex,
1031
                  synchronization_internal::KernelTimeout t);
1032
  void Remove(base_internal::PerThreadSynch* absl_nonnull s);
1033
  std::atomic<intptr_t> cv_;  // Condition variable state.
1034
  CondVar(const CondVar&) = delete;
1035
  CondVar& operator=(const CondVar&) = delete;
1036
};
1037
1038
// Variants of MutexLock.
1039
//
1040
// If you find yourself using one of these, consider instead using
1041
// Mutex::Unlock() and/or if-statements for clarity.
1042
1043
// MutexLockMaybe
1044
//
1045
// MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
1046
class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
1047
 public:
1048
  explicit MutexLockMaybe(Mutex* absl_nullable mu)
1049
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1050
0
      : mu_(mu) {
1051
0
    if (this->mu_ != nullptr) {
1052
0
      this->mu_->lock();
1053
0
    }
1054
0
  }
1055
1056
  explicit MutexLockMaybe(Mutex* absl_nullable mu, const Condition& cond)
1057
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1058
0
      : mu_(mu) {
1059
0
    if (this->mu_ != nullptr) {
1060
0
      this->mu_->LockWhen(cond);
1061
0
    }
1062
0
  }
1063
1064
0
  ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
1065
0
    if (this->mu_ != nullptr) {
1066
0
      this->mu_->unlock();
1067
0
    }
1068
0
  }
1069
1070
 private:
1071
  Mutex* absl_nullable const mu_;
1072
  MutexLockMaybe(const MutexLockMaybe&) = delete;
1073
  MutexLockMaybe(MutexLockMaybe&&) = delete;
1074
  MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
1075
  MutexLockMaybe& operator=(MutexLockMaybe&&) = delete;
1076
};
1077
1078
// ReleasableMutexLock
1079
//
1080
// ReleasableMutexLock is like MutexLock, but permits `Release()` of its
1081
// mutex before destruction. `Release()` may be called at most once.
1082
class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
1083
 public:
1084
  explicit ReleasableMutexLock(Mutex* absl_nonnull mu)
1085
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1086
0
      : mu_(mu) {
1087
0
    this->mu_->lock();
1088
0
  }
1089
1090
  explicit ReleasableMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
1091
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1092
0
      : mu_(mu) {
1093
0
    this->mu_->LockWhen(cond);
1094
0
  }
1095
1096
0
  ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
1097
0
    if (this->mu_ != nullptr) {
1098
0
      this->mu_->unlock();
1099
0
    }
1100
0
  }
1101
1102
  void Release() ABSL_UNLOCK_FUNCTION();
1103
1104
 private:
1105
  Mutex* absl_nonnull mu_;
1106
  ReleasableMutexLock(const ReleasableMutexLock&) = delete;
1107
  ReleasableMutexLock(ReleasableMutexLock&&) = delete;
1108
  ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
1109
  ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete;
1110
};
1111
1112
6
// Constructs a dynamically-initialized Mutex and registers it with the
// thread sanitizer as a non-static mutex.
inline Mutex::Mutex() : mu_(0) {
  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}

// Constant-initialization constructor; note it performs no TSan
// registration.
inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {}
1117
1118
#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL)
1119
ABSL_ATTRIBUTE_ALWAYS_INLINE
1120
0
inline Mutex::~Mutex() { Dtor(); }
1121
#endif
1122
1123
#if defined(NDEBUG) && !defined(ABSL_HAVE_THREAD_SANITIZER)
1124
// Use default (empty) destructor in release build for performance reasons.
1125
// We need to mark both Dtor and ~Mutex as always inline for inconsistent
1126
// builds that use both NDEBUG and !NDEBUG with dynamic libraries. In these
1127
// cases we want the empty functions to dissolve entirely rather than being
1128
// exported from dynamic libraries and potentially override the non-empty ones.
1129
ABSL_ATTRIBUTE_ALWAYS_INLINE
1130
inline void Mutex::Dtor() {}
1131
#endif
1132
1133
// Initializes the condition variable's state word to zero.
inline CondVar::CondVar() : cv_(0) {}
1134
1135
// static
1136
template <typename T, typename ConditionMethodPtr>
1137
bool Condition::CastAndCallMethod(const Condition* absl_nonnull c) {
1138
  T* object = static_cast<T*>(c->arg_);
1139
  ConditionMethodPtr condition_method_pointer;
1140
  c->ReadCallback(&condition_method_pointer);
1141
  return (object->*condition_method_pointer)();
1142
}
1143
1144
// static
1145
template <typename T>
1146
0
bool Condition::CastAndCallFunction(const Condition* absl_nonnull c) {
1147
0
  bool (*function)(T*);
1148
0
  c->ReadCallback(&function);
1149
0
  T* argument = static_cast<T*>(c->arg_);
1150
0
  return (*function)(argument);
1151
0
}
1152
1153
template <typename T>
1154
inline Condition::Condition(
1155
    bool (*absl_nonnull func)(T* absl_nullability_unknown),
1156
    T* absl_nullability_unknown arg)
1157
0
    : eval_(&CastAndCallFunction<T>),
1158
0
      arg_(const_cast<void*>(static_cast<const void*>(arg))) {
1159
0
  static_assert(sizeof(&func) <= sizeof(callback_),
1160
0
                "An overlarge function pointer was passed to Condition.");
1161
0
  StoreCallback(func);
1162
0
}
1163
1164
template <typename T, typename>
1165
inline Condition::Condition(
1166
    bool (*absl_nonnull func)(T* absl_nullability_unknown),
1167
    typename absl::internal::type_identity<T>::type* absl_nullability_unknown
1168
    arg)
1169
    // Just delegate to the overload above.
1170
    : Condition(func, arg) {}
1171
1172
template <typename T>
1173
inline Condition::Condition(
1174
    T* absl_nonnull object,
1175
    bool (absl::internal::type_identity<T>::type::* absl_nonnull method)())
1176
    : eval_(&CastAndCallMethod<T, decltype(method)>), arg_(object) {
1177
  static_assert(sizeof(&method) <= sizeof(callback_),
1178
                "An overlarge method pointer was passed to Condition.");
1179
  StoreCallback(method);
1180
}
1181
1182
template <typename T>
1183
inline Condition::Condition(
1184
    const T* absl_nonnull object,
1185
    bool (absl::internal::type_identity<T>::type::* absl_nonnull method)()
1186
        const)
1187
    : eval_(&CastAndCallMethod<const T, decltype(method)>),
1188
      arg_(reinterpret_cast<void*>(const_cast<T*>(object))) {
1189
  StoreCallback(method);
1190
}
1191
1192
// Register hooks for profiling support.
1193
//
1194
// The function pointer registered here will be called whenever a mutex is
1195
// contended.  The callback is given the cycles for which waiting happened (as
1196
// measured by //absl/base/internal/cycleclock.h, and which may not
1197
// be real "cycle" counts.)
1198
//
1199
// There is no ordering guarantee between when the hook is registered and when
1200
// callbacks will begin.  Only a single profiler can be installed in a running
1201
// binary; if this function is called a second time with a different function
1202
// pointer, the value is ignored (and will cause an assertion failure in debug
1203
// mode.)
1204
void RegisterMutexProfiler(void (*absl_nonnull fn)(int64_t wait_cycles));
1205
1206
// Register a hook for Mutex tracing.
1207
//
1208
// The function pointer registered here will be called whenever a mutex is
1209
// contended.  The callback is given an opaque handle to the contended mutex,
1210
// an event name, and the number of wait cycles (as measured by
1211
// //absl/base/internal/cycleclock.h, and which may not be real
1212
// "cycle" counts.)
1213
//
1214
// The only event name currently sent is "slow release".
1215
//
1216
// This has the same ordering and single-use limitations as
1217
// RegisterMutexProfiler() above.
1218
void RegisterMutexTracer(void (*absl_nonnull fn)(const char* absl_nonnull msg,
1219
                                                 const void* absl_nonnull obj,
1220
                                                 int64_t wait_cycles));
1221
1222
// Register a hook for CondVar tracing.
1223
//
1224
// The function pointer registered here will be called here on various CondVar
1225
// events.  The callback is given an opaque handle to the CondVar object and
1226
// a string identifying the event.  This is thread-safe, but only a single
1227
// tracer can be registered.
1228
//
1229
// Events that can be sent are "Wait", "Unwait", "Signal wakeup", and
1230
// "SignalAll wakeup".
1231
//
1232
// This has the same ordering and single-use limitations as
1233
// RegisterMutexProfiler() above.
1234
void RegisterCondVarTracer(void (*absl_nonnull fn)(
1235
    const char* absl_nonnull msg, const void* absl_nonnull cv));
1236
1237
// EnableMutexInvariantDebugging()
1238
//
1239
// Enable or disable global support for Mutex invariant debugging.  If enabled,
1240
// then invariant predicates can be registered per-Mutex for debug checking.
1241
// See Mutex::EnableInvariantDebugging().
1242
void EnableMutexInvariantDebugging(bool enabled);
1243
1244
// When in debug mode, and when the feature has been enabled globally, the
1245
// implementation will keep track of lock ordering and complain (or optionally
1246
// crash) if a cycle is detected in the acquired-before graph.
1247
1248
// Possible modes of operation for the deadlock detector in debug mode.
enum class OnDeadlockCycle {
  kIgnore,  // Neither report on nor attempt to track cycles in lock ordering
  kReport,  // Report lock cycles to stderr when detected
  kAbort,   // Report lock cycles to stderr when detected, then abort
};
1254
1255
// SetMutexDeadlockDetectionMode()
1256
//
1257
// Enable or disable global support for detection of potential deadlocks
1258
// due to Mutex lock ordering inversions.  When set to 'kIgnore', tracking of
1259
// lock ordering is disabled.  Otherwise, in debug builds, a lock ordering graph
1260
// will be maintained internally, and detected cycles will be reported in
1261
// the manner chosen here.
1262
void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode);
1263
1264
ABSL_NAMESPACE_END
1265
}  // namespace absl
1266
1267
// In some build configurations we pass --detect-odr-violations to the
1268
// gold linker.  This causes it to flag weak symbol overrides as ODR
1269
// violations.  Because ODR only applies to C++ and not C,
1270
// --detect-odr-violations ignores symbols not mangled with C++ names.
1271
// By changing our extension points to be extern "C", we dodge this
1272
// check.
1273
extern "C" {
1274
void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
1275
}  // extern "C"
1276
1277
#endif  // ABSL_SYNCHRONIZATION_MUTEX_H_