Coverage Report

Created: 2025-08-25 06:55

/src/abseil-cpp/absl/synchronization/mutex.h
Line
Count
Source
1
// Copyright 2017 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
//
15
// -----------------------------------------------------------------------------
16
// mutex.h
17
// -----------------------------------------------------------------------------
18
//
19
// This header file defines a `Mutex` -- a mutually exclusive lock -- and the
20
// most common type of synchronization primitive for facilitating locks on
21
// shared resources. A mutex is used to prevent multiple threads from accessing
22
// and/or writing to a shared resource concurrently.
23
//
24
// Unlike a `std::mutex`, the Abseil `Mutex` provides the following additional
25
// features:
26
//   * Conditional predicates intrinsic to the `Mutex` object
27
//   * Shared/reader locks, in addition to standard exclusive/writer locks
28
//   * Deadlock detection and debug support.
29
//
30
// The following helper classes are also defined within this file:
31
//
32
//  MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/
33
//              write access within the current scope.
34
//
35
//  ReaderMutexLock
36
//            - An RAII wrapper to acquire and release a `Mutex` for shared/read
37
//              access within the current scope.
38
//
39
//  WriterMutexLock
40
//            - Effectively an alias for `MutexLock` above, designed for use in
41
//              distinguishing reader and writer locks within code.
42
//
43
// In addition to simple mutex locks, this file also defines ways to perform
44
// locking under certain conditions.
45
//
46
//  Condition - (Preferred) Used to wait for a particular predicate that
47
//              depends on state protected by the `Mutex` to become true.
48
//  CondVar   - A lower-level variant of `Condition` that relies on
49
//              application code to explicitly signal the `CondVar` when
50
//              a condition has been met.
51
//
52
// See below for more information on using `Condition` or `CondVar`.
53
//
54
// Mutexes and mutex behavior can be quite complicated. The information within
55
// this header file is limited, as a result. Please consult the Mutex guide for
56
// more complete information and examples.
57
58
#ifndef ABSL_SYNCHRONIZATION_MUTEX_H_
59
#define ABSL_SYNCHRONIZATION_MUTEX_H_
60
61
#include <atomic>
62
#include <cstdint>
63
#include <cstring>
64
#include <iterator>
65
#include <string>
66
67
#include "absl/base/attributes.h"
68
#include "absl/base/const_init.h"
69
#include "absl/base/internal/identity.h"
70
#include "absl/base/internal/low_level_alloc.h"
71
#include "absl/base/internal/thread_identity.h"
72
#include "absl/base/internal/tsan_mutex_interface.h"
73
#include "absl/base/nullability.h"
74
#include "absl/base/port.h"
75
#include "absl/base/thread_annotations.h"
76
#include "absl/synchronization/internal/kernel_timeout.h"
77
#include "absl/synchronization/internal/per_thread_sem.h"
78
#include "absl/time/time.h"
79
80
namespace absl {
81
ABSL_NAMESPACE_BEGIN
82
83
class Condition;
84
struct SynchWaitParams;
85
86
// -----------------------------------------------------------------------------
87
// Mutex
88
// -----------------------------------------------------------------------------
89
//
90
// A `Mutex` is a non-reentrant (aka non-recursive) Mutually Exclusive lock
91
// on some resource, typically a variable or data structure with associated
92
// invariants. Proper usage of mutexes prevents concurrent access by different
93
// threads to the same resource.
94
//
95
// A `Mutex` has two basic operations: `Mutex::lock()` and `Mutex::unlock()`.
96
// The `lock()` operation *acquires* a `Mutex` (in a state known as an
97
// *exclusive* -- or *write* -- lock), and the `unlock()` operation *releases* a
98
// Mutex. During the span of time between the lock() and unlock() operations,
99
// a mutex is said to be *held*. By design, all mutexes support exclusive/write
100
// locks, as this is the most common way to use a mutex.
101
//
102
// Mutex operations are only allowed under certain conditions; otherwise an
103
// operation is "invalid", and disallowed by the API. The conditions concern
104
// both the current state of the mutex and the identity of the threads that
105
// are performing the operations.
106
//
107
// The `Mutex` state machine for basic lock/unlock operations is quite simple:
108
//
109
// |                | lock()                 | unlock() |
110
// |----------------+------------------------+----------|
111
// | Free           | Exclusive              | invalid  |
112
// | Exclusive      | blocks, then exclusive | Free     |
113
//
114
// The full conditions are as follows.
115
//
116
// * Calls to `unlock()` require that the mutex be held, and must be made in the
117
//   same thread that performed the corresponding `lock()` operation which
118
//   acquired the mutex; otherwise the call is invalid.
119
//
120
// * The mutex being non-reentrant (or non-recursive) means that a call to
121
//   `lock()` or `try_lock()` must not be made in a thread that already holds
122
//   the mutex; such a call is invalid.
123
//
124
// * In other words, the state of being "held" has both a temporal component
125
//   (from `lock()` until `unlock()`) as well as a thread identity component:
126
//   the mutex is held *by a particular thread*.
127
//
128
// An "invalid" operation has undefined behavior. The `Mutex` implementation
129
// is allowed to do anything on an invalid call, including, but not limited to,
130
// crashing with a useful error message, silently succeeding, or corrupting
131
// data structures. In debug mode, the implementation may crash with a useful
132
// error message.
133
//
134
// `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it
135
// is, however, approximately fair over long periods, and starvation-free for
136
// threads at the same priority.
137
//
138
// The lock/unlock primitives are annotated with the lock annotations
139
// defined in absl/base/thread_annotations.h. When writing multi-threaded code,
140
// you should use lock annotations whenever possible to document your lock
141
// synchronization policy. Besides acting as documentation, these annotations
142
// also help compilers or static analysis tools to identify and warn about
143
// issues that could potentially result in race conditions and deadlocks.
144
//
145
// For more information about the lock annotations, please see
146
// [Thread Safety
147
// Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) in the Clang
148
// documentation.
149
//
150
// See also `MutexLock`, below, for scoped `Mutex` acquisition.
151
152
class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
153
 public:
154
  // Creates a `Mutex` that is not held by anyone. This constructor is
155
  // typically used for Mutexes allocated on the heap or the stack.
156
  //
157
  // To create `Mutex` instances with static storage duration
158
  // (e.g. a namespace-scoped or global variable), see
159
  // `Mutex::Mutex(absl::kConstInit)` below instead.
160
  Mutex();
161
162
  // Creates a mutex with static storage duration.  A global variable
163
  // constructed this way avoids the lifetime issues that can occur on program
164
  // startup and shutdown.  (See absl/base/const_init.h.)
165
  //
166
  // For Mutexes allocated on the heap and stack, instead use the default
167
  // constructor, which can interact more fully with the thread sanitizer.
168
  //
169
  // Example usage:
170
  //   namespace foo {
171
  //   ABSL_CONST_INIT absl::Mutex mu(absl::kConstInit);
172
  //   }
173
  explicit constexpr Mutex(absl::ConstInitType);
174
175
  ~Mutex();
176
177
  // Mutex::lock()
178
  //
179
  // Blocks the calling thread, if necessary, until this `Mutex` is free, and
180
  // then acquires it exclusively. (This lock is also known as a "write lock.")
181
  void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION();
182
183
0
  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { lock(); }
184
185
  // Mutex::unlock()
186
  //
187
  // Releases this `Mutex` and returns it from the exclusive/write state to the
188
  // free state. Calling thread must hold the `Mutex` exclusively.
189
  void unlock() ABSL_UNLOCK_FUNCTION();
190
191
0
  inline void Unlock() ABSL_UNLOCK_FUNCTION() { unlock(); }
192
193
  // Mutex::try_lock()
194
  //
195
  // If the mutex can be acquired without blocking, does so exclusively and
196
  // returns `true`. Otherwise, returns `false`. Returns `true` with high
197
  // probability if the `Mutex` was free.
198
  [[nodiscard]] bool try_lock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
199
200
0
  [[nodiscard]] bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
201
0
    return try_lock();
202
0
  }
203
204
  // Mutex::AssertHeld()
205
  //
206
  // Require that the mutex be held exclusively (write mode) by this thread.
207
  //
208
  // If the mutex is not currently held by this thread, this function may report
209
  // an error (typically by crashing with a diagnostic) or it may do nothing.
210
  // This function is intended only as a tool to assist debugging; it doesn't
211
  // guarantee correctness.
212
  void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK();
213
214
  // ---------------------------------------------------------------------------
215
  // Reader-Writer Locking
216
  // ---------------------------------------------------------------------------
217
218
  // A Mutex can also be used as a starvation-free reader-writer lock.
219
  // Neither read-locks nor write-locks are reentrant/recursive to avoid
220
  // potential client programming errors.
221
  //
222
  // The Mutex API provides `Writer*()` aliases for the existing `lock()`,
223
  // `unlock()` and `try_lock()` methods for use within applications mixing
224
  // reader/writer locks. Using `*_shared()` and `Writer*()` operations in this
225
  // manner can make locking behavior clearer when mixing read and write modes.
226
  //
227
  // Introducing reader locks necessarily complicates the `Mutex` state
228
  // machine somewhat. The table below illustrates the allowed state transitions
229
  // of a mutex in such cases. Note that lock_shared() may block even if the
230
  // lock is held in shared mode; this occurs when another thread is blocked on
231
  // a call to lock().
232
  //
233
  // ---------------------------------------------------------------------------
234
  //     Operation: lock()       unlock()  lock_shared() unlock_shared()
235
  // ---------------------------------------------------------------------------
236
  // State
237
  // ---------------------------------------------------------------------------
238
  // Free           Exclusive    invalid   Shared(1)              invalid
239
  // Shared(1)      blocks       invalid   Shared(2) or blocks    Free
240
  // Shared(n) n>1  blocks       invalid   Shared(n+1) or blocks  Shared(n-1)
241
  // Exclusive      blocks       Free      blocks                 invalid
242
  // ---------------------------------------------------------------------------
243
  //
244
  // In comments below, "shared" refers to a state of Shared(n) for any n > 0.
245
246
  // Mutex::lock_shared()
247
  //
248
  // Blocks the calling thread, if necessary, until this `Mutex` is either free,
249
  // or in shared mode, and then acquires a share of it. Note that
250
  // `lock_shared()` will block if some other thread has an exclusive/writer
251
  // lock on the mutex.
252
  void lock_shared() ABSL_SHARED_LOCK_FUNCTION();
253
254
0
  void ReaderLock() ABSL_SHARED_LOCK_FUNCTION() { lock_shared(); }
255
256
  // Mutex::unlock_shared()
257
  //
258
  // Releases a read share of this `Mutex`. `unlock_shared` may return a mutex
259
  // to the free state if this thread holds the last reader lock on the mutex.
260
  // Note that you cannot call `unlock_shared()` on a mutex held in write mode.
261
  void unlock_shared() ABSL_UNLOCK_FUNCTION();
262
263
0
  void ReaderUnlock() ABSL_UNLOCK_FUNCTION() { unlock_shared(); }
264
265
  // Mutex::try_lock_shared()
266
  //
267
  // If the mutex can be acquired without blocking, acquires this mutex for
268
  // shared access and returns `true`. Otherwise, returns `false`. Returns
269
  // `true` with high probability if the `Mutex` was free or shared.
270
  [[nodiscard]] bool try_lock_shared() ABSL_SHARED_TRYLOCK_FUNCTION(true);
271
272
0
  [[nodiscard]] bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true) {
273
0
    return try_lock_shared();
274
0
  }
275
276
  // Mutex::AssertReaderHeld()
277
  //
278
  // Require that the mutex be held at least in shared mode (read mode) by this
279
  // thread.
280
  //
281
  // If the mutex is not currently held by this thread, this function may report
282
  // an error (typically by crashing with a diagnostic) or it may do nothing.
283
  // This function is intended only as a tool to assist debugging; it doesn't
284
  // guarantee correctness.
285
  void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK();
286
287
  // Mutex::WriterLock()
288
  // Mutex::WriterUnlock()
289
  // Mutex::WriterTryLock()
290
  //
291
  // Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.
292
  //
293
  // These methods may be used (along with the complementary `Reader*()`
294
  // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
295
  // etc.) from reader/writer lock usage.
296
0
  void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { lock(); }
297
298
0
  void WriterUnlock() ABSL_UNLOCK_FUNCTION() { unlock(); }
299
300
0
  [[nodiscard]] bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
301
0
    return try_lock();
302
0
  }
303
304
  // ---------------------------------------------------------------------------
305
  // Conditional Critical Regions
306
  // ---------------------------------------------------------------------------
307
308
  // Conditional usage of a `Mutex` can occur using two distinct paradigms:
309
  //
310
  //   * Use of `Mutex` member functions with `Condition` objects.
311
  //   * Use of the separate `CondVar` abstraction.
312
  //
313
  // In general, prefer use of `Condition` and the `Mutex` member functions
314
  // listed below over `CondVar`. When there are multiple threads waiting on
315
  // distinctly different conditions, however, a battery of `CondVar`s may be
316
  // more efficient. This section discusses use of `Condition` objects.
317
  //
318
  // `Mutex` contains member functions for performing lock operations only under
319
  // certain conditions, of class `Condition`. For correctness, the `Condition`
320
  // must return a boolean that is a pure function, only of state protected by
321
  // the `Mutex`. The condition must be invariant w.r.t. environmental state
322
  // such as thread, cpu id, or time, and must be `noexcept`. The condition will
323
  // always be invoked with the mutex held in at least read mode, so the
324
  // condition function should not block for long periods or sleep on a timer.
325
  //
326
  // Since a condition must not depend directly on the current time, use
327
  // `*WithTimeout()` member function variants to make your condition
328
  // effectively true after a given duration, or `*WithDeadline()` variants to
329
  // make your condition effectively true after a given time.
330
  //
331
  // The condition function should have no side-effects aside from debug
332
  // logging; as a special exception, the function may acquire other mutexes
333
  // provided it releases all those that it acquires.  (This exception was
334
  // required to allow logging.)
335
336
  // Mutex::Await()
337
  //
338
  // Unlocks this `Mutex` and blocks until simultaneously both `cond` is `true`
339
  // and this `Mutex` can be reacquired, then reacquires this `Mutex` in the
340
  // same mode in which it was previously held. If the condition is initially
341
  // `true`, `Await()` *may* skip the release/re-acquire step.
342
  //
343
  // `Await()` requires that this thread holds this `Mutex` in some mode.
344
0
  void Await(const Condition& cond) {
345
0
    AwaitCommon(cond, synchronization_internal::KernelTimeout::Never());
346
0
  }
347
348
  // Mutex::LockWhen()
349
  // Mutex::ReaderLockWhen()
350
  // Mutex::WriterLockWhen()
351
  //
352
  // Blocks until simultaneously both `cond` is `true` and this `Mutex` can
353
  // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
354
  // logically equivalent to `*Lock(); Await();` though they may have different
355
  // performance characteristics.
356
0
  void LockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
357
0
    LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(),
358
0
                   true);
359
0
  }
360
361
0
  void ReaderLockWhen(const Condition& cond) ABSL_SHARED_LOCK_FUNCTION() {
362
0
    LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(),
363
0
                   false);
364
0
  }
365
366
0
  void WriterLockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
367
0
    this->LockWhen(cond);
368
0
  }
369
370
  // ---------------------------------------------------------------------------
371
  // Mutex Variants with Timeouts/Deadlines
372
  // ---------------------------------------------------------------------------
373
374
  // Mutex::AwaitWithTimeout()
375
  // Mutex::AwaitWithDeadline()
376
  //
377
  // Unlocks this `Mutex` and blocks until simultaneously:
378
  //   - either `cond` is true or the {timeout has expired, deadline has passed}
379
  //     and
380
  //   - this `Mutex` can be reacquired,
381
  // then reacquire this `Mutex` in the same mode in which it was previously
382
  // held, returning `true` iff `cond` is `true` on return.
383
  //
384
  // If the condition is initially `true`, the implementation *may* skip the
385
  // release/re-acquire step and return immediately.
386
  //
387
  // Deadlines in the past are equivalent to an immediate deadline.
388
  // Negative timeouts are equivalent to a zero timeout.
389
  //
390
  // This method requires that this thread holds this `Mutex` in some mode.
391
0
  bool AwaitWithTimeout(const Condition& cond, absl::Duration timeout) {
392
0
    return AwaitCommon(cond, synchronization_internal::KernelTimeout{timeout});
393
0
  }
394
395
0
  bool AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
396
0
    return AwaitCommon(cond, synchronization_internal::KernelTimeout{deadline});
397
0
  }
398
399
  // Mutex::LockWhenWithTimeout()
400
  // Mutex::ReaderLockWhenWithTimeout()
401
  // Mutex::WriterLockWhenWithTimeout()
402
  //
403
  // Blocks until simultaneously both:
404
  //   - either `cond` is `true` or the timeout has expired, and
405
  //   - this `Mutex` can be acquired,
406
  // then atomically acquires this `Mutex`, returning `true` iff `cond` is
407
  // `true` on return.
408
  //
409
  // Negative timeouts are equivalent to a zero timeout.
410
  bool LockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
411
0
      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
412
0
    return LockWhenCommon(
413
0
        cond, synchronization_internal::KernelTimeout{timeout}, true);
414
0
  }
415
  bool ReaderLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
416
0
      ABSL_SHARED_LOCK_FUNCTION() {
417
0
    return LockWhenCommon(
418
0
        cond, synchronization_internal::KernelTimeout{timeout}, false);
419
0
  }
420
  bool WriterLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
421
0
      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
422
0
    return this->LockWhenWithTimeout(cond, timeout);
423
0
  }
424
425
  // Mutex::LockWhenWithDeadline()
426
  // Mutex::ReaderLockWhenWithDeadline()
427
  // Mutex::WriterLockWhenWithDeadline()
428
  //
429
  // Blocks until simultaneously both:
430
  //   - either `cond` is `true` or the deadline has been passed, and
431
  //   - this `Mutex` can be acquired,
432
  // then atomically acquires this Mutex, returning `true` iff `cond` is `true`
433
  // on return.
434
  //
435
  // Deadlines in the past are equivalent to an immediate deadline.
436
  bool LockWhenWithDeadline(const Condition& cond, absl::Time deadline)
437
0
      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
438
0
    return LockWhenCommon(
439
0
        cond, synchronization_internal::KernelTimeout{deadline}, true);
440
0
  }
441
  bool ReaderLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
442
0
      ABSL_SHARED_LOCK_FUNCTION() {
443
0
    return LockWhenCommon(
444
0
        cond, synchronization_internal::KernelTimeout{deadline}, false);
445
0
  }
446
  bool WriterLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
447
0
      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
448
0
    return this->LockWhenWithDeadline(cond, deadline);
449
0
  }
450
451
  // ---------------------------------------------------------------------------
452
  // Debug Support: Invariant Checking, Deadlock Detection, Logging.
453
  // ---------------------------------------------------------------------------
454
455
  // Mutex::EnableInvariantDebugging()
456
  //
457
  // If `invariant` != null and if invariant debugging has been enabled globally,
458
  // cause `(*invariant)(arg)` to be called at moments when the invariant for
459
  // this `Mutex` should hold (for example: just after acquire, just before
460
  // release).
461
  //
462
  // The routine `invariant` should have no side-effects since it is not
463
  // guaranteed how many times it will be called; it should check the invariant
464
  // and crash if it does not hold. Enabling global invariant debugging may
465
  // substantially reduce `Mutex` performance; it should be set only for
466
  // non-production runs.  Optimization options may also disable invariant
467
  // checks.
468
  void EnableInvariantDebugging(
469
      void (*absl_nullable invariant)(void* absl_nullability_unknown),
470
      void* absl_nullability_unknown arg);
471
472
  // Mutex::EnableDebugLog()
473
  //
474
  // Cause all subsequent uses of this `Mutex` to be logged via
475
  // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if no previous
476
  // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
477
  //
478
  // Note: This method substantially reduces `Mutex` performance.
479
  void EnableDebugLog(const char* absl_nullable name);
480
481
  // Deadlock detection
482
483
  // Mutex::ForgetDeadlockInfo()
484
  //
485
  // Forget any deadlock-detection information previously gathered
486
  // about this `Mutex`. Call this method in debug mode when the lock ordering
487
  // of a `Mutex` changes.
488
  void ForgetDeadlockInfo();
489
490
  // Mutex::AssertNotHeld()
491
  //
492
  // Return immediately if this thread does not hold this `Mutex` in any
493
  // mode; otherwise, may report an error (typically by crashing with a
494
  // diagnostic), or may return immediately.
495
  //
496
  // Currently this check is performed only if all of:
497
  //    - in debug mode
498
  //    - SetMutexDeadlockDetectionMode() has been set to kReport or kAbort
499
  //    - number of locks concurrently held by this thread is not large.
500
  // are true.
501
  void AssertNotHeld() const;
502
503
  // Special cases.
504
505
  // A `MuHow` is a constant that indicates how a lock should be acquired.
506
  // Internal implementation detail.  Clients should ignore.
507
  typedef const struct MuHowS* MuHow;
508
509
  // Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
510
  //
511
  // Causes the `Mutex` implementation to prepare itself for re-entry caused by
512
  // future use of `Mutex` within a fatal signal handler. This method is
513
  // intended for use only for last-ditch attempts to log crash information.
514
  // It does not guarantee that attempts to use Mutexes within the handler will
515
  // not deadlock; it merely makes other faults less likely.
516
  //
517
  // WARNING:  This routine must be invoked from a signal handler, and the
518
  // signal handler must either loop forever or terminate the process.
519
  // Attempts to return from (or `longjmp` out of) the signal handler once this
520
  // call has been made may cause arbitrary program behaviour including
521
  // crashes and deadlocks.
522
  static void InternalAttemptToUseMutexInFatalSignalHandler();
523
524
 private:
525
  std::atomic<intptr_t> mu_;  // The Mutex state.
526
527
  // Post()/Wait() versus associated PerThreadSem; in class for required
528
  // friendship with PerThreadSem.
529
  static void IncrementSynchSem(Mutex* absl_nonnull mu,
530
                                base_internal::PerThreadSynch* absl_nonnull w);
531
  static bool DecrementSynchSem(Mutex* absl_nonnull mu,
532
                                base_internal::PerThreadSynch* absl_nonnull w,
533
                                synchronization_internal::KernelTimeout t);
534
535
  // slow path acquire
536
  void LockSlowLoop(SynchWaitParams* absl_nonnull waitp, int flags);
537
  // wrappers around LockSlowLoop()
538
  bool LockSlowWithDeadline(MuHow absl_nonnull how,
539
                            const Condition* absl_nullable cond,
540
                            synchronization_internal::KernelTimeout t,
541
                            int flags);
542
  void LockSlow(MuHow absl_nonnull how, const Condition* absl_nullable cond,
543
                int flags) ABSL_ATTRIBUTE_COLD;
544
  // slow path release
545
  void UnlockSlow(SynchWaitParams* absl_nullable waitp) ABSL_ATTRIBUTE_COLD;
546
  // TryLock slow path.
547
  bool TryLockSlow();
548
  // ReaderTryLock slow path.
549
  bool ReaderTryLockSlow();
550
  // Common code between Await() and AwaitWithTimeout/Deadline()
551
  bool AwaitCommon(const Condition& cond,
552
                   synchronization_internal::KernelTimeout t);
553
  bool LockWhenCommon(const Condition& cond,
554
                      synchronization_internal::KernelTimeout t, bool write);
555
  // Attempt to remove thread s from queue.
556
  void TryRemove(base_internal::PerThreadSynch* absl_nonnull s);
557
  // Block a thread on mutex.
558
  void Block(base_internal::PerThreadSynch* absl_nonnull s);
559
  // Wake a thread; return successor.
560
  base_internal::PerThreadSynch* absl_nullable Wakeup(
561
      base_internal::PerThreadSynch* absl_nonnull w);
562
  void Dtor();
563
564
  friend class CondVar;   // for access to Trans()/Fer().
565
  void Trans(MuHow absl_nonnull how);  // used for CondVar->Mutex transfer
566
  void Fer(base_internal::PerThreadSynch* absl_nonnull
567
           w);  // used for CondVar->Mutex transfer
568
569
  // Catch the error of writing Mutex when intending MutexLock.
570
0
  explicit Mutex(const volatile Mutex* absl_nullable /*ignored*/) {}
571
572
  Mutex(const Mutex&) = delete;
573
  Mutex& operator=(const Mutex&) = delete;
574
};
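
Illustrative example (not part of mutex.h): a minimal sketch of the exclusive and shared operations documented above, guarding a single integer. The class and member names (Counter, value_) are hypothetical; in practice the RAII wrappers below are usually preferable to calling lock()/unlock() directly.

// Illustrative sketch only -- not part of this header.
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class Counter {
 public:
  void Increment() {
    mu_.lock();          // exclusive/write lock; blocks while any lock is held
    ++value_;
    mu_.unlock();        // returns the Mutex to the free state
  }

  int Get() const {
    mu_.lock_shared();   // shared/read lock; other readers may hold it too
    int v = value_;
    mu_.unlock_shared();
    return v;
  }

 private:
  mutable absl::Mutex mu_;
  int value_ ABSL_GUARDED_BY(mu_) = 0;
};
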
575
576
// -----------------------------------------------------------------------------
577
// Mutex RAII Wrappers
578
// -----------------------------------------------------------------------------
579
580
// MutexLock
581
//
582
// `MutexLock` is a helper class, which acquires and releases a `Mutex` via
583
// RAII.
584
//
585
// Example:
586
//
587
// class Foo {
588
//  public:
589
//   Foo::Bar* Baz() {
590
//     MutexLock lock(mu_);
591
//     ...
592
//     return bar;
593
//   }
594
//
595
// private:
596
//   Mutex mu_;
597
// };
598
class ABSL_SCOPED_LOCKABLE MutexLock {
599
 public:
600
  // Constructors
601
602
  // Calls `mu.lock()` and returns when that call returns. That is, `mu` is
603
  // guaranteed to be locked when this object is constructed.
604
  explicit MutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
605
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
606
32
      : mu_(mu) {
607
32
    this->mu_.lock();
608
32
  }
609
610
  // Calls `mu->lock()` and returns when that call returns. That is, `*mu` is
611
  // guaranteed to be locked when this object is constructed. Requires that
612
  // `mu` be dereferenceable.
613
  explicit MutexLock(Mutex* absl_nonnull mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
614
0
      : MutexLock(*mu) {}
615
616
  // Like above, but calls `mu.LockWhen(cond)` instead. That is, in addition to
617
  // the above, the condition given by `cond` is also guaranteed to hold when
618
  // this object is constructed.
619
  explicit MutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this),
620
                     const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
621
0
      : mu_(mu) {
622
0
    this->mu_.LockWhen(cond);
623
0
  }
624
625
  explicit MutexLock(Mutex* absl_nonnull mu, const Condition& cond)
626
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
627
0
      : MutexLock(*mu, cond) {}
628
629
  MutexLock(const MutexLock&) = delete;  // NOLINT(runtime/mutex)
630
  MutexLock(MutexLock&&) = delete;       // NOLINT(runtime/mutex)
631
  MutexLock& operator=(const MutexLock&) = delete;
632
  MutexLock& operator=(MutexLock&&) = delete;
633
634
32
  ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_.unlock(); }
635
636
 private:
637
  Mutex& mu_;
638
};
639
640
// ReaderMutexLock
641
//
642
// The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and
643
// releases a shared lock on a `Mutex` via RAII.
644
class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
645
 public:
646
  explicit ReaderMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
647
      ABSL_SHARED_LOCK_FUNCTION(mu)
648
976k
      : mu_(mu) {
649
976k
    mu.lock_shared();
650
976k
  }
651
652
  explicit ReaderMutexLock(Mutex* absl_nonnull mu) ABSL_SHARED_LOCK_FUNCTION(mu)
653
0
      : ReaderMutexLock(*mu) {}
654
655
  explicit ReaderMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this),
656
                           const Condition& cond) ABSL_SHARED_LOCK_FUNCTION(mu)
657
0
      : mu_(mu) {
658
0
    mu.ReaderLockWhen(cond);
659
0
  }
660
661
  explicit ReaderMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
662
      ABSL_SHARED_LOCK_FUNCTION(mu)
663
0
      : ReaderMutexLock(*mu, cond) {}
664
665
  ReaderMutexLock(const ReaderMutexLock&) = delete;
666
  ReaderMutexLock(ReaderMutexLock&&) = delete;
667
  ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
668
  ReaderMutexLock& operator=(ReaderMutexLock&&) = delete;
669
670
976k
  ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_.unlock_shared(); }
671
672
 private:
673
  Mutex& mu_;
674
};
675
676
// WriterMutexLock
677
//
678
// The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and
679
// releases a write (exclusive) lock on a `Mutex` via RAII.
680
class ABSL_SCOPED_LOCKABLE WriterMutexLock {
681
 public:
682
  explicit WriterMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
683
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
684
1
      : mu_(mu) {
685
1
    mu.lock();
686
1
  }
687
688
  explicit WriterMutexLock(Mutex* absl_nonnull mu)
689
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
690
0
      : WriterMutexLock(*mu) {}
691
692
  explicit WriterMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this),
693
                           const Condition& cond)
694
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
695
0
      : mu_(mu) {
696
0
    mu.WriterLockWhen(cond);
697
0
  }
698
699
  explicit WriterMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
700
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
701
0
      : WriterMutexLock(*mu, cond) {}
702
703
  WriterMutexLock(const WriterMutexLock&) = delete;
704
  WriterMutexLock(WriterMutexLock&&) = delete;
705
  WriterMutexLock& operator=(const WriterMutexLock&) = delete;
706
  WriterMutexLock& operator=(WriterMutexLock&&) = delete;
707
708
1
  ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_.unlock(); }
709
710
 private:
711
  Mutex& mu_;
712
};
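
Illustrative example (not part of mutex.h): the same kind of counter written with the RAII wrappers above, so the lock is released automatically on every return path. The names (RaiiCounter, value_) are hypothetical.

// Illustrative sketch only -- not part of this header.
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class RaiiCounter {
 public:
  void Increment() {
    absl::MutexLock lock(&mu_);        // exclusive lock for this scope
    ++value_;
  }                                    // lock released here

  int Get() const {
    absl::ReaderMutexLock lock(&mu_);  // shared lock for this scope
    return value_;
  }

 private:
  mutable absl::Mutex mu_;
  int value_ ABSL_GUARDED_BY(mu_) = 0;
};
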
713
714
// -----------------------------------------------------------------------------
715
// Condition
716
// -----------------------------------------------------------------------------
717
//
718
// `Mutex` contains a number of member functions which take a `Condition` as an
719
// argument; clients can wait for conditions to become `true` before attempting
720
// to acquire the mutex. These sections are known as "condition critical"
721
// sections. To use a `Condition`, you simply need to construct it and use it
722
// within an appropriate `Mutex` member function; everything else in the
723
// `Condition` class is an implementation detail.
724
//
725
// A `Condition` is specified as a function pointer which returns a boolean.
726
// `Condition` functions should be pure functions -- their results should depend
727
// only on passed arguments, should not consult any external state (such as
728
// clocks), and should have no side-effects, aside from debug logging. Any
729
// objects that the function may access should be limited to those which are
730
// constant while the mutex is blocked on the condition (e.g. a stack variable),
731
// or objects of state protected explicitly by the mutex.
732
//
733
// No matter which construction is used for `Condition`, the underlying
734
// function pointer / functor / callable must not throw any
735
// exceptions. Correctness of `Mutex` / `Condition` is not guaranteed in
736
// the face of a throwing `Condition`. (When Abseil is allowed to depend
737
// on C++17, these function pointers will be explicitly marked
738
// `noexcept`; until then this requirement cannot be enforced in the
739
// type system.)
740
//
741
// Note: to use a `Condition`, you need only construct it and pass it to a
742
// suitable `Mutex` member function, such as `Mutex::Await()`, or to the
743
// constructor of one of the scope guard classes.
744
//
745
// Example using LockWhen/Unlock:
746
//
747
//   // assume count_ is not an internal reference count
748
//   int count_ ABSL_GUARDED_BY(mu_);
749
//   Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_);
750
//
751
//   mu_.LockWhen(count_is_zero);
752
//   // ...
753
//   mu_.Unlock();
754
//
755
// Example using a scope guard:
756
//
757
//   {
758
//     MutexLock lock(mu_, count_is_zero);
759
//     // ...
760
//   }
761
//
762
// When multiple threads are waiting on exactly the same condition, make sure
763
// that they are constructed with the same parameters (same pointer to function
764
// + arg, or same pointer to object + method), so that the mutex implementation
765
// can avoid redundantly evaluating the same condition for each thread.
766
class Condition {
767
 public:
768
  // A Condition that returns the result of "(*func)(arg)"
769
  Condition(bool (*absl_nonnull func)(void* absl_nullability_unknown),
770
            void* absl_nullability_unknown arg);
771
772
  // Templated version for people who are averse to casts.
773
  //
774
  // To use a lambda, prepend it with unary plus, which converts the lambda
775
  // into a function pointer:
776
  //     Condition(+[](T* t) { return ...; }, arg).
777
  //
778
  // Note: lambdas in this case must contain no bound variables.
779
  //
780
  // See class comment for performance advice.
781
  template <typename T>
782
  Condition(bool (*absl_nonnull func)(T* absl_nullability_unknown),
783
            T* absl_nullability_unknown arg);
784
785
  // Same as above, but allows for cases where `arg` comes from a pointer that
786
  // is convertible to the function parameter type `T*` but not an exact match.
787
  //
788
  // For example, the argument might be `X*` but the function takes `const X*`,
789
  // or the argument might be `Derived*` while the function takes `Base*`, and
790
  // so on for cases where the argument pointer can be implicitly converted.
791
  //
792
  // Implementation notes: This constructor overload is required in addition to
793
  // the one above to allow deduction of `T` from `arg` for cases such as where
794
  // a function template is passed as `func`. Also, the dummy `typename = void`
795
  // template parameter exists just to work around an MSVC mangling bug.
796
  template <typename T, typename = void>
797
  Condition(
798
      bool (*absl_nonnull func)(T* absl_nullability_unknown),
799
      typename absl::internal::type_identity<T>::type* absl_nullability_unknown
800
      arg);
801
802
  // Templated version for invoking a method that returns a `bool`.
803
  //
804
  // `Condition(object, &Class::Method)` constructs a `Condition` that evaluates
805
  // `object->Method()`.
806
  //
807
  // Implementation Note: `absl::internal::type_identity` is used to allow
808
  // methods to come from base classes. A simpler signature like
809
  // `Condition(T*, bool (T::*)())` does not suffice.
810
  template <typename T>
811
  Condition(
812
      T* absl_nonnull object,
813
      bool (absl::internal::type_identity<T>::type::* absl_nonnull method)());
814
815
  // Same as above, for const members
816
  template <typename T>
817
  Condition(
818
      const T* absl_nonnull object,
819
      bool (absl::internal::type_identity<T>::type::* absl_nonnull method)()
820
          const);
821
822
  // A Condition that returns the value of `*cond`
823
  explicit Condition(const bool* absl_nonnull cond);
824
825
  // Templated version for invoking a functor that returns a `bool`.
826
  // This approach accepts pointers to non-mutable lambdas, `std::function`,
827
  // the result of `std::bind`, and user-defined functors that define
828
  // `bool F::operator()() const`.
829
  //
830
  // Example:
831
  //
832
  //   auto reached = [this, current]() {
833
  //     mu_.AssertReaderHeld();                // For annotalysis.
834
  //     return processed_ >= current;
835
  //   };
836
  //   mu_.Await(Condition(&reached));
837
  //
838
  // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
839
  // the lambda as it may be called when the mutex is being unlocked from a
840
  // scope holding only a reader lock, which would cause the assertion to fail
841
  // and crash the binary.
842
843
  // See class comment for performance advice. In particular, if there
844
  // might be more than one waiter for the same condition, make sure
845
  // that all waiters construct the condition with the same pointers.
846
847
  // Implementation note: The second template parameter ensures that this
848
  // constructor doesn't participate in overload resolution if T doesn't have
849
  // `bool operator() const`.
850
  template <typename T, typename E = decltype(static_cast<bool (T::*)() const>(
851
                            &T::operator()))>
852
  explicit Condition(const T* absl_nonnull obj)
853
      : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
854
855
  // A Condition that always returns `true`.
856
  // kTrue is only useful in a narrow set of circumstances, mostly when
857
  // it's passed conditionally. For example:
858
  //
859
  //   mu.LockWhen(some_flag ? kTrue : SomeOtherCondition);
860
  //
861
  // Note: {LockWhen,Await}With{Deadline,Timeout} methods with the kTrue condition
862
  // don't return immediately when the timeout happens; they still block until
863
  // the Mutex becomes available. The return value of these methods does
864
  // not indicate if the timeout was reached; rather it indicates whether or
865
  // not the condition is true.
866
  ABSL_CONST_INIT static const Condition kTrue;
867
868
  // Evaluates the condition.
869
  bool Eval() const;
870
871
  // Returns `true` if the two conditions are guaranteed to return the same
872
  // value if evaluated at the same time, `false` if the evaluation *may* return
873
  // different results.
874
  //
875
  // Two `Condition` values are guaranteed equal if both their `func` and `arg`
876
  // components are the same. A null pointer is equivalent to a `true`
877
  // condition.
878
  static bool GuaranteedEqual(const Condition* absl_nullable a,
879
                              const Condition* absl_nullable b);
880
881
 private:
882
  // Sizing an allocation for a method pointer can be subtle. In the Itanium
883
  // specifications, a method pointer has a predictable, uniform size. On the
884
  // other hand, in the MSVC ABI, method pointer sizes vary based on the
885
  // inheritance of the class. Specifically, method pointers from classes with
886
  // multiple inheritance are bigger than those of classes with single
887
  // inheritance. Other variations also exist.
888
889
#ifndef _MSC_VER
890
  // Allocation for a function pointer or method pointer.
891
  // The {0} initializer ensures that all unused bytes of this buffer are
892
  // always zeroed out.  This is necessary, because GuaranteedEqual() compares
893
  // all of the bytes, unaware of which bytes are relevant to a given `eval_`.
894
  using MethodPtr = bool (Condition::*)();
895
  char callback_[sizeof(MethodPtr)] = {0};
896
#else
897
  // It is well known that the largest MSVC pointer-to-member is 24 bytes. This
898
  // may be the largest known pointer-to-member of any platform. For this
899
  // reason we will allocate 24 bytes for MSVC platform toolchains.
900
  char callback_[24] = {0};
901
#endif
902
903
  // Function with which to evaluate callbacks and/or arguments.
904
  bool (*absl_nullable eval_)(const Condition* absl_nonnull) = nullptr;
905
906
  // Either an argument for a function call or an object for a method call.
907
  void* absl_nullable arg_ = nullptr;
908
909
  // Various functions eval_ can point to:
910
  static bool CallVoidPtrFunction(const Condition* absl_nonnull c);
911
  template <typename T>
912
  static bool CastAndCallFunction(const Condition* absl_nonnull c);
913
  template <typename T, typename ConditionMethodPtr>
914
  static bool CastAndCallMethod(const Condition* absl_nonnull c);
915
916
  // Helper methods for storing, validating, and reading callback arguments.
917
  template <typename T>
918
0
  inline void StoreCallback(T callback) {
919
0
    static_assert(
920
0
        sizeof(callback) <= sizeof(callback_),
921
0
        "An overlarge pointer was passed as a callback to Condition.");
922
0
    std::memcpy(callback_, &callback, sizeof(callback));
923
0
  }
Unexecuted instantiation: void absl::Condition::StoreCallback<bool (*)(absl::SynchEvent*)>(bool (*)(absl::SynchEvent*))
Unexecuted instantiation: void absl::Condition::StoreCallback<bool (*)(void*)>(bool (*)(void*))
924
925
  template <typename T>
926
0
  inline void ReadCallback(T* absl_nonnull callback) const {
927
0
    std::memcpy(callback, callback_, sizeof(*callback));
928
0
  }
929
930
0
  static bool AlwaysTrue(const Condition* absl_nullable) { return true; }
931
932
  // Used only to create kTrue.
933
0
  constexpr Condition() : eval_(AlwaysTrue), arg_(nullptr) {}
934
};
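
Illustrative example (not part of mutex.h): a compilable variant of the count_is_zero example from the class comment above, using the MutexLock constructor that takes a Condition. The Widget class and its members are hypothetical.

// Illustrative sketch only -- not part of this header.
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class Widget {
 public:
  void Add()    { absl::MutexLock lock(&mu_); ++count_; }
  void Remove() { absl::MutexLock lock(&mu_); --count_; }

  // Blocks until count_ reaches zero; equivalent to LockWhen() + Unlock().
  void WaitUntilEmpty() {
    // Unary plus converts the capture-less lambda to a function pointer.
    absl::Condition count_is_zero(+[](int* count) { return *count == 0; },
                                  &count_);
    absl::MutexLock lock(&mu_, count_is_zero);
  }  // mu_ released here; count_ was observed to be zero while it was held

 private:
  absl::Mutex mu_;
  int count_ ABSL_GUARDED_BY(mu_) = 0;
};
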
935
936
// -----------------------------------------------------------------------------
937
// CondVar
938
// -----------------------------------------------------------------------------
939
//
940
// A condition variable, reflecting state evaluated separately outside of the
941
// `Mutex` object, which can be signaled to wake callers.
942
// This class is not normally needed; use `Mutex` member functions such as
943
// `Mutex::Await()` and intrinsic `Condition` abstractions. In rare cases
944
// with many threads and many conditions, `CondVar` may be faster.
945
//
946
// The implementation may deliver signals to any condition variable at
947
// any time, even when no call to `Signal()` or `SignalAll()` is made; as a
948
// result, upon being awoken, you must check the logical condition you have
949
// been waiting upon.
950
//
951
// Examples:
952
//
953
// Usage for a thread waiting for some condition C protected by mutex mu:
954
//       mu.Lock();
955
//       while (!C) { cv->Wait(&mu); }        // releases and reacquires mu
956
//       //  C holds; process data
957
//       mu.Unlock();
958
//
959
// Usage to wake the waiting thread is:
960
//       mu.Lock();
961
//       // process data, possibly establishing C
962
//       if (C) { cv->Signal(); }
963
//       mu.Unlock();
964
//
965
// If C may be useful to more than one waiter, use `SignalAll()` instead of
966
// `Signal()`.
967
//
968
// With this implementation it is efficient to use `Signal()/SignalAll()` inside
969
// the locked region; this usage can make reasoning about your program easier.
970
//
971
class CondVar {
972
 public:
973
  // A `CondVar` allocated on the heap or on the stack can use this
974
  // constructor.
975
  CondVar();
976
977
  // CondVar::Wait()
978
  //
979
  // Atomically releases a `Mutex` and blocks on this condition variable.
980
  // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
981
  // spurious wakeup), then reacquires the `Mutex` and returns.
982
  //
983
  // Requires and ensures that the current thread holds the `Mutex`.
984
0
  void Wait(Mutex* absl_nonnull mu) {
985
0
    WaitCommon(mu, synchronization_internal::KernelTimeout::Never());
986
0
  }
987
988
  // CondVar::WaitWithTimeout()
989
  //
990
  // Atomically releases a `Mutex` and blocks on this condition variable.
991
  // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
992
  // spurious wakeup), or until the timeout has expired, then reacquires
993
  // the `Mutex` and returns.
994
  //
995
  // Returns true if the timeout has expired without this `CondVar`
996
  // being signalled in any manner. If both the timeout has expired
997
  // and this `CondVar` has been signalled, the implementation is free
998
  // to return `true` or `false`.
999
  //
1000
  // Requires and ensures that the current thread holds the `Mutex`.
1001
0
  bool WaitWithTimeout(Mutex* absl_nonnull mu, absl::Duration timeout) {
1002
0
    return WaitCommon(mu, synchronization_internal::KernelTimeout(timeout));
1003
0
  }
1004
1005
  // CondVar::WaitWithDeadline()
1006
  //
1007
  // Atomically releases a `Mutex` and blocks on this condition variable.
1008
  // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
1009
  // spurious wakeup), or until the deadline has passed, then reacquires
1010
  // the `Mutex` and returns.
1011
  //
1012
  // Deadlines in the past are equivalent to an immediate deadline.
1013
  //
1014
  // Returns true if the deadline has passed without this `CondVar`
1015
  // being signalled in any manner. If both the deadline has passed
1016
  // and this `CondVar` has been signalled, the implementation is free
1017
  // to return `true` or `false`.
1018
  //
1019
  // Requires and ensures that the current thread holds the `Mutex`.
1020
0
  bool WaitWithDeadline(Mutex* absl_nonnull mu, absl::Time deadline) {
1021
0
    return WaitCommon(mu, synchronization_internal::KernelTimeout(deadline));
1022
0
  }
1023
1024
  // CondVar::Signal()
1025
  //
1026
  // Signal this `CondVar`; wake at least one waiter if one exists.
1027
  void Signal();
1028
1029
  // CondVar::SignalAll()
1030
  //
1031
  // Signal this `CondVar`; wake all waiters.
1032
  void SignalAll();
1033
1034
  // CondVar::EnableDebugLog()
1035
  //
1036
  // Causes all subsequent uses of this `CondVar` to be logged via
1037
  // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
1038
  // Note: this method substantially reduces `CondVar` performance.
1039
  void EnableDebugLog(const char* absl_nullable name);
1040
1041
 private:
1042
  bool WaitCommon(Mutex* absl_nonnull mutex,
1043
                  synchronization_internal::KernelTimeout t);
1044
  void Remove(base_internal::PerThreadSynch* absl_nonnull s);
1045
  std::atomic<intptr_t> cv_;  // Condition variable state.
1046
  CondVar(const CondVar&) = delete;
1047
  CondVar& operator=(const CondVar&) = delete;
1048
};
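
Illustrative example (not part of mutex.h): the wait/signal pattern from the comment above, written out for a one-shot boolean flag. The Event class and its members are hypothetical; note the while loop, which re-checks the condition because wakeups may be spurious.

// Illustrative sketch only -- not part of this header.
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class Event {
 public:
  void Wait() {
    absl::MutexLock lock(&mu_);
    while (!ready_) {    // re-check: the wakeup may be spurious
      cv_.Wait(&mu_);    // releases mu_ while blocked, reacquires on return
    }
  }

  void Notify() {
    absl::MutexLock lock(&mu_);
    ready_ = true;
    cv_.SignalAll();     // wake all waiters; Signal() would wake at least one
  }

 private:
  absl::Mutex mu_;
  absl::CondVar cv_;
  bool ready_ ABSL_GUARDED_BY(mu_) = false;
};
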
1049
1050
// Variants of MutexLock.
1051
//
1052
// If you find yourself using one of these, consider instead using
1053
// Mutex::Unlock() and/or if-statements for clarity.
1054
1055
// MutexLockMaybe
1056
//
1057
// MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
1058
class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
1059
 public:
1060
  explicit MutexLockMaybe(Mutex* absl_nullable mu)
1061
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1062
0
      : mu_(mu) {
1063
0
    if (this->mu_ != nullptr) {
1064
0
      this->mu_->lock();
1065
0
    }
1066
0
  }
1067
1068
  explicit MutexLockMaybe(Mutex* absl_nullable mu, const Condition& cond)
1069
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1070
0
      : mu_(mu) {
1071
0
    if (this->mu_ != nullptr) {
1072
0
      this->mu_->LockWhen(cond);
1073
0
    }
1074
0
  }
1075
1076
0
  ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
1077
0
    if (this->mu_ != nullptr) {
1078
0
      this->mu_->unlock();
1079
0
    }
1080
0
  }
1081
1082
 private:
1083
  Mutex* absl_nullable const mu_;
1084
  MutexLockMaybe(const MutexLockMaybe&) = delete;
1085
  MutexLockMaybe(MutexLockMaybe&&) = delete;
1086
  MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
1087
  MutexLockMaybe& operator=(MutexLockMaybe&&) = delete;
1088
};
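
Illustrative example (not part of mutex.h): MutexLockMaybe with a mutex that may legitimately be null, e.g. for callers known to be single-threaded. The function and parameter names are hypothetical.

// Illustrative sketch only -- not part of this header.
#include "absl/synchronization/mutex.h"

// If `mu` is null the guard is a no-op; otherwise `mu` is held for the
// duration of the read.
int ReadValue(const int* value, absl::Mutex* mu) {
  absl::MutexLockMaybe lock(mu);
  return *value;
}
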
1089
1090
// ReleasableMutexLock
1091
//
1092
// ReleasableMutexLock is like MutexLock, but permits `Release()` of its
1093
// mutex before destruction. `Release()` may be called at most once.
1094
class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
1095
 public:
1096
  explicit ReleasableMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(
1097
      this)) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1098
0
      : mu_(&mu) {
1099
0
    this->mu_->lock();
1100
0
  }
1101
1102
  explicit ReleasableMutexLock(Mutex* absl_nonnull mu)
1103
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1104
0
      : ReleasableMutexLock(*mu) {}
1105
1106
  explicit ReleasableMutexLock(
1107
      Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this),
1108
      const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1109
0
      : mu_(&mu) {
1110
0
    this->mu_->LockWhen(cond);
1111
0
  }
1112
1113
  explicit ReleasableMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
1114
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1115
0
      : ReleasableMutexLock(*mu, cond) {}
1116
1117
0
  ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
1118
0
    if (this->mu_ != nullptr) {
1119
0
      this->mu_->unlock();
1120
0
    }
1121
0
  }
1122
1123
  void Release() ABSL_UNLOCK_FUNCTION();
1124
1125
 private:
1126
  Mutex* absl_nonnull mu_;
1127
  ReleasableMutexLock(const ReleasableMutexLock&) = delete;
1128
  ReleasableMutexLock(ReleasableMutexLock&&) = delete;
1129
  ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
1130
  ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete;
1131
};
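
Illustrative example (not part of mutex.h): ReleasableMutexLock used to drop the lock early, before doing slow work that must not run under the mutex. The function and variable names are hypothetical.

// Illustrative sketch only -- not part of this header.
#include "absl/synchronization/mutex.h"

void Process(absl::Mutex* mu, int* counter) {
  absl::ReleasableMutexLock lock(mu);
  const int snapshot = ++*counter;  // work that needs the lock
  lock.Release();                   // at most once; the destructor then does nothing
  // ... slow work on `snapshot` without holding the mutex ...
  (void)snapshot;
}
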
1132
1133
6
inline Mutex::Mutex() : mu_(0) {
1134
6
  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
1135
6
}
1136
1137
inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {}
1138
1139
#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL)
1140
ABSL_ATTRIBUTE_ALWAYS_INLINE
1141
0
inline Mutex::~Mutex() { Dtor(); }
1142
#endif
1143
1144
#if defined(NDEBUG) && !defined(ABSL_HAVE_THREAD_SANITIZER)
1145
// Use default (empty) destructor in release build for performance reasons.
1146
// We need to mark both Dtor and ~Mutex as always inline for inconsistent
1147
// builds that use both NDEBUG and !NDEBUG with dynamic libraries. In these
1148
// cases we want the empty functions to dissolve entirely rather than being
1149
// exported from dynamic libraries and potentially overriding the non-empty ones.
1150
ABSL_ATTRIBUTE_ALWAYS_INLINE
1151
inline void Mutex::Dtor() {}
1152
#endif
1153
1154
inline CondVar::CondVar() : cv_(0) {}
1155
1156
// static
1157
template <typename T, typename ConditionMethodPtr>
1158
bool Condition::CastAndCallMethod(const Condition* absl_nonnull c) {
1159
  T* object = static_cast<T*>(c->arg_);
1160
  ConditionMethodPtr condition_method_pointer;
1161
  c->ReadCallback(&condition_method_pointer);
1162
  return (object->*condition_method_pointer)();
1163
}
1164
1165
// static
1166
template <typename T>
1167
0
bool Condition::CastAndCallFunction(const Condition* absl_nonnull c) {
1168
0
  bool (*function)(T*);
1169
0
  c->ReadCallback(&function);
1170
0
  T* argument = static_cast<T*>(c->arg_);
1171
0
  return (*function)(argument);
1172
0
}
1173
1174
template <typename T>
1175
inline Condition::Condition(
1176
    bool (*absl_nonnull func)(T* absl_nullability_unknown),
1177
    T* absl_nullability_unknown arg)
1178
0
    : eval_(&CastAndCallFunction<T>),
1179
0
      arg_(const_cast<void*>(static_cast<const void*>(arg))) {
1180
0
  static_assert(sizeof(&func) <= sizeof(callback_),
1181
0
                "An overlarge function pointer was passed to Condition.");
1182
0
  StoreCallback(func);
1183
0
}
1184
1185
template <typename T, typename>
1186
inline Condition::Condition(
1187
    bool (*absl_nonnull func)(T* absl_nullability_unknown),
1188
    typename absl::internal::type_identity<T>::type* absl_nullability_unknown
1189
    arg)
1190
    // Just delegate to the overload above.
1191
    : Condition(func, arg) {}
1192
1193
template <typename T>
1194
inline Condition::Condition(
1195
    T* absl_nonnull object,
1196
    bool (absl::internal::type_identity<T>::type::* absl_nonnull method)())
1197
    : eval_(&CastAndCallMethod<T, decltype(method)>), arg_(object) {
1198
  static_assert(sizeof(&method) <= sizeof(callback_),
1199
                "An overlarge method pointer was passed to Condition.");
1200
  StoreCallback(method);
1201
}
1202
1203
template <typename T>
1204
inline Condition::Condition(
1205
    const T* absl_nonnull object,
1206
    bool (absl::internal::type_identity<T>::type::* absl_nonnull method)()
1207
        const)
1208
    : eval_(&CastAndCallMethod<const T, decltype(method)>),
1209
      arg_(reinterpret_cast<void*>(const_cast<T*>(object))) {
1210
  StoreCallback(method);
1211
}
1212
1213
// Register hooks for profiling support.
1214
//
1215
// The function pointer registered here will be called whenever a mutex is
1216
// contended.  The callback is given the cycles for which waiting happened (as
1217
// measured by //absl/base/internal/cycleclock.h, and which may not
1218
// be real "cycle" counts.)
1219
//
1220
// There is no ordering guarantee between when the hook is registered and when
1221
// callbacks will begin.  Only a single profiler can be installed in a running
1222
// binary; if this function is called a second time with a different function
1223
// pointer, the value is ignored (and will cause an assertion failure in debug
1224
// mode.)
1225
void RegisterMutexProfiler(void (*absl_nonnull fn)(int64_t wait_cycles));
1226
1227
// Register a hook for Mutex tracing.
1228
//
1229
// The function pointer registered here will be called whenever a mutex is
1230
// contended.  The callback is given an opaque handle to the contended mutex,
1231
// an event name, and the number of wait cycles (as measured by
1232
// //absl/base/internal/cycleclock.h, and which may not be real
1233
// "cycle" counts.)
1234
//
1235
// The only event name currently sent is "slow release".
1236
//
1237
// This has the same ordering and single-use limitations as
1238
// RegisterMutexProfiler() above.
1239
void RegisterMutexTracer(void (*absl_nonnull fn)(const char* absl_nonnull msg,
1240
                                                 const void* absl_nonnull obj,
1241
                                                 int64_t wait_cycles));
1242
1243
// Register a hook for CondVar tracing.
1244
//
1245
// The function pointer registered here will be called here on various CondVar
1246
// events.  The callback is given an opaque handle to the CondVar object and
1247
// a string identifying the event.  This is thread-safe, but only a single
1248
// tracer can be registered.
1249
//
1250
// Events that can be sent are "Wait", "Unwait", "Signal wakeup", and
1251
// "SignalAll wakeup".
1252
//
1253
// This has the same ordering and single-use limitations as
1254
// RegisterMutexProfiler() above.
1255
void RegisterCondVarTracer(void (*absl_nonnull fn)(
1256
    const char* absl_nonnull msg, const void* absl_nonnull cv));
1257
1258
// EnableMutexInvariantDebugging()
1259
//
1260
// Enable or disable global support for Mutex invariant debugging.  If enabled,
1261
// then invariant predicates can be registered per-Mutex for debug checking.
1262
// See Mutex::EnableInvariantDebugging().
1263
void EnableMutexInvariantDebugging(bool enabled);
1264
1265
// When in debug mode, and when the feature has been enabled globally, the
1266
// implementation will keep track of lock ordering and complain (or optionally
1267
// crash) if a cycle is detected in the acquired-before graph.
1268
1269
// Possible modes of operation for the deadlock detector in debug mode.
1270
enum class OnDeadlockCycle {
1271
  kIgnore,  // Neither report on nor attempt to track cycles in lock ordering
1272
  kReport,  // Report lock cycles to stderr when detected
1273
  kAbort,   // Report lock cycles to stderr when detected, then abort
1274
};
1275
1276
// SetMutexDeadlockDetectionMode()
1277
//
1278
// Enable or disable global support for detection of potential deadlocks
1279
// due to Mutex lock ordering inversions.  When set to 'kIgnore', tracking of
1280
// lock ordering is disabled.  Otherwise, in debug builds, a lock ordering graph
1281
// will be maintained internally, and detected cycles will be reported in
1282
// the manner chosen here.
1283
void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode);
1284
1285
ABSL_NAMESPACE_END
1286
}  // namespace absl
1287
1288
// In some build configurations we pass --detect-odr-violations to the
1289
// gold linker.  This causes it to flag weak symbol overrides as ODR
1290
// violations.  Because ODR only applies to C++ and not C,
1291
// --detect-odr-violations ignores symbols not mangled with C++ names.
1292
// By changing our extension points to be extern "C", we dodge this
1293
// check.
1294
extern "C" {
1295
void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
1296
}  // extern "C"
1297
1298
#endif  // ABSL_SYNCHRONIZATION_MUTEX_H_
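
Illustrative example (not part of mutex.h): turning on the debug facilities declared above near the start of a test binary. The function name EnableMutexDebugChecks is hypothetical.

// Illustrative sketch only -- not part of this header.
#include "absl/synchronization/mutex.h"

void EnableMutexDebugChecks() {
  // In debug builds, track lock ordering and abort if a potential deadlock
  // cycle is detected.
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
  // Allow per-Mutex invariants registered with Mutex::EnableInvariantDebugging()
  // to be checked; this substantially reduces Mutex performance.
  absl::EnableMutexInvariantDebugging(true);
}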