Coverage Report

Created: 2025-07-11 06:37

/src/abseil-cpp/absl/synchronization/mutex.h
Line
Count
Source
1
// Copyright 2017 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
//
15
// -----------------------------------------------------------------------------
16
// mutex.h
17
// -----------------------------------------------------------------------------
18
//
19
// This header file defines a `Mutex` -- a mutually exclusive lock -- and the
20
// most common type of synchronization primitive for facilitating locks on
21
// shared resources. A mutex is used to prevent multiple threads from accessing
22
// and/or writing to a shared resource concurrently.
23
//
24
// Unlike a `std::mutex`, the Abseil `Mutex` provides the following additional
25
// features:
26
//   * Conditional predicates intrinsic to the `Mutex` object
27
//   * Shared/reader locks, in addition to standard exclusive/writer locks
28
//   * Deadlock detection and debug support.
29
//
30
// The following helper classes are also defined within this file:
31
//
32
//  MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/
33
//              write access within the current scope.
34
//
35
//  ReaderMutexLock
36
//            - An RAII wrapper to acquire and release a `Mutex` for shared/read
37
//              access within the current scope.
38
//
39
//  WriterMutexLock
40
//            - Effectively an alias for `MutexLock` above, designed for use in
41
//              distinguishing reader and writer locks within code.
42
//
43
// In addition to simple mutex locks, this file also defines ways to perform
44
// locking under certain conditions.
45
//
46
//  Condition - (Preferred) Used to wait for a particular predicate that
47
//              depends on state protected by the `Mutex` to become true.
48
//  CondVar   - A lower-level variant of `Condition` that relies on
49
//              application code to explicitly signal the `CondVar` when
50
//              a condition has been met.
51
//
52
// See below for more information on using `Condition` or `CondVar`.
53
//
54
// Mutexes and mutex behavior can be quite complicated. The information within
55
// this header file is limited, as a result. Please consult the Mutex guide for
56
// more complete information and examples.
57
58
#ifndef ABSL_SYNCHRONIZATION_MUTEX_H_
59
#define ABSL_SYNCHRONIZATION_MUTEX_H_
60
61
#include <atomic>
62
#include <cstdint>
63
#include <cstring>
64
#include <iterator>
65
#include <string>
66
67
#include "absl/base/attributes.h"
68
#include "absl/base/const_init.h"
69
#include "absl/base/internal/identity.h"
70
#include "absl/base/internal/low_level_alloc.h"
71
#include "absl/base/internal/thread_identity.h"
72
#include "absl/base/internal/tsan_mutex_interface.h"
73
#include "absl/base/nullability.h"
74
#include "absl/base/port.h"
75
#include "absl/base/thread_annotations.h"
76
#include "absl/synchronization/internal/kernel_timeout.h"
77
#include "absl/synchronization/internal/per_thread_sem.h"
78
#include "absl/time/time.h"
79
80
namespace absl {
81
ABSL_NAMESPACE_BEGIN
82
83
class Condition;
84
struct SynchWaitParams;
85
86
// -----------------------------------------------------------------------------
87
// Mutex
88
// -----------------------------------------------------------------------------
89
//
90
// A `Mutex` is a non-reentrant (aka non-recursive) Mutually Exclusive lock
91
// on some resource, typically a variable or data structure with associated
92
// invariants. Proper usage of mutexes prevents concurrent access by different
93
// threads to the same resource.
94
//
95
// A `Mutex` has two basic operations: `Mutex::Lock()` and `Mutex::Unlock()`.
96
// The `Lock()` operation *acquires* a `Mutex` (in a state known as an
97
// *exclusive* -- or *write* -- lock), and the `Unlock()` operation *releases* a
98
// Mutex. During the span of time between the Lock() and Unlock() operations,
99
// a mutex is said to be *held*. By design, all mutexes support exclusive/write
100
// locks, as this is the most common way to use a mutex.
101
//
102
// Mutex operations are only allowed under certain conditions; otherwise an
103
// operation is "invalid", and disallowed by the API. The conditions concern
104
// both the current state of the mutex and the identity of the threads that
105
// are performing the operations.
106
//
107
// The `Mutex` state machine for basic lock/unlock operations is quite simple:
108
//
109
// |                | Lock()                 | Unlock() |
110
// |----------------+------------------------+----------|
111
// | Free           | Exclusive              | invalid  |
112
// | Exclusive      | blocks, then exclusive | Free     |
113
//
114
// The full conditions are as follows.
115
//
116
// * Calls to `Unlock()` require that the mutex be held, and must be made in the
117
//   same thread that performed the corresponding `Lock()` operation which
118
//   acquired the mutex; otherwise the call is invalid.
119
//
120
// * The mutex being non-reentrant (or non-recursive) means that a call to
121
//   `Lock()` or `TryLock()` must not be made in a thread that already holds the
122
//   mutex; such a call is invalid.
123
//
124
// * In other words, the state of being "held" has both a temporal component
125
//   (from `Lock()` until `Unlock()`) as well as a thread identity component:
126
//   the mutex is held *by a particular thread*.
127
//
128
// An "invalid" operation has undefined behavior. The `Mutex` implementation
129
// is allowed to do anything on an invalid call, including, but not limited to,
130
// crashing with a useful error message, silently succeeding, or corrupting
131
// data structures. In debug mode, the implementation may crash with a useful
132
// error message.
133
//
134
// `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it
135
// is, however, approximately fair over long periods, and starvation-free for
136
// threads at the same priority.
137
//
138
// The lock/unlock primitives are now annotated with lock annotations
139
// defined in (base/thread_annotations.h). When writing multi-threaded code,
140
// you should use lock annotations whenever possible to document your lock
141
// synchronization policy. Besides acting as documentation, these annotations
142
// also help compilers or static analysis tools to identify and warn about
143
// issues that could potentially result in race conditions and deadlocks.
144
//
145
// For more information about the lock annotations, please see
146
// [Thread Safety
147
// Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) in the Clang
148
// documentation.
149
//
150
// See also `MutexLock`, below, for scoped `Mutex` acquisition.
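For orientation, a minimal usage sketch of the exclusive-lock API described above (not part of this header; the `Counter` class and its members are hypothetical):

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class Counter {
 public:
  void Increment() {
    absl::MutexLock lock(&mu_);  // acquires mu_ exclusively; released on scope exit
    ++value_;
  }
  int Get() const {
    absl::MutexLock lock(&mu_);
    return value_;
  }

 private:
  mutable absl::Mutex mu_;
  int value_ ABSL_GUARDED_BY(mu_) = 0;
};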
151
152
class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
153
 public:
154
  // Creates a `Mutex` that is not held by anyone. This constructor is
155
  // typically used for Mutexes allocated on the heap or the stack.
156
  //
157
  // To create `Mutex` instances with static storage duration
158
  // (e.g. a namespace-scoped or global variable), see
159
  // `Mutex::Mutex(absl::kConstInit)` below instead.
160
  Mutex();
161
162
  // Creates a mutex with static storage duration.  A global variable
163
  // constructed this way avoids the lifetime issues that can occur on program
164
  // startup and shutdown.  (See absl/base/const_init.h.)
165
  //
166
  // For Mutexes allocated on the heap and stack, instead use the default
167
  // constructor, which can interact more fully with the thread sanitizer.
168
  //
169
  // Example usage:
170
  //   namespace foo {
171
  //   ABSL_CONST_INIT absl::Mutex mu(absl::kConstInit);
172
  //   }
173
  explicit constexpr Mutex(absl::ConstInitType);
174
175
  ~Mutex();
176
177
  // Mutex::Lock()
178
  //
179
  // Blocks the calling thread, if necessary, until this `Mutex` is free, and
180
  // then acquires it exclusively. (This lock is also known as a "write lock.")
181
  void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION();
182
183
  // Mutex::Unlock()
184
  //
185
  // Releases this `Mutex` and returns it from the exclusive/write state to the
186
  // free state. Calling thread must hold the `Mutex` exclusively.
187
  void Unlock() ABSL_UNLOCK_FUNCTION();
188
189
  // Mutex::TryLock()
190
  //
191
  // If the mutex can be acquired without blocking, does so exclusively and
192
  // returns `true`. Otherwise, returns `false`. Returns `true` with high
193
  // probability if the `Mutex` was free.
194
  [[nodiscard]] bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
195
196
  // Mutex::AssertHeld()
197
  //
198
  // Require that the mutex be held exclusively (write mode) by this thread.
199
  //
200
  // If the mutex is not currently held by this thread, this function may report
201
  // an error (typically by crashing with a diagnostic) or it may do nothing.
202
  // This function is intended only as a tool to assist debugging; it doesn't
203
  // guarantee correctness.
204
  void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK();
205
206
  // ---------------------------------------------------------------------------
207
  // Reader-Writer Locking
208
  // ---------------------------------------------------------------------------
209
210
  // A Mutex can also be used as a starvation-free reader-writer lock.
211
  // Neither read-locks nor write-locks are reentrant/recursive to avoid
212
  // potential client programming errors.
213
  //
214
  // The Mutex API provides `Writer*()` aliases for the existing `Lock()`,
215
  // `Unlock()` and `TryLock()` methods for use within applications mixing
216
  // reader/writer locks. Using `Reader*()` and `Writer*()` operations in this
217
  // manner can make locking behavior clearer when mixing read and write modes.
218
  //
219
  // Introducing reader locks necessarily complicates the `Mutex` state
220
  // machine somewhat. The table below illustrates the allowed state transitions
221
  // of a mutex in such cases. Note that ReaderLock() may block even if the lock
222
  // is held in shared mode; this occurs when another thread is blocked on a
223
  // call to WriterLock().
224
  //
225
  // ---------------------------------------------------------------------------
226
  //     Operation: WriterLock() Unlock()  ReaderLock()           ReaderUnlock()
227
  // ---------------------------------------------------------------------------
228
  // State
229
  // ---------------------------------------------------------------------------
230
  // Free           Exclusive    invalid   Shared(1)              invalid
231
  // Shared(1)      blocks       invalid   Shared(2) or blocks    Free
232
  // Shared(n) n>1  blocks       invalid   Shared(n+1) or blocks  Shared(n-1)
233
  // Exclusive      blocks       Free      blocks                 invalid
234
  // ---------------------------------------------------------------------------
235
  //
236
  // In comments below, "shared" refers to a state of Shared(n) for any n > 0.
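A short sketch of the reader/writer API described above (the `Config` class is hypothetical; concurrent `Get()` calls may share the lock, while `Set()` is exclusive):

#include <string>
#include <utility>

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class Config {
 public:
  std::string Get() const {
    absl::ReaderMutexLock lock(&mu_);  // shared/read lock
    return value_;
  }
  void Set(std::string v) {
    absl::WriterMutexLock lock(&mu_);  // exclusive/write lock
    value_ = std::move(v);
  }

 private:
  mutable absl::Mutex mu_;
  std::string value_ ABSL_GUARDED_BY(mu_);
};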
237
238
  // Mutex::ReaderLock()
239
  //
240
  // Blocks the calling thread, if necessary, until this `Mutex` is either free,
241
  // or in shared mode, and then acquires a share of it. Note that
242
  // `ReaderLock()` will block if some other thread has an exclusive/writer lock
243
  // on the mutex.
244
245
  void ReaderLock() ABSL_SHARED_LOCK_FUNCTION();
246
247
  // Mutex::ReaderUnlock()
248
  //
249
  // Releases a read share of this `Mutex`. `ReaderUnlock` may return a mutex to
250
  // the free state if this thread holds the last reader lock on the mutex. Note
251
  // that you cannot call `ReaderUnlock()` on a mutex held in write mode.
252
  void ReaderUnlock() ABSL_UNLOCK_FUNCTION();
253
254
  // Mutex::ReaderTryLock()
255
  //
256
  // If the mutex can be acquired without blocking, acquires this mutex for
257
  // shared access and returns `true`. Otherwise, returns `false`. Returns
258
  // `true` with high probability if the `Mutex` was free or shared.
259
  [[nodiscard]] bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true);
260
261
  // Mutex::AssertReaderHeld()
262
  //
263
  // Require that the mutex be held at least in shared mode (read mode) by this
264
  // thread.
265
  //
266
  // If the mutex is not currently held by this thread, this function may report
267
  // an error (typically by crashing with a diagnostic) or it may do nothing.
268
  // This function is intended only as a tool to assist debugging; it doesn't
269
  // guarantee correctness.
270
  void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK();
271
272
  // Mutex::WriterLock()
273
  // Mutex::WriterUnlock()
274
  // Mutex::WriterTryLock()
275
  //
276
  // Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.
277
  //
278
  // These methods may be used (along with the complementary `Reader*()`
279
  // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
280
  // etc.) from reader/writer lock usage.
281
13
  void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
282
283
13
  void WriterUnlock() ABSL_UNLOCK_FUNCTION() { this->Unlock(); }
284
285
0
  [[nodiscard]] bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
286
0
    return this->TryLock();
287
0
  }
288
289
  // ---------------------------------------------------------------------------
290
  // Conditional Critical Regions
291
  // ---------------------------------------------------------------------------
292
293
  // Conditional usage of a `Mutex` can occur using two distinct paradigms:
294
  //
295
  //   * Use of `Mutex` member functions with `Condition` objects.
296
  //   * Use of the separate `CondVar` abstraction.
297
  //
298
  // In general, prefer use of `Condition` and the `Mutex` member functions
299
  // listed below over `CondVar`. When there are multiple threads waiting on
300
  // distinctly different conditions, however, a battery of `CondVar`s may be
301
  // more efficient. This section discusses use of `Condition` objects.
302
  //
303
  // `Mutex` contains member functions for performing lock operations only under
304
  // certain conditions, of class `Condition`. For correctness, the `Condition`
305
  // must return a boolean that is a pure function, only of state protected by
306
  // the `Mutex`. The condition must be invariant w.r.t. environmental state
307
  // such as thread, cpu id, or time, and must be `noexcept`. The condition will
308
  // always be invoked with the mutex held in at least read mode, so it should
309
  // not block for long periods or sleep on a timer.
310
  //
311
  // Since a condition must not depend directly on the current time, use
312
  // `*WithTimeout()` member function variants to make your condition
313
  // effectively true after a given duration, or `*WithDeadline()` variants to
314
  // make your condition effectively true after a given time.
315
  //
316
  // The condition function should have no side-effects aside from debug
317
  // logging; as a special exception, the function may acquire other mutexes
318
  // provided it releases all those that it acquires.  (This exception was
319
  // required to allow logging.)
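A minimal sketch of a conditional critical section built from the pieces above (the `queue` variable and `Pop()` helper are hypothetical):

#include <deque>

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

absl::Mutex mu;
std::deque<int> queue ABSL_GUARDED_BY(mu);

int Pop() {
  absl::MutexLock lock(&mu);
  // The predicate is a pure function of state protected by `mu`.
  mu.Await(absl::Condition(
      +[](std::deque<int>* q) { return !q->empty(); }, &queue));
  int v = queue.front();
  queue.pop_front();
  return v;
}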
320
321
  // Mutex::Await()
322
  //
323
  // Unlocks this `Mutex` and blocks until simultaneously both `cond` is `true`
324
  // and this `Mutex` can be reacquired, then reacquires this `Mutex` in the
325
  // same mode in which it was previously held. If the condition is initially
326
  // `true`, `Await()` *may* skip the release/re-acquire step.
327
  //
328
  // `Await()` requires that this thread holds this `Mutex` in some mode.
329
0
  void Await(const Condition& cond) {
330
0
    AwaitCommon(cond, synchronization_internal::KernelTimeout::Never());
331
0
  }
332
333
  // Mutex::LockWhen()
334
  // Mutex::ReaderLockWhen()
335
  // Mutex::WriterLockWhen()
336
  //
337
  // Blocks until simultaneously both `cond` is `true` and this `Mutex` can
338
  // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
339
  // logically equivalent to `*Lock(); Await();` though they may have different
340
  // performance characteristics.
341
0
  void LockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
342
0
    LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(),
343
0
                   true);
344
0
  }
345
346
0
  void ReaderLockWhen(const Condition& cond) ABSL_SHARED_LOCK_FUNCTION() {
347
0
    LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(),
348
0
                   false);
349
0
  }
350
351
0
  void WriterLockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
352
0
    this->LockWhen(cond);
353
0
  }
354
355
  // ---------------------------------------------------------------------------
356
  // Mutex Variants with Timeouts/Deadlines
357
  // ---------------------------------------------------------------------------
358
359
  // Mutex::AwaitWithTimeout()
360
  // Mutex::AwaitWithDeadline()
361
  //
362
  // Unlocks this `Mutex` and blocks until simultaneously:
363
  //   - either `cond` is true or the {timeout has expired, deadline has passed}
364
  //     and
365
  //   - this `Mutex` can be reacquired,
366
  // then reacquire this `Mutex` in the same mode in which it was previously
367
  // held, returning `true` iff `cond` is `true` on return.
368
  //
369
  // If the condition is initially `true`, the implementation *may* skip the
370
  // release/re-acquire step and return immediately.
371
  //
372
  // Deadlines in the past are equivalent to an immediate deadline.
373
  // Negative timeouts are equivalent to a zero timeout.
374
  //
375
  // This method requires that this thread holds this `Mutex` in some mode.
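For example, a timed variant of the hypothetical `Pop()` sketch shown earlier (reusing its `mu` and `queue`):

bool PopWithTimeout(int* out, absl::Duration timeout) {
  absl::MutexLock lock(&mu);
  if (!mu.AwaitWithTimeout(
          absl::Condition(+[](std::deque<int>* q) { return !q->empty(); },
                          &queue),
          timeout)) {
    return false;  // timed out; the queue is still empty
  }
  *out = queue.front();
  queue.pop_front();
  return true;
}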
376
0
  bool AwaitWithTimeout(const Condition& cond, absl::Duration timeout) {
377
0
    return AwaitCommon(cond, synchronization_internal::KernelTimeout{timeout});
378
0
  }
379
380
0
  bool AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
381
0
    return AwaitCommon(cond, synchronization_internal::KernelTimeout{deadline});
382
0
  }
383
384
  // Mutex::LockWhenWithTimeout()
385
  // Mutex::ReaderLockWhenWithTimeout()
386
  // Mutex::WriterLockWhenWithTimeout()
387
  //
388
  // Blocks until simultaneously both:
389
  //   - either `cond` is `true` or the timeout has expired, and
390
  //   - this `Mutex` can be acquired,
391
  // then atomically acquires this `Mutex`, returning `true` iff `cond` is
392
  // `true` on return.
393
  //
394
  // Negative timeouts are equivalent to a zero timeout.
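One point worth illustrating: these calls acquire the `Mutex` whether or not the condition becomes true; the return value reports only the condition. A sketch, reusing the hypothetical `mu` from the earlier example, with `data_ready` and `Consume()` standing in for real state and work:

bool data_ready ABSL_GUARDED_BY(mu) = false;
void Consume();  // hypothetical work done while holding mu

void ConsumeIfReadySoon() {
  if (mu.LockWhenWithTimeout(absl::Condition(&data_ready), absl::Seconds(1))) {
    Consume();  // the condition held at acquisition time
  }
  // Either way, `mu` is held here and must be released.
  mu.Unlock();
}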
395
  bool LockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
396
0
      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
397
0
    return LockWhenCommon(
398
0
        cond, synchronization_internal::KernelTimeout{timeout}, true);
399
0
  }
400
  bool ReaderLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
401
0
      ABSL_SHARED_LOCK_FUNCTION() {
402
0
    return LockWhenCommon(
403
0
        cond, synchronization_internal::KernelTimeout{timeout}, false);
404
0
  }
405
  bool WriterLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
406
0
      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
407
0
    return this->LockWhenWithTimeout(cond, timeout);
408
0
  }
409
410
  // Mutex::LockWhenWithDeadline()
411
  // Mutex::ReaderLockWhenWithDeadline()
412
  // Mutex::WriterLockWhenWithDeadline()
413
  //
414
  // Blocks until simultaneously both:
415
  //   - either `cond` is `true` or the deadline has been passed, and
416
  //   - this `Mutex` can be acquired,
417
  // then atomically acquires this Mutex, returning `true` iff `cond` is `true`
418
  // on return.
419
  //
420
  // Deadlines in the past are equivalent to an immediate deadline.
421
  bool LockWhenWithDeadline(const Condition& cond, absl::Time deadline)
422
0
      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
423
0
    return LockWhenCommon(
424
0
        cond, synchronization_internal::KernelTimeout{deadline}, true);
425
0
  }
426
  bool ReaderLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
427
0
      ABSL_SHARED_LOCK_FUNCTION() {
428
0
    return LockWhenCommon(
429
0
        cond, synchronization_internal::KernelTimeout{deadline}, false);
430
0
  }
431
  bool WriterLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
432
0
      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
433
0
    return this->LockWhenWithDeadline(cond, deadline);
434
0
  }
435
436
  // ---------------------------------------------------------------------------
437
  // Debug Support: Invariant Checking, Deadlock Detection, Logging.
438
  // ---------------------------------------------------------------------------
439
440
  // Mutex::EnableInvariantDebugging()
441
  //
442
  // If `invariant`!=null and if invariant debugging has been enabled globally,
443
  // cause `(*invariant)(arg)` to be called at moments when the invariant for
444
  // this `Mutex` should hold (for example: just after acquire, just before
445
  // release).
446
  //
447
  // The routine `invariant` should have no side-effects since it is not
448
  // guaranteed how many times it will be called; it should check the invariant
449
  // and crash if it does not hold. Enabling global invariant debugging may
450
  // substantially reduce `Mutex` performance; it should be set only for
451
  // non-production runs.  Optimization options may also disable invariant
452
  // checks.
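A sketch of how an invariant might be wired up (the `Account` type and `CheckBalance` function are hypothetical; the global opt-in used here is declared near the end of this header):

#include <cstdint>
#include <cstdlib>

#include "absl/synchronization/mutex.h"

struct Account {
  absl::Mutex mu;
  int64_t balance = 0;  // protected by mu
};

// Invoked by the Mutex implementation at moments when the invariant should
// hold (for example, just after acquire and just before release).
void CheckBalance(void* arg) {
  auto* account = static_cast<Account*>(arg);
  if (account->balance < 0) std::abort();  // invariant: balance never negative
}

void InstallInvariant(Account* account) {
  absl::EnableMutexInvariantDebugging(true);  // global switch (see below)
  account->mu.EnableInvariantDebugging(CheckBalance, account);
}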
453
  void EnableInvariantDebugging(
454
      void (*absl_nullable invariant)(void* absl_nullability_unknown),
455
      void* absl_nullability_unknown arg);
456
457
  // Mutex::EnableDebugLog()
458
  //
459
  // Cause all subsequent uses of this `Mutex` to be logged via
460
  // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if no previous
461
  // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
462
  //
463
  // Note: This method substantially reduces `Mutex` performance.
464
  void EnableDebugLog(const char* absl_nullable name);
465
466
  // Deadlock detection
467
468
  // Mutex::ForgetDeadlockInfo()
469
  //
470
  // Forget any deadlock-detection information previously gathered
471
  // about this `Mutex`. Call this method in debug mode when the lock ordering
472
  // of a `Mutex` changes.
473
  void ForgetDeadlockInfo();
474
475
  // Mutex::AssertNotHeld()
476
  //
477
  // Return immediately if this thread does not hold this `Mutex` in any
478
  // mode; otherwise, may report an error (typically by crashing with a
479
  // diagnostic), or may return immediately.
480
  //
481
  // Currently this check is performed only if all of:
482
  //    - in debug mode
483
  //    - SetMutexDeadlockDetectionMode() has been set to kReport or kAbort
484
  //    - number of locks concurrently held by this thread is not large.
485
  // are true.
486
  void AssertNotHeld() const;
487
488
  // Special cases.
489
490
  // A `MuHow` is a constant that indicates how a lock should be acquired.
491
  // Internal implementation detail.  Clients should ignore.
492
  typedef const struct MuHowS* MuHow;
493
494
  // Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
495
  //
496
  // Causes the `Mutex` implementation to prepare itself for re-entry caused by
497
  // future use of `Mutex` within a fatal signal handler. This method is
498
  // intended for use only for last-ditch attempts to log crash information.
499
  // It does not guarantee that attempts to use Mutexes within the handler will
500
  // not deadlock; it merely makes other faults less likely.
501
  //
502
  // WARNING:  This routine must be invoked from a signal handler, and the
503
  // signal handler must either loop forever or terminate the process.
504
  // Attempts to return from (or `longjmp` out of) the signal handler once this
505
  // call has been made may cause arbitrary program behaviour including
506
  // crashes and deadlocks.
507
  static void InternalAttemptToUseMutexInFatalSignalHandler();
508
509
 private:
510
  std::atomic<intptr_t> mu_;  // The Mutex state.
511
512
  // Post()/Wait() versus associated PerThreadSem; in class for required
513
  // friendship with PerThreadSem.
514
  static void IncrementSynchSem(Mutex* absl_nonnull mu,
515
                                base_internal::PerThreadSynch* absl_nonnull w);
516
  static bool DecrementSynchSem(Mutex* absl_nonnull mu,
517
                                base_internal::PerThreadSynch* absl_nonnull w,
518
                                synchronization_internal::KernelTimeout t);
519
520
  // slow path acquire
521
  void LockSlowLoop(SynchWaitParams* absl_nonnull waitp, int flags);
522
  // wrappers around LockSlowLoop()
523
  bool LockSlowWithDeadline(MuHow absl_nonnull how,
524
                            const Condition* absl_nullable cond,
525
                            synchronization_internal::KernelTimeout t,
526
                            int flags);
527
  void LockSlow(MuHow absl_nonnull how, const Condition* absl_nullable cond,
528
                int flags) ABSL_ATTRIBUTE_COLD;
529
  // slow path release
530
  void UnlockSlow(SynchWaitParams* absl_nullable waitp) ABSL_ATTRIBUTE_COLD;
531
  // TryLock slow path.
532
  bool TryLockSlow();
533
  // ReaderTryLock slow path.
534
  bool ReaderTryLockSlow();
535
  // Common code between Await() and AwaitWithTimeout/Deadline()
536
  bool AwaitCommon(const Condition& cond,
537
                   synchronization_internal::KernelTimeout t);
538
  bool LockWhenCommon(const Condition& cond,
539
                      synchronization_internal::KernelTimeout t, bool write);
540
  // Attempt to remove thread s from queue.
541
  void TryRemove(base_internal::PerThreadSynch* absl_nonnull s);
542
  // Block a thread on mutex.
543
  void Block(base_internal::PerThreadSynch* absl_nonnull s);
544
  // Wake a thread; return successor.
545
  base_internal::PerThreadSynch* absl_nullable Wakeup(
546
      base_internal::PerThreadSynch* absl_nonnull w);
547
  void Dtor();
548
549
  friend class CondVar;   // for access to Trans()/Fer().
550
  void Trans(MuHow absl_nonnull how);  // used for CondVar->Mutex transfer
551
  void Fer(base_internal::PerThreadSynch* absl_nonnull
552
           w);  // used for CondVar->Mutex transfer
553
554
  // Catch the error of writing Mutex when intending MutexLock.
555
0
  explicit Mutex(const volatile Mutex* absl_nullable /*ignored*/) {}
556
557
  Mutex(const Mutex&) = delete;
558
  Mutex& operator=(const Mutex&) = delete;
559
};
560
561
// -----------------------------------------------------------------------------
562
// Mutex RAII Wrappers
563
// -----------------------------------------------------------------------------
564
565
// MutexLock
566
//
567
// `MutexLock` is a helper class, which acquires and releases a `Mutex` via
568
// RAII.
569
//
570
// Example:
571
//
572
// class Foo {
573
//  public:
574
//   Foo::Bar* Baz() {
575
//     MutexLock lock(&mu_);
576
//     ...
577
//     return bar;
578
//   }
579
//
580
// private:
581
//   Mutex mu_;
582
// };
583
class ABSL_SCOPED_LOCKABLE MutexLock {
584
 public:
585
  // Constructors
586
587
  // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
588
  // guaranteed to be locked when this object is constructed. Requires that
589
  // `mu` be dereferenceable.
590
  explicit MutexLock(Mutex* absl_nonnull mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
591
293k
      : mu_(mu) {
592
293k
    this->mu_->Lock();
593
293k
  }
594
595
  // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
596
  // the above, the condition given by `cond` is also guaranteed to hold when
597
  // this object is constructed.
598
  explicit MutexLock(Mutex* absl_nonnull mu, const Condition& cond)
599
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
600
0
      : mu_(mu) {
601
0
    this->mu_->LockWhen(cond);
602
0
  }
603
604
  MutexLock(const MutexLock&) = delete;  // NOLINT(runtime/mutex)
605
  MutexLock(MutexLock&&) = delete;       // NOLINT(runtime/mutex)
606
  MutexLock& operator=(const MutexLock&) = delete;
607
  MutexLock& operator=(MutexLock&&) = delete;
608
609
293k
  ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
610
611
 private:
612
  Mutex* absl_nonnull const mu_;
613
};
614
615
// ReaderMutexLock
616
//
617
// The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and
618
// releases a shared lock on a `Mutex` via RAII.
619
class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
620
 public:
621
  explicit ReaderMutexLock(Mutex* absl_nonnull mu) ABSL_SHARED_LOCK_FUNCTION(mu)
622
0
      : mu_(mu) {
623
0
    mu->ReaderLock();
624
0
  }
625
626
  explicit ReaderMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
627
      ABSL_SHARED_LOCK_FUNCTION(mu)
628
0
      : mu_(mu) {
629
0
    mu->ReaderLockWhen(cond);
630
0
  }
631
632
  ReaderMutexLock(const ReaderMutexLock&) = delete;
633
  ReaderMutexLock(ReaderMutexLock&&) = delete;
634
  ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
635
  ReaderMutexLock& operator=(ReaderMutexLock&&) = delete;
636
637
0
  ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }
638
639
 private:
640
  Mutex* absl_nonnull const mu_;
641
};
642
643
// WriterMutexLock
644
//
645
// The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and
646
// releases a write (exclusive) lock on a `Mutex` via RAII.
647
class ABSL_SCOPED_LOCKABLE WriterMutexLock {
648
 public:
649
  explicit WriterMutexLock(Mutex* absl_nonnull mu)
650
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
651
0
      : mu_(mu) {
652
0
    mu->WriterLock();
653
0
  }
654
655
  explicit WriterMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
656
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
657
0
      : mu_(mu) {
658
0
    mu->WriterLockWhen(cond);
659
0
  }
660
661
  WriterMutexLock(const WriterMutexLock&) = delete;
662
  WriterMutexLock(WriterMutexLock&&) = delete;
663
  WriterMutexLock& operator=(const WriterMutexLock&) = delete;
664
  WriterMutexLock& operator=(WriterMutexLock&&) = delete;
665
666
0
  ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }
667
668
 private:
669
  Mutex* absl_nonnull const mu_;
670
};
671
672
// -----------------------------------------------------------------------------
673
// Condition
674
// -----------------------------------------------------------------------------
675
//
676
// `Mutex` contains a number of member functions which take a `Condition` as an
677
// argument; clients can wait for conditions to become `true` before attempting
678
// to acquire the mutex. These sections are known as "condition critical"
679
// sections. To use a `Condition`, you simply need to construct it, and use
680
// within an appropriate `Mutex` member function; everything else in the
681
// `Condition` class is an implementation detail.
682
//
683
// A `Condition` is specified as a function pointer which returns a boolean.
684
// `Condition` functions should be pure functions -- their results should depend
685
// only on passed arguments, should not consult any external state (such as
686
// clocks), and should have no side-effects, aside from debug logging. Any
687
// objects that the function may access should be limited to those which are
688
// constant while the mutex is blocked on the condition (e.g. a stack variable),
689
// or objects of state protected explicitly by the mutex.
690
//
691
// No matter which construction is used for `Condition`, the underlying
692
// function pointer / functor / callable must not throw any
693
// exceptions. Correctness of `Mutex` / `Condition` is not guaranteed in
694
// the face of a throwing `Condition`. (When Abseil is allowed to depend
695
// on C++17, these function pointers will be explicitly marked
696
// `noexcept`; until then this requirement cannot be enforced in the
697
// type system.)
698
//
699
// Note: to use a `Condition`, you need only construct it and pass it to a
700
// suitable `Mutex` member function, such as `Mutex::Await()`, or to the
701
// constructor of one of the scope guard classes.
702
//
703
// Example using LockWhen/Unlock:
704
//
705
//   // assume count_ is not internal reference count
706
//   int count_ ABSL_GUARDED_BY(mu_);
707
//   Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_);
708
//
709
//   mu_.LockWhen(count_is_zero);
710
//   // ...
711
//   mu_.Unlock();
712
//
713
// Example using a scope guard:
714
//
715
//   {
716
//     MutexLock lock(&mu_, count_is_zero);
717
//     // ...
718
//   }
719
//
720
// When multiple threads are waiting on exactly the same condition, make sure
721
// that they are constructed with the same parameters (same pointer to function
722
// + arg, or same pointer to object + method), so that the mutex implementation
723
// can avoid redundantly evaluating the same condition for each thread.
724
class Condition {
725
 public:
726
  // A Condition that returns the result of "(*func)(arg)"
727
  Condition(bool (*absl_nonnull func)(void* absl_nullability_unknown),
728
            void* absl_nullability_unknown arg);
729
730
  // Templated version for people who are averse to casts.
731
  //
732
  // To use a lambda, prepend it with unary plus, which converts the lambda
733
  // into a function pointer:
734
  //     Condition(+[](T* t) { return ...; }, arg).
735
  //
736
  // Note: lambdas in this case must contain no bound variables.
737
  //
738
  // See class comment for performance advice.
739
  template <typename T>
740
  Condition(bool (*absl_nonnull func)(T* absl_nullability_unknown),
741
            T* absl_nullability_unknown arg);
742
743
  // Same as above, but allows for cases where `arg` comes from a pointer that
744
  // is convertible to the function parameter type `T*` but not an exact match.
745
  //
746
  // For example, the argument might be `X*` but the function takes `const X*`,
747
  // or the argument might be `Derived*` while the function takes `Base*`, and
748
  // so on for cases where the argument pointer can be implicitly converted.
749
  //
750
  // Implementation notes: This constructor overload is required in addition to
751
  // the one above to allow deduction of `T` from `arg` for cases such as where
752
  // a function template is passed as `func`. Also, the dummy `typename = void`
753
  // template parameter exists just to work around a MSVC mangling bug.
754
  template <typename T, typename = void>
755
  Condition(
756
      bool (*absl_nonnull func)(T* absl_nullability_unknown),
757
      typename absl::internal::type_identity<T>::type* absl_nullability_unknown
758
      arg);
759
760
  // Templated version for invoking a method that returns a `bool`.
761
  //
762
  // `Condition(object, &Class::Method)` constructs a `Condition` that evaluates
763
  // `object->Method()`.
764
  //
765
  // Implementation Note: `absl::internal::type_identity` is used to allow
766
  // methods to come from base classes. A simpler signature like
767
  // `Condition(T*, bool (T::*)())` does not suffice.
768
  template <typename T>
769
  Condition(
770
      T* absl_nonnull object,
771
      bool (absl::internal::type_identity<T>::type::* absl_nonnull method)());
772
773
  // Same as above, for const members
774
  template <typename T>
775
  Condition(
776
      const T* absl_nonnull object,
777
      bool (absl::internal::type_identity<T>::type::* absl_nonnull method)()
778
          const);
779
780
  // A Condition that returns the value of `*cond`
781
  explicit Condition(const bool* absl_nonnull cond);
782
783
  // Templated version for invoking a functor that returns a `bool`.
784
  // This approach accepts pointers to non-mutable lambdas, `std::function`,
785
  // the result of `std::bind` and user-defined functors that define
786
  // `bool F::operator()() const`.
787
  //
788
  // Example:
789
  //
790
  //   auto reached = [this, current]() {
791
  //     mu_.AssertReaderHeld();                // For annotalysis.
792
  //     return processed_ >= current;
793
  //   };
794
  //   mu_.Await(Condition(&reached));
795
  //
796
  // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
797
  // the lambda as it may be called when the mutex is being unlocked from a
798
  // scope holding only a reader lock, which would cause the assertion to fail
799
  // and crash the binary.
800
801
  // See class comment for performance advice. In particular, if there
802
  // might be more than one waiter for the same condition, make sure
803
  // that all waiters construct the condition with the same pointers.
804
805
  // Implementation note: The second template parameter ensures that this
806
  // constructor doesn't participate in overload resolution if T doesn't have
807
  // `bool operator() const`.
808
  template <typename T, typename E = decltype(static_cast<bool (T::*)() const>(
809
                            &T::operator()))>
810
  explicit Condition(const T* absl_nonnull obj)
811
      : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
812
813
  // A Condition that always returns `true`.
814
  // kTrue is only useful in a narrow set of circumstances, mostly when
815
  // it's passed conditionally. For example:
816
  //
817
  //   mu.LockWhen(some_flag ? kTrue : SomeOtherCondition);
818
  //
819
  // Note: {LockWhen,Await}With{Deadline,Timeout} methods with kTrue condition
820
  // don't return immediately when the timeout happens, they still block until
821
  // the Mutex becomes available. The return value of these methods does
822
  // not indicate if the timeout was reached; rather it indicates whether or
823
  // not the condition is true.
824
  ABSL_CONST_INIT static const Condition kTrue;
825
826
  // Evaluates the condition.
827
  bool Eval() const;
828
829
  // Returns `true` if the two conditions are guaranteed to return the same
830
  // value if evaluated at the same time, `false` if the evaluation *may* return
831
  // different results.
832
  //
833
  // Two `Condition` values are guaranteed equal if both their `func` and `arg`
834
  // components are the same. A null pointer is equivalent to a `true`
835
  // condition.
836
  static bool GuaranteedEqual(const Condition* absl_nullable a,
837
                              const Condition* absl_nullable b);
838
839
 private:
840
  // Sizing an allocation for a method pointer can be subtle. In the Itanium
841
  // specifications, a method pointer has a predictable, uniform size. On the
842
  // other hand, in the MSVC ABI, method pointer sizes vary based on the
843
  // inheritance of the class. Specifically, method pointers from classes with
844
  // multiple inheritance are bigger than those of classes with single
845
  // inheritance. Other variations also exist.
846
847
#ifndef _MSC_VER
848
  // Allocation for a function pointer or method pointer.
849
  // The {0} initializer ensures that all unused bytes of this buffer are
850
  // always zeroed out.  This is necessary, because GuaranteedEqual() compares
851
  // all of the bytes, unaware of which bytes are relevant to a given `eval_`.
852
  using MethodPtr = bool (Condition::*)();
853
  char callback_[sizeof(MethodPtr)] = {0};
854
#else
855
  // It is well known that the largest MSVC pointer-to-member is 24 bytes. This
856
  // may be the largest known pointer-to-member of any platform. For this
857
  // reason we will allocate 24 bytes for MSVC platform toolchains.
858
  char callback_[24] = {0};
859
#endif
860
861
  // Function with which to evaluate callbacks and/or arguments.
862
  bool (*absl_nullable eval_)(const Condition* absl_nonnull) = nullptr;
863
864
  // Either an argument for a function call or an object for a method call.
865
  void* absl_nullable arg_ = nullptr;
866
867
  // Various functions eval_ can point to:
868
  static bool CallVoidPtrFunction(const Condition* absl_nonnull c);
869
  template <typename T>
870
  static bool CastAndCallFunction(const Condition* absl_nonnull c);
871
  template <typename T, typename ConditionMethodPtr>
872
  static bool CastAndCallMethod(const Condition* absl_nonnull c);
873
874
  // Helper methods for storing, validating, and reading callback arguments.
875
  template <typename T>
876
0
  inline void StoreCallback(T callback) {
877
0
    static_assert(
878
0
        sizeof(callback) <= sizeof(callback_),
879
0
        "An overlarge pointer was passed as a callback to Condition.");
880
0
    std::memcpy(callback_, &callback, sizeof(callback));
881
0
  }
Unexecuted instantiation: void absl::Condition::StoreCallback<bool (*)(absl::SynchEvent*)>(bool (*)(absl::SynchEvent*))
Unexecuted instantiation: void absl::Condition::StoreCallback<bool (*)(void*)>(bool (*)(void*))
882
883
  template <typename T>
884
0
  inline void ReadCallback(T* absl_nonnull callback) const {
885
0
    std::memcpy(callback, callback_, sizeof(*callback));
886
0
  }
887
888
0
  static bool AlwaysTrue(const Condition* absl_nullable) { return true; }
889
890
  // Used only to create kTrue.
891
0
  constexpr Condition() : eval_(AlwaysTrue), arg_(nullptr) {}
892
};
893
894
// -----------------------------------------------------------------------------
895
// CondVar
896
// -----------------------------------------------------------------------------
897
//
898
// A condition variable, reflecting state evaluated separately outside of the
899
// `Mutex` object, which can be signaled to wake callers.
900
// This class is not normally needed; use `Mutex` member functions such as
901
// `Mutex::Await()` and intrinsic `Condition` abstractions. In rare cases
902
// with many threads and many conditions, `CondVar` may be faster.
903
//
904
// The implementation may deliver signals to any condition variable at
905
// any time, even when no call to `Signal()` or `SignalAll()` is made; as a
906
// result, upon being awoken, you must check the logical condition you have
907
// been waiting upon.
908
//
909
// Examples:
910
//
911
// Usage for a thread waiting for some condition C protected by mutex mu:
912
//       mu.Lock();
913
//       while (!C) { cv->Wait(&mu); }        // releases and reacquires mu
914
//       //  C holds; process data
915
//       mu.Unlock();
916
//
917
// Usage to wake T is:
918
//       mu.Lock();
919
//       // process data, possibly establishing C
920
//       if (C) { cv->Signal(); }
921
//       mu.Unlock();
922
//
923
// If C may be useful to more than one waiter, use `SignalAll()` instead of
924
// `Signal()`.
925
//
926
// With this implementation it is efficient to use `Signal()/SignalAll()` inside
927
// the locked region; this usage can make reasoning about your program easier.
928
//
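Putting the two usage fragments above together into a compilable sketch (the `ready` flag and the function names are hypothetical):

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

absl::Mutex mu;
absl::CondVar cv;
bool ready ABSL_GUARDED_BY(mu) = false;

void Consumer() {
  absl::MutexLock lock(&mu);
  while (!ready) {  // re-check: wakeups may be spurious
    cv.Wait(&mu);   // releases mu while blocked, reacquires before returning
  }
  // `ready` holds here; process data.
}

void Producer() {
  absl::MutexLock lock(&mu);
  ready = true;
  cv.Signal();  // efficient even while holding mu (see above)
}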
929
class CondVar {
930
 public:
931
  // A `CondVar` allocated on the heap or on the stack can use this
932
  // constructor.
933
  CondVar();
934
935
  // CondVar::Wait()
936
  //
937
  // Atomically releases a `Mutex` and blocks on this condition variable.
938
  // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
939
  // spurious wakeup), then reacquires the `Mutex` and returns.
940
  //
941
  // Requires and ensures that the current thread holds the `Mutex`.
942
0
  void Wait(Mutex* absl_nonnull mu) {
943
0
    WaitCommon(mu, synchronization_internal::KernelTimeout::Never());
944
0
  }
945
946
  // CondVar::WaitWithTimeout()
947
  //
948
  // Atomically releases a `Mutex` and blocks on this condition variable.
949
  // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
950
  // spurious wakeup), or until the timeout has expired, then reacquires
951
  // the `Mutex` and returns.
952
  //
953
  // Returns true if the timeout has expired without this `CondVar`
954
  // being signalled in any manner. If both the timeout has expired
955
  // and this `CondVar` has been signalled, the implementation is free
956
  // to return `true` or `false`.
957
  //
958
  // Requires and ensures that the current thread holds the `Mutex`.
959
0
  bool WaitWithTimeout(Mutex* absl_nonnull mu, absl::Duration timeout) {
960
0
    return WaitCommon(mu, synchronization_internal::KernelTimeout(timeout));
961
0
  }
962
963
  // CondVar::WaitWithDeadline()
964
  //
965
  // Atomically releases a `Mutex` and blocks on this condition variable.
966
  // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
967
  // spurious wakeup), or until the deadline has passed, then reacquires
968
  // the `Mutex` and returns.
969
  //
970
  // Deadlines in the past are equivalent to an immediate deadline.
971
  //
972
  // Returns true if the deadline has passed without this `CondVar`
973
  // being signalled in any manner. If both the deadline has passed
974
  // and this `CondVar` has been signalled, the implementation is free
975
  // to return `true` or `false`.
976
  //
977
  // Requires and ensures that the current thread holds the `Mutex`.
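A deadline-based wait loop, reusing the hypothetical `mu`, `cv`, and `ready` from the sketch above:

#include "absl/time/time.h"

bool WaitReadyUntil(absl::Time deadline) {
  absl::MutexLock lock(&mu);
  while (!ready) {
    if (cv.WaitWithDeadline(&mu, deadline)) {
      break;  // deadline passed; fall through and re-check `ready`
    }
  }
  return ready;
}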
978
0
  bool WaitWithDeadline(Mutex* absl_nonnull mu, absl::Time deadline) {
979
0
    return WaitCommon(mu, synchronization_internal::KernelTimeout(deadline));
980
0
  }
981
982
  // CondVar::Signal()
983
  //
984
  // Signal this `CondVar`; wake at least one waiter if one exists.
985
  void Signal();
986
987
  // CondVar::SignalAll()
988
  //
989
  // Signal this `CondVar`; wake all waiters.
990
  void SignalAll();
991
992
  // CondVar::EnableDebugLog()
993
  //
994
  // Causes all subsequent uses of this `CondVar` to be logged via
995
  // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
996
  // Note: this method substantially reduces `CondVar` performance.
997
  void EnableDebugLog(const char* absl_nullable name);
998
999
 private:
1000
  bool WaitCommon(Mutex* absl_nonnull mutex,
1001
                  synchronization_internal::KernelTimeout t);
1002
  void Remove(base_internal::PerThreadSynch* absl_nonnull s);
1003
  std::atomic<intptr_t> cv_;  // Condition variable state.
1004
  CondVar(const CondVar&) = delete;
1005
  CondVar& operator=(const CondVar&) = delete;
1006
};
1007
1008
// Variants of MutexLock.
1009
//
1010
// If you find yourself using one of these, consider instead using
1011
// Mutex::Unlock() and/or if-statements for clarity.
1012
1013
// MutexLockMaybe
1014
//
1015
// MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
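A small sketch of where this might appear (the logging helper is hypothetical):

#include <string>
#include <utility>
#include <vector>

// `mu` may be null when the caller has arranged exclusion some other way.
void AppendLine(absl::Mutex* mu, std::vector<std::string>* log,
                std::string line) {
  absl::MutexLockMaybe lock(mu);  // no-op when mu == nullptr
  log->push_back(std::move(line));
}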
1016
class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
1017
 public:
1018
  explicit MutexLockMaybe(Mutex* absl_nullable mu)
1019
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1020
0
      : mu_(mu) {
1021
0
    if (this->mu_ != nullptr) {
1022
0
      this->mu_->Lock();
1023
0
    }
1024
0
  }
1025
1026
  explicit MutexLockMaybe(Mutex* absl_nullable mu, const Condition& cond)
1027
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1028
0
      : mu_(mu) {
1029
0
    if (this->mu_ != nullptr) {
1030
0
      this->mu_->LockWhen(cond);
1031
0
    }
1032
0
  }
1033
1034
0
  ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
1035
0
    if (this->mu_ != nullptr) {
1036
0
      this->mu_->Unlock();
1037
0
    }
1038
0
  }
1039
1040
 private:
1041
  Mutex* absl_nullable const mu_;
1042
  MutexLockMaybe(const MutexLockMaybe&) = delete;
1043
  MutexLockMaybe(MutexLockMaybe&&) = delete;
1044
  MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
1045
  MutexLockMaybe& operator=(MutexLockMaybe&&) = delete;
1046
};
1047
1048
// ReleasableMutexLock
1049
//
1050
// ReleasableMutexLock is like MutexLock, but permits `Release()` of its
1051
// mutex before destruction. `Release()` may be called at most once.
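A sketch of the intended pattern (the `Item` type and its methods are hypothetical):

struct Item {
  void PrepareLocked();   // work that needs the lock
  void ExpensiveWork();   // work that should not hold the lock
};

void Process(absl::Mutex* mu, Item* item) {
  absl::ReleasableMutexLock lock(mu);
  item->PrepareLocked();
  lock.Release();          // at most once; the destructor then does nothing
  item->ExpensiveWork();
}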
1052
class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
1053
 public:
1054
  explicit ReleasableMutexLock(Mutex* absl_nonnull mu)
1055
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1056
0
      : mu_(mu) {
1057
0
    this->mu_->Lock();
1058
0
  }
1059
1060
  explicit ReleasableMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
1061
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1062
0
      : mu_(mu) {
1063
0
    this->mu_->LockWhen(cond);
1064
0
  }
1065
1066
0
  ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
1067
0
    if (this->mu_ != nullptr) {
1068
0
      this->mu_->Unlock();
1069
0
    }
1070
0
  }
1071
1072
  void Release() ABSL_UNLOCK_FUNCTION();
1073
1074
 private:
1075
  Mutex* absl_nonnull mu_;
1076
  ReleasableMutexLock(const ReleasableMutexLock&) = delete;
1077
  ReleasableMutexLock(ReleasableMutexLock&&) = delete;
1078
  ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
1079
  ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete;
1080
};
1081
1082
69.2k
inline Mutex::Mutex() : mu_(0) {
1083
69.2k
  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
1084
69.2k
}
1085
1086
inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {}
1087
1088
#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL)
1089
ABSL_ATTRIBUTE_ALWAYS_INLINE
1090
69.2k
inline Mutex::~Mutex() { Dtor(); }
1091
#endif
1092
1093
#if defined(NDEBUG) && !defined(ABSL_HAVE_THREAD_SANITIZER)
1094
// Use default (empty) destructor in release build for performance reasons.
1095
// We need to mark both Dtor and ~Mutex as always inline for inconsistent
1096
// builds that use both NDEBUG and !NDEBUG with dynamic libraries. In these
1097
// cases we want the empty functions to dissolve entirely rather than being
1098
// exported from dynamic libraries and potentially override the non-empty ones.
1099
ABSL_ATTRIBUTE_ALWAYS_INLINE
1100
inline void Mutex::Dtor() {}
1101
#endif
1102
1103
inline CondVar::CondVar() : cv_(0) {}
1104
1105
// static
1106
template <typename T, typename ConditionMethodPtr>
1107
bool Condition::CastAndCallMethod(const Condition* absl_nonnull c) {
1108
  T* object = static_cast<T*>(c->arg_);
1109
  ConditionMethodPtr condition_method_pointer;
1110
  c->ReadCallback(&condition_method_pointer);
1111
  return (object->*condition_method_pointer)();
1112
}
1113
1114
// static
1115
template <typename T>
1116
0
bool Condition::CastAndCallFunction(const Condition* absl_nonnull c) {
1117
0
  bool (*function)(T*);
1118
0
  c->ReadCallback(&function);
1119
0
  T* argument = static_cast<T*>(c->arg_);
1120
0
  return (*function)(argument);
1121
0
}
1122
1123
template <typename T>
1124
inline Condition::Condition(
1125
    bool (*absl_nonnull func)(T* absl_nullability_unknown),
1126
    T* absl_nullability_unknown arg)
1127
0
    : eval_(&CastAndCallFunction<T>),
1128
0
      arg_(const_cast<void*>(static_cast<const void*>(arg))) {
1129
0
  static_assert(sizeof(&func) <= sizeof(callback_),
1130
0
                "An overlarge function pointer was passed to Condition.");
1131
0
  StoreCallback(func);
1132
0
}
1133
1134
template <typename T, typename>
1135
inline Condition::Condition(
1136
    bool (*absl_nonnull func)(T* absl_nullability_unknown),
1137
    typename absl::internal::type_identity<T>::type* absl_nullability_unknown
1138
    arg)
1139
    // Just delegate to the overload above.
1140
    : Condition(func, arg) {}
1141
1142
template <typename T>
1143
inline Condition::Condition(
1144
    T* absl_nonnull object,
1145
    bool (absl::internal::type_identity<T>::type::* absl_nonnull method)())
1146
    : eval_(&CastAndCallMethod<T, decltype(method)>), arg_(object) {
1147
  static_assert(sizeof(&method) <= sizeof(callback_),
1148
                "An overlarge method pointer was passed to Condition.");
1149
  StoreCallback(method);
1150
}
1151
1152
template <typename T>
1153
inline Condition::Condition(
1154
    const T* absl_nonnull object,
1155
    bool (absl::internal::type_identity<T>::type::* absl_nonnull method)()
1156
        const)
1157
    : eval_(&CastAndCallMethod<const T, decltype(method)>),
1158
      arg_(reinterpret_cast<void*>(const_cast<T*>(object))) {
1159
  StoreCallback(method);
1160
}
1161
1162
// Register hooks for profiling support.
1163
//
1164
// The function pointer registered here will be called whenever a mutex is
1165
// contended.  The callback is given the cycles for which waiting happened (as
1166
// measured by //absl/base/internal/cycleclock.h, and which may not
1167
// be real "cycle" counts.)
1168
//
1169
// There is no ordering guarantee between when the hook is registered and when
1170
// callbacks will begin.  Only a single profiler can be installed in a running
1171
// binary; if this function is called a second time with a different function
1172
// pointer, the value is ignored (and will cause an assertion failure in debug
1173
// mode.)
1174
void RegisterMutexProfiler(void (*absl_nonnull fn)(int64_t wait_cycles));
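A sketch of a simple profiler hook matching this signature (the counters and the hook name are hypothetical):

#include <atomic>
#include <cstdint>

std::atomic<int64_t> contention_events{0};
std::atomic<int64_t> total_wait_cycles{0};

void MutexWaitHook(int64_t wait_cycles) {
  contention_events.fetch_add(1, std::memory_order_relaxed);
  total_wait_cycles.fetch_add(wait_cycles, std::memory_order_relaxed);
}

// Typically registered once, early in main():
//   absl::RegisterMutexProfiler(MutexWaitHook);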
1175
1176
// Register a hook for Mutex tracing.
1177
//
1178
// The function pointer registered here will be called whenever a mutex is
1179
// contended.  The callback is given an opaque handle to the contended mutex,
1180
// an event name, and the number of wait cycles (as measured by
1181
// //absl/base/internal/cycleclock.h, and which may not be real
1182
// "cycle" counts.)
1183
//
1184
// The only event name currently sent is "slow release".
1185
//
1186
// This has the same ordering and single-use limitations as
1187
// RegisterMutexProfiler() above.
1188
void RegisterMutexTracer(void (*absl_nonnull fn)(const char* absl_nonnull msg,
1189
                                                 const void* absl_nonnull obj,
1190
                                                 int64_t wait_cycles));
1191
1192
// Register a hook for CondVar tracing.
1193
//
1194
// The function pointer registered here will be called on various CondVar
1195
// events.  The callback is given an opaque handle to the CondVar object and
1196
// a string identifying the event.  This is thread-safe, but only a single
1197
// tracer can be registered.
1198
//
1199
// Events that can be sent are "Wait", "Unwait", "Signal wakeup", and
1200
// "SignalAll wakeup".
1201
//
1202
// This has the same ordering and single-use limitations as
1203
// RegisterMutexProfiler() above.
1204
void RegisterCondVarTracer(void (*absl_nonnull fn)(
1205
    const char* absl_nonnull msg, const void* absl_nonnull cv));
1206
1207
// EnableMutexInvariantDebugging()
1208
//
1209
// Enable or disable global support for Mutex invariant debugging.  If enabled,
1210
// then invariant predicates can be registered per-Mutex for debug checking.
1211
// See Mutex::EnableInvariantDebugging().
1212
void EnableMutexInvariantDebugging(bool enabled);
1213
1214
// When in debug mode, and when the feature has been enabled globally, the
1215
// implementation will keep track of lock ordering and complain (or optionally
1216
// crash) if a cycle is detected in the acquired-before graph.
1217
1218
// Possible modes of operation for the deadlock detector in debug mode.
1219
enum class OnDeadlockCycle {
1220
  kIgnore,  // Neither report on nor attempt to track cycles in lock ordering
1221
  kReport,  // Report lock cycles to stderr when detected
1222
  kAbort,   // Report lock cycles to stderr when detected, then abort
1223
};
1224
1225
// SetMutexDeadlockDetectionMode()
1226
//
1227
// Enable or disable global support for detection of potential deadlocks
1228
// due to Mutex lock ordering inversions.  When set to 'kIgnore', tracking of
1229
// lock ordering is disabled.  Otherwise, in debug builds, a lock ordering graph
1230
// will be maintained internally, and detected cycles will be reported in
1231
// the manner chosen here.
1232
void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode);
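For example, a debug or test binary might opt in near the top of main() (a sketch):

int main(int argc, char** argv) {
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
  // ... rest of program ...
}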
1233
1234
ABSL_NAMESPACE_END
1235
}  // namespace absl
1236
1237
// In some build configurations we pass --detect-odr-violations to the
1238
// gold linker.  This causes it to flag weak symbol overrides as ODR
1239
// violations.  Because ODR only applies to C++ and not C,
1240
// --detect-odr-violations ignores symbols not mangled with C++ names.
1241
// By changing our extension points to be extern "C", we dodge this
1242
// check.
1243
extern "C" {
1244
void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
1245
}  // extern "C"
1246
1247
#endif  // ABSL_SYNCHRONIZATION_MUTEX_H_