Coverage Report

Created: 2025-08-25 06:55

/src/abseil-cpp/absl/synchronization/mutex.cc
Line
Count
Source
1
// Copyright 2017 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
15
#include "absl/synchronization/mutex.h"
16
17
#ifdef _WIN32
18
#include <windows.h>
19
#ifdef ERROR
20
#undef ERROR
21
#endif
22
#else
23
#include <fcntl.h>
24
#include <pthread.h>
25
#include <sched.h>
26
#include <sys/time.h>
27
#endif
28
29
#include <assert.h>
30
#include <errno.h>
31
#include <stdio.h>
32
#include <stdlib.h>
33
#include <string.h>
34
#include <time.h>
35
36
#include <algorithm>
37
#include <atomic>
38
#include <cstddef>
39
#include <cstdlib>
40
#include <cstring>
41
#include <thread>  // NOLINT(build/c++11)
42
43
#include "absl/base/attributes.h"
44
#include "absl/base/call_once.h"
45
#include "absl/base/config.h"
46
#include "absl/base/dynamic_annotations.h"
47
#include "absl/base/internal/atomic_hook.h"
48
#include "absl/base/internal/cycleclock.h"
49
#include "absl/base/internal/hide_ptr.h"
50
#include "absl/base/internal/low_level_alloc.h"
51
#include "absl/base/internal/raw_logging.h"
52
#include "absl/base/internal/spinlock.h"
53
#include "absl/base/internal/sysinfo.h"
54
#include "absl/base/internal/thread_identity.h"
55
#include "absl/base/internal/tsan_mutex_interface.h"
56
#include "absl/base/optimization.h"
57
#include "absl/debugging/stacktrace.h"
58
#include "absl/debugging/symbolize.h"
59
#include "absl/synchronization/internal/graphcycles.h"
60
#include "absl/synchronization/internal/per_thread_sem.h"
61
#include "absl/time/time.h"
62
63
using absl::base_internal::CurrentThreadIdentityIfPresent;
64
using absl::base_internal::CycleClock;
65
using absl::base_internal::PerThreadSynch;
66
using absl::base_internal::SchedulingGuard;
67
using absl::base_internal::ThreadIdentity;
68
using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
69
using absl::synchronization_internal::GraphCycles;
70
using absl::synchronization_internal::GraphId;
71
using absl::synchronization_internal::InvalidGraphId;
72
using absl::synchronization_internal::KernelTimeout;
73
using absl::synchronization_internal::PerThreadSem;
74
75
extern "C" {
76
0
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
77
0
  std::this_thread::yield();
78
0
}
79
}  // extern "C"
80
81
namespace absl {
82
ABSL_NAMESPACE_BEGIN
83
84
namespace {
85
86
#if defined(ABSL_HAVE_THREAD_SANITIZER)
87
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
88
#else
89
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
90
#endif
91
92
ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
93
    kDeadlockDetectionDefault);
94
ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
95
96
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
97
absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
98
    submit_profile_data;
99
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
100
    const char* msg, const void* obj, int64_t wait_cycles)>
101
    mutex_tracer;
102
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
103
absl::base_internal::AtomicHook<void (*)(const char* msg, const void* cv)>
104
    cond_var_tracer;
105
106
}  // namespace
107
108
static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
109
                                          bool locking, bool trylock,
110
                                          bool read_lock);
111
112
0
void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
113
0
  submit_profile_data.Store(fn);
114
0
}
115
116
void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
117
0
                                    int64_t wait_cycles)) {
118
0
  mutex_tracer.Store(fn);
119
0
}
120
121
0
void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv)) {
122
0
  cond_var_tracer.Store(fn);
123
0
}
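
A minimal sketch of wiring these hooks up; the callback bodies and InstallMutexHooks() are illustrative, only the registration signatures come from the functions above:

static void MyMutexProfiler(int64_t wait_cycles) {
  // Receives the time spent waiting on a contended mutex, in CycleClock cycles.
  ABSL_RAW_LOG(INFO, "mutex contention: %lld cycles",
               static_cast<long long>(wait_cycles));
}

static void MyMutexTracer(const char* msg, const void* obj, int64_t wait_cycles) {
  // Receives a short description plus the mutex address when tracing fires.
  ABSL_RAW_LOG(INFO, "%s %p (%lld cycles)", msg, obj,
               static_cast<long long>(wait_cycles));
}

void InstallMutexHooks() {
  absl::RegisterMutexProfiler(MyMutexProfiler);
  absl::RegisterMutexTracer(MyMutexTracer);
}
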
124
125
namespace {
126
// Represents the strategy for spin and yield.
127
// See the comment in GetMutexGlobals() for more information.
128
enum DelayMode { AGGRESSIVE, GENTLE };
129
130
struct ABSL_CACHELINE_ALIGNED MutexGlobals {
131
  absl::once_flag once;
132
  // Note: this variable is initialized separately in Mutex::LockSlow,
133
  // so that Mutex::Lock does not have a stack frame in optimized builds.
134
  std::atomic<int> spinloop_iterations{0};
135
  int32_t mutex_sleep_spins[2] = {};
136
  absl::Duration mutex_sleep_time;
137
};
138
139
ABSL_CONST_INIT static MutexGlobals globals;
140
141
0
absl::Duration MeasureTimeToYield() {
142
0
  absl::Time before = absl::Now();
143
0
  ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
144
0
  return absl::Now() - before;
145
0
}
146
147
0
const MutexGlobals& GetMutexGlobals() {
148
0
  absl::base_internal::LowLevelCallOnce(&globals.once, [&]() {
149
0
    if (absl::base_internal::NumCPUs() > 1) {
150
      // If the mode is aggressive then spin many times before yielding.
151
      // If the mode is gentle then spin only a few times before yielding.
152
      // Aggressive spinning is used to ensure that an Unlock() call,
153
      // which must get the spin lock for any thread to make progress gets it
154
      // without undue delay.
155
0
      globals.mutex_sleep_spins[AGGRESSIVE] = 5000;
156
0
      globals.mutex_sleep_spins[GENTLE] = 250;
157
0
      globals.mutex_sleep_time = absl::Microseconds(10);
158
0
    } else {
159
      // If this a uniprocessor, only yield/sleep. Real-time threads are often
160
      // unable to yield, so the sleep time needs to be long enough to keep
161
      // the calling thread asleep until scheduling happens.
162
0
      globals.mutex_sleep_spins[AGGRESSIVE] = 0;
163
0
      globals.mutex_sleep_spins[GENTLE] = 0;
164
0
      globals.mutex_sleep_time = MeasureTimeToYield() * 5;
165
0
      globals.mutex_sleep_time =
166
0
          std::min(globals.mutex_sleep_time, absl::Milliseconds(1));
167
0
      globals.mutex_sleep_time =
168
0
          std::max(globals.mutex_sleep_time, absl::Microseconds(10));
169
0
    }
170
0
  });
171
0
  return globals;
172
0
}
173
}  // namespace
174
175
namespace synchronization_internal {
176
// Returns the Mutex delay on iteration `c` depending on the given `mode`.
177
// The returned value should be used as `c` for the next call to `MutexDelay`.
178
0
int MutexDelay(int32_t c, int mode) {
179
0
  const int32_t limit = GetMutexGlobals().mutex_sleep_spins[mode];
180
0
  const absl::Duration sleep_time = GetMutexGlobals().mutex_sleep_time;
181
0
  if (c < limit) {
182
    // Spin.
183
0
    c++;
184
0
  } else {
185
0
    SchedulingGuard::ScopedEnable enable_rescheduling;
186
0
    ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
187
0
    if (c == limit) {
188
      // Yield once.
189
0
      ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
190
0
      c++;
191
0
    } else {
192
      // Then wait.
193
0
      absl::SleepFor(sleep_time);
194
0
      c = 0;
195
0
    }
196
0
    ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
197
0
  }
198
0
  return c;
199
0
}
200
}  // namespace synchronization_internal
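
The intended calling pattern, as Mutex::Block() later in this file uses it, threads the returned value back in as the next `c`. A minimal sketch in this file's context (StopCondition() is a hypothetical predicate):

int c = 0;
while (!StopCondition()) {
  // First spins, then yields once, then sleeps for mutex_sleep_time per cycle.
  c = synchronization_internal::MutexDelay(c, GENTLE);
}
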
201
202
// --------------------------Generic atomic ops
203
// Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
204
// "*pv | bits" if necessary.  Wait until (*pv & wait_until_clear)==0
205
// before making any change.
206
// Returns true if bits were previously unset and set by the call.
207
// This is used to set flags in mutex and condition variable words.
208
static bool AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
209
0
                          intptr_t wait_until_clear) {
210
0
  for (;;) {
211
0
    intptr_t v = pv->load(std::memory_order_relaxed);
212
0
    if ((v & bits) == bits) {
213
0
      return false;
214
0
    }
215
0
    if ((v & wait_until_clear) != 0) {
216
0
      continue;
217
0
    }
218
0
    if (pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
219
0
                                  std::memory_order_relaxed)) {
220
0
      return true;
221
0
    }
222
0
  }
223
0
}
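
A minimal sketch of the calling convention, assuming the kMuEvent/kMuSpin constants defined later in this file; EnsureSynchEvent() below calls it the same way:

static bool MarkEventsRecorded(std::atomic<intptr_t>* mu_word) {
  // Sets kMuEvent once the kMuSpin bit is clear; returns true only for the
  // call that actually set the flag.
  return AtomicSetBits(mu_word, kMuEvent, kMuSpin);
}
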
224
225
//------------------------------------------------------------------
226
227
// Data for doing deadlock detection.
228
ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
229
    base_internal::SCHEDULE_KERNEL_ONLY);
230
231
// Graph used to detect deadlocks.
232
ABSL_CONST_INIT static GraphCycles* deadlock_graph
233
    ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
234
235
//------------------------------------------------------------------
236
// An event mechanism for debugging mutex use.
237
// It also allows mutexes to be given names for those who can't handle
238
// addresses, and instead like to give their data structures names like
239
// "Henry", "Fido", or "Rupert IV, King of Yondavia".
240
241
namespace {  // to prevent name pollution
242
enum {       // Mutex and CondVar events passed as "ev" to PostSynchEvent
243
             // Mutex events
244
  SYNCH_EV_TRYLOCK_SUCCESS,
245
  SYNCH_EV_TRYLOCK_FAILED,
246
  SYNCH_EV_READERTRYLOCK_SUCCESS,
247
  SYNCH_EV_READERTRYLOCK_FAILED,
248
  SYNCH_EV_LOCK,
249
  SYNCH_EV_LOCK_RETURNING,
250
  SYNCH_EV_READERLOCK,
251
  SYNCH_EV_READERLOCK_RETURNING,
252
  SYNCH_EV_UNLOCK,
253
  SYNCH_EV_READERUNLOCK,
254
255
  // CondVar events
256
  SYNCH_EV_WAIT,
257
  SYNCH_EV_WAIT_RETURNING,
258
  SYNCH_EV_SIGNAL,
259
  SYNCH_EV_SIGNALALL,
260
};
261
262
enum {                    // Event flags
263
  SYNCH_F_R = 0x01,       // reader event
264
  SYNCH_F_LCK = 0x02,     // PostSynchEvent called with mutex held
265
  SYNCH_F_TRY = 0x04,     // TryLock or ReaderTryLock
266
  SYNCH_F_UNLOCK = 0x08,  // Unlock or ReaderUnlock
267
268
  SYNCH_F_LCK_W = SYNCH_F_LCK,
269
  SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
270
};
271
}  // anonymous namespace
272
273
// Properties of the events.
274
static const struct {
275
  int flags;
276
  const char* msg;
277
} event_properties[] = {
278
    {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
279
    {0, "TryLock failed "},
280
    {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
281
    {0, "ReaderTryLock failed "},
282
    {0, "Lock blocking "},
283
    {SYNCH_F_LCK_W, "Lock returning "},
284
    {0, "ReaderLock blocking "},
285
    {SYNCH_F_LCK_R, "ReaderLock returning "},
286
    {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
287
    {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
288
    {0, "Wait on "},
289
    {0, "Wait unblocked "},
290
    {0, "Signal on "},
291
    {0, "SignalAll on "},
292
};
293
294
ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
295
    base_internal::SCHEDULE_KERNEL_ONLY);
296
297
// Hash table size; should be prime > 2.
298
// Can't be too small, as it's used for deadlock detection information.
299
static constexpr uint32_t kNSynchEvent = 1031;
300
301
static struct SynchEvent {  // this is a trivial hash table for the events
302
  // struct is freed when refcount reaches 0
303
  int refcount ABSL_GUARDED_BY(synch_event_mu);
304
305
  // buckets have linear, 0-terminated  chains
306
  SynchEvent* next ABSL_GUARDED_BY(synch_event_mu);
307
308
  // Constant after initialization
309
  uintptr_t masked_addr;  // object at this address is called "name"
310
311
  // No explicit synchronization used.  Instead we assume that the
312
  // client who enables/disables invariants/logging on a Mutex does so
313
  // while the Mutex is not being concurrently accessed by others.
314
  void (*invariant)(void* arg);  // called on each event
315
  void* arg;                     // first arg to (*invariant)()
316
  bool log;                      // logging turned on
317
318
  // Constant after initialization
319
  char name[1];  // actually longer---NUL-terminated string
320
}* synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
321
322
// Ensure that the object at "addr" has a SynchEvent struct associated with it,
323
// set "bits" in the word there (waiting until lockbit is clear before doing
324
// so), and return a refcounted reference that will remain valid until
325
// UnrefSynchEvent() is called.  If a new SynchEvent is allocated,
326
// the string name is copied into it.
327
// When used with a mutex, the caller should also ensure that kMuEvent
328
// is set in the mutex word, and similarly for condition variables and kCVEvent.
329
static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
330
                                    const char* name, intptr_t bits,
331
0
                                    intptr_t lockbit) {
332
0
  uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
333
0
  synch_event_mu.lock();
334
  // When a Mutex/CondVar is destroyed, we don't remove the associated
335
  // SynchEvent to keep destructors empty in release builds for performance
336
  // reasons. If the current call is the first to set bits (kMuEvent/kCVEvent),
337
  // we don't look up the existing event because (if it exists, it must be for
338
  // the previous Mutex/CondVar that existed at the same address).
339
  // The leaking events must not be a problem for tests, which should create
340
  // a bounded number of events. And debug logging is not supposed to be enabled
341
  // in production. However, if it's accidentally enabled, or briefly enabled
342
  // for some debugging, we don't want to crash the program. Instead we drop
343
  // all events, if we accumulated too many of them. Size of a single event
344
  // is ~48 bytes, so 100K events is ~5 MB.
345
  // Additionally we could delete the old event for the same address,
346
  // but it would require a better hashmap (if we accumulate too many events,
347
  // linked lists will grow and traversing them will be very slow).
348
0
  constexpr size_t kMaxSynchEventCount = 100 << 10;
349
  // Total number of live synch events.
350
0
  static size_t synch_event_count ABSL_GUARDED_BY(synch_event_mu);
351
0
  if (++synch_event_count > kMaxSynchEventCount) {
352
0
    synch_event_count = 0;
353
0
    ABSL_RAW_LOG(ERROR,
354
0
                 "Accumulated %zu Mutex debug objects. If you see this"
355
0
                 " in production, it may mean that the production code"
356
0
                 " accidentally calls "
357
0
                 "Mutex/CondVar::EnableDebugLog/EnableInvariantDebugging.",
358
0
                 kMaxSynchEventCount);
359
0
    for (auto*& head : synch_event) {
360
0
      for (auto* e = head; e != nullptr;) {
361
0
        SynchEvent* next = e->next;
362
0
        if (--(e->refcount) == 0) {
363
0
          base_internal::LowLevelAlloc::Free(e);
364
0
        }
365
0
        e = next;
366
0
      }
367
0
      head = nullptr;
368
0
    }
369
0
  }
370
0
  SynchEvent* e = nullptr;
371
0
  if (!AtomicSetBits(addr, bits, lockbit)) {
372
0
    for (e = synch_event[h];
373
0
         e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
374
0
         e = e->next) {
375
0
    }
376
0
  }
377
0
  if (e == nullptr) {  // no SynchEvent struct found; make one.
378
0
    if (name == nullptr) {
379
0
      name = "";
380
0
    }
381
0
    size_t l = strlen(name);
382
0
    e = reinterpret_cast<SynchEvent*>(
383
0
        base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
384
0
    e->refcount = 2;  // one for return value, one for linked list
385
0
    e->masked_addr = base_internal::HidePtr(addr);
386
0
    e->invariant = nullptr;
387
0
    e->arg = nullptr;
388
0
    e->log = false;
389
0
    strcpy(e->name, name);  // NOLINT(runtime/printf)
390
0
    e->next = synch_event[h];
391
0
    synch_event[h] = e;
392
0
  } else {
393
0
    e->refcount++;  // for return value
394
0
  }
395
0
  synch_event_mu.unlock();
396
0
  return e;
397
0
}
398
399
// Decrement the reference count of *e, or do nothing if e==null.
400
0
static void UnrefSynchEvent(SynchEvent* e) {
401
0
  if (e != nullptr) {
402
0
    synch_event_mu.lock();
403
0
    bool del = (--(e->refcount) == 0);
404
0
    synch_event_mu.unlock();
405
0
    if (del) {
406
0
      base_internal::LowLevelAlloc::Free(e);
407
0
    }
408
0
  }
409
0
}
410
411
// Return a refcounted reference to the SynchEvent of the object at address
412
// "addr", if any.  The pointer returned is valid until the UnrefSynchEvent() is
413
// called.
414
0
static SynchEvent* GetSynchEvent(const void* addr) {
415
0
  uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
416
0
  SynchEvent* e;
417
0
  synch_event_mu.lock();
418
0
  for (e = synch_event[h];
419
0
       e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
420
0
       e = e->next) {
421
0
  }
422
0
  if (e != nullptr) {
423
0
    e->refcount++;
424
0
  }
425
0
  synch_event_mu.unlock();
426
0
  return e;
427
0
}
428
429
// Called when an event "ev" occurs on a Mutex or CondVar "obj"
430
// if event recording is on
431
0
static void PostSynchEvent(void* obj, int ev) {
432
0
  SynchEvent* e = GetSynchEvent(obj);
433
  // logging is on if event recording is on and either there's no event struct,
434
  // or it explicitly says to log
435
0
  if (e == nullptr || e->log) {
436
0
    void* pcs[40];
437
0
    int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
438
    // A buffer with enough space for the ASCII for all the PCs, even on a
439
    // 64-bit machine.
440
0
    char buffer[ABSL_ARRAYSIZE(pcs) * 24];
441
0
    int pos = snprintf(buffer, sizeof(buffer), " @");
442
0
    for (int i = 0; i != n; i++) {
443
0
      int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos),
444
0
                       " %p", pcs[i]);
445
0
      if (b < 0 ||
446
0
          static_cast<size_t>(b) >= sizeof(buffer) - static_cast<size_t>(pos)) {
447
0
        break;
448
0
      }
449
0
      pos += b;
450
0
    }
451
0
    ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
452
0
                 (e == nullptr ? "" : e->name), buffer);
453
0
  }
454
0
  const int flags = event_properties[ev].flags;
455
0
  if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
456
    // Calling the invariant as is causes problems under ThreadSanitizer.
457
    // We are currently inside of Mutex Lock/Unlock and are ignoring all
458
    // memory accesses and synchronization. If the invariant transitively
459
    // synchronizes something else and we ignore the synchronization, we will
460
    // get false positive race reports later.
461
    // Reuse EvalConditionAnnotated to properly call into user code.
462
0
    struct local {
463
0
      static bool pred(SynchEvent* ev) {
464
0
        (*ev->invariant)(ev->arg);
465
0
        return false;
466
0
      }
467
0
    };
468
0
    Condition cond(&local::pred, e);
469
0
    Mutex* mu = static_cast<Mutex*>(obj);
470
0
    const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
471
0
    const bool trylock = (flags & SYNCH_F_TRY) != 0;
472
0
    const bool read_lock = (flags & SYNCH_F_R) != 0;
473
0
    EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
474
0
  }
475
0
  UnrefSynchEvent(e);
476
0
}
477
478
//------------------------------------------------------------------
479
480
// The SynchWaitParams struct encapsulates the way in which a thread is waiting:
481
// whether it has a timeout, the condition, exclusive/shared, and whether a
482
// condition variable wait has an associated Mutex (as opposed to another
483
// type of lock).  It also points to the PerThreadSynch struct of its thread.
484
// cv_word tells Enqueue() to enqueue on a CondVar using CondVarEnqueue().
485
//
486
// This structure is held on the stack rather than directly in
487
// PerThreadSynch because a thread can be waiting on multiple Mutexes if,
488
// while waiting on one Mutex, the implementation calls a client callback
489
// (such as a Condition function) that acquires another Mutex. We don't
490
// strictly need to allow this, but programmers become confused if we do not
491
// allow them to use functions such as LOG() within Condition functions.  The
492
// PerThreadSynch struct points at the most recent SynchWaitParams struct when
493
// the thread is on a Mutex's waiter queue.
494
struct SynchWaitParams {
495
  SynchWaitParams(Mutex::MuHow how_arg, const Condition* cond_arg,
496
                  KernelTimeout timeout_arg, Mutex* cvmu_arg,
497
                  PerThreadSynch* thread_arg,
498
                  std::atomic<intptr_t>* cv_word_arg)
499
0
      : how(how_arg),
500
0
        cond(cond_arg),
501
0
        timeout(timeout_arg),
502
0
        cvmu(cvmu_arg),
503
0
        thread(thread_arg),
504
0
        cv_word(cv_word_arg),
505
0
        contention_start_cycles(CycleClock::Now()),
506
0
        should_submit_contention_data(false) {}
507
508
  const Mutex::MuHow how;  // How this thread needs to wait.
509
  const Condition* cond;   // The condition that this thread is waiting for.
510
                           // In Mutex, this field is set to zero if a timeout
511
                           // expires.
512
  KernelTimeout timeout;   // timeout expiry---absolute time
513
                           // In Mutex, this field is set to zero if a timeout
514
                           // expires.
515
  Mutex* const cvmu;       // used for transfer from cond var to mutex
516
  PerThreadSynch* const thread;  // thread that is waiting
517
518
  // If not null, thread should be enqueued on the CondVar whose state
519
  // word is cv_word instead of queueing normally on the Mutex.
520
  std::atomic<intptr_t>* cv_word;
521
522
  int64_t contention_start_cycles;  // Time (in cycles) when this thread started
523
                                    // to contend for the mutex.
524
  bool should_submit_contention_data;
525
};
526
527
struct SynchLocksHeld {
528
  int n;          // number of valid entries in locks[]
529
  bool overflow;  // true iff we overflowed the array at some point
530
  struct {
531
    Mutex* mu;      // lock acquired
532
    int32_t count;  // times acquired
533
    GraphId id;     // deadlock_graph id of acquired lock
534
  } locks[40];
535
  // If a thread overfills the array during deadlock detection, we
536
  // continue, discarding information as needed.  If no overflow has
537
  // taken place, we can provide more error checking, such as
538
  // detecting when a thread releases a lock it does not hold.
539
};
540
541
// A sentinel value in lists that is not 0.
542
// A 0 value is used to mean "not on a list".
543
static PerThreadSynch* const kPerThreadSynchNull =
544
    reinterpret_cast<PerThreadSynch*>(1);
545
546
2
static SynchLocksHeld* LocksHeldAlloc() {
547
2
  SynchLocksHeld* ret = reinterpret_cast<SynchLocksHeld*>(
548
2
      base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
549
2
  ret->n = 0;
550
2
  ret->overflow = false;
551
2
  return ret;
552
2
}
553
554
// Return the PerThreadSynch-struct for this thread.
555
2.92M
static PerThreadSynch* Synch_GetPerThread() {
556
2.92M
  ThreadIdentity* identity = GetOrCreateCurrentThreadIdentity();
557
2.92M
  return &identity->per_thread_synch;
558
2.92M
}
559
560
0
static PerThreadSynch* Synch_GetPerThreadAnnotated(Mutex* mu) {
561
0
  if (mu) {
562
0
    ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
563
0
  }
564
0
  PerThreadSynch* w = Synch_GetPerThread();
565
0
  if (mu) {
566
0
    ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
567
0
  }
568
0
  return w;
569
0
}
570
571
2.92M
static SynchLocksHeld* Synch_GetAllLocks() {
572
2.92M
  PerThreadSynch* s = Synch_GetPerThread();
573
2.92M
  if (s->all_locks == nullptr) {
574
2
    s->all_locks = LocksHeldAlloc();  // Freed by ReclaimThreadIdentity.
575
2
  }
576
2.92M
  return s->all_locks;
577
2.92M
}
578
579
// Post on "w"'s associated PerThreadSem.
580
0
void Mutex::IncrementSynchSem(Mutex* mu, PerThreadSynch* w) {
581
0
  static_cast<void>(mu);  // Prevent unused param warning in non-TSAN builds.
582
0
  ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
583
  // We miss synchronization around passing PerThreadSynch between threads
584
  // since it happens inside of the Mutex code, so we need to ignore all
585
  // accesses to the object.
586
0
  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
587
0
  PerThreadSem::Post(w->thread_identity());
588
0
  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
589
0
  ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
590
0
}
591
592
// Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
593
0
bool Mutex::DecrementSynchSem(Mutex* mu, PerThreadSynch* w, KernelTimeout t) {
594
0
  static_cast<void>(mu);  // Prevent unused param warning in non-TSAN builds.
595
0
  ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
596
0
  assert(w == Synch_GetPerThread());
597
0
  static_cast<void>(w);
598
0
  bool res = PerThreadSem::Wait(t);
599
0
  ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
600
0
  return res;
601
0
}
602
603
// We're in a fatal signal handler that hopes to use Mutex and to get
604
// lucky by not deadlocking.  We try to improve its chances of success
605
// by effectively disabling some of the consistency checks.  This will
606
// prevent certain ABSL_RAW_CHECK() statements from being triggered when
607
// re-rentry is detected.  The ABSL_RAW_CHECK() statements are those in the
608
// Mutex code checking that the "waitp" field has not been reused.
609
0
void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
610
  // Fix the per-thread state only if it exists.
611
0
  ThreadIdentity* identity = CurrentThreadIdentityIfPresent();
612
0
  if (identity != nullptr) {
613
0
    identity->per_thread_synch.suppress_fatal_errors = true;
614
0
  }
615
  // Don't do deadlock detection when we are already failing.
616
0
  synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,
617
0
                                 std::memory_order_release);
618
0
}
619
620
// --------------------------Mutexes
621
622
// In the layout below, the msb of the bottom byte is currently unused.  Also,
623
// the following constraints were considered in choosing the layout:
624
//  o Both the debug allocator's "uninitialized" and "freed" patterns (0xab and
625
//    0xcd) are illegal: reader and writer lock both held.
626
//  o kMuWriter and kMuEvent should exceed kMuDesig and kMuWait, to enable the
627
//    bit-twiddling trick in Mutex::Unlock().
628
//  o kMuWriter / kMuReader == kMuWrWait / kMuWait,
629
//    to enable the bit-twiddling trick in CheckForMutexCorruption().
630
static const intptr_t kMuReader = 0x0001L;  // a reader holds the lock
631
// There's a designated waker.
632
// INVARIANT1:  there's a thread that was blocked on the mutex, is
633
// no longer, yet has not yet acquired the mutex.  If there's a
634
// designated waker, all threads can avoid taking the slow path in
635
// unlock because the designated waker will subsequently acquire
636
// the lock and wake someone.  To maintain INVARIANT1 the bit is
637
// set when a thread is unblocked (INV1a), and threads that were
638
// unblocked reset the bit when they either acquire or re-block (INV1b).
639
static const intptr_t kMuDesig = 0x0002L;
640
static const intptr_t kMuWait = 0x0004L;    // threads are waiting
641
static const intptr_t kMuWriter = 0x0008L;  // a writer holds the lock
642
static const intptr_t kMuEvent = 0x0010L;   // record this mutex's events
643
// Runnable writer is waiting for a reader.
644
// If set, new readers will not lock the mutex to avoid writer starvation.
645
// Note: if a reader has higher priority than the writer, it will still lock
646
// the mutex ahead of the waiting writer, but in a very inefficient manner:
647
// the reader will first queue itself and block, but then the last unlocking
648
// reader will wake it.
649
static const intptr_t kMuWrWait = 0x0020L;
650
static const intptr_t kMuSpin = 0x0040L;  // spinlock protects wait list
651
static const intptr_t kMuLow = 0x00ffL;   // mask all mutex bits
652
static const intptr_t kMuHigh = ~kMuLow;  // mask pointer/reader count
653
654
static_assert((0xab & (kMuWriter | kMuReader)) == (kMuWriter | kMuReader),
655
              "The debug allocator's uninitialized pattern (0xab) must be an "
656
              "invalid mutex state");
657
static_assert((0xcd & (kMuWriter | kMuReader)) == (kMuWriter | kMuReader),
658
              "The debug allocator's freed pattern (0xcd) must be an invalid "
659
              "mutex state");
660
661
// Hack to make constant values available to gdb pretty printer
662
enum {
663
  kGdbMuSpin = kMuSpin,
664
  kGdbMuEvent = kMuEvent,
665
  kGdbMuWait = kMuWait,
666
  kGdbMuWriter = kMuWriter,
667
  kGdbMuDesig = kMuDesig,
668
  kGdbMuWrWait = kMuWrWait,
669
  kGdbMuReader = kMuReader,
670
  kGdbMuLow = kMuLow,
671
};
672
673
// kMuWrWait implies kMuWait.
674
// kMuReader and kMuWriter are mutually exclusive.
675
// If kMuReader is zero, there are no readers.
676
// Otherwise, if kMuWait is zero, the high order bits contain a count of the
677
// number of readers.  Otherwise, the reader count is held in
678
// PerThreadSynch::readers of the most recently queued waiter, again in the
679
// bits above kMuLow.
680
static const intptr_t kMuOne = 0x0100;  // a count of one reader
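
As a worked example of the encoding above (ignoring the other flag bits): with no waiters and three readers holding the lock, the mutex word is 3 * kMuOne | kMuReader = 0x0301; each reader release subtracts kMuOne and, roughly speaking, the final release also clears kMuReader.
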
681
682
// flags passed to Enqueue and LockSlow{,WithTimeout,Loop}
683
static const int kMuHasBlocked = 0x01;  // already blocked (MUST == 1)
684
static const int kMuIsCond = 0x02;      // conditional waiter (CV or Condition)
685
static const int kMuIsFer = 0x04;       // wait morphing from a CondVar
686
687
static_assert(PerThreadSynch::kAlignment > kMuLow,
688
              "PerThreadSynch::kAlignment must be greater than kMuLow");
689
690
// This struct contains various bitmasks to be used in
691
// acquiring and releasing a mutex in a particular mode.
692
struct MuHowS {
693
  // if all the bits in fast_need_zero are zero, the lock can be acquired by
694
  // adding fast_add and ORing in fast_or.  The bit kMuDesig should be reset iff
695
  // this is the designated waker.
696
  intptr_t fast_need_zero;
697
  intptr_t fast_or;
698
  intptr_t fast_add;
699
700
  intptr_t slow_need_zero;  // fast_need_zero with events (e.g. logging)
701
702
  intptr_t slow_inc_need_zero;  // if all the bits in slow_inc_need_zero are
703
                                // zero, a reader can acquire a read share by
704
                                // setting the reader bit and incrementing
705
                                // the reader count (in last waiter since
706
                                // we're now slow-path).  kMuWrWait may
707
                                // be ignored if we already waited once.
708
};
709
710
static const MuHowS kSharedS = {
711
    // shared or read lock
712
    kMuWriter | kMuWait | kMuEvent,   // fast_need_zero
713
    kMuReader,                        // fast_or
714
    kMuOne,                           // fast_add
715
    kMuWriter | kMuWait,              // slow_need_zero
716
    kMuSpin | kMuWriter | kMuWrWait,  // slow_inc_need_zero
717
};
718
static const MuHowS kExclusiveS = {
719
    // exclusive or write lock
720
    kMuWriter | kMuReader | kMuEvent,  // fast_need_zero
721
    kMuWriter,                         // fast_or
722
    0,                                 // fast_add
723
    kMuWriter | kMuReader,             // slow_need_zero
724
    ~static_cast<intptr_t>(0),         // slow_inc_need_zero
725
};
726
static const Mutex::MuHow kShared = &kSharedS;        // shared lock
727
static const Mutex::MuHow kExclusive = &kExclusiveS;  // exclusive lock
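
A sketch of the fast path these masks are designed for; TryFastAcquire is a hypothetical helper (the real acquisition code appears later in the file). If none of the fast_need_zero bits are set, a single CAS that ORs in fast_or and adds fast_add takes the lock in the requested mode:

static bool TryFastAcquire(std::atomic<intptr_t>& word, Mutex::MuHow how) {
  intptr_t v = word.load(std::memory_order_relaxed);
  // For kShared this adds one reader (kMuOne) and sets kMuReader;
  // for kExclusive it just sets kMuWriter.
  return (v & how->fast_need_zero) == 0 &&
         word.compare_exchange_strong(v, (how->fast_or | v) + how->fast_add,
                                      std::memory_order_acquire,
                                      std::memory_order_relaxed);
}
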
728
729
#ifdef NDEBUG
730
static constexpr bool kDebugMode = false;
731
#else
732
static constexpr bool kDebugMode = true;
733
#endif
734
735
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
736
static unsigned TsanFlags(Mutex::MuHow how) {
737
  return how == kShared ? __tsan_mutex_read_lock : 0;
738
}
739
#endif
740
741
#if defined(__APPLE__) || defined(ABSL_BUILD_DLL)
742
// When building a DLL, symbol export lists may reference the destructor
743
// and want it to be an exported symbol rather than an inline function.
744
// Some Apple builds also produce dynamic libraries but don't say so explicitly.
745
Mutex::~Mutex() { Dtor(); }
746
#endif
747
748
#if !defined(NDEBUG) || defined(ABSL_HAVE_THREAD_SANITIZER)
749
0
void Mutex::Dtor() {
750
0
  if (kDebugMode) {
751
0
    this->ForgetDeadlockInfo();
752
0
  }
753
0
  ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
754
0
}
755
#endif
756
757
0
void Mutex::EnableDebugLog(const char* name) {
758
  // Need to disable writes here and in EnableInvariantDebugging to prevent
759
  // false race reports on SynchEvent objects. TSan ignores synchronization
760
  // on synch_event_mu in Lock/Unlock/etc methods due to mutex annotations,
761
  // but it sees a few accesses to SynchEvent in EvalConditionAnnotated.
762
  // If we don't ignore accesses here, it can result in false races
763
  // between EvalConditionAnnotated and SynchEvent reuse in EnsureSynchEvent.
764
0
  ABSL_ANNOTATE_IGNORE_WRITES_BEGIN();
765
0
  SynchEvent* e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
766
0
  e->log = true;
767
0
  UnrefSynchEvent(e);
768
  // This prevents "error: undefined symbol: absl::Mutex::~Mutex()"
769
  // in a release build (NDEBUG defined) when a test does "#undef NDEBUG"
770
  // to use assert macro. In such case, the test does not get the dtor
771
  // definition because it's supposed to be outline when NDEBUG is not defined,
772
  // and this source file does not define one either because NDEBUG is defined.
773
  // Since it's not possible to take address of a destructor, we move the
774
  // actual destructor code into the separate Dtor function and force the
775
  // compiler to emit this function even if it's inline by taking its address.
776
0
  ABSL_ATTRIBUTE_UNUSED volatile auto dtor = &Mutex::Dtor;
777
0
  ABSL_ANNOTATE_IGNORE_WRITES_END();
778
0
}
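
A minimal usage sketch (the mutex and its name are illustrative):

absl::Mutex cache_mu;                  // hypothetical mutex
cache_mu.EnableDebugLog("cache_mu");   // PostSynchEvent() now logs it by name
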
779
780
0
void EnableMutexInvariantDebugging(bool enabled) {
781
0
  synch_check_invariants.store(enabled, std::memory_order_release);
782
0
}
783
784
0
void Mutex::EnableInvariantDebugging(void (*invariant)(void*), void* arg) {
785
0
  ABSL_ANNOTATE_IGNORE_WRITES_BEGIN();
786
0
  if (synch_check_invariants.load(std::memory_order_acquire) &&
787
0
      invariant != nullptr) {
788
0
    SynchEvent* e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
789
0
    e->invariant = invariant;
790
0
    e->arg = arg;
791
0
    UnrefSynchEvent(e);
792
0
  }
793
0
  ABSL_ANNOTATE_IGNORE_WRITES_END();
794
0
}
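
A minimal usage sketch; Counter, CheckNonNegative(), and InstallInvariant() are illustrative, and the global opt-in above must be enabled for the per-mutex call to register anything:

struct Counter {
  absl::Mutex mu;
  int64_t value = 0;
};

static void CheckNonNegative(void* arg) {
  // Runs with the mutex held whenever events fire on it.
  ABSL_RAW_CHECK(static_cast<Counter*>(arg)->value >= 0,
                 "counter went negative");
}

void InstallInvariant(Counter* c) {
  absl::EnableMutexInvariantDebugging(true);
  c->mu.EnableInvariantDebugging(&CheckNonNegative, c);
}
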
795
796
0
void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
797
0
  synch_deadlock_detection.store(mode, std::memory_order_release);
798
0
}
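
A minimal usage sketch, using a value that appears earlier in this file:

// E.g. silence deadlock reporting in a process that is already failing.
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kIgnore);
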
799
800
// Return true iff threads x and y are part of the same equivalence
801
// class of waiters. An equivalence class is defined as the set of
802
// waiters with the same condition, type of lock, and thread priority.
803
//
804
// Requires that x and y be waiting on the same Mutex queue.
805
0
static bool MuEquivalentWaiter(PerThreadSynch* x, PerThreadSynch* y) {
806
0
  return x->waitp->how == y->waitp->how && x->priority == y->priority &&
807
0
         Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
808
0
}
809
810
// Given the contents of a mutex word containing a PerThreadSynch pointer,
811
// return the pointer.
812
0
static inline PerThreadSynch* GetPerThreadSynch(intptr_t v) {
813
0
  return reinterpret_cast<PerThreadSynch*>(v & kMuHigh);
814
0
}
815
816
// The next several routines maintain the per-thread next and skip fields
817
// used in the Mutex waiter queue.
818
// The queue is a circular singly-linked list, of which the "head" is the
819
// last element, and head->next is the first element.
820
// The skip field has the invariant:
821
//   For thread x, x->skip is one of:
822
//     - invalid (iff x is not in a Mutex wait queue),
823
//     - null, or
824
//     - a pointer to a distinct thread waiting later in the same Mutex queue
825
//       such that all threads in [x, x->skip] have the same condition, priority
826
//       and lock type (MuEquivalentWaiter() is true for all pairs in [x,
827
//       x->skip]).
828
// In addition, if x->skip is valid, (x->may_skip || x->skip == null) holds.
829
//
830
// By the spec of MuEquivalentWaiter(), it is not necessary when removing the
831
// first runnable thread y from the front of a Mutex queue to adjust the skip
832
// field of another thread x because if x->skip==y, x->skip must (have) become
833
// invalid before y is removed.  The function TryRemove can remove a specified
834
// thread from an arbitrary position in the queue whether runnable or not, so
835
// it fixes up skip fields that would otherwise be left dangling.
836
// The statement
837
//     if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
838
// maintains the invariant provided x is not the last waiter in a Mutex queue
839
// The statement
840
//          if (x->skip != null) { x->skip = x->skip->skip; }
841
// maintains the invariant.
842
843
// Returns the last thread y in a mutex waiter queue such that all threads in
844
// [x, y] inclusive share the same condition.  Sets skip fields of some threads
845
// in that range to optimize future evaluation of Skip() on x values in
846
// the range.  Requires thread x is in a mutex waiter queue.
847
// The locking is unusual.  Skip() is called under these conditions:
848
//   - spinlock is held in call from Enqueue(), with maybe_unlocking == false
849
//   - Mutex is held in call from UnlockSlow() by last unlocker, with
850
//     maybe_unlocking == true
851
//   - both Mutex and spinlock are held in call from DequeueAllWakeable() (from
852
//     UnlockSlow()) and TryRemove()
853
// These cases are mutually exclusive, so Skip() never runs concurrently
854
// with itself on the same Mutex.   The skip chain is used in these other places
855
// that cannot occur concurrently:
856
//   - FixSkip() (from TryRemove()) (spinlock and Mutex are held)
857
//   - Dequeue() (with spinlock and Mutex held)
858
//   - UnlockSlow() (with spinlock and Mutex held)
859
// A more complex case is Enqueue()
860
//   - Enqueue() (with spinlock held and maybe_unlocking == false)
861
//               This is the first case in which Skip is called, above.
862
//   - Enqueue() (without spinlock held; but queue is empty and being freshly
863
//                formed)
864
//   - Enqueue() (with spinlock held and maybe_unlocking == true)
865
// The first case has mutual exclusion, and the second has isolation through
866
// working on an otherwise unreachable data structure.
867
// In the last case, Enqueue() is required to change no skip/next pointers
868
// except those in the added node and the former "head" node.  This implies
869
// that the new node is added after head, and so must be the new head or the
870
// new front of the queue.
871
0
static PerThreadSynch* Skip(PerThreadSynch* x) {
872
0
  PerThreadSynch* x0 = nullptr;
873
0
  PerThreadSynch* x1 = x;
874
0
  PerThreadSynch* x2 = x->skip;
875
0
  if (x2 != nullptr) {
876
    // Each iteration attempts to advance the sequence (x0,x1,x2) to the next one
877
    // such that   x1 == x0->skip && x2 == x1->skip
878
0
    while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
879
0
      x0->skip = x2;  // short-circuit skip from x0 to x2
880
0
    }
881
0
    x->skip = x1;  // short-circuit skip from x to result
882
0
  }
883
0
  return x1;
884
0
}
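
As a worked example: with four equivalent waiters a, b, c, d where a->skip == b, b->skip == c, and c->skip == d, Skip(a) returns d and leaves a->skip == d and b->skip == d, so later skips from a reach d in one hop.
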
885
886
// "ancestor" appears before "to_be_removed" in the same Mutex waiter queue.
887
// The latter is going to be removed out of order, because of a timeout.
888
// Check whether "ancestor" has a skip field pointing to "to_be_removed",
889
// and fix it if it does.
890
0
static void FixSkip(PerThreadSynch* ancestor, PerThreadSynch* to_be_removed) {
891
0
  if (ancestor->skip == to_be_removed) {  // ancestor->skip left dangling
892
0
    if (to_be_removed->skip != nullptr) {
893
0
      ancestor->skip = to_be_removed->skip;  // can skip past to_be_removed
894
0
    } else if (ancestor->next != to_be_removed) {  // they are not adjacent
895
0
      ancestor->skip = ancestor->next;             // can skip one past ancestor
896
0
    } else {
897
0
      ancestor->skip = nullptr;  // can't skip at all
898
0
    }
899
0
  }
900
0
}
901
902
static void CondVarEnqueue(SynchWaitParams* waitp);
903
904
// Enqueue thread "waitp->thread" on a waiter queue.
905
// Called with mutex spinlock held if head != nullptr
906
// If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is
907
// idempotent; it alters no state associated with the existing (empty)
908
// queue.
909
//
910
// If waitp->cv_word == nullptr, queue the thread at either the front or
911
// the end (according to its priority) of the circular mutex waiter queue whose
912
// head is "head", and return the new head.  mu is the previous mutex state,
913
// which contains the reader count (perhaps adjusted for the operation in
914
// progress) if the list was empty and a read lock held, and the holder hint if
915
// the list was empty and a write lock held.  (flags & kMuIsCond) indicates
916
// whether this thread was transferred from a CondVar or is waiting for a
917
// non-trivial condition.  In this case, Enqueue() never returns nullptr
918
//
919
// If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is
920
// returned. This mechanism is used by CondVar to queue a thread on the
921
// condition variable queue instead of the mutex queue in implementing Wait().
922
// In this case, Enqueue() can return nullptr (if head==nullptr).
923
static PerThreadSynch* Enqueue(PerThreadSynch* head, SynchWaitParams* waitp,
924
0
                               intptr_t mu, int flags) {
925
  // If we have been given a cv_word, call CondVarEnqueue() and return
926
  // the previous head of the Mutex waiter queue.
927
0
  if (waitp->cv_word != nullptr) {
928
0
    CondVarEnqueue(waitp);
929
0
    return head;
930
0
  }
931
932
0
  PerThreadSynch* s = waitp->thread;
933
0
  ABSL_RAW_CHECK(
934
0
      s->waitp == nullptr ||    // normal case
935
0
          s->waitp == waitp ||  // Fer()---transfer from condition variable
936
0
          s->suppress_fatal_errors,
937
0
      "detected illegal recursion into Mutex code");
938
0
  s->waitp = waitp;
939
0
  s->skip = nullptr;   // maintain skip invariant (see above)
940
0
  s->may_skip = true;  // always true on entering queue
941
0
  s->wake = false;     // not being woken
942
0
  s->cond_waiter = ((flags & kMuIsCond) != 0);
943
0
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
944
0
  if ((flags & kMuIsFer) == 0) {
945
0
    assert(s == Synch_GetPerThread());
946
0
    int64_t now_cycles = CycleClock::Now();
947
0
    if (s->next_priority_read_cycles < now_cycles) {
948
      // Every so often, update our idea of the thread's priority.
949
      // pthread_getschedparam() is 5% of the block/wakeup time;
950
      // CycleClock::Now() is 0.5%.
951
0
      int policy;
952
0
      struct sched_param param;
953
0
      const int err = pthread_getschedparam(pthread_self(), &policy, &param);
954
0
      if (err != 0) {
955
0
        ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
956
0
      } else {
957
0
        s->priority = param.sched_priority;
958
0
        s->next_priority_read_cycles =
959
0
            now_cycles + static_cast<int64_t>(CycleClock::Frequency());
960
0
      }
961
0
    }
962
0
  }
963
0
#endif
964
0
  if (head == nullptr) {         // s is the only waiter
965
0
    s->next = s;                 // it's the only entry in the cycle
966
0
    s->readers = mu;             // reader count is from mu word
967
0
    s->maybe_unlocking = false;  // no one is searching an empty list
968
0
    head = s;                    // s is new head
969
0
  } else {
970
0
    PerThreadSynch* enqueue_after = nullptr;  // we'll put s after this element
971
0
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
972
0
    if (s->priority > head->priority) {  // s's priority is above head's
973
      // try to put s in priority-fifo order, or failing that at the front.
974
0
      if (!head->maybe_unlocking) {
975
        // No unlocker can be scanning the queue, so we can insert into the
976
        // middle of the queue.
977
        //
978
        // Within a skip chain, all waiters have the same priority, so we can
979
        // skip forward through the chains until we find one with a lower
980
        // priority than the waiter to be enqueued.
981
0
        PerThreadSynch* advance_to = head;  // next value of enqueue_after
982
0
        do {
983
0
          enqueue_after = advance_to;
984
          // (side-effect: optimizes skip chain)
985
0
          advance_to = Skip(enqueue_after->next);
986
0
        } while (s->priority <= advance_to->priority);
987
        // termination guaranteed because s->priority > head->priority
988
        // and head is the end of a skip chain
989
0
      } else if (waitp->how == kExclusive && waitp->cond == nullptr) {
990
        // An unlocker could be scanning the queue, but we know it will recheck
991
        // the queue front for writers that have no condition, which is what s
992
        // is, so an insert at front is safe.
993
0
        enqueue_after = head;  // add after head, at front
994
0
      }
995
0
    }
996
0
#endif
997
0
    if (enqueue_after != nullptr) {
998
0
      s->next = enqueue_after->next;
999
0
      enqueue_after->next = s;
1000
1001
      // enqueue_after can be: head, Skip(...), or cur.
1002
      // The first two imply enqueue_after->skip == nullptr, and
1003
      // the last is used only if MuEquivalentWaiter(s, cur).
1004
      // We require this because clearing enqueue_after->skip
1005
      // is impossible; enqueue_after's predecessors might also
1006
      // incorrectly skip over s if we were to allow other
1007
      // insertion points.
1008
0
      ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
1009
0
                         MuEquivalentWaiter(enqueue_after, s),
1010
0
                     "Mutex Enqueue failure");
1011
1012
0
      if (enqueue_after != head && enqueue_after->may_skip &&
1013
0
          MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
1014
        // enqueue_after can skip to its new successor, s
1015
0
        enqueue_after->skip = enqueue_after->next;
1016
0
      }
1017
0
      if (MuEquivalentWaiter(s, s->next)) {  // s->may_skip is known to be true
1018
0
        s->skip = s->next;                   // s may skip to its successor
1019
0
      }
1020
0
    } else if ((flags & kMuHasBlocked) &&
1021
0
               (s->priority >= head->next->priority) &&
1022
0
               (!head->maybe_unlocking ||
1023
0
                (waitp->how == kExclusive &&
1024
0
                 Condition::GuaranteedEqual(waitp->cond, nullptr)))) {
1025
      // This thread has already waited, then was woken, then failed to acquire
1026
      // the mutex and now tries to requeue. Try to requeue it at head,
1027
      // otherwise it can suffer bad latency (wait whole queue several times).
1028
      // However, we need to be conservative. First, we need to ensure that we
1029
      // respect priorities. Then, we need to be careful to not break wait
1030
      // queue invariants: we require either that unlocker is not scanning
1031
      // the queue or that the current thread is a writer with no condition
1032
      // (unlocker will recheck the queue for such waiters).
1033
0
      s->next = head->next;
1034
0
      head->next = s;
1035
0
      if (MuEquivalentWaiter(s, s->next)) {  // s->may_skip is known to be true
1036
0
        s->skip = s->next;                   // s may skip to its successor
1037
0
      }
1038
0
    } else {  // enqueue not done any other way, so
1039
              // we're inserting s at the back
1040
      // s will become new head; copy data from head into it
1041
0
      s->next = head->next;  // add s after head
1042
0
      head->next = s;
1043
0
      s->readers = head->readers;  // reader count is from previous head
1044
0
      s->maybe_unlocking = head->maybe_unlocking;  // same for unlock hint
1045
0
      if (head->may_skip && MuEquivalentWaiter(head, s)) {
1046
        // head now has successor; may skip
1047
0
        head->skip = s;
1048
0
      }
1049
0
      head = s;  // s is new head
1050
0
    }
1051
0
  }
1052
0
  s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
1053
0
  return head;
1054
0
}
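
As an illustration of the resulting queue shape: if three equivalent waiters A, B, and C are enqueued in that order onto an empty queue, each back-insertion makes the newcomer the new head, leaving head == C with C->next == A, A->next == B, and B->next == C; the oldest waiter, head->next == A, is the front of the queue.
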
1055
1056
// Dequeue the successor pw->next of thread pw from the Mutex waiter queue
1057
// whose last element is head.  The new head element is returned, or null
1058
// if the list is made empty.
1059
// Dequeue is called with both spinlock and Mutex held.
1060
0
static PerThreadSynch* Dequeue(PerThreadSynch* head, PerThreadSynch* pw) {
1061
0
  PerThreadSynch* w = pw->next;
1062
0
  pw->next = w->next;                 // snip w out of list
1063
0
  if (head == w) {                    // we removed the head
1064
0
    head = (pw == w) ? nullptr : pw;  // either emptied list, or pw is new head
1065
0
  } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
1066
    // pw can skip to its new successor
1067
0
    if (pw->next->skip !=
1068
0
        nullptr) {  // either skip to its successor's skip target
1069
0
      pw->skip = pw->next->skip;
1070
0
    } else {  // or to pw's successor
1071
0
      pw->skip = pw->next;
1072
0
    }
1073
0
  }
1074
0
  return head;
1075
0
}
1076
1077
// Traverse the elements [ pw->next, h] of the circular list whose last element
1078
// is head.
1079
// Remove all elements with wake==true and place them in the
1080
// singly-linked list wake_list in the order found.   Assumes that
1081
// there is only one such element if the element has how == kExclusive.
1082
// Return the new head.
1083
static PerThreadSynch* DequeueAllWakeable(PerThreadSynch* head,
1084
                                          PerThreadSynch* pw,
1085
0
                                          PerThreadSynch** wake_tail) {
1086
0
  PerThreadSynch* orig_h = head;
1087
0
  PerThreadSynch* w = pw->next;
1088
0
  bool skipped = false;
1089
0
  do {
1090
0
    if (w->wake) {  // remove this element
1091
0
      ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
1092
      // we're removing pw's successor so either pw->skip is zero or we should
1093
      // already have removed pw since if pw->skip!=null, pw has the same
1094
      // condition as w.
1095
0
      head = Dequeue(head, pw);
1096
0
      w->next = *wake_tail;               // keep list terminated
1097
0
      *wake_tail = w;                     // add w to wake_list;
1098
0
      wake_tail = &w->next;               // next addition to end
1099
0
      if (w->waitp->how == kExclusive) {  // wake at most 1 writer
1100
0
        break;
1101
0
      }
1102
0
    } else {         // not waking this one; skip
1103
0
      pw = Skip(w);  // skip as much as possible
1104
0
      skipped = true;
1105
0
    }
1106
0
    w = pw->next;
1107
    // We want to stop processing after we've considered the original head,
1108
    // orig_h.  We can't test for w==orig_h in the loop because w may skip over
1109
    // it; we are guaranteed only that w's predecessor will not skip over
1110
    // orig_h.  When we've considered orig_h, either we've processed it and
1111
    // removed it (so orig_h != head), or we considered it and skipped it (so
1112
    // skipped==true && pw == head because skipping from head always skips by
1113
    // just one, leaving pw pointing at head).  So we want to
1114
    // continue the loop with the negation of that expression.
1115
0
  } while (orig_h == head && (pw != head || !skipped));
1116
0
  return head;
1117
0
}
1118
1119
// Try to remove thread s from the list of waiters on this mutex.
1120
// Does nothing if s is not on the waiter list.
1121
0
void Mutex::TryRemove(PerThreadSynch* s) {
1122
0
  SchedulingGuard::ScopedDisable disable_rescheduling;
1123
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
1124
  // acquire spinlock & lock
1125
0
  if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
1126
0
      mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
1127
0
                                  std::memory_order_acquire,
1128
0
                                  std::memory_order_relaxed)) {
1129
0
    PerThreadSynch* h = GetPerThreadSynch(v);
1130
0
    if (h != nullptr) {
1131
0
      PerThreadSynch* pw = h;  // pw is w's predecessor
1132
0
      PerThreadSynch* w;
1133
0
      if ((w = pw->next) != s) {  // search for thread,
1134
0
        do {                      // processing at least one element
1135
          // If the current element isn't equivalent to the waiter to be
1136
          // removed, we can skip the entire chain.
1137
0
          if (!MuEquivalentWaiter(s, w)) {
1138
0
            pw = Skip(w);  // so skip all that won't match
1139
            // we don't have to worry about dangling skip fields
1140
            // in the threads we skipped; none can point to s
1141
            // because they are in a different equivalence class.
1142
0
          } else {          // seeking same condition
1143
0
            FixSkip(w, s);  // fix up any skip pointer from w to s
1144
0
            pw = w;
1145
0
          }
1146
          // don't search further if we found the thread, or we're about to
1147
          // process the first thread again.
1148
0
        } while ((w = pw->next) != s && pw != h);
1149
0
      }
1150
0
      if (w == s) {  // found thread; remove it
1151
        // pw->skip may be non-zero here; the loop above ensured that
1152
        // no ancestor of s can skip to s, so removal is safe anyway.
1153
0
        h = Dequeue(h, pw);
1154
0
        s->next = nullptr;
1155
0
        s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
1156
0
      }
1157
0
    }
1158
0
    intptr_t nv;
1159
0
    do {  // release spinlock and lock
1160
0
      v = mu_.load(std::memory_order_relaxed);
1161
0
      nv = v & (kMuDesig | kMuEvent);
1162
0
      if (h != nullptr) {
1163
0
        nv |= kMuWait | reinterpret_cast<intptr_t>(h);
1164
0
        h->readers = 0;              // we hold writer lock
1165
0
        h->maybe_unlocking = false;  // finished unlocking
1166
0
      }
1167
0
    } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
1168
0
                                        std::memory_order_relaxed));
1169
0
  }
1170
0
}
1171
1172
// Wait until thread "s", which must be the current thread, is removed from the
1173
// this mutex's waiter queue.  If "s->waitp->timeout" has a timeout, wake up
1174
// if the wait extends past the absolute time specified, even if "s" is still
1175
// on the mutex queue.  In this case, remove "s" from the queue before
1176
// returning.
1177
0
void Mutex::Block(PerThreadSynch* s) {
1178
0
  while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
1179
0
    if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
1180
      // After a timeout, we go into a spin loop until we remove ourselves
1181
      // from the queue, or someone else removes us.  We can't be sure to be
1182
      // able to remove ourselves in a single lock acquisition because this
1183
      // mutex may be held, and the holder has the right to read the centre
1184
      // of the waiter queue without holding the spinlock.
1185
0
      this->TryRemove(s);
1186
0
      int c = 0;
1187
0
      while (s->next != nullptr) {
1188
0
        c = synchronization_internal::MutexDelay(c, GENTLE);
1189
0
        this->TryRemove(s);
1190
0
      }
1191
0
      if (kDebugMode) {
1192
        // This ensures that we test the case that TryRemove() is called when s
1193
        // is not on the queue.
1194
0
        this->TryRemove(s);
1195
0
      }
1196
0
      s->waitp->timeout = KernelTimeout::Never();  // timeout is satisfied
1197
0
      s->waitp->cond = nullptr;  // condition no longer relevant for wakeups
1198
0
    }
1199
0
  }
1200
0
  ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors,
1201
0
                 "detected illegal recursion in Mutex code");
1202
0
  s->waitp = nullptr;
1203
0
}
1204
1205
// Wake thread w, and return the next thread in the list.
1206
0
PerThreadSynch* Mutex::Wakeup(PerThreadSynch* w) {
1207
0
  PerThreadSynch* next = w->next;
1208
0
  w->next = nullptr;
1209
0
  w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
1210
0
  IncrementSynchSem(this, w);
1211
1212
0
  return next;
1213
0
}
1214
1215
static GraphId GetGraphIdLocked(Mutex* mu)
1216
1.95M
    ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
1217
1.95M
  if (!deadlock_graph) {  // (re)create the deadlock graph.
1218
2
    deadlock_graph =
1219
2
        new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))
1220
2
            GraphCycles;
1221
2
  }
1222
1.95M
  return deadlock_graph->GetId(mu);
1223
1.95M
}
1224
1225
976k
static GraphId GetGraphId(Mutex* mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
1226
976k
  base_internal::SpinLockHolder l(deadlock_graph_mu);
1227
976k
  GraphId id = GetGraphIdLocked(mu);
1228
976k
  return id;
1229
976k
}
1230
1231
// Record a lock acquisition.  This is used in debug mode for deadlock
1232
// detection.  The held_locks pointer points to the relevant data
1233
// structure for each case.
1234
976k
static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
1235
976k
  int n = held_locks->n;
1236
976k
  int i = 0;
1237
976k
  while (i != n && held_locks->locks[i].id != id) {
1238
0
    i++;
1239
0
  }
1240
976k
  if (i == n) {
1241
976k
    if (n == ABSL_ARRAYSIZE(held_locks->locks)) {
1242
0
      held_locks->overflow = true;  // lost some data
1243
976k
    } else {                        // we have room for lock
1244
976k
      held_locks->locks[i].mu = mu;
1245
976k
      held_locks->locks[i].count = 1;
1246
976k
      held_locks->locks[i].id = id;
1247
976k
      held_locks->n = n + 1;
1248
976k
    }
1249
976k
  } else {
1250
0
    held_locks->locks[i].count++;
1251
0
  }
1252
976k
}
1253
1254
// Record a lock release.  Each call to LockEnter(mu, id, x) should be
1255
// eventually followed by a call to LockLeave(mu, id, x) by the same thread.
1256
// It does not process the event if it is not needed, i.e. when deadlock
1257
// detection is disabled.
1258
976k
static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
1259
976k
  int n = held_locks->n;
1260
976k
  int i = 0;
1261
976k
  while (i != n && held_locks->locks[i].id != id) {
1262
0
    i++;
1263
0
  }
1264
976k
  if (i == n) {
1265
0
    if (!held_locks->overflow) {
1266
      // The deadlock id may have been reassigned after ForgetDeadlockInfo,
1267
      // but in that case mu should still be present.
1268
0
      i = 0;
1269
0
      while (i != n && held_locks->locks[i].mu != mu) {
1270
0
        i++;
1271
0
      }
1272
0
      if (i == n) {  // mu missing means releasing unheld lock
1273
0
        SynchEvent* mu_events = GetSynchEvent(mu);
1274
0
        ABSL_RAW_LOG(FATAL,
1275
0
                     "thread releasing lock it does not hold: %p %s; "
1276
0
                     ,
1277
0
                     static_cast<void*>(mu),
1278
0
                     mu_events == nullptr ? "" : mu_events->name);
1279
0
      }
1280
0
    }
1281
976k
  } else if (held_locks->locks[i].count == 1) {
1282
976k
    held_locks->n = n - 1;
1283
976k
    held_locks->locks[i] = held_locks->locks[n - 1];
1284
976k
    held_locks->locks[n - 1].id = InvalidGraphId();
1285
976k
    held_locks->locks[n - 1].mu =
1286
976k
        nullptr;  // clear mu to please the leak detector.
1287
976k
  } else {
1288
0
    assert(held_locks->locks[i].count > 0);
1289
0
    held_locks->locks[i].count--;
1290
0
  }
1291
976k
}
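// ---------------------------------------------------------------------------
// Editor's sketch (not part of mutex.cc or the coverage data): LockEnter() and
// LockLeave() above maintain a small fixed-size per-thread table of
// (mutex, count) entries, setting an overflow flag when the table fills up and
// removing entries by swapping with the last slot.  A minimal standalone
// version of that bookkeeping, with illustrative types and a hypothetical
// capacity (the real SynchLocksHeld layout is defined elsewhere in this file):
struct LockEntry { const void* mu = nullptr; int count = 0; };
struct HeldLocks {
  static constexpr int kCapacity = 20;  // illustrative, not the real size
  LockEntry locks[kCapacity];
  int n = 0;
  bool overflow = false;
};

// Mirror of LockEnter(): find an existing entry for mu or append a new one.
inline void RecordAcquire(HeldLocks* held, const void* mu) {
  int i = 0;
  while (i != held->n && held->locks[i].mu != mu) i++;
  if (i == held->n) {
    if (held->n == HeldLocks::kCapacity) {
      held->overflow = true;  // lost some data
    } else {
      held->locks[i] = {mu, 1};
      held->n++;
    }
  } else {
    held->locks[i].count++;  // re-entrant acquisition
  }
}

// Mirror of LockLeave(): decrement, or remove by swapping with the last slot.
inline void RecordRelease(HeldLocks* held, const void* mu) {
  int i = 0;
  while (i != held->n && held->locks[i].mu != mu) i++;
  if (i == held->n) return;  // unheld: the real code reports this as FATAL
  if (held->locks[i].count == 1) {
    held->locks[i] = held->locks[held->n - 1];
    held->locks[held->n - 1] = {};
    held->n--;
  } else {
    held->locks[i].count--;
  }
}
// ---------------------------------------------------------------------------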
1292
1293
// Call LockEnter() if in debug mode and deadlock detection is enabled.
1294
0
static inline void DebugOnlyLockEnter(Mutex* mu) {
1295
0
  if (kDebugMode) {
1296
0
    if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1297
0
        OnDeadlockCycle::kIgnore) {
1298
0
      LockEnter(mu, GetGraphId(mu), Synch_GetAllLocks());
1299
0
    }
1300
0
  }
1301
0
}
1302
1303
// Call LockEnter() if in debug mode and deadlock detection is enabled.
1304
976k
static inline void DebugOnlyLockEnter(Mutex* mu, GraphId id) {
1305
976k
  if (kDebugMode) {
1306
976k
    if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1307
976k
        OnDeadlockCycle::kIgnore) {
1308
976k
      LockEnter(mu, id, Synch_GetAllLocks());
1309
976k
    }
1310
976k
  }
1311
976k
}
1312
1313
// Call LockLeave() if in debug mode and deadlock detection is enabled.
1314
976k
static inline void DebugOnlyLockLeave(Mutex* mu) {
1315
976k
  if (kDebugMode) {
1316
976k
    if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1317
976k
        OnDeadlockCycle::kIgnore) {
1318
976k
      LockLeave(mu, GetGraphId(mu), Synch_GetAllLocks());
1319
976k
    }
1320
976k
  }
1321
976k
}
1322
1323
static char* StackString(void** pcs, int n, char* buf, int maxlen,
1324
0
                         bool symbolize) {
1325
0
  static constexpr int kSymLen = 200;
1326
0
  char sym[kSymLen];
1327
0
  int len = 0;
1328
0
  for (int i = 0; i != n; i++) {
1329
0
    if (len >= maxlen) return buf;
1330
0
    size_t count = static_cast<size_t>(maxlen - len);
1331
0
    if (symbolize) {
1332
0
      if (!absl::Symbolize(pcs[i], sym, kSymLen)) {
1333
0
        sym[0] = '\0';
1334
0
      }
1335
0
      snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? "\n" : ""), pcs[i],
1336
0
               sym);
1337
0
    } else {
1338
0
      snprintf(buf + len, count, " %p", pcs[i]);
1339
0
    }
1340
0
    len += static_cast<int>(strlen(&buf[len]));
1341
0
  }
1342
0
  return buf;
1343
0
}
1344
1345
0
static char* CurrentStackString(char* buf, int maxlen, bool symbolize) {
1346
0
  void* pcs[40];
1347
0
  return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
1348
0
                     maxlen, symbolize);
1349
0
}
1350
1351
namespace {
1352
enum {
1353
  kMaxDeadlockPathLen = 10
1354
};  // maximum length of a deadlock cycle;
1355
    // a path this long would be remarkable
1356
// Buffers required to report a deadlock.
1357
// We do not allocate them on the stack to avoid a large stack frame.
1358
struct DeadlockReportBuffers {
1359
  char buf[6100];
1360
  GraphId path[kMaxDeadlockPathLen];
1361
};
1362
1363
struct ScopedDeadlockReportBuffers {
1364
0
  ScopedDeadlockReportBuffers() {
1365
0
    b = reinterpret_cast<DeadlockReportBuffers*>(
1366
0
        base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
1367
0
  }
1368
0
  ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
1369
  DeadlockReportBuffers* b;
1370
};
1371
1372
// Helper to pass to GraphCycles::UpdateStackTrace.
1373
0
int GetStack(void** stack, int max_depth) {
1374
0
  return absl::GetStackTrace(stack, max_depth, 3);
1375
0
}
1376
}  // anonymous namespace
1377
1378
// Called in debug mode when a thread is about to acquire a lock in a way that
1379
// may block.
1380
976k
static GraphId DeadlockCheck(Mutex* mu) {
1381
976k
  if (synch_deadlock_detection.load(std::memory_order_acquire) ==
1382
976k
      OnDeadlockCycle::kIgnore) {
1383
0
    return InvalidGraphId();
1384
0
  }
1385
1386
976k
  SynchLocksHeld* all_locks = Synch_GetAllLocks();
1387
1388
976k
  absl::base_internal::SpinLockHolder lock(deadlock_graph_mu);
1389
976k
  const GraphId mu_id = GetGraphIdLocked(mu);
1390
1391
976k
  if (all_locks->n == 0) {
1392
    // There are no other locks held. Return now so that we don't need to
1393
    // call GetSynchEvent(). This way we do not record the stack trace
1394
    // for this Mutex. It's ok, since if this Mutex is involved in a deadlock,
1395
    // it can't always be the first lock acquired by a thread.
1396
976k
    return mu_id;
1397
976k
  }
1398
1399
  // We prefer to keep stack traces that show a thread holding and acquiring
1400
  // as many locks as possible.  This increases the chances that a given edge
1401
  // in the acquires-before graph will be represented in the stack traces
1402
  // recorded for the locks.
1403
0
  deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);
1404
1405
  // For each other mutex already held by this thread:
1406
0
  for (int i = 0; i != all_locks->n; i++) {
1407
0
    const GraphId other_node_id = all_locks->locks[i].id;
1408
0
    const Mutex* other =
1409
0
        static_cast<const Mutex*>(deadlock_graph->Ptr(other_node_id));
1410
0
    if (other == nullptr) {
1411
      // Ignore stale lock
1412
0
      continue;
1413
0
    }
1414
1415
    // Add the acquired-before edge to the graph.
1416
0
    if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
1417
0
      ScopedDeadlockReportBuffers scoped_buffers;
1418
0
      DeadlockReportBuffers* b = scoped_buffers.b;
1419
0
      static int number_of_reported_deadlocks = 0;
1420
0
      number_of_reported_deadlocks++;
1421
      // Symbolize only the first 2 deadlock reports to avoid huge slowdowns.
1422
0
      bool symbolize = number_of_reported_deadlocks <= 2;
1423
0
      ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
1424
0
                   CurrentStackString(b->buf, sizeof (b->buf), symbolize));
1425
0
      size_t len = 0;
1426
0
      for (int j = 0; j != all_locks->n; j++) {
1427
0
        void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
1428
0
        if (pr != nullptr) {
1429
0
          snprintf(b->buf + len, sizeof(b->buf) - len, " %p", pr);
1430
0
          len += strlen(&b->buf[len]);
1431
0
        }
1432
0
      }
1433
0
      ABSL_RAW_LOG(ERROR,
1434
0
                   "Acquiring absl::Mutex %p while holding %s; a cycle in the "
1435
0
                   "historical lock ordering graph has been observed",
1436
0
                   static_cast<void*>(mu), b->buf);
1437
0
      ABSL_RAW_LOG(ERROR, "Cycle: ");
1438
0
      int path_len = deadlock_graph->FindPath(mu_id, other_node_id,
1439
0
                                              ABSL_ARRAYSIZE(b->path), b->path);
1440
0
      for (int j = 0; j != path_len && j != ABSL_ARRAYSIZE(b->path); j++) {
1441
0
        GraphId id = b->path[j];
1442
0
        Mutex* path_mu = static_cast<Mutex*>(deadlock_graph->Ptr(id));
1443
0
        if (path_mu == nullptr) continue;
1444
0
        void** stack;
1445
0
        int depth = deadlock_graph->GetStackTrace(id, &stack);
1446
0
        snprintf(b->buf, sizeof(b->buf),
1447
0
                 "mutex@%p stack: ", static_cast<void*>(path_mu));
1448
0
        StackString(stack, depth, b->buf + strlen(b->buf),
1449
0
                    static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
1450
0
                    symbolize);
1451
0
        ABSL_RAW_LOG(ERROR, "%s", b->buf);
1452
0
      }
1453
0
      if (path_len > static_cast<int>(ABSL_ARRAYSIZE(b->path))) {
1454
0
        ABSL_RAW_LOG(ERROR, "(long cycle; list truncated)");
1455
0
      }
1456
0
      if (synch_deadlock_detection.load(std::memory_order_acquire) ==
1457
0
          OnDeadlockCycle::kAbort) {
1458
0
        deadlock_graph_mu.unlock();  // avoid deadlock in fatal sighandler
1459
0
        ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
1460
0
        return mu_id;
1461
0
      }
1462
0
      break;  // report at most one potential deadlock per acquisition
1463
0
    }
1464
0
  }
1465
1466
0
  return mu_id;
1467
0
}
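// ---------------------------------------------------------------------------
// Editor's sketch (not part of mutex.cc or the coverage data): DeadlockCheck()
// records an acquired-before edge from every lock currently held to the lock
// being acquired, and reports a potential deadlock when inserting an edge
// would close a cycle.  A toy illustration of that idea with a plain DFS over
// an adjacency map (a hypothetical stand-in for the real, incremental
// GraphCycles structure):
#include <cstdint>
#include <map>
#include <set>

class LockOrderGraph {
 public:
  // Returns false, and inserts nothing, if adding from->to would close a cycle.
  bool InsertEdge(uintptr_t from, uintptr_t to) {
    if (from == to || Reaches(to, from)) return false;
    edges_[from].insert(to);
    return true;
  }

 private:
  // True if dst is reachable from src; the graph stays acyclic, so DFS halts.
  bool Reaches(uintptr_t src, uintptr_t dst) const {
    if (src == dst) return true;
    auto it = edges_.find(src);
    if (it == edges_.end()) return false;
    for (uintptr_t next : it->second) {
      if (Reaches(next, dst)) return true;
    }
    return false;
  }
  std::map<uintptr_t, std::set<uintptr_t>> edges_;
};

// Usage: acquiring B while holding A inserts the edge A->B.  A later attempt
// to acquire A while holding B fails InsertEdge(B, A), which is the point at
// which the ABSL_RAW_LOG(ERROR, ...) reporting above would kick in.
// ---------------------------------------------------------------------------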
1468
1469
// Invoke DeadlockCheck() iff we're in debug mode and
1470
// deadlock checking has been enabled.
1471
976k
static inline GraphId DebugOnlyDeadlockCheck(Mutex* mu) {
1472
976k
  if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
1473
976k
                        OnDeadlockCycle::kIgnore) {
1474
976k
    return DeadlockCheck(mu);
1475
976k
  } else {
1476
0
    return InvalidGraphId();
1477
0
  }
1478
976k
}
1479
1480
0
void Mutex::ForgetDeadlockInfo() {
1481
0
  if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
1482
0
                        OnDeadlockCycle::kIgnore) {
1483
0
    deadlock_graph_mu.lock();
1484
0
    if (deadlock_graph != nullptr) {
1485
0
      deadlock_graph->RemoveNode(this);
1486
0
    }
1487
0
    deadlock_graph_mu.unlock();
1488
0
  }
1489
0
}
1490
1491
0
void Mutex::AssertNotHeld() const {
1492
  // We have the data to allow this check only if in debug mode and deadlock
1493
  // detection is enabled.
1494
0
  if (kDebugMode &&
1495
0
      (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
1496
0
      synch_deadlock_detection.load(std::memory_order_acquire) !=
1497
0
          OnDeadlockCycle::kIgnore) {
1498
0
    GraphId id = GetGraphId(const_cast<Mutex*>(this));
1499
0
    SynchLocksHeld* locks = Synch_GetAllLocks();
1500
0
    for (int i = 0; i != locks->n; i++) {
1501
0
      if (locks->locks[i].id == id) {
1502
0
        SynchEvent* mu_events = GetSynchEvent(this);
1503
0
        ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
1504
0
                     static_cast<const void*>(this),
1505
0
                     (mu_events == nullptr ? "" : mu_events->name));
1506
0
      }
1507
0
    }
1508
0
  }
1509
0
}
1510
1511
// Attempt to acquire *mu, and return whether successful.  The implementation
1512
// may spin for a short while if the lock cannot be acquired immediately.
1513
0
static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
1514
0
  int c = globals.spinloop_iterations.load(std::memory_order_relaxed);
1515
0
  do {  // do/while somewhat faster on AMD
1516
0
    intptr_t v = mu->load(std::memory_order_relaxed);
1517
0
    if ((v & (kMuReader | kMuEvent)) != 0) {
1518
0
      return false;                       // a reader or tracing -> give up
1519
0
    } else if (((v & kMuWriter) == 0) &&  // no holder -> try to acquire
1520
0
               mu->compare_exchange_strong(v, kMuWriter | v,
1521
0
                                           std::memory_order_acquire,
1522
0
                                           std::memory_order_relaxed)) {
1523
0
      return true;
1524
0
    }
1525
0
  } while (--c > 0);
1526
0
  return false;
1527
0
}
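// ---------------------------------------------------------------------------
// Editor's sketch (not part of mutex.cc or the coverage data):
// TryAcquireWithSpinning() above retries a writer CAS a bounded number of
// times before falling back to the slow path.  The same pattern on a plain
// atomic word, with illustrative flag values (the real kMu* constants are
// defined earlier in this file):
#include <atomic>
#include <cstdint>

constexpr intptr_t kWriterBit = 0x1;  // illustrative, not the real kMuWriter
constexpr intptr_t kReaderBit = 0x2;  // illustrative, not the real kMuReader
constexpr intptr_t kEventBit = 0x4;   // illustrative, not the real kMuEvent

// Spin for at most `iterations` attempts trying to set the writer bit.
inline bool TryAcquireSpin(std::atomic<intptr_t>* word, int iterations) {
  for (int c = iterations; c > 0; --c) {
    intptr_t v = word->load(std::memory_order_relaxed);
    if ((v & (kReaderBit | kEventBit)) != 0) {
      return false;  // readers present or events recorded: give up immediately
    }
    if ((v & kWriterBit) == 0 &&
        word->compare_exchange_strong(v, v | kWriterBit,
                                      std::memory_order_acquire,
                                      std::memory_order_relaxed)) {
      return true;  // acquired exclusively
    }
  }
  return false;  // contended for too long: the caller takes the slow path
}
// ---------------------------------------------------------------------------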
1528
1529
49
void Mutex::lock() {
1530
49
  ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1531
49
  GraphId id = DebugOnlyDeadlockCheck(this);
1532
49
  intptr_t v = mu_.load(std::memory_order_relaxed);
1533
  // try fast acquire, then spin loop
1534
49
  if (ABSL_PREDICT_FALSE((v & (kMuWriter | kMuReader | kMuEvent)) != 0) ||
1535
49
      ABSL_PREDICT_FALSE(!mu_.compare_exchange_strong(
1536
49
          v, kMuWriter | v, std::memory_order_acquire,
1537
49
          std::memory_order_relaxed))) {
1538
    // try spin acquire, then slow loop
1539
0
    if (ABSL_PREDICT_FALSE(!TryAcquireWithSpinning(&this->mu_))) {
1540
0
      this->LockSlow(kExclusive, nullptr, 0);
1541
0
    }
1542
0
  }
1543
49
  DebugOnlyLockEnter(this, id);
1544
49
  ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1545
49
}
1546
1547
976k
void Mutex::lock_shared() {
1548
976k
  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1549
976k
  GraphId id = DebugOnlyDeadlockCheck(this);
1550
976k
  intptr_t v = mu_.load(std::memory_order_relaxed);
1551
976k
  for (;;) {
1552
    // If there are non-readers holding the lock, use the slow loop.
1553
976k
    if (ABSL_PREDICT_FALSE(v & (kMuWriter | kMuWait | kMuEvent)) != 0) {
1554
0
      this->LockSlow(kShared, nullptr, 0);
1555
0
      break;
1556
0
    }
1557
    // We can avoid the loop and only use the CAS when the lock is free or
1558
    // only held by readers.
1559
976k
    if (ABSL_PREDICT_TRUE(mu_.compare_exchange_weak(
1560
976k
            v, (kMuReader | v) + kMuOne, std::memory_order_acquire,
1561
976k
            std::memory_order_relaxed))) {
1562
976k
      break;
1563
976k
    }
1564
976k
  }
1565
976k
  DebugOnlyLockEnter(this, id);
1566
976k
  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1567
976k
}
1568
1569
bool Mutex::LockWhenCommon(const Condition& cond,
1570
                           synchronization_internal::KernelTimeout t,
1571
0
                           bool write) {
1572
0
  MuHow how = write ? kExclusive : kShared;
1573
0
  ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
1574
0
  GraphId id = DebugOnlyDeadlockCheck(this);
1575
0
  bool res = LockSlowWithDeadline(how, &cond, t, 0);
1576
0
  DebugOnlyLockEnter(this, id);
1577
0
  ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
1578
0
  return res;
1579
0
}
1580
1581
0
bool Mutex::AwaitCommon(const Condition& cond, KernelTimeout t) {
1582
0
  if (kDebugMode) {
1583
0
    this->AssertReaderHeld();
1584
0
  }
1585
0
  if (cond.Eval()) {  // condition already true; nothing to do
1586
0
    return true;
1587
0
  }
1588
0
  MuHow how =
1589
0
      (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
1590
0
  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
1591
0
  SynchWaitParams waitp(how, &cond, t, nullptr /*no cvmu*/,
1592
0
                        Synch_GetPerThreadAnnotated(this),
1593
0
                        nullptr /*no cv_word*/);
1594
0
  this->UnlockSlow(&waitp);
1595
0
  this->Block(waitp.thread);
1596
0
  ABSL_TSAN_MUTEX_POST_UNLOCK(this, TsanFlags(how));
1597
0
  ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
1598
0
  this->LockSlowLoop(&waitp, kMuHasBlocked | kMuIsCond);
1599
0
  bool res = waitp.cond != nullptr ||  // => cond known true from LockSlowLoop
1600
0
             EvalConditionAnnotated(&cond, this, true, false, how == kShared);
1601
0
  ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
1602
0
  ABSL_RAW_CHECK(res || t.has_timeout(),
1603
0
                 "condition untrue on return from Await");
1604
0
  return res;
1605
0
}
1606
1607
0
bool Mutex::try_lock() {
1608
0
  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
1609
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
1610
  // Try fast acquire.
1611
0
  if (ABSL_PREDICT_TRUE((v & (kMuWriter | kMuReader | kMuEvent)) == 0)) {
1612
0
    if (ABSL_PREDICT_TRUE(mu_.compare_exchange_strong(
1613
0
            v, kMuWriter | v, std::memory_order_acquire,
1614
0
            std::memory_order_relaxed))) {
1615
0
      DebugOnlyLockEnter(this);
1616
0
      ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
1617
0
      return true;
1618
0
    }
1619
0
  } else if (ABSL_PREDICT_FALSE((v & kMuEvent) != 0)) {
1620
    // We're recording events.
1621
0
    return TryLockSlow();
1622
0
  }
1623
0
  ABSL_TSAN_MUTEX_POST_LOCK(
1624
0
      this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
1625
0
  return false;
1626
0
}
1627
1628
0
ABSL_ATTRIBUTE_NOINLINE bool Mutex::TryLockSlow() {
1629
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
1630
0
  if ((v & kExclusive->slow_need_zero) == 0 &&  // try fast acquire
1631
0
      mu_.compare_exchange_strong(
1632
0
          v, (kExclusive->fast_or | v) + kExclusive->fast_add,
1633
0
          std::memory_order_acquire, std::memory_order_relaxed)) {
1634
0
    DebugOnlyLockEnter(this);
1635
0
    PostSynchEvent(this, SYNCH_EV_TRYLOCK_SUCCESS);
1636
0
    ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
1637
0
    return true;
1638
0
  }
1639
0
  PostSynchEvent(this, SYNCH_EV_TRYLOCK_FAILED);
1640
0
  ABSL_TSAN_MUTEX_POST_LOCK(
1641
0
      this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
1642
0
  return false;
1643
0
}
1644
1645
0
bool Mutex::try_lock_shared() {
1646
0
  ABSL_TSAN_MUTEX_PRE_LOCK(this,
1647
0
                           __tsan_mutex_read_lock | __tsan_mutex_try_lock);
1648
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
1649
  // Clang tends to unroll the loop when compiling with optimization.
1650
  // But in this case it just unnecessarily increases code size.
1651
  // If CAS is failing due to contention, the jump cost is negligible.
1652
0
#if defined(__clang__)
1653
0
#pragma nounroll
1654
0
#endif
1655
  // The while-loops (here and below) iterate only if the mutex word keeps
1656
  // changing (typically because the reader count changes) under the CAS.
1657
  // We limit the number of attempts to avoid having to think about livelock.
1658
0
  for (int loop_limit = 5; loop_limit != 0; loop_limit--) {
1659
0
    if (ABSL_PREDICT_FALSE((v & (kMuWriter | kMuWait | kMuEvent)) != 0)) {
1660
0
      break;
1661
0
    }
1662
0
    if (ABSL_PREDICT_TRUE(mu_.compare_exchange_strong(
1663
0
            v, (kMuReader | v) + kMuOne, std::memory_order_acquire,
1664
0
            std::memory_order_relaxed))) {
1665
0
      DebugOnlyLockEnter(this);
1666
0
      ABSL_TSAN_MUTEX_POST_LOCK(
1667
0
          this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
1668
0
      return true;
1669
0
    }
1670
0
  }
1671
0
  if (ABSL_PREDICT_TRUE((v & kMuEvent) == 0)) {
1672
0
    ABSL_TSAN_MUTEX_POST_LOCK(this,
1673
0
                              __tsan_mutex_read_lock | __tsan_mutex_try_lock |
1674
0
                                  __tsan_mutex_try_lock_failed,
1675
0
                              0);
1676
0
    return false;
1677
0
  }
1678
  // we're recording events
1679
0
  return ReaderTryLockSlow();
1680
0
}
1681
1682
0
ABSL_ATTRIBUTE_NOINLINE bool Mutex::ReaderTryLockSlow() {
1683
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
1684
0
#if defined(__clang__)
1685
0
#pragma nounroll
1686
0
#endif
1687
0
  for (int loop_limit = 5; loop_limit != 0; loop_limit--) {
1688
0
    if ((v & kShared->slow_need_zero) == 0 &&
1689
0
        mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1690
0
                                    std::memory_order_acquire,
1691
0
                                    std::memory_order_relaxed)) {
1692
0
      DebugOnlyLockEnter(this);
1693
0
      PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_SUCCESS);
1694
0
      ABSL_TSAN_MUTEX_POST_LOCK(
1695
0
          this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
1696
0
      return true;
1697
0
    }
1698
0
  }
1699
0
  PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_FAILED);
1700
0
  ABSL_TSAN_MUTEX_POST_LOCK(this,
1701
0
                            __tsan_mutex_read_lock | __tsan_mutex_try_lock |
1702
0
                                __tsan_mutex_try_lock_failed,
1703
0
                            0);
1704
0
  return false;
1705
0
}
1706
1707
49
void Mutex::unlock() {
1708
49
  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
1709
49
  DebugOnlyLockLeave(this);
1710
49
  intptr_t v = mu_.load(std::memory_order_relaxed);
1711
1712
49
  if (kDebugMode && ((v & (kMuWriter | kMuReader)) != kMuWriter)) {
1713
0
    ABSL_RAW_LOG(FATAL, "Mutex unlocked when destroyed or not locked: v=0x%x",
1714
0
                 static_cast<unsigned>(v));
1715
0
  }
1716
1717
  // should_try_cas is whether we'll try a compare-and-swap immediately.
1718
  // NOTE: optimized out when kDebugMode is false.
1719
49
  bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
1720
49
                         (v & (kMuWait | kMuDesig)) != kMuWait);
1721
1722
  // But, we can use an alternate computation of it, that compilers
1723
  // currently don't find on their own.  When that changes, this function
1724
  // can be simplified.
1725
  //
1726
  // should_try_cas is true iff the bits satisfy the following conditions:
1727
  //
1728
  //                   Ev Wr Wa De
1729
  // equal to           0  1
1730
  // and not equal to         1  0
1731
  //
1732
  // after xoring by    0  1  0  1,  this is equivalent to:
1733
  //
1734
  // equal to           0  0
1735
  // and not equal to         1  1,  which is the same as:
1736
  //
1737
  // smaller than       0  0  1  1
1738
49
  static_assert(kMuEvent > kMuWait, "Needed for should_try_cas_fast");
1739
49
  static_assert(kMuEvent > kMuDesig, "Needed for should_try_cas_fast");
1740
49
  static_assert(kMuWriter > kMuWait, "Needed for should_try_cas_fast");
1741
49
  static_assert(kMuWriter > kMuDesig, "Needed for should_try_cas_fast");
1742
1743
49
  bool should_try_cas_fast =
1744
49
      ((v ^ (kMuWriter | kMuDesig)) &
1745
49
       (kMuEvent | kMuWriter | kMuWait | kMuDesig)) < (kMuWait | kMuDesig);
1746
1747
49
  if (kDebugMode && should_try_cas != should_try_cas_fast) {
1748
    // We would usually use PRIdPTR here, but it is not correctly implemented
1749
    // within the android toolchain.
1750
0
    ABSL_RAW_LOG(FATAL, "internal logic error %llx %llx %llx\n",
1751
0
                 static_cast<long long>(v),
1752
0
                 static_cast<long long>(should_try_cas),
1753
0
                 static_cast<long long>(should_try_cas_fast));
1754
0
  }
1755
49
  if (should_try_cas_fast &&
1756
49
      mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
1757
49
                                  std::memory_order_release,
1758
49
                                  std::memory_order_relaxed)) {
1759
    // fast writer release (writer with no waiters or with designated waker)
1760
49
  } else {
1761
0
    this->UnlockSlow(nullptr /*no waitp*/);  // take slow path
1762
0
  }
1763
49
  ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
1764
49
}
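// ---------------------------------------------------------------------------
// Editor's sketch (not part of mutex.cc or the coverage data): the fast-release
// test in unlock() above replaces the two-clause should_try_cas predicate with
// a single xor-and-compare.  A self-checking sketch with hypothetical
// single-bit values (chosen only to satisfy the same ordering static_asserts;
// the real kMu* constants differ) verifies the equivalence by brute force:
#include <cassert>
#include <cstdint>

constexpr intptr_t kDesig = 0x1;   // illustrative
constexpr intptr_t kWait = 0x2;    // illustrative
constexpr intptr_t kWriter = 0x4;  // illustrative
constexpr intptr_t kEvent = 0x8;   // illustrative

int main() {
  for (intptr_t v = 0; v < 0x10; ++v) {
    // The straightforward two-clause predicate.
    bool slow = (v & (kEvent | kWriter)) == kWriter &&
                (v & (kWait | kDesig)) != kWait;
    // The single xor-and-compare form used above.
    bool fast = ((v ^ (kWriter | kDesig)) &
                 (kEvent | kWriter | kWait | kDesig)) < (kWait | kDesig);
    assert(slow == fast);
  }
  return 0;
}
// ---------------------------------------------------------------------------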
1765
1766
// Requires v to represent a reader-locked state.
1767
976k
static bool ExactlyOneReader(intptr_t v) {
1768
976k
  assert((v & (kMuWriter | kMuReader)) == kMuReader);
1769
976k
  assert((v & kMuHigh) != 0);
1770
  // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
1771
  // on some architectures the following generates slightly smaller code.
1772
  // It may be faster too.
1773
976k
  constexpr intptr_t kMuMultipleWaitersMask = kMuHigh ^ kMuOne;
1774
976k
  return (v & kMuMultipleWaitersMask) == 0;
1775
976k
}
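// ---------------------------------------------------------------------------
// Editor's sketch (not part of mutex.cc or the coverage data):
// ExactlyOneReader() relies on the reader count being packed into the high
// bits of the mutex word in increments of kMuOne.  With a hypothetical layout
// (low 8 bits for flags, the rest for the count), both the straightforward
// check and the mask form used above agree:
#include <cassert>
#include <cstdint>

constexpr intptr_t kLowBits = 0x00ff;      // illustrative flag bits
constexpr intptr_t kHighBits = ~kLowBits;  // illustrative reader-count field
constexpr intptr_t kOneReader = 0x0100;    // one reader, in count units

int main() {
  intptr_t v = 0;
  v += kOneReader;                              // first reader acquires
  assert((v & kHighBits) == kOneReader);        // straightforward check
  assert((v & (kHighBits ^ kOneReader)) == 0);  // mask form: nothing above One
  v += kOneReader;                              // second reader acquires
  assert((v & (kHighBits ^ kOneReader)) != 0);  // now more than one reader
  return 0;
}
// ---------------------------------------------------------------------------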
1776
1777
976k
void Mutex::unlock_shared() {
1778
976k
  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
1779
976k
  DebugOnlyLockLeave(this);
1780
976k
  intptr_t v = mu_.load(std::memory_order_relaxed);
1781
976k
  assert((v & (kMuWriter | kMuReader)) == kMuReader);
1782
976k
  for (;;) {
1783
976k
    if (ABSL_PREDICT_FALSE((v & (kMuReader | kMuWait | kMuEvent)) !=
1784
976k
                           kMuReader)) {
1785
0
      this->UnlockSlow(nullptr /*no waitp*/);  // take slow path
1786
0
      break;
1787
0
    }
1788
    // fast reader release (reader with no waiters)
1789
976k
    intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
1790
976k
    if (ABSL_PREDICT_TRUE(
1791
976k
            mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
1792
976k
                                        std::memory_order_relaxed))) {
1793
976k
      break;
1794
976k
    }
1795
976k
  }
1796
976k
  ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
1797
976k
}
1798
1799
// Clears the designated waker flag in the mutex if this thread has blocked, and
1800
// therefore may be the designated waker.
1801
0
static intptr_t ClearDesignatedWakerMask(int flag) {
1802
0
  assert(flag >= 0);
1803
0
  assert(flag <= 1);
1804
0
  switch (flag) {
1805
0
    case 0:  // not blocked
1806
0
      return ~static_cast<intptr_t>(0);
1807
0
    case 1:  // blocked; turn off the designated waker bit
1808
0
      return ~static_cast<intptr_t>(kMuDesig);
1809
0
  }
1810
0
  ABSL_UNREACHABLE();
1811
0
}
1812
1813
// Conditionally ignores the existence of waiting writers if a reader that has
1814
// already blocked once wakes up.
1815
0
static intptr_t IgnoreWaitingWritersMask(int flag) {
1816
0
  assert(flag >= 0);
1817
0
  assert(flag <= 1);
1818
0
  switch (flag) {
1819
0
    case 0:  // not blocked
1820
0
      return ~static_cast<intptr_t>(0);
1821
0
    case 1:  // blocked; pretend there are no waiting writers
1822
0
      return ~static_cast<intptr_t>(kMuWrWait);
1823
0
  }
1824
0
  ABSL_UNREACHABLE();
1825
0
}
1826
1827
// Internal version of LockWhen().  See LockSlowWithDeadline()
1828
ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition* cond,
1829
0
                                             int flags) {
1830
  // Note: we specifically initialize spinloop_iterations after the first use
1831
  // in TryAcquireWithSpinning so that the Lock function has no non-tail calls
1832
  // and consequently no stack frame. It's fine to have spinloop_iterations
1833
  // uninitialized (meaning no spinning) in all initial uncontended Lock calls
1834
  // and in the first contended call. After that we will have
1835
  // spinloop_iterations properly initialized.
1836
0
  if (ABSL_PREDICT_FALSE(
1837
0
          globals.spinloop_iterations.load(std::memory_order_relaxed) == 0)) {
1838
0
    if (absl::base_internal::NumCPUs() > 1) {
1839
      // If this is multiprocessor, allow spinning.
1840
0
      globals.spinloop_iterations.store(1500, std::memory_order_relaxed);
1841
0
    } else {
1842
      // If this a uniprocessor, only yield/sleep.
1843
0
      globals.spinloop_iterations.store(-1, std::memory_order_relaxed);
1844
0
    }
1845
0
  }
1846
0
  ABSL_RAW_CHECK(
1847
0
      this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
1848
0
      "condition untrue on return from LockSlow");
1849
0
}
1850
1851
// Compute cond->Eval() and tell race detectors that we do it under mutex mu.
1852
static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
1853
                                          bool locking, bool trylock,
1854
0
                                          bool read_lock) {
1855
  // Delicate annotation dance.
1856
  // We are currently inside of read/write lock/unlock operation.
1857
  // All memory accesses are ignored inside of mutex operations + for unlock
1858
  // operation tsan considers that we've already released the mutex.
1859
0
  bool res = false;
1860
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
1861
  const uint32_t flags = read_lock ? __tsan_mutex_read_lock : 0;
1862
  const uint32_t tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
1863
#endif
1864
0
  if (locking) {
1865
    // For lock we pretend that we have finished the operation,
1866
    // evaluate the predicate, then unlock the mutex and start locking it again
1867
    // to match the annotation at the end of outer lock operation.
1868
    // Note: we can't simply do POST_LOCK, Eval, PRE_LOCK, because then tsan
1869
    // will think the lock acquisition is recursive which will trigger
1870
    // deadlock detector.
1871
0
    ABSL_TSAN_MUTEX_POST_LOCK(mu, tryflags, 0);
1872
0
    res = cond->Eval();
1873
    // There is no "try" version of Unlock, so use flags instead of tryflags.
1874
0
    ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
1875
0
    ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
1876
0
    ABSL_TSAN_MUTEX_PRE_LOCK(mu, tryflags);
1877
0
  } else {
1878
    // Similarly, for unlock we pretend that we have unlocked the mutex,
1879
    // lock the mutex, evaluate the predicate, and start unlocking it again
1880
    // to match the annotation at the end of outer unlock operation.
1881
0
    ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
1882
0
    ABSL_TSAN_MUTEX_PRE_LOCK(mu, flags);
1883
0
    ABSL_TSAN_MUTEX_POST_LOCK(mu, flags, 0);
1884
0
    res = cond->Eval();
1885
0
    ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
1886
0
  }
1887
  // Prevent unused param warnings in non-TSAN builds.
1888
0
  static_cast<void>(mu);
1889
0
  static_cast<void>(trylock);
1890
0
  static_cast<void>(read_lock);
1891
0
  return res;
1892
0
}
1893
1894
// Compute cond->Eval() hiding it from race detectors.
1895
// We are hiding it because inside of UnlockSlow we can evaluate a predicate
1896
// that was just added by a concurrent Lock operation; Lock adds the predicate
1897
// to the internal Mutex list without actually acquiring the Mutex
1898
// (it only acquires the internal spinlock, which is rightfully invisible for
1899
// tsan). As a result there is no tsan-visible synchronization between the
1900
// addition and this thread. So if we enabled race detection here,
1901
// it would race with the predicate initialization.
1902
0
static inline bool EvalConditionIgnored(Mutex* mu, const Condition* cond) {
1903
  // Memory accesses are already ignored inside of lock/unlock operations,
1904
  // but synchronization operations are also ignored. When we evaluate the
1905
  // predicate we must ignore only memory accesses but not synchronization,
1906
  // because missed synchronization can lead to false reports later.
1907
  // So we "divert" (which un-ignores both memory accesses and synchronization)
1908
  // and then separately turn on ignores of memory accesses.
1909
0
  ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
1910
0
  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1911
0
  bool res = cond->Eval();
1912
0
  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
1913
0
  ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
1914
0
  static_cast<void>(mu);  // Prevent unused param warning in non-TSAN builds.
1915
0
  return res;
1916
0
}
1917
1918
// Internal equivalent of *LockWhenWithDeadline(), where
1919
//   "t" represents the absolute timeout; !t.has_timeout() means "forever".
1920
//   "how" is "kShared" (for ReaderLockWhen) or "kExclusive" (for LockWhen)
1921
// In flags, bits are ored together:
1922
// - kMuHasBlocked indicates that the client has already blocked on the call so
1923
//   the designated waker bit must be cleared and waiting writers should not
1924
//   obstruct this call
1925
// - kMuIsCond indicates that this is a conditional acquire (condition variable,
1926
//   Await,  LockWhen) so contention profiling should be suppressed.
1927
bool Mutex::LockSlowWithDeadline(MuHow how, const Condition* cond,
1928
0
                                 KernelTimeout t, int flags) {
1929
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
1930
0
  bool unlock = false;
1931
0
  if ((v & how->fast_need_zero) == 0 &&  // try fast acquire
1932
0
      mu_.compare_exchange_strong(
1933
0
          v,
1934
0
          (how->fast_or |
1935
0
           (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
1936
0
              how->fast_add,
1937
0
          std::memory_order_acquire, std::memory_order_relaxed)) {
1938
0
    if (cond == nullptr ||
1939
0
        EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
1940
0
      return true;
1941
0
    }
1942
0
    unlock = true;
1943
0
  }
1944
0
  SynchWaitParams waitp(how, cond, t, nullptr /*no cvmu*/,
1945
0
                        Synch_GetPerThreadAnnotated(this),
1946
0
                        nullptr /*no cv_word*/);
1947
0
  if (cond != nullptr) {
1948
0
    flags |= kMuIsCond;
1949
0
  }
1950
0
  if (unlock) {
1951
0
    this->UnlockSlow(&waitp);
1952
0
    this->Block(waitp.thread);
1953
0
    flags |= kMuHasBlocked;
1954
0
  }
1955
0
  this->LockSlowLoop(&waitp, flags);
1956
0
  return waitp.cond != nullptr ||  // => cond known true from LockSlowLoop
1957
0
         cond == nullptr ||
1958
0
         EvalConditionAnnotated(cond, this, true, false, how == kShared);
1959
0
}
1960
1961
// RAW_CHECK_FMT() takes a condition, a printf-style format string, and
1962
// the printf-style argument list.   The format string must be a literal.
1963
// Arguments after the first are not evaluated unless the condition is false.
1964
#define RAW_CHECK_FMT(cond, ...)                                   \
1965
0
  do {                                                             \
1966
0
    if (ABSL_PREDICT_FALSE(!(cond))) {                             \
1967
0
      ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \
1968
0
    }                                                              \
1969
0
  } while (0)
1970
1971
0
static void CheckForMutexCorruption(intptr_t v, const char* label) {
1972
  // Test for either of two situations that should not occur in v:
1973
  //   kMuWriter and kMuReader
1974
  //   kMuWrWait and !kMuWait
1975
0
  const uintptr_t w = static_cast<uintptr_t>(v ^ kMuWait);
1976
  // By flipping that bit, we can now test for:
1977
  //   kMuWriter and kMuReader in w
1978
  //   kMuWrWait and kMuWait in w
1979
  // We've chosen these two pairs of values so that they will overlap,
1980
  // respectively, when the word is left shifted by three.  This allows us to
1981
  // save a branch in the common (correct) case of them not being coincident.
1982
0
  static_assert(kMuReader << 3 == kMuWriter, "must match");
1983
0
  static_assert(kMuWait << 3 == kMuWrWait, "must match");
1984
0
  if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
1985
0
  RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
1986
0
                "%s: Mutex corrupt: both reader and writer lock held: %p",
1987
0
                label, reinterpret_cast<void*>(v));
1988
0
  RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
1989
0
                "%s: Mutex corrupt: waiting writer with no waiters: %p", label,
1990
0
                reinterpret_cast<void*>(v));
1991
0
  assert(false);
1992
0
}
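// ---------------------------------------------------------------------------
// Editor's sketch (not part of mutex.cc or the coverage data):
// CheckForMutexCorruption() folds the two forbidden bit combinations into one
// branch by flipping kMuWait and testing the word against itself shifted left
// by three.  With illustrative bit values that satisfy the same static_asserts
// (reader << 3 == writer and wait << 3 == wrwait), a brute-force check shows
// the fast test fires exactly when either bad combination is present:
#include <cassert>
#include <cstdint>

constexpr intptr_t kReader = 0x01;  // illustrative
constexpr intptr_t kWait = 0x04;    // illustrative
constexpr intptr_t kWriter = 0x08;  // illustrative; kReader << 3
constexpr intptr_t kWrWait = 0x20;  // illustrative; kWait << 3

int main() {
  for (intptr_t v = 0; v < 0x40; ++v) {
    bool bad =
        (v & (kWriter | kReader)) == (kWriter | kReader) ||  // both lock kinds
        (v & (kWait | kWrWait)) == kWrWait;  // waiting writer but no waiters
    intptr_t w = v ^ kWait;                  // flip the wait bit
    bool fast_bad = (w & (w << 3) & (kWriter | kWrWait)) != 0;
    assert(bad == fast_bad);
  }
  return 0;
}
// ---------------------------------------------------------------------------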
1993
1994
0
void Mutex::LockSlowLoop(SynchWaitParams* waitp, int flags) {
1995
0
  SchedulingGuard::ScopedDisable disable_rescheduling;
1996
0
  int c = 0;
1997
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
1998
0
  if ((v & kMuEvent) != 0) {
1999
0
    PostSynchEvent(
2000
0
        this, waitp->how == kExclusive ? SYNCH_EV_LOCK : SYNCH_EV_READERLOCK);
2001
0
  }
2002
0
  ABSL_RAW_CHECK(
2003
0
      waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
2004
0
      "detected illegal recursion into Mutex code");
2005
0
  for (;;) {
2006
0
    v = mu_.load(std::memory_order_relaxed);
2007
0
    CheckForMutexCorruption(v, "Lock");
2008
0
    if ((v & waitp->how->slow_need_zero) == 0) {
2009
0
      if (mu_.compare_exchange_strong(
2010
0
              v,
2011
0
              (waitp->how->fast_or |
2012
0
               (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
2013
0
                  waitp->how->fast_add,
2014
0
              std::memory_order_acquire, std::memory_order_relaxed)) {
2015
0
        if (waitp->cond == nullptr ||
2016
0
            EvalConditionAnnotated(waitp->cond, this, true, false,
2017
0
                                   waitp->how == kShared)) {
2018
0
          break;  // we timed out, or condition true, so return
2019
0
        }
2020
0
        this->UnlockSlow(waitp);  // got lock but condition false
2021
0
        this->Block(waitp->thread);
2022
0
        flags |= kMuHasBlocked;
2023
0
        c = 0;
2024
0
      }
2025
0
    } else {  // need to access waiter list
2026
0
      bool dowait = false;
2027
0
      if ((v & (kMuSpin | kMuWait)) == 0) {  // no waiters
2028
        // This thread tries to become the one and only waiter.
2029
0
        PerThreadSynch* new_h = Enqueue(nullptr, waitp, v, flags);
2030
0
        intptr_t nv =
2031
0
            (v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
2032
0
            kMuWait;
2033
0
        ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
2034
0
        if (waitp->how == kExclusive && (v & kMuReader) != 0) {
2035
0
          nv |= kMuWrWait;
2036
0
        }
2037
0
        if (mu_.compare_exchange_strong(
2038
0
                v, reinterpret_cast<intptr_t>(new_h) | nv,
2039
0
                std::memory_order_release, std::memory_order_relaxed)) {
2040
0
          dowait = true;
2041
0
        } else {  // attempted Enqueue() failed
2042
          // zero out the waitp field set by Enqueue()
2043
0
          waitp->thread->waitp = nullptr;
2044
0
        }
2045
0
      } else if ((v & waitp->how->slow_inc_need_zero &
2046
0
                  IgnoreWaitingWritersMask(flags & kMuHasBlocked)) == 0) {
2047
        // This is a reader that needs to increment the reader count,
2048
        // but the count is currently held in the last waiter.
2049
0
        if (mu_.compare_exchange_strong(
2050
0
                v,
2051
0
                (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
2052
0
                    kMuSpin | kMuReader,
2053
0
                std::memory_order_acquire, std::memory_order_relaxed)) {
2054
0
          PerThreadSynch* h = GetPerThreadSynch(v);
2055
0
          h->readers += kMuOne;  // inc reader count in waiter
2056
0
          do {                   // release spinlock
2057
0
            v = mu_.load(std::memory_order_relaxed);
2058
0
          } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
2059
0
                                              std::memory_order_release,
2060
0
                                              std::memory_order_relaxed));
2061
0
          if (waitp->cond == nullptr ||
2062
0
              EvalConditionAnnotated(waitp->cond, this, true, false,
2063
0
                                     waitp->how == kShared)) {
2064
0
            break;  // we timed out, or condition true, so return
2065
0
          }
2066
0
          this->UnlockSlow(waitp);  // got lock but condition false
2067
0
          this->Block(waitp->thread);
2068
0
          flags |= kMuHasBlocked;
2069
0
          c = 0;
2070
0
        }
2071
0
      } else if ((v & kMuSpin) == 0 &&  // attempt to queue ourselves
2072
0
                 mu_.compare_exchange_strong(
2073
0
                     v,
2074
0
                     (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
2075
0
                         kMuSpin | kMuWait,
2076
0
                     std::memory_order_acquire, std::memory_order_relaxed)) {
2077
0
        PerThreadSynch* h = GetPerThreadSynch(v);
2078
0
        PerThreadSynch* new_h = Enqueue(h, waitp, v, flags);
2079
0
        intptr_t wr_wait = 0;
2080
0
        ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
2081
0
        if (waitp->how == kExclusive && (v & kMuReader) != 0) {
2082
0
          wr_wait = kMuWrWait;  // give priority to a waiting writer
2083
0
        }
2084
0
        do {  // release spinlock
2085
0
          v = mu_.load(std::memory_order_relaxed);
2086
0
        } while (!mu_.compare_exchange_weak(
2087
0
            v,
2088
0
            (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
2089
0
                reinterpret_cast<intptr_t>(new_h),
2090
0
            std::memory_order_release, std::memory_order_relaxed));
2091
0
        dowait = true;
2092
0
      }
2093
0
      if (dowait) {
2094
0
        this->Block(waitp->thread);  // wait until removed from list or timeout
2095
0
        flags |= kMuHasBlocked;
2096
0
        c = 0;
2097
0
      }
2098
0
    }
2099
0
    ABSL_RAW_CHECK(
2100
0
        waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
2101
0
        "detected illegal recursion into Mutex code");
2102
    // delay, then try again
2103
0
    c = synchronization_internal::MutexDelay(c, GENTLE);
2104
0
  }
2105
0
  ABSL_RAW_CHECK(
2106
0
      waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
2107
0
      "detected illegal recursion into Mutex code");
2108
0
  if ((v & kMuEvent) != 0) {
2109
0
    PostSynchEvent(this, waitp->how == kExclusive
2110
0
                             ? SYNCH_EV_LOCK_RETURNING
2111
0
                             : SYNCH_EV_READERLOCK_RETURNING);
2112
0
  }
2113
0
}
2114
2115
// Unlock this mutex, which is held by the current thread.
2116
// If waitp is non-zero, it must be the wait parameters for the current thread
2117
// which holds the lock but is not runnable because its condition is false
2118
// or it is in the process of blocking on a condition variable; it must requeue
2119
// itself on the mutex/condvar to wait for its condition to become true.
2120
0
ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams* waitp) {
2121
0
  SchedulingGuard::ScopedDisable disable_rescheduling;
2122
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
2123
0
  this->AssertReaderHeld();
2124
0
  CheckForMutexCorruption(v, "Unlock");
2125
0
  if ((v & kMuEvent) != 0) {
2126
0
    PostSynchEvent(
2127
0
        this, (v & kMuWriter) != 0 ? SYNCH_EV_UNLOCK : SYNCH_EV_READERUNLOCK);
2128
0
  }
2129
0
  int c = 0;
2130
  // the waiter under consideration to wake, or zero
2131
0
  PerThreadSynch* w = nullptr;
2132
  // the predecessor to w or zero
2133
0
  PerThreadSynch* pw = nullptr;
2134
  // head of the list searched previously, or zero
2135
0
  PerThreadSynch* old_h = nullptr;
2136
  // a condition that's known to be false.
2137
0
  PerThreadSynch* wake_list = kPerThreadSynchNull;  // list of threads to wake
2138
0
  intptr_t wr_wait = 0;  // set to kMuWrWait if we wake a reader and a
2139
                         // later writer could have acquired the lock
2140
                         // (starvation avoidance)
2141
0
  ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
2142
0
                     waitp->thread->suppress_fatal_errors,
2143
0
                 "detected illegal recursion into Mutex code");
2144
  // This loop finds the threads to wake (wake_list), if any, and removes them from
2145
  // the list of waiters.  In addition, it places waitp.thread on the queue of
2146
  // waiters if waitp is non-zero.
2147
0
  for (;;) {
2148
0
    v = mu_.load(std::memory_order_relaxed);
2149
0
    if ((v & kMuWriter) != 0 && (v & (kMuWait | kMuDesig)) != kMuWait &&
2150
0
        waitp == nullptr) {
2151
      // fast writer release (writer with no waiters or with designated waker)
2152
0
      if (mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
2153
0
                                      std::memory_order_release,
2154
0
                                      std::memory_order_relaxed)) {
2155
0
        return;
2156
0
      }
2157
0
    } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
2158
      // fast reader release (reader with no waiters)
2159
0
      intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
2160
0
      if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
2161
0
                                      std::memory_order_relaxed)) {
2162
0
        return;
2163
0
      }
2164
0
    } else if ((v & kMuSpin) == 0 &&  // attempt to get spinlock
2165
0
               mu_.compare_exchange_strong(v, v | kMuSpin,
2166
0
                                           std::memory_order_acquire,
2167
0
                                           std::memory_order_relaxed)) {
2168
0
      if ((v & kMuWait) == 0) {  // no one to wake
2169
0
        intptr_t nv;
2170
0
        bool do_enqueue = true;  // always Enqueue() the first time
2171
0
        ABSL_RAW_CHECK(waitp != nullptr,
2172
0
                       "UnlockSlow is confused");  // about to sleep
2173
0
        do {  // must loop to release spinlock as reader count may change
2174
0
          v = mu_.load(std::memory_order_relaxed);
2175
          // decrement reader count if there are readers
2176
0
          intptr_t new_readers = (v >= kMuOne) ? v - kMuOne : v;
2177
0
          PerThreadSynch* new_h = nullptr;
2178
0
          if (do_enqueue) {
2179
            // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
2180
            // we must not retry here.  The initial attempt will always have
2181
            // succeeded, further attempts would enqueue us against *this due to
2182
            // Fer() handling.
2183
0
            do_enqueue = (waitp->cv_word == nullptr);
2184
0
            new_h = Enqueue(nullptr, waitp, new_readers, kMuIsCond);
2185
0
          }
2186
0
          intptr_t clear = kMuWrWait | kMuWriter;  // by default clear write bit
2187
0
          if ((v & kMuWriter) == 0 && ExactlyOneReader(v)) {  // last reader
2188
0
            clear = kMuWrWait | kMuReader;                    // clear read bit
2189
0
          }
2190
0
          nv = (v & kMuLow & ~clear & ~kMuSpin);
2191
0
          if (new_h != nullptr) {
2192
0
            nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2193
0
          } else {  // new_h could be nullptr if we queued ourselves on a
2194
                    // CondVar
2195
            // In that case, we must place the reader count back in the mutex
2196
            // word, as Enqueue() did not store it in the new waiter.
2197
0
            nv |= new_readers & kMuHigh;
2198
0
          }
2199
          // release spinlock & our lock; retry if reader-count changed
2200
          // (writer count cannot change since we hold lock)
2201
0
        } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
2202
0
                                            std::memory_order_relaxed));
2203
0
        break;
2204
0
      }
2205
2206
      // There are waiters.
2207
      // Set h to the head of the circular waiter list.
2208
0
      PerThreadSynch* h = GetPerThreadSynch(v);
2209
0
      if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
2210
        // a reader but not the last
2211
0
        h->readers -= kMuOne;    // release our lock
2212
0
        intptr_t nv = v;         // normally just release spinlock
2213
0
        if (waitp != nullptr) {  // but waitp!=nullptr => must queue ourselves
2214
0
          PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
2215
0
          ABSL_RAW_CHECK(new_h != nullptr,
2216
0
                         "waiters disappeared during Enqueue()!");
2217
0
          nv &= kMuLow;
2218
0
          nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2219
0
        }
2220
0
        mu_.store(nv, std::memory_order_release);  // release spinlock
2221
        // can release with a store because there were waiters
2222
0
        break;
2223
0
      }
2224
2225
      // Either we didn't search before, or we marked the queue
2226
      // as "maybe_unlocking" and no one else should have changed it.
2227
0
      ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking,
2228
0
                     "Mutex queue changed beneath us");
2229
2230
      // The lock is becoming free, and there's a waiter
2231
0
      if (old_h != nullptr &&
2232
0
          !old_h->may_skip) {    // we used old_h as a terminator
2233
0
        old_h->may_skip = true;  // allow old_h to skip once more
2234
0
        ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
2235
0
        if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
2236
0
          old_h->skip = old_h->next;  // old_h not head & can skip to successor
2237
0
        }
2238
0
      }
2239
0
      if (h->next->waitp->how == kExclusive &&
2240
0
          h->next->waitp->cond == nullptr) {
2241
        // easy case: writer with no condition; no need to search
2242
0
        pw = h;  // wake w, the successor of h (=pw)
2243
0
        w = h->next;
2244
0
        w->wake = true;
2245
        // We are waking up a writer.  This writer may be racing against
2246
        // an already awake reader for the lock.  We want the
2247
        // writer to usually win this race,
2248
        // because if it doesn't, we can potentially keep taking a reader
2249
        // perpetually and writers will starve.  Worse than
2250
        // that, this can also starve other readers if kMuWrWait gets set
2251
        // later.
2252
0
        wr_wait = kMuWrWait;
2253
0
      } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) {
2254
        // we found a waiter w to wake on a previous iteration and either it's
2255
        // a writer, or we've searched the entire list so we have all the
2256
        // readers.
2257
0
        if (pw == nullptr) {  // if w's predecessor is unknown, it must be h
2258
0
          pw = h;
2259
0
        }
2260
0
      } else {
2261
        // At this point we don't know all the waiters to wake, and the first
2262
        // waiter has a condition or is a reader.  We avoid searching over
2263
        // waiters we've searched on previous iterations by starting at
2264
        // old_h if it's set.  If old_h==h, there's no one to wakeup at all.
2265
0
        if (old_h == h) {  // we've searched before, and nothing's new
2266
                           // so there's no one to wake.
2267
0
          intptr_t nv = (v & ~(kMuReader | kMuWriter | kMuWrWait));
2268
0
          h->readers = 0;
2269
0
          h->maybe_unlocking = false;  // finished unlocking
2270
0
          if (waitp != nullptr) {      // we must queue ourselves and sleep
2271
0
            PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
2272
0
            nv &= kMuLow;
2273
0
            if (new_h != nullptr) {
2274
0
              nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2275
0
            }  // else new_h could be nullptr if we queued ourselves on a
2276
               // CondVar
2277
0
          }
2278
          // release spinlock & lock
2279
          // can release with a store because there were waiters
2280
0
          mu_.store(nv, std::memory_order_release);
2281
0
          break;
2282
0
        }
2283
2284
        // set up to walk the list
2285
0
        PerThreadSynch* w_walk;   // current waiter during list walk
2286
0
        PerThreadSynch* pw_walk;  // previous waiter during list walk
2287
0
        if (old_h != nullptr) {   // we've searched up to old_h before
2288
0
          pw_walk = old_h;
2289
0
          w_walk = old_h->next;
2290
0
        } else {  // no prior search, start at beginning
2291
0
          pw_walk =
2292
0
              nullptr;  // h->next's predecessor may change; don't record it
2293
0
          w_walk = h->next;
2294
0
        }
2295
2296
0
        h->may_skip = false;  // ensure we never skip past h in future searches
2297
                              // even if other waiters are queued after it.
2298
0
        ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head");
2299
2300
0
        h->maybe_unlocking = true;  // we're about to scan the waiter list
2301
                                    // without the spinlock held.
2302
                                    // Enqueue must be conservative about
2303
                                    // priority queuing.
2304
2305
        // We must release the spinlock to evaluate the conditions.
2306
0
        mu_.store(v, std::memory_order_release);  // release just spinlock
2307
        // can release with a store because there were waiters
2308
2309
        // h is the last waiter queued, and w_walk the first unsearched waiter.
2310
        // Without the spinlock, the locations mu_ and h->next may now change
2311
        // underneath us, but since we hold the lock itself, the only legal
2312
        // change is to add waiters between h and w_walk.  Therefore, it's safe
2313
        // to walk the path from w_walk to h inclusive. (TryRemove() can remove
2314
        // a waiter anywhere, but it acquires both the spinlock and the Mutex)
2315
2316
0
        old_h = h;  // remember we searched to here
2317
2318
        // Walk the path up to and including h looking for waiters we can wake.
2319
0
        while (pw_walk != h) {
2320
0
          w_walk->wake = false;
2321
0
          if (w_walk->waitp->cond ==
2322
0
                  nullptr ||  // no condition => vacuously true OR
2323
                              // this thread's condition is true
2324
0
              EvalConditionIgnored(this, w_walk->waitp->cond)) {
2325
0
            if (w == nullptr) {
2326
0
              w_walk->wake = true;  // can wake this waiter
2327
0
              w = w_walk;
2328
0
              pw = pw_walk;
2329
0
              if (w_walk->waitp->how == kExclusive) {
2330
0
                wr_wait = kMuWrWait;
2331
0
                break;  // bail if waking this writer
2332
0
              }
2333
0
            } else if (w_walk->waitp->how == kShared) {  // wake if a reader
2334
0
              w_walk->wake = true;
2335
0
            } else {  // writer with true condition
2336
0
              wr_wait = kMuWrWait;
2337
0
            }
2338
0
          }
2339
0
          if (w_walk->wake) {  // we're waking reader w_walk
2340
0
            pw_walk = w_walk;  // don't skip similar waiters
2341
0
          } else {             // not waking; skip as much as possible
2342
0
            pw_walk = Skip(w_walk);
2343
0
          }
2344
          // If pw_walk == h, then load of pw_walk->next can race with
2345
          // concurrent write in Enqueue(). However, at the same time
2346
          // we do not need to do the load, because we will bail out
2347
          // from the loop anyway.
2348
0
          if (pw_walk != h) {
2349
0
            w_walk = pw_walk->next;
2350
0
          }
2351
0
        }
2352
2353
0
        continue;  // restart for(;;)-loop to wakeup w or to find more waiters
2354
0
      }
2355
0
      ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor");
2356
      // The first (and perhaps only) waiter we've chosen to wake is w, whose
2357
      // predecessor is pw.  If w is a reader, we must wake all the other
2358
      // waiters with wake==true as well.  We may also need to queue
2359
      // ourselves if waitp != null.  The spinlock and the lock are still
2360
      // held.
2361
2362
      // This traverses the list in [ pw->next, h ], where h is the head,
2363
      // removing all elements with wake==true and placing them in the
2364
      // singly-linked list wake_list.  Returns the new head.
2365
0
      h = DequeueAllWakeable(h, pw, &wake_list);
2366
2367
0
      intptr_t nv = (v & kMuEvent) | kMuDesig;
2368
      // assume no waiters left,
2369
      // set kMuDesig for INV1a
2370
2371
0
      if (waitp != nullptr) {  // we must queue ourselves and sleep
2372
0
        h = Enqueue(h, waitp, v, kMuIsCond);
2373
        // h is new last waiter; could be null if we queued ourselves on a
2374
        // CondVar
2375
0
      }
2376
2377
0
      ABSL_RAW_CHECK(wake_list != kPerThreadSynchNull,
2378
0
                     "unexpected empty wake list");
2379
2380
0
      if (h != nullptr) {  // there are waiters left
2381
0
        h->readers = 0;
2382
0
        h->maybe_unlocking = false;  // finished unlocking
2383
0
        nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
2384
0
      }
2385
2386
      // release both spinlock & lock
2387
      // can release with a store because there were waiters
2388
0
      mu_.store(nv, std::memory_order_release);
2389
0
      break;  // out of for(;;)-loop
2390
0
    }
2391
    // aggressive here; no one can proceed till we do
2392
0
    c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
2393
0
  }  // end of for(;;)-loop
2394
2395
0
  if (wake_list != kPerThreadSynchNull) {
2396
0
    int64_t total_wait_cycles = 0;
2397
0
    int64_t max_wait_cycles = 0;
2398
0
    int64_t now = CycleClock::Now();
2399
0
    do {
2400
      // Profile lock contention events only if the waiter was trying to acquire
2401
      // the lock, not waiting on a condition variable or Condition.
2402
0
      if (!wake_list->cond_waiter) {
2403
0
        int64_t cycles_waited =
2404
0
            (now - wake_list->waitp->contention_start_cycles);
2405
0
        total_wait_cycles += cycles_waited;
2406
0
        if (max_wait_cycles == 0) max_wait_cycles = cycles_waited;
2407
0
        wake_list->waitp->contention_start_cycles = now;
2408
0
        wake_list->waitp->should_submit_contention_data = true;
2409
0
      }
2410
0
      wake_list = Wakeup(wake_list);  // wake waiters
2411
0
    } while (wake_list != kPerThreadSynchNull);
2412
0
    if (total_wait_cycles > 0) {
2413
0
      mutex_tracer("slow release", this, total_wait_cycles);
2414
0
      ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
2415
0
      submit_profile_data(total_wait_cycles);
2416
0
      ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
2417
0
    }
2418
0
  }
2419
0
}
2420
2421
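The contention cycles accumulated in the wake-up loop above are delivered through the mutex_tracer and submit_profile_data hooks. The following is a minimal sketch, not part of mutex.cc, of how a client could receive that data via the registration functions declared in absl/synchronization/mutex.h; the counter and function names are illustrative assumptions.

#include <atomic>
#include <cstdint>

#include "absl/synchronization/mutex.h"

namespace {
// Illustrative global: total cycles that waiters spent blocked on contended
// mutexes, as reported by Mutex::UnlockSlow() through the registered profiler.
std::atomic<int64_t> g_total_wait_cycles{0};

void SampleMutexProfiler(int64_t wait_cycles) {
  g_total_wait_cycles.fetch_add(wait_cycles, std::memory_order_relaxed);
}
}  // namespace

void InstallMutexProfiler() {
  // RegisterMutexProfiler() installs the callback invoked with wait_cycles
  // whenever a contended release submits profile data.
  absl::RegisterMutexProfiler(SampleMutexProfiler);
}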
// Used by CondVar implementation to reacquire mutex after waking from
2422
// condition variable.  This routine is used instead of Lock() because the
2423
// waiting thread may have been moved from the condition variable queue to the
2424
// mutex queue without a wakeup, by Trans().  In that case, when the thread is
2425
// finally woken, the woken thread will believe it has been woken from the
2426
// condition variable (i.e. its PC will be in the CondVar code), when
2427
// in fact it has just been woken from the mutex.  Thus, it must enter the slow
2428
// path of the mutex in the same state as if it had just woken from the mutex.
2429
// That is, it must ensure that it clears kMuDesig (INV1b).
2430
0
void Mutex::Trans(MuHow how) {
2431
0
  this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);
2432
0
}
2433
2434
// Used by CondVar implementation to effectively wake thread w from the
2435
// condition variable.  If this mutex is free, we simply wake the thread.
2436
// It will later acquire the mutex with high probability.  Otherwise, we
2437
// enqueue thread w on this mutex.
2438
0
void Mutex::Fer(PerThreadSynch* w) {
2439
0
  SchedulingGuard::ScopedDisable disable_rescheduling;
2440
0
  int c = 0;
2441
0
  ABSL_RAW_CHECK(w->waitp->cond == nullptr,
2442
0
                 "Mutex::Fer while waiting on Condition");
2443
0
  ABSL_RAW_CHECK(w->waitp->cv_word == nullptr,
2444
0
                 "Mutex::Fer with pending CondVar queueing");
2445
  // The CondVar timeout is not relevant for the Mutex wait.
2446
0
  w->waitp->timeout = {};
2447
0
  for (;;) {
2448
0
    intptr_t v = mu_.load(std::memory_order_relaxed);
2449
    // Note: must not queue if the mutex is unlocked (nobody will wake it).
2450
    // For example, we can have only kMuWait (conditional) or maybe
2451
    // kMuWait|kMuWrWait.
2452
    // conflicting != 0 implies that the waking thread cannot currently take
2453
    // the mutex, which in turn implies that someone else has it and can wake
2454
    // us if we queue.
2455
0
    const intptr_t conflicting =
2456
0
        kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader);
2457
0
    if ((v & conflicting) == 0) {
2458
0
      w->next = nullptr;
2459
0
      w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2460
0
      IncrementSynchSem(this, w);
2461
0
      return;
2462
0
    } else {
2463
0
      if ((v & (kMuSpin | kMuWait)) == 0) {  // no waiters
2464
        // This thread tries to become the one and only waiter.
2465
0
        PerThreadSynch* new_h =
2466
0
            Enqueue(nullptr, w->waitp, v, kMuIsCond | kMuIsFer);
2467
0
        ABSL_RAW_CHECK(new_h != nullptr,
2468
0
                       "Enqueue failed");  // we must queue ourselves
2469
0
        if (mu_.compare_exchange_strong(
2470
0
                v, reinterpret_cast<intptr_t>(new_h) | (v & kMuLow) | kMuWait,
2471
0
                std::memory_order_release, std::memory_order_relaxed)) {
2472
0
          return;
2473
0
        }
2474
0
      } else if ((v & kMuSpin) == 0 &&
2475
0
                 mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
2476
0
        PerThreadSynch* h = GetPerThreadSynch(v);
2477
0
        PerThreadSynch* new_h = Enqueue(h, w->waitp, v, kMuIsCond | kMuIsFer);
2478
0
        ABSL_RAW_CHECK(new_h != nullptr,
2479
0
                       "Enqueue failed");  // we must queue ourselves
2480
0
        do {
2481
0
          v = mu_.load(std::memory_order_relaxed);
2482
0
        } while (!mu_.compare_exchange_weak(
2483
0
            v,
2484
0
            (v & kMuLow & ~kMuSpin) | kMuWait |
2485
0
                reinterpret_cast<intptr_t>(new_h),
2486
0
            std::memory_order_release, std::memory_order_relaxed));
2487
0
        return;
2488
0
      }
2489
0
    }
2490
0
    c = synchronization_internal::MutexDelay(c, GENTLE);
2491
0
  }
2492
0
}
2493
2494
0
void Mutex::AssertHeld() const {
2495
0
  if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
2496
0
    SynchEvent* e = GetSynchEvent(this);
2497
0
    ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
2498
0
                 static_cast<const void*>(this), (e == nullptr ? "" : e->name));
2499
0
  }
2500
0
}
2501
2502
0
void Mutex::AssertReaderHeld() const {
2503
0
  if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
2504
0
    SynchEvent* e = GetSynchEvent(this);
2505
0
    ABSL_RAW_LOG(FATAL,
2506
0
                 "thread should hold at least a read lock on Mutex %p %s",
2507
0
                 static_cast<const void*>(this), (e == nullptr ? "" : e->name));
2508
0
  }
2509
0
}
2510
2511
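AssertHeld() and AssertReaderHeld() above are typically called from helper functions that require their caller to already hold the lock; the checks log FATAL when that expectation is violated. A minimal usage sketch, with an illustrative class and member names that are not part of Abseil:

#include "absl/synchronization/mutex.h"

class Counter {
 public:
  void Increment() {
    absl::MutexLock lock(&mu_);
    IncrementLocked();
  }

 private:
  // Precondition: caller holds mu_ exclusively; AssertHeld() enforces this.
  void IncrementLocked() {
    mu_.AssertHeld();
    ++value_;
  }

  absl::Mutex mu_;
  int value_ = 0;
};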
// -------------------------------- condition variables
2512
static const intptr_t kCvSpin = 0x0001L;   // spinlock protects waiter list
2513
static const intptr_t kCvEvent = 0x0002L;  // record events
2514
2515
static const intptr_t kCvLow = 0x0003L;  // low order bits of CV
2516
2517
// Hack to make constant values available to gdb pretty printer
2518
enum {
2519
  kGdbCvSpin = kCvSpin,
2520
  kGdbCvEvent = kCvEvent,
2521
  kGdbCvLow = kCvLow,
2522
};
2523
2524
static_assert(PerThreadSynch::kAlignment > kCvLow,
2525
              "PerThreadSynch::kAlignment must be greater than kCvLow");
2526
2527
0
void CondVar::EnableDebugLog(const char* name) {
2528
0
  SynchEvent* e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
2529
0
  e->log = true;
2530
0
  UnrefSynchEvent(e);
2531
0
}
2532
2533
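EnableDebugLog() associates a human-readable name with the condition variable so that the SYNCH_EV_* events posted elsewhere in this file are logged for it. A small sketch of enabling logging for a mutex/condition-variable pair; the object names are illustrative:

#include "absl/synchronization/mutex.h"

absl::Mutex queue_mu;
absl::CondVar queue_nonempty;

void EnableSynchLogging() {
  // Both Mutex and CondVar expose EnableDebugLog(); subsequent waits and
  // signals on these objects are recorded through PostSynchEvent().
  queue_mu.EnableDebugLog("queue_mu");
  queue_nonempty.EnableDebugLog("queue_nonempty");
}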
// Remove thread s from the list of waiters on this condition variable.
2534
0
void CondVar::Remove(PerThreadSynch* s) {
2535
0
  SchedulingGuard::ScopedDisable disable_rescheduling;
2536
0
  intptr_t v;
2537
0
  int c = 0;
2538
0
  for (v = cv_.load(std::memory_order_relaxed);;
2539
0
       v = cv_.load(std::memory_order_relaxed)) {
2540
0
    if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
2541
0
        cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
2542
0
                                    std::memory_order_relaxed)) {
2543
0
      PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
2544
0
      if (h != nullptr) {
2545
0
        PerThreadSynch* w = h;
2546
0
        while (w->next != s && w->next != h) {  // search for thread
2547
0
          w = w->next;
2548
0
        }
2549
0
        if (w->next == s) {  // found thread; remove it
2550
0
          w->next = s->next;
2551
0
          if (h == s) {
2552
0
            h = (w == s) ? nullptr : w;
2553
0
          }
2554
0
          s->next = nullptr;
2555
0
          s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2556
0
        }
2557
0
      }
2558
      // release spinlock
2559
0
      cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
2560
0
                std::memory_order_release);
2561
0
      return;
2562
0
    } else {
2563
      // try again after a delay
2564
0
      c = synchronization_internal::MutexDelay(c, GENTLE);
2565
0
    }
2566
0
  }
2567
0
}
2568
2569
// Queue thread waitp->thread on condition variable word cv_word using
2570
// wait parameters waitp.
2571
// We split this into a separate routine, rather than simply doing it as part
2572
// of WaitCommon().  If we were to queue ourselves on the condition variable
2573
// before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
2574
// the logging code, or via a Condition function) and might potentially attempt
2575
// to block this thread.  That would be a problem if the thread were already on
2576
// a condition variable waiter queue.  Thus, we use the waitp->cv_word to tell
2577
// the unlock code to call CondVarEnqueue() to queue the thread on the condition
2578
// variable queue just before the mutex is to be unlocked, and (most
2579
// importantly) after any call to an external routine that might re-enter the
2580
// mutex code.
2581
0
static void CondVarEnqueue(SynchWaitParams* waitp) {
2582
  // This thread might be transferred to the Mutex queue by Fer() when
2583
  // we are woken.  To make sure that is what happens, Enqueue() doesn't
2584
  // call CondVarEnqueue() again but instead uses its normal code.  We
2585
  // must do this before we queue ourselves so that cv_word will be null
2586
  // when seen by the dequeuer, who may wish immediately to requeue
2587
  // this thread on another queue.
2588
0
  std::atomic<intptr_t>* cv_word = waitp->cv_word;
2589
0
  waitp->cv_word = nullptr;
2590
2591
0
  intptr_t v = cv_word->load(std::memory_order_relaxed);
2592
0
  int c = 0;
2593
0
  while ((v & kCvSpin) != 0 ||  // acquire spinlock
2594
0
         !cv_word->compare_exchange_weak(v, v | kCvSpin,
2595
0
                                         std::memory_order_acquire,
2596
0
                                         std::memory_order_relaxed)) {
2597
0
    c = synchronization_internal::MutexDelay(c, GENTLE);
2598
0
    v = cv_word->load(std::memory_order_relaxed);
2599
0
  }
2600
0
  ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
2601
0
  waitp->thread->waitp = waitp;  // prepare ourselves for waiting
2602
0
  PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
2603
0
  if (h == nullptr) {  // add this thread to waiter list
2604
0
    waitp->thread->next = waitp->thread;
2605
0
  } else {
2606
0
    waitp->thread->next = h->next;
2607
0
    h->next = waitp->thread;
2608
0
  }
2609
0
  waitp->thread->state.store(PerThreadSynch::kQueued,
2610
0
                             std::memory_order_relaxed);
2611
0
  cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread),
2612
0
                 std::memory_order_release);
2613
0
}
2614
2615
0
bool CondVar::WaitCommon(Mutex* mutex, KernelTimeout t) {
2616
0
  bool rc = false;  // return value; true iff we timed out
2617
2618
0
  intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
2619
0
  Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
2620
0
  ABSL_TSAN_MUTEX_PRE_UNLOCK(mutex, TsanFlags(mutex_how));
2621
2622
  // maybe trace this call
2623
0
  intptr_t v = cv_.load(std::memory_order_relaxed);
2624
0
  cond_var_tracer("Wait", this);
2625
0
  if ((v & kCvEvent) != 0) {
2626
0
    PostSynchEvent(this, SYNCH_EV_WAIT);
2627
0
  }
2628
2629
  // Release mu and wait on condition variable.
2630
0
  SynchWaitParams waitp(mutex_how, nullptr, t, mutex,
2631
0
                        Synch_GetPerThreadAnnotated(mutex), &cv_);
2632
  // UnlockSlow() will call CondVarEnqueue() just before releasing the
2633
  // Mutex, thus queuing this thread on the condition variable.  See
2634
  // CondVarEnqueue() for the reasons.
2635
0
  mutex->UnlockSlow(&waitp);
2636
2637
  // wait for signal
2638
0
  while (waitp.thread->state.load(std::memory_order_acquire) ==
2639
0
         PerThreadSynch::kQueued) {
2640
0
    if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
2641
      // DecrementSynchSem returned due to timeout.
2642
      // Now we will either (1) remove ourselves from the wait list in Remove
2643
      // below, in which case Remove will set thread.state = kAvailable and
2644
      // we will not call DecrementSynchSem again; or (2) Signal/SignalAll
2645
      // has removed us concurrently and is calling Wakeup, which will set
2646
      // thread.state = kAvailable and post to the semaphore.
2647
      // It's important to reset the timeout for the case (2) because otherwise
2648
      // we can live-lock in this loop since DecrementSynchSem will always
2649
      // return immediately due to the timeout, while Signal/SignalAll
2650
      // may not have set thread.state = kAvailable yet (and may not be scheduled
2651
      // due to thread priorities or other scheduler artifacts).
2652
      // Note this could also be resolved if Signal/SignalAll were to set
2653
      // thread.state = kAvailable while holding the wait list spin lock.
2654
      // But this can't be easily done for SignalAll since it grabs the whole
2655
      // wait list with a single compare-exchange and does not really grab
2656
      // the spin lock.
2657
0
      t = KernelTimeout::Never();
2658
0
      this->Remove(waitp.thread);
2659
0
      rc = true;
2660
0
    }
2661
0
  }
2662
2663
0
  ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");
2664
0
  waitp.thread->waitp = nullptr;  // cleanup
2665
2666
  // maybe trace this call
2667
0
  cond_var_tracer("Unwait", this);
2668
0
  if ((v & kCvEvent) != 0) {
2669
0
    PostSynchEvent(this, SYNCH_EV_WAIT_RETURNING);
2670
0
  }
2671
2672
  // From a synchronization point of view, Wait is an unlock of the mutex
2673
  // followed by a lock of the mutex.  We've annotated the start of the unlock
2674
  // at the beginning of the function.  Now finish the unlock and annotate the
2675
  // lock of the mutex.  (Trans is effectively a lock.)
2676
0
  ABSL_TSAN_MUTEX_POST_UNLOCK(mutex, TsanFlags(mutex_how));
2677
0
  ABSL_TSAN_MUTEX_PRE_LOCK(mutex, TsanFlags(mutex_how));
2678
0
  mutex->Trans(mutex_how);  // Reacquire mutex
2679
0
  ABSL_TSAN_MUTEX_POST_LOCK(mutex, TsanFlags(mutex_how), 0);
2680
0
  return rc;
2681
0
}
2682
2683
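WaitCommon() above is reached through the public CondVar::Wait() and WaitWithTimeout()/WaitWithDeadline() entry points. A minimal producer/consumer sketch of the intended usage pattern (the queue and item names are illustrative); the wait sits in a loop because a waiter can be woken without its predicate holding:

#include <deque>

#include "absl/synchronization/mutex.h"

absl::Mutex mu;
absl::CondVar nonempty;
std::deque<int> items;  // guarded by mu

void Produce(int item) {
  absl::MutexLock lock(&mu);
  items.push_back(item);
  nonempty.Signal();  // wakes at most one waiter (via Fer() above)
}

int Consume() {
  absl::MutexLock lock(&mu);
  while (items.empty()) {
    nonempty.Wait(&mu);  // releases mu, blocks, then reacquires it via Trans()
  }
  int item = items.front();
  items.pop_front();
  return item;
}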
0
void CondVar::Signal() {
2684
0
  SchedulingGuard::ScopedDisable disable_rescheduling;
2685
0
  ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
2686
0
  intptr_t v;
2687
0
  int c = 0;
2688
0
  for (v = cv_.load(std::memory_order_relaxed); v != 0;
2689
0
       v = cv_.load(std::memory_order_relaxed)) {
2690
0
    if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
2691
0
        cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
2692
0
                                    std::memory_order_relaxed)) {
2693
0
      PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
2694
0
      PerThreadSynch* w = nullptr;
2695
0
      if (h != nullptr) {  // remove first waiter
2696
0
        w = h->next;
2697
0
        if (w == h) {
2698
0
          h = nullptr;
2699
0
        } else {
2700
0
          h->next = w->next;
2701
0
        }
2702
0
      }
2703
      // release spinlock
2704
0
      cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
2705
0
                std::memory_order_release);
2706
0
      if (w != nullptr) {
2707
0
        w->waitp->cvmu->Fer(w);  // wake waiter, if there was one
2708
0
        cond_var_tracer("Signal wakeup", this);
2709
0
      }
2710
0
      if ((v & kCvEvent) != 0) {
2711
0
        PostSynchEvent(this, SYNCH_EV_SIGNAL);
2712
0
      }
2713
0
      ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2714
0
      return;
2715
0
    } else {
2716
0
      c = synchronization_internal::MutexDelay(c, GENTLE);
2717
0
    }
2718
0
  }
2719
0
  ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2720
0
}
2721
2722
0
void CondVar::SignalAll() {
2723
0
  ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
2724
0
  intptr_t v;
2725
0
  int c = 0;
2726
0
  for (v = cv_.load(std::memory_order_relaxed); v != 0;
2727
0
       v = cv_.load(std::memory_order_relaxed)) {
2728
    // Empty the list if the spinlock is free.
2729
    // We do this by simply setting the list to empty using
2730
    // compare-and-swap.  We then have the entire list in our hands,
2731
    // which cannot be changing since we grabbed it while no one
2732
    // held the lock.
2733
0
    if ((v & kCvSpin) == 0 &&
2734
0
        cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
2735
0
                                    std::memory_order_relaxed)) {
2736
0
      PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
2737
0
      if (h != nullptr) {
2738
0
        PerThreadSynch* w;
2739
0
        PerThreadSynch* n = h->next;
2740
0
        do {  // for every thread, wake it up
2741
0
          w = n;
2742
0
          n = n->next;
2743
0
          w->waitp->cvmu->Fer(w);
2744
0
        } while (w != h);
2745
0
        cond_var_tracer("SignalAll wakeup", this);
2746
0
      }
2747
0
      if ((v & kCvEvent) != 0) {
2748
0
        PostSynchEvent(this, SYNCH_EV_SIGNALALL);
2749
0
      }
2750
0
      ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2751
0
      return;
2752
0
    } else {
2753
      // try again after a delay
2754
0
      c = synchronization_internal::MutexDelay(c, GENTLE);
2755
0
    }
2756
0
  }
2757
0
  ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2758
0
}
2759
2760
0
void ReleasableMutexLock::Release() {
2761
0
  ABSL_RAW_CHECK(this->mu_ != nullptr,
2762
0
                 "ReleasableMutexLock::Release may only be called once");
2763
0
  this->mu_->unlock();
2764
0
  this->mu_ = nullptr;
2765
0
}
2766
2767
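Release() above may be called at most once per ReleasableMutexLock, as the ABSL_RAW_CHECK enforces; the destructor unlocks only if Release() was not called. A short sketch of the early-release pattern (the shared state and helper names are illustrative):

#include "absl/synchronization/mutex.h"

absl::Mutex mu;
int shared_state = 0;  // guarded by mu

void Update(bool do_slow_work) {
  absl::ReleasableMutexLock lock(&mu);
  ++shared_state;
  if (do_slow_work) {
    lock.Release();  // drop mu early; the destructor will not unlock again
    // ... slow work that must not touch shared_state ...
    return;
  }
  // mu is released by the destructor here.
}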
#ifdef ABSL_HAVE_THREAD_SANITIZER
2768
extern "C" void __tsan_read1(void* addr);
2769
#else
2770
#define __tsan_read1(addr)  // do nothing if TSan not enabled
2771
#endif
2772
2773
// A function that just returns its argument, dereferenced
2774
0
static bool Dereference(void* arg) {
2775
  // ThreadSanitizer does not instrument this file for memory accesses.
2776
  // This function dereferences a user variable that can participate
2777
  // in a data race, so we need to manually tell TSan about this memory access.
2778
0
  __tsan_read1(arg);
2779
0
  return *(static_cast<bool*>(arg));
2780
0
}
2781
2782
ABSL_CONST_INIT const Condition Condition::kTrue;
2783
2784
Condition::Condition(bool (*func)(void*), void* arg)
2785
0
    : eval_(&CallVoidPtrFunction), arg_(arg) {
2786
0
  static_assert(sizeof(&func) <= sizeof(callback_),
2787
0
                "An overlarge function pointer passed to Condition.");
2788
0
  StoreCallback(func);
2789
0
}
2790
2791
0
bool Condition::CallVoidPtrFunction(const Condition* c) {
2792
0
  using FunctionPointer = bool (*)(void*);
2793
0
  FunctionPointer function_pointer;
2794
0
  std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer));
2795
0
  return (*function_pointer)(c->arg_);
2796
0
}
2797
2798
Condition::Condition(const bool* cond)
2799
0
    : eval_(CallVoidPtrFunction),
2800
      // const_cast is safe since Dereference does not modify arg
2801
0
      arg_(const_cast<bool*>(cond)) {
2802
0
  using FunctionPointer = bool (*)(void*);
2803
0
  const FunctionPointer dereference = Dereference;
2804
0
  StoreCallback(dereference);
2805
0
}
2806
2807
0
bool Condition::Eval() const { return (*this->eval_)(this); }
2808
2809
0
bool Condition::GuaranteedEqual(const Condition* a, const Condition* b) {
2810
0
  if (a == nullptr || b == nullptr) {
2811
0
    return a == b;
2812
0
  }
2813
  // Check equality of the representative fields.
2814
0
  return a->eval_ == b->eval_ && a->arg_ == b->arg_ &&
2815
0
         !memcmp(a->callback_, b->callback_, sizeof(a->callback_));
2816
0
}
2817
2818
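Condition objects built from the constructors above are normally consumed through Mutex::Await() and Mutex::LockWhen(), which evaluate Eval() while the mutex is held. A hedged sketch using both the bool* form (which goes through Dereference()) and the function-pointer form; the variables and functions are illustrative:

#include "absl/synchronization/mutex.h"

absl::Mutex mu;
bool ready = false;  // guarded by mu
int pending = 0;     // guarded by mu

bool NothingPending(void* arg) { return *static_cast<int*>(arg) == 0; }

void WaitUntilReady() {
  absl::MutexLock lock(&mu);
  // Blocks until `ready` becomes true, re-evaluating the condition whenever
  // the mutex is released by another thread.
  mu.Await(absl::Condition(&ready));
}

void DrainThenProceed() {
  // LockWhen() acquires mu only once the condition holds.
  mu.LockWhen(absl::Condition(NothingPending, &pending));
  // mu is held here and pending == 0.
  mu.Unlock();
}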
ABSL_NAMESPACE_END
2819
}  // namespace absl