Coverage Report

Created: 2024-02-25 06:31

/proc/self/cwd/external/com_google_absl/absl/synchronization/mutex.cc
Line
Count
Source
1
// Copyright 2017 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
15
#include "absl/synchronization/mutex.h"
16
17
#ifdef _WIN32
18
#include <windows.h>
19
#ifdef ERROR
20
#undef ERROR
21
#endif
22
#else
23
#include <fcntl.h>
24
#include <pthread.h>
25
#include <sched.h>
26
#include <sys/time.h>
27
#endif
28
29
#include <assert.h>
30
#include <errno.h>
31
#include <stdio.h>
32
#include <stdlib.h>
33
#include <string.h>
34
#include <time.h>
35
36
#include <algorithm>
37
#include <atomic>
38
#include <cinttypes>
39
#include <cstddef>
40
#include <cstring>
41
#include <iterator>
42
#include <thread>  // NOLINT(build/c++11)
43
44
#include "absl/base/attributes.h"
45
#include "absl/base/call_once.h"
46
#include "absl/base/config.h"
47
#include "absl/base/dynamic_annotations.h"
48
#include "absl/base/internal/atomic_hook.h"
49
#include "absl/base/internal/cycleclock.h"
50
#include "absl/base/internal/hide_ptr.h"
51
#include "absl/base/internal/low_level_alloc.h"
52
#include "absl/base/internal/raw_logging.h"
53
#include "absl/base/internal/spinlock.h"
54
#include "absl/base/internal/sysinfo.h"
55
#include "absl/base/internal/thread_identity.h"
56
#include "absl/base/internal/tsan_mutex_interface.h"
57
#include "absl/base/optimization.h"
58
#include "absl/base/port.h"
59
#include "absl/debugging/stacktrace.h"
60
#include "absl/debugging/symbolize.h"
61
#include "absl/synchronization/internal/graphcycles.h"
62
#include "absl/synchronization/internal/per_thread_sem.h"
63
#include "absl/time/time.h"
64
65
using absl::base_internal::CurrentThreadIdentityIfPresent;
66
using absl::base_internal::PerThreadSynch;
67
using absl::base_internal::SchedulingGuard;
68
using absl::base_internal::ThreadIdentity;
69
using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
70
using absl::synchronization_internal::GraphCycles;
71
using absl::synchronization_internal::GraphId;
72
using absl::synchronization_internal::InvalidGraphId;
73
using absl::synchronization_internal::KernelTimeout;
74
using absl::synchronization_internal::PerThreadSem;
75
76
extern "C" {
77
0
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
78
0
  std::this_thread::yield();
79
0
}
80
}  // extern "C"
81
82
namespace absl {
83
ABSL_NAMESPACE_BEGIN
84
85
namespace {
86
87
#if defined(ABSL_HAVE_THREAD_SANITIZER)
88
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
89
#else
90
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
91
#endif
92
93
ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
94
    kDeadlockDetectionDefault);
95
ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
96
97
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
98
absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
99
    submit_profile_data;
100
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
101
    const char *msg, const void *obj, int64_t wait_cycles)>
102
    mutex_tracer;
103
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
104
    absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
105
        cond_var_tracer;
106
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<
107
    bool (*)(const void *pc, char *out, int out_size)>
108
    symbolizer(absl::Symbolize);
109
110
}  // namespace
111
112
static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
113
                                          bool locking, bool trylock,
114
                                          bool read_lock);
115
116
0
void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
117
0
  submit_profile_data.Store(fn);
118
0
}
119
120
void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
121
0
                                    int64_t wait_cycles)) {
122
0
  mutex_tracer.Store(fn);
123
0
}
124
125
0
void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
126
0
  cond_var_tracer.Store(fn);
127
0
}
128
129
0
void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
130
0
  symbolizer.Store(fn);
131
0
}
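// Editorial sketch, not part of mutex.cc: one way a client binary might use
// the hook-registration functions above.  The callback runs inside Mutex
// internals, so it must be cheap and must not itself acquire a Mutex.  The
// counter and installer below are illustrative assumptions, not Abseil APIs.
namespace profiling_example {
static std::atomic<int64_t> total_wait_cycles{0};

static void ExampleMutexProfiler(int64_t wait_cycles) {
  // Called once per contended acquisition with the time the acquirer spent
  // waiting, measured in CycleClock cycles (see contention_start_cycles
  // below).
  total_wait_cycles.fetch_add(wait_cycles, std::memory_order_relaxed);
}

static void InstallExampleMutexProfiler() {
  RegisterMutexProfiler(ExampleMutexProfiler);  // stored in the AtomicHook above
}
}  // namespace profiling_example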
132
133
namespace {
134
// Represents the strategy for spin and yield.
135
// See the comment in GetMutexGlobals() for more information.
136
enum DelayMode { AGGRESSIVE, GENTLE };
137
138
struct ABSL_CACHELINE_ALIGNED MutexGlobals {
139
  absl::once_flag once;
140
  int spinloop_iterations = 0;
141
  int32_t mutex_sleep_spins[2] = {};
142
  absl::Duration mutex_sleep_time;
143
};
144
145
0
absl::Duration MeasureTimeToYield() {
146
0
  absl::Time before = absl::Now();
147
0
  ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
148
0
  return absl::Now() - before;
149
0
}
150
151
0
const MutexGlobals &GetMutexGlobals() {
152
0
  ABSL_CONST_INIT static MutexGlobals data;
153
0
  absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
154
0
    const int num_cpus = absl::base_internal::NumCPUs();
155
0
    data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
156
    // If this a uniprocessor, only yield/sleep.
157
    // Real-time threads are often unable to yield, so the sleep time needs
158
    // to be long enough to keep the calling thread asleep until scheduling
159
    // happens.
160
    // If this is multiprocessor, allow spinning. If the mode is
161
    // aggressive then spin many times before yielding.  If the mode is
162
    // gentle then spin only a few times before yielding.  Aggressive spinning
163
    // is used to ensure that an Unlock() call, which must get the spin lock
164
    // for any thread to make progress gets it without undue delay.
165
0
    if (num_cpus > 1) {
166
0
      data.mutex_sleep_spins[AGGRESSIVE] = 5000;
167
0
      data.mutex_sleep_spins[GENTLE] = 250;
168
0
      data.mutex_sleep_time = absl::Microseconds(10);
169
0
    } else {
170
0
      data.mutex_sleep_spins[AGGRESSIVE] = 0;
171
0
      data.mutex_sleep_spins[GENTLE] = 0;
172
0
      data.mutex_sleep_time = MeasureTimeToYield() * 5;
173
0
      data.mutex_sleep_time =
174
0
          std::min(data.mutex_sleep_time, absl::Milliseconds(1));
175
0
      data.mutex_sleep_time =
176
0
          std::max(data.mutex_sleep_time, absl::Microseconds(10));
177
0
    }
178
0
  });
179
0
  return data;
180
0
}
181
}  // namespace
182
183
namespace synchronization_internal {
184
// Returns the Mutex delay on iteration `c` depending on the given `mode`.
185
// The returned value should be used as `c` for the next call to `MutexDelay`.
186
0
int MutexDelay(int32_t c, int mode) {
187
0
  const int32_t limit = GetMutexGlobals().mutex_sleep_spins[mode];
188
0
  const absl::Duration sleep_time = GetMutexGlobals().mutex_sleep_time;
189
0
  if (c < limit) {
190
    // Spin.
191
0
    c++;
192
0
  } else {
193
0
    SchedulingGuard::ScopedEnable enable_rescheduling;
194
0
    ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
195
0
    if (c == limit) {
196
      // Yield once.
197
0
      ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
198
0
      c++;
199
0
    } else {
200
      // Then wait.
201
0
      absl::SleepFor(sleep_time);
202
0
      c = 0;
203
0
    }
204
0
    ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
205
0
  }
206
0
  return c;
207
0
}
208
}  // namespace synchronization_internal
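// Editorial sketch, not part of mutex.cc: the calling pattern MutexDelay() is
// designed for.  Real callers (e.g. Mutex::Block() and the LockSlow paths
// further down) feed the returned counter back in, so a contended thread spins
// a bounded number of times, then yields once, then sleeps, and repeats.  The
// flag and the two functions here are illustrative assumptions only.
static std::atomic<bool> example_busy_flag{false};

static bool ExampleTryTransition() {
  bool expected = false;
  return example_busy_flag.compare_exchange_strong(expected, true,
                                                   std::memory_order_acquire);
}

static void ExampleAcquireWithBackoff() {
  int c = 0;
  while (!ExampleTryTransition()) {
    // c < limit: spin again; c == limit: yield once; otherwise sleep and
    // start over with c == 0.
    c = synchronization_internal::MutexDelay(c, GENTLE);
  }
}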
209
210
// --------------------------Generic atomic ops
211
// Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
212
// "*pv | bits" if necessary.  Wait until (*pv & wait_until_clear)==0
213
// before making any change.
214
// This is used to set flags in mutex and condition variable words.
215
static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
216
0
                          intptr_t wait_until_clear) {
217
0
  intptr_t v;
218
0
  do {
219
0
    v = pv->load(std::memory_order_relaxed);
220
0
  } while ((v & bits) != bits &&
221
0
           ((v & wait_until_clear) != 0 ||
222
0
            !pv->compare_exchange_weak(v, v | bits,
223
0
                                       std::memory_order_release,
224
0
                                       std::memory_order_relaxed)));
225
0
}
226
227
// Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
228
// "*pv & ~bits" if necessary.  Wait until (*pv & wait_until_clear)==0
229
// before making any change.
230
// This is used to unset flags in mutex and condition variable words.
231
static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
232
0
                            intptr_t wait_until_clear) {
233
0
  intptr_t v;
234
0
  do {
235
0
    v = pv->load(std::memory_order_relaxed);
236
0
  } while ((v & bits) != 0 &&
237
0
           ((v & wait_until_clear) != 0 ||
238
0
            !pv->compare_exchange_weak(v, v & ~bits,
239
0
                                       std::memory_order_release,
240
0
                                       std::memory_order_relaxed)));
241
0
}
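// Editorial sketch, not part of mutex.cc: the calling convention for the two
// helpers above, as used by EnsureSynchEvent() and ForgetSynchEvent() further
// down.  The literal bit values mirror the kMuEvent and kMuSpin constants
// defined later in this file; the function is illustrative only and is not
// called anywhere.
static void ExampleToggleEventBit(std::atomic<intptr_t> *word) {
  constexpr intptr_t kEventBit = 0x0010L;  // same value as kMuEvent below
  constexpr intptr_t kSpinBit = 0x0040L;   // same value as kMuSpin below
  AtomicSetBits(word, kEventBit, kSpinBit);    // set once the spinlock bit is clear
  AtomicClearBits(word, kEventBit, kSpinBit);  // later, clear it under the same rule
}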
242
243
//------------------------------------------------------------------
244
245
// Data for doing deadlock detection.
246
ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
247
    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
248
249
// Graph used to detect deadlocks.
250
ABSL_CONST_INIT static GraphCycles *deadlock_graph
251
    ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
252
253
//------------------------------------------------------------------
254
// An event mechanism for debugging mutex use.
255
// It also allows mutexes to be given names for those who can't handle
256
// addresses, and instead like to give their data structures names like
257
// "Henry", "Fido", or "Rupert IV, King of Yondavia".
258
259
namespace {  // to prevent name pollution
260
enum {       // Mutex and CondVar events passed as "ev" to PostSynchEvent
261
             // Mutex events
262
  SYNCH_EV_TRYLOCK_SUCCESS,
263
  SYNCH_EV_TRYLOCK_FAILED,
264
  SYNCH_EV_READERTRYLOCK_SUCCESS,
265
  SYNCH_EV_READERTRYLOCK_FAILED,
266
  SYNCH_EV_LOCK,
267
  SYNCH_EV_LOCK_RETURNING,
268
  SYNCH_EV_READERLOCK,
269
  SYNCH_EV_READERLOCK_RETURNING,
270
  SYNCH_EV_UNLOCK,
271
  SYNCH_EV_READERUNLOCK,
272
273
  // CondVar events
274
  SYNCH_EV_WAIT,
275
  SYNCH_EV_WAIT_RETURNING,
276
  SYNCH_EV_SIGNAL,
277
  SYNCH_EV_SIGNALALL,
278
};
279
280
enum {                    // Event flags
281
  SYNCH_F_R = 0x01,       // reader event
282
  SYNCH_F_LCK = 0x02,     // PostSynchEvent called with mutex held
283
  SYNCH_F_TRY = 0x04,     // TryLock or ReaderTryLock
284
  SYNCH_F_UNLOCK = 0x08,  // Unlock or ReaderUnlock
285
286
  SYNCH_F_LCK_W = SYNCH_F_LCK,
287
  SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
288
};
289
}  // anonymous namespace
290
291
// Properties of the events.
292
static const struct {
293
  int flags;
294
  const char *msg;
295
} event_properties[] = {
296
    {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
297
    {0, "TryLock failed "},
298
    {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
299
    {0, "ReaderTryLock failed "},
300
    {0, "Lock blocking "},
301
    {SYNCH_F_LCK_W, "Lock returning "},
302
    {0, "ReaderLock blocking "},
303
    {SYNCH_F_LCK_R, "ReaderLock returning "},
304
    {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
305
    {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
306
    {0, "Wait on "},
307
    {0, "Wait unblocked "},
308
    {0, "Signal on "},
309
    {0, "SignalAll on "},
310
};
311
312
ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
313
    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
314
315
// Hash table size; should be prime > 2.
316
// Can't be too small, as it's used for deadlock detection information.
317
static constexpr uint32_t kNSynchEvent = 1031;
318
319
static struct SynchEvent {     // this is a trivial hash table for the events
320
  // struct is freed when refcount reaches 0
321
  int refcount ABSL_GUARDED_BY(synch_event_mu);
322
323
  // buckets have linear, 0-terminated chains
324
  SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);
325
326
  // Constant after initialization
327
  uintptr_t masked_addr;  // object at this address is called "name"
328
329
  // No explicit synchronization used.  Instead we assume that the
330
  // client who enables/disables invariants/logging on a Mutex does so
331
  // while the Mutex is not being concurrently accessed by others.
332
  void (*invariant)(void *arg);  // called on each event
333
  void *arg;            // first arg to (*invariant)()
334
  bool log;             // logging turned on
335
336
  // Constant after initialization
337
  char name[1];         // actually longer---NUL-terminated string
338
} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
339
340
// Ensure that the object at "addr" has a SynchEvent struct associated with it,
341
// set "bits" in the word there (waiting until lockbit is clear before doing
342
// so), and return a refcounted reference that will remain valid until
343
// UnrefSynchEvent() is called.  If a new SynchEvent is allocated,
344
// the string name is copied into it.
345
// When used with a mutex, the caller should also ensure that kMuEvent
346
// is set in the mutex word, and similarly for condition variables and kCVEvent.
347
static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
348
                                    const char *name, intptr_t bits,
349
0
                                    intptr_t lockbit) {
350
0
  uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
351
0
  SynchEvent *e;
352
  // first look for an existing SynchEvent struct.
353
0
  synch_event_mu.Lock();
354
0
  for (e = synch_event[h];
355
0
       e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
356
0
       e = e->next) {
357
0
  }
358
0
  if (e == nullptr) {  // no SynchEvent struct found; make one.
359
0
    if (name == nullptr) {
360
0
      name = "";
361
0
    }
362
0
    size_t l = strlen(name);
363
0
    e = reinterpret_cast<SynchEvent *>(
364
0
        base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
365
0
    e->refcount = 2;    // one for return value, one for linked list
366
0
    e->masked_addr = base_internal::HidePtr(addr);
367
0
    e->invariant = nullptr;
368
0
    e->arg = nullptr;
369
0
    e->log = false;
370
0
    strcpy(e->name, name);  // NOLINT(runtime/printf)
371
0
    e->next = synch_event[h];
372
0
    AtomicSetBits(addr, bits, lockbit);
373
0
    synch_event[h] = e;
374
0
  } else {
375
0
    e->refcount++;      // for return value
376
0
  }
377
0
  synch_event_mu.Unlock();
378
0
  return e;
379
0
}
380
381
// Deallocate the SynchEvent *e, whose refcount has fallen to zero.
382
0
static void DeleteSynchEvent(SynchEvent *e) {
383
0
  base_internal::LowLevelAlloc::Free(e);
384
0
}
385
386
// Decrement the reference count of *e, or do nothing if e==null.
387
0
static void UnrefSynchEvent(SynchEvent *e) {
388
0
  if (e != nullptr) {
389
0
    synch_event_mu.Lock();
390
0
    bool del = (--(e->refcount) == 0);
391
0
    synch_event_mu.Unlock();
392
0
    if (del) {
393
0
      DeleteSynchEvent(e);
394
0
    }
395
0
  }
396
0
}
397
398
// Forget the mapping from the object (Mutex or CondVar) at address addr
399
// to SynchEvent object, and clear "bits" in its word (waiting until lockbit
400
// is clear before doing so).
401
static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
402
0
                             intptr_t lockbit) {
403
0
  uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
404
0
  SynchEvent **pe;
405
0
  SynchEvent *e;
406
0
  synch_event_mu.Lock();
407
0
  for (pe = &synch_event[h];
408
0
       (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
409
0
       pe = &e->next) {
410
0
  }
411
0
  bool del = false;
412
0
  if (e != nullptr) {
413
0
    *pe = e->next;
414
0
    del = (--(e->refcount) == 0);
415
0
  }
416
0
  AtomicClearBits(addr, bits, lockbit);
417
0
  synch_event_mu.Unlock();
418
0
  if (del) {
419
0
    DeleteSynchEvent(e);
420
0
  }
421
0
}
422
423
// Return a refcounted reference to the SynchEvent of the object at address
424
// "addr", if any.  The pointer returned is valid until the UnrefSynchEvent() is
425
// called.
426
0
static SynchEvent *GetSynchEvent(const void *addr) {
427
0
  uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
428
0
  SynchEvent *e;
429
0
  synch_event_mu.Lock();
430
0
  for (e = synch_event[h];
431
0
       e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
432
0
       e = e->next) {
433
0
  }
434
0
  if (e != nullptr) {
435
0
    e->refcount++;
436
0
  }
437
0
  synch_event_mu.Unlock();
438
0
  return e;
439
0
}
440
441
// Called when an event "ev" occurs on a Mutex or CondVar "obj"
442
// if event recording is on
443
0
static void PostSynchEvent(void *obj, int ev) {
444
0
  SynchEvent *e = GetSynchEvent(obj);
445
  // logging is on if event recording is on and either there's no event struct,
446
  // or it explicitly says to log
447
0
  if (e == nullptr || e->log) {
448
0
    void *pcs[40];
449
0
    int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
450
    // A buffer with enough space for the ASCII for all the PCs, even on a
451
    // 64-bit machine.
452
0
    char buffer[ABSL_ARRAYSIZE(pcs) * 24];
453
0
    int pos = snprintf(buffer, sizeof (buffer), " @");
454
0
    for (int i = 0; i != n; i++) {
455
0
      int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos),
456
0
                       " %p", pcs[i]);
457
0
      if (b < 0 ||
458
0
          static_cast<size_t>(b) >= sizeof(buffer) - static_cast<size_t>(pos)) {
459
0
        break;
460
0
      }
461
0
      pos += b;
462
0
    }
463
0
    ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
464
0
                 (e == nullptr ? "" : e->name), buffer);
465
0
  }
466
0
  const int flags = event_properties[ev].flags;
467
0
  if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
468
    // Calling the invariant as is causes problems under ThreadSanitizer.
469
    // We are currently inside of Mutex Lock/Unlock and are ignoring all
470
    // memory accesses and synchronization. If the invariant transitively
471
    // synchronizes something else and we ignore the synchronization, we will
472
    // get false positive race reports later.
473
    // Reuse EvalConditionAnnotated to properly call into user code.
474
0
    struct local {
475
0
      static bool pred(SynchEvent *ev) {
476
0
        (*ev->invariant)(ev->arg);
477
0
        return false;
478
0
      }
479
0
    };
480
0
    Condition cond(&local::pred, e);
481
0
    Mutex *mu = static_cast<Mutex *>(obj);
482
0
    const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
483
0
    const bool trylock = (flags & SYNCH_F_TRY) != 0;
484
0
    const bool read_lock = (flags & SYNCH_F_R) != 0;
485
0
    EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
486
0
  }
487
0
  UnrefSynchEvent(e);
488
0
}
489
490
//------------------------------------------------------------------
491
492
// The SynchWaitParams struct encapsulates the way in which a thread is waiting:
493
// whether it has a timeout, the condition, exclusive/shared, and whether a
494
// condition variable wait has an associated Mutex (as opposed to another
495
// type of lock).  It also points to the PerThreadSynch struct of its thread.
496
// cv_word tells Enqueue() to enqueue on a CondVar using CondVarEnqueue().
497
//
498
// This structure is held on the stack rather than directly in
499
// PerThreadSynch because a thread can be waiting on multiple Mutexes if,
500
// while waiting on one Mutex, the implementation calls a client callback
501
// (such as a Condition function) that acquires another Mutex. We don't
502
// strictly need to allow this, but programmers become confused if we do not
503
// allow them to use functions such as LOG() within Condition functions.  The
504
// PerThreadSynch struct points at the most recent SynchWaitParams struct when
505
// the thread is on a Mutex's waiter queue.
506
struct SynchWaitParams {
507
  SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
508
                  KernelTimeout timeout_arg, Mutex *cvmu_arg,
509
                  PerThreadSynch *thread_arg,
510
                  std::atomic<intptr_t> *cv_word_arg)
511
      : how(how_arg),
512
        cond(cond_arg),
513
        timeout(timeout_arg),
514
        cvmu(cvmu_arg),
515
        thread(thread_arg),
516
        cv_word(cv_word_arg),
517
        contention_start_cycles(base_internal::CycleClock::Now()),
518
0
        should_submit_contention_data(false) {}
519
520
  const Mutex::MuHow how;  // How this thread needs to wait.
521
  const Condition *cond;  // The condition that this thread is waiting for.
522
                          // In Mutex, this field is set to zero if a timeout
523
                          // expires.
524
  KernelTimeout timeout;  // timeout expiry---absolute time
525
                          // In Mutex, this field is set to zero if a timeout
526
                          // expires.
527
  Mutex *const cvmu;      // used for transfer from cond var to mutex
528
  PerThreadSynch *const thread;  // thread that is waiting
529
530
  // If not null, thread should be enqueued on the CondVar whose state
531
  // word is cv_word instead of queueing normally on the Mutex.
532
  std::atomic<intptr_t> *cv_word;
533
534
  int64_t contention_start_cycles;  // Time (in cycles) when this thread started
535
                                    // to contend for the mutex.
536
  bool should_submit_contention_data;
537
};
538
539
struct SynchLocksHeld {
540
  int n;              // number of valid entries in locks[]
541
  bool overflow;      // true iff we overflowed the array at some point
542
  struct {
543
    Mutex *mu;        // lock acquired
544
    int32_t count;      // times acquired
545
    GraphId id;       // deadlock_graph id of acquired lock
546
  } locks[40];
547
  // If a thread overfills the array during deadlock detection, we
548
  // continue, discarding information as needed.  If no overflow has
549
  // taken place, we can provide more error checking, such as
550
  // detecting when a thread releases a lock it does not hold.
551
};
552
553
// A sentinel value in lists that is not 0.
554
// A 0 value is used to mean "not on a list".
555
static PerThreadSynch *const kPerThreadSynchNull =
556
  reinterpret_cast<PerThreadSynch *>(1);
557
558
0
static SynchLocksHeld *LocksHeldAlloc() {
559
0
  SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
560
0
      base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
561
0
  ret->n = 0;
562
0
  ret->overflow = false;
563
0
  return ret;
564
0
}
565
566
// Return the PerThreadSynch-struct for this thread.
567
0
static PerThreadSynch *Synch_GetPerThread() {
568
0
  ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
569
0
  return &identity->per_thread_synch;
570
0
}
571
572
0
static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
573
0
  if (mu) {
574
0
    ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
575
0
  }
576
0
  PerThreadSynch *w = Synch_GetPerThread();
577
0
  if (mu) {
578
0
    ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
579
0
  }
580
0
  return w;
581
0
}
582
583
0
static SynchLocksHeld *Synch_GetAllLocks() {
584
0
  PerThreadSynch *s = Synch_GetPerThread();
585
0
  if (s->all_locks == nullptr) {
586
0
    s->all_locks = LocksHeldAlloc();  // Freed by ReclaimThreadIdentity.
587
0
  }
588
0
  return s->all_locks;
589
0
}
590
591
// Post on "w"'s associated PerThreadSem.
592
0
void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
593
0
  if (mu) {
594
0
    ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
595
    // We miss synchronization around passing PerThreadSynch between threads
596
    // since it happens inside of the Mutex code, so we need to ignore all
597
    // accesses to the object.
598
0
    ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
599
0
    PerThreadSem::Post(w->thread_identity());
600
0
    ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
601
0
    ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
602
0
  } else {
603
0
    PerThreadSem::Post(w->thread_identity());
604
0
  }
605
0
}
606
607
// Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
608
0
bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
609
0
  if (mu) {
610
0
    ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
611
0
  }
612
0
  assert(w == Synch_GetPerThread());
613
0
  static_cast<void>(w);
614
0
  bool res = PerThreadSem::Wait(t);
615
0
  if (mu) {
616
0
    ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
617
0
  }
618
0
  return res;
619
0
}
620
621
// We're in a fatal signal handler that hopes to use Mutex and to get
622
// lucky by not deadlocking.  We try to improve its chances of success
623
// by effectively disabling some of the consistency checks.  This will
624
// prevent certain ABSL_RAW_CHECK() statements from being triggered when
625
// re-rentry is detected.  The ABSL_RAW_CHECK() statements are those in the
626
// Mutex code checking that the "waitp" field has not been reused.
627
0
void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
628
  // Fix the per-thread state only if it exists.
629
0
  ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
630
0
  if (identity != nullptr) {
631
0
    identity->per_thread_synch.suppress_fatal_errors = true;
632
0
  }
633
  // Don't do deadlock detection when we are already failing.
634
0
  synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,
635
0
                                 std::memory_order_release);
636
0
}
637
638
// --------------------------time support
639
640
// Return the current time plus the timeout.  Use the same clock as
641
// PerThreadSem::Wait() for consistency.  Unfortunately, we don't have
642
// such a choice when a deadline is given directly.
643
0
static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
644
0
#ifndef _WIN32
645
0
  struct timeval tv;
646
0
  gettimeofday(&tv, nullptr);
647
0
  return absl::TimeFromTimeval(tv) + timeout;
648
#else
649
  return absl::Now() + timeout;
650
#endif
651
0
}
652
653
// --------------------------Mutexes
654
655
// In the layout below, the msb of the bottom byte is currently unused.  Also,
656
// the following constraints were considered in choosing the layout:
657
//  o Both the debug allocator's "uninitialized" and "freed" patterns (0xab and
658
//    0xcd) are illegal: reader and writer lock both held.
659
//  o kMuWriter and kMuEvent should exceed kMuDesig and kMuWait, to enable the
660
//    bit-twiddling trick in Mutex::Unlock().
661
//  o kMuWriter / kMuReader == kMuWrWait / kMuWait,
662
//    to enable the bit-twiddling trick in CheckForMutexCorruption().
663
static const intptr_t kMuReader      = 0x0001L;  // a reader holds the lock
664
static const intptr_t kMuDesig       = 0x0002L;  // there's a designated waker
665
static const intptr_t kMuWait        = 0x0004L;  // threads are waiting
666
static const intptr_t kMuWriter      = 0x0008L;  // a writer holds the lock
667
static const intptr_t kMuEvent       = 0x0010L;  // record this mutex's events
668
// INVARIANT1:  there's a thread that was blocked on the mutex, is
669
// no longer blocked, but has not yet acquired the mutex.  If there's a
670
// designated waker, all threads can avoid taking the slow path in
671
// unlock because the designated waker will subsequently acquire
672
// the lock and wake someone.  To maintain INVARIANT1 the bit is
673
// set when a thread is unblocked (INV1a), and threads that were
674
// unblocked reset the bit when they either acquire or re-block
675
// (INV1b).
676
static const intptr_t kMuWrWait      = 0x0020L;  // runnable writer is waiting
677
                                                 // for a reader
678
static const intptr_t kMuSpin        = 0x0040L;  // spinlock protects wait list
679
static const intptr_t kMuLow         = 0x00ffL;  // mask all mutex bits
680
static const intptr_t kMuHigh        = ~kMuLow;  // mask pointer/reader count
681
682
// Hack to make constant values available to gdb pretty printer
683
enum {
684
  kGdbMuSpin = kMuSpin,
685
  kGdbMuEvent = kMuEvent,
686
  kGdbMuWait = kMuWait,
687
  kGdbMuWriter = kMuWriter,
688
  kGdbMuDesig = kMuDesig,
689
  kGdbMuWrWait = kMuWrWait,
690
  kGdbMuReader = kMuReader,
691
  kGdbMuLow = kMuLow,
692
};
693
694
// kMuWrWait implies kMuWait.
695
// kMuReader and kMuWriter are mutually exclusive.
696
// If kMuReader is zero, there are no readers.
697
// Otherwise, if kMuWait is zero, the high order bits contain a count of the
698
// number of readers.  Otherwise, the reader count is held in
699
// PerThreadSynch::readers of the most recently queued waiter, again in the
700
// bits above kMuLow.
701
static const intptr_t kMuOne = 0x0100;  // a count of one reader
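// Editorial sketch, not part of mutex.cc: reading the word layout documented
// above.  With kMuWait clear, the bits above kMuLow hold a reader count in
// units of kMuOne; with kMuWait set they hold the PerThreadSynch pointer of
// the most recently queued waiter instead.  The function and its example
// value are illustrative only.
static intptr_t ExampleReaderCount() {
  intptr_t v = kMuReader | (3 * kMuOne);     // three readers, no waiters
  assert((v & (kMuWait | kMuWriter)) == 0);  // precondition for this decoding
  return (v & kMuHigh) / kMuOne;             // yields 3
}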
702
703
// flags passed to Enqueue and LockSlow{,WithTimeout,Loop}
704
static const int kMuHasBlocked = 0x01;  // already blocked (MUST == 1)
705
static const int kMuIsCond = 0x02;      // conditional waiter (CV or Condition)
706
707
static_assert(PerThreadSynch::kAlignment > kMuLow,
708
              "PerThreadSynch::kAlignment must be greater than kMuLow");
709
710
// This struct contains various bitmasks to be used in
711
// acquiring and releasing a mutex in a particular mode.
712
struct MuHowS {
713
  // if all the bits in fast_need_zero are zero, the lock can be acquired by
714
  // adding fast_add and oring fast_or.  The bit kMuDesig should be reset iff
715
  // this is the designated waker.
716
  intptr_t fast_need_zero;
717
  intptr_t fast_or;
718
  intptr_t fast_add;
719
720
  intptr_t slow_need_zero;  // fast_need_zero with events (e.g. logging)
721
722
  intptr_t slow_inc_need_zero;  // if all the bits in slow_inc_need_zero are
723
                                // zero a reader can acquire a read share by
724
                                // setting the reader bit and incrementing
725
                                // the reader count (in last waiter since
726
                                // we're now slow-path).  kMuWrWait may
727
                                // be ignored if we already waited once.
728
};
729
730
static const MuHowS kSharedS = {
731
    // shared or read lock
732
    kMuWriter | kMuWait | kMuEvent,   // fast_need_zero
733
    kMuReader,                        // fast_or
734
    kMuOne,                           // fast_add
735
    kMuWriter | kMuWait,              // slow_need_zero
736
    kMuSpin | kMuWriter | kMuWrWait,  // slow_inc_need_zero
737
};
738
static const MuHowS kExclusiveS = {
739
    // exclusive or write lock
740
    kMuWriter | kMuReader | kMuEvent,  // fast_need_zero
741
    kMuWriter,                         // fast_or
742
    0,                                 // fast_add
743
    kMuWriter | kMuReader,             // slow_need_zero
744
    ~static_cast<intptr_t>(0),         // slow_inc_need_zero
745
};
746
static const Mutex::MuHow kShared = &kSharedS;        // shared lock
747
static const Mutex::MuHow kExclusive = &kExclusiveS;  // exclusive lock
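// Editorial sketch, not part of mutex.cc: the fast-path rule that the MuHowS
// masks above encode.  The real fast path lives in Mutex::Lock() and
// Mutex::ReaderLock(); this function only illustrates how fast_need_zero,
// fast_or, and fast_add combine, and is not called anywhere.
static bool ExampleFastPathTryAcquire(std::atomic<intptr_t> *mu,
                                      Mutex::MuHow how) {
  intptr_t v = mu->load(std::memory_order_relaxed);
  if ((v & how->fast_need_zero) != 0) {
    return false;  // contended, or events enabled: must take a slow path
  }
  // One CAS sets the lock bit; for kShared, fast_add also adds one kMuOne.
  return mu->compare_exchange_strong(v, (v | how->fast_or) + how->fast_add,
                                     std::memory_order_acquire,
                                     std::memory_order_relaxed);
}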
748
749
#ifdef NDEBUG
750
static constexpr bool kDebugMode = false;
751
#else
752
static constexpr bool kDebugMode = true;
753
#endif
754
755
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
756
static unsigned TsanFlags(Mutex::MuHow how) {
757
  return how == kShared ? __tsan_mutex_read_lock : 0;
758
}
759
#endif
760
761
0
static bool DebugOnlyIsExiting() {
762
0
  return false;
763
0
}
764
765
0
Mutex::~Mutex() {
766
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
767
0
  if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
768
0
    ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
769
0
  }
770
0
  if (kDebugMode) {
771
0
    this->ForgetDeadlockInfo();
772
0
  }
773
0
  ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
774
0
}
775
776
0
void Mutex::EnableDebugLog(const char *name) {
777
0
  SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
778
0
  e->log = true;
779
0
  UnrefSynchEvent(e);
780
0
}
781
782
0
void EnableMutexInvariantDebugging(bool enabled) {
783
0
  synch_check_invariants.store(enabled, std::memory_order_release);
784
0
}
785
786
void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
787
0
                                     void *arg) {
788
0
  if (synch_check_invariants.load(std::memory_order_acquire) &&
789
0
      invariant != nullptr) {
790
0
    SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
791
0
    e->invariant = invariant;
792
0
    e->arg = arg;
793
0
    UnrefSynchEvent(e);
794
0
  }
795
0
}
796
797
0
void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
798
0
  synch_deadlock_detection.store(mode, std::memory_order_release);
799
0
}
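// Editorial sketch, not part of mutex.cc: typical client use of the debugging
// knobs defined above.  Once invariant checking is enabled, the callback runs
// with the mutex held on each acquisition and release.  The ExampleAccount
// type is an illustrative assumption, not an Abseil API.
struct ExampleAccount {
  Mutex mu;
  int64_t balance = 0;

  static void CheckBalanceNonNegative(void *arg) {
    auto *a = static_cast<ExampleAccount *>(arg);
    ABSL_RAW_CHECK(a->balance >= 0, "balance went negative");
  }

  ExampleAccount() {
    mu.EnableDebugLog("ExampleAccount::mu");  // name used in the event log
    EnableMutexInvariantDebugging(true);      // global switch, read by the next call
    mu.EnableInvariantDebugging(&CheckBalanceNonNegative, this);
    SetMutexDeadlockDetectionMode(OnDeadlockCycle::kReport);
  }
};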
800
801
// Return true iff threads x and y are part of the same equivalence
802
// class of waiters. An equivalence class is defined as the set of
803
// waiters with the same condition, type of lock, and thread priority.
804
//
805
// Requires that x and y be waiting on the same Mutex queue.
806
0
static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
807
0
  return x->waitp->how == y->waitp->how && x->priority == y->priority &&
808
0
         Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
809
0
}
810
811
// Given the contents of a mutex word containing a PerThreadSynch pointer,
812
// return the pointer.
813
0
static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
814
0
  return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
815
0
}
816
817
// The next several routines maintain the per-thread next and skip fields
818
// used in the Mutex waiter queue.
819
// The queue is a circular singly-linked list, of which the "head" is the
820
// last element, and head->next is the first element.
821
// The skip field has the invariant:
822
//   For thread x, x->skip is one of:
823
//     - invalid (iff x is not in a Mutex wait queue),
824
//     - null, or
825
//     - a pointer to a distinct thread waiting later in the same Mutex queue
826
//       such that all threads in [x, x->skip] have the same condition, priority
827
//       and lock type (MuEquivalentWaiter() is true for all pairs in [x,
828
//       x->skip]).
829
// In addition, if x->skip is valid, then (x->may_skip || x->skip == null).
830
//
831
// By the spec of MuEquivalentWaiter(), it is not necessary when removing the
832
// first runnable thread y from the front of a Mutex queue to adjust the skip
833
// field of another thread x because if x->skip==y, x->skip must (have) become
834
// invalid before y is removed.  The function TryRemove can remove a specified
835
// thread from an arbitrary position in the queue whether runnable or not, so
836
// it fixes up skip fields that would otherwise be left dangling.
837
// The statement
838
//     if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
839
// maintains the invariant provided x is not the last waiter in a Mutex queue
840
// The statement
841
//          if (x->skip != null) { x->skip = x->skip->skip; }
842
// maintains the invariant.
843
844
// Returns the last thread y in a mutex waiter queue such that all threads in
845
// [x, y] inclusive share the same condition.  Sets skip fields of some threads
846
// in that range to optimize future evaluation of Skip() on x values in
847
// the range.  Requires thread x is in a mutex waiter queue.
848
// The locking is unusual.  Skip() is called under these conditions:
849
//   - spinlock is held in call from Enqueue(), with maybe_unlocking == false
850
//   - Mutex is held in call from UnlockSlow() by last unlocker, with
851
//     maybe_unlocking == true
852
//   - both Mutex and spinlock are held in call from DequeueAllWakeable() (from
853
//     UnlockSlow()) and TryRemove()
854
// These cases are mutually exclusive, so Skip() never runs concurrently
855
// with itself on the same Mutex.   The skip chain is used in these other places
856
// that cannot occur concurrently:
857
//   - FixSkip() (from TryRemove()) - spinlock and Mutex are held)
858
//   - Dequeue() (with spinlock and Mutex held)
859
//   - UnlockSlow() (with spinlock and Mutex held)
860
// A more complex case is Enqueue()
861
//   - Enqueue() (with spinlock held and maybe_unlocking == false)
862
//               This is the first case in which Skip is called, above.
863
//   - Enqueue() (without spinlock held; but queue is empty and being freshly
864
//                formed)
865
//   - Enqueue() (with spinlock held and maybe_unlocking == true)
866
// The first case has mutual exclusion, and the second isolation through
867
// working on an otherwise unreachable data structure.
868
// In the last case, Enqueue() is required to change no skip/next pointers
869
// except those in the added node and the former "head" node.  This implies
870
// that the new node is added after head, and so must be the new head or the
871
// new front of the queue.
872
0
static PerThreadSynch *Skip(PerThreadSynch *x) {
873
0
  PerThreadSynch *x0 = nullptr;
874
0
  PerThreadSynch *x1 = x;
875
0
  PerThreadSynch *x2 = x->skip;
876
0
  if (x2 != nullptr) {
877
    // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
878
    // such that   x1 == x0->skip && x2 == x1->skip
879
0
    while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
880
0
      x0->skip = x2;      // short-circuit skip from x0 to x2
881
0
    }
882
0
    x->skip = x1;         // short-circuit skip from x to result
883
0
  }
884
0
  return x1;
885
0
}
886
887
// "ancestor" appears before "to_be_removed" in the same Mutex waiter queue.
888
// The latter is going to be removed out of order, because of a timeout.
889
// Check whether "ancestor" has a skip field pointing to "to_be_removed",
890
// and fix it if it does.
891
0
static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
892
0
  if (ancestor->skip == to_be_removed) {  // ancestor->skip left dangling
893
0
    if (to_be_removed->skip != nullptr) {
894
0
      ancestor->skip = to_be_removed->skip;  // can skip past to_be_removed
895
0
    } else if (ancestor->next != to_be_removed) {  // they are not adjacent
896
0
      ancestor->skip = ancestor->next;             // can skip one past ancestor
897
0
    } else {
898
0
      ancestor->skip = nullptr;  // can't skip at all
899
0
    }
900
0
  }
901
0
}
902
903
static void CondVarEnqueue(SynchWaitParams *waitp);
904
905
// Enqueue thread "waitp->thread" on a waiter queue.
906
// Called with mutex spinlock held if head != nullptr
907
// If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is
908
// idempotent; it alters no state associated with the existing (empty)
909
// queue.
910
//
911
// If waitp->cv_word == nullptr, queue the thread at either the front or
912
// the end (according to its priority) of the circular mutex waiter queue whose
913
// head is "head", and return the new head.  mu is the previous mutex state,
914
// which contains the reader count (perhaps adjusted for the operation in
915
// progress) if the list was empty and a read lock held, and the holder hint if
916
// the list was empty and a write lock held.  (flags & kMuIsCond) indicates
917
// whether this thread was transferred from a CondVar or is waiting for a
918
// non-trivial condition.  In this case, Enqueue() never returns nullptr
919
//
920
// If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is
921
// returned. This mechanism is used by CondVar to queue a thread on the
922
// condition variable queue instead of the mutex queue in implementing Wait().
923
// In this case, Enqueue() can return nullptr (if head==nullptr).
924
static PerThreadSynch *Enqueue(PerThreadSynch *head,
925
0
                               SynchWaitParams *waitp, intptr_t mu, int flags) {
926
  // If we have been given a cv_word, call CondVarEnqueue() and return
927
  // the previous head of the Mutex waiter queue.
928
0
  if (waitp->cv_word != nullptr) {
929
0
    CondVarEnqueue(waitp);
930
0
    return head;
931
0
  }
932
933
0
  PerThreadSynch *s = waitp->thread;
934
0
  ABSL_RAW_CHECK(
935
0
      s->waitp == nullptr ||    // normal case
936
0
          s->waitp == waitp ||  // Fer()---transfer from condition variable
937
0
          s->suppress_fatal_errors,
938
0
      "detected illegal recursion into Mutex code");
939
0
  s->waitp = waitp;
940
0
  s->skip = nullptr;             // maintain skip invariant (see above)
941
0
  s->may_skip = true;            // always true on entering queue
942
0
  s->wake = false;               // not being woken
943
0
  s->cond_waiter = ((flags & kMuIsCond) != 0);
944
0
  if (head == nullptr) {         // s is the only waiter
945
0
    s->next = s;                 // it's the only entry in the cycle
946
0
    s->readers = mu;             // reader count is from mu word
947
0
    s->maybe_unlocking = false;  // no one is searching an empty list
948
0
    head = s;                    // s is new head
949
0
  } else {
950
0
    PerThreadSynch *enqueue_after = nullptr;  // we'll put s after this element
951
0
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
952
0
    int64_t now_cycles = base_internal::CycleClock::Now();
953
0
    if (s->next_priority_read_cycles < now_cycles) {
954
      // Every so often, update our idea of the thread's priority.
955
      // pthread_getschedparam() is 5% of the block/wakeup time;
956
      // base_internal::CycleClock::Now() is 0.5%.
957
0
      int policy;
958
0
      struct sched_param param;
959
0
      const int err = pthread_getschedparam(pthread_self(), &policy, &param);
960
0
      if (err != 0) {
961
0
        ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
962
0
      } else {
963
0
        s->priority = param.sched_priority;
964
0
        s->next_priority_read_cycles =
965
0
            now_cycles +
966
0
            static_cast<int64_t>(base_internal::CycleClock::Frequency());
967
0
      }
968
0
    }
969
0
    if (s->priority > head->priority) {  // s's priority is above head's
970
      // try to put s in priority-fifo order, or failing that at the front.
971
0
      if (!head->maybe_unlocking) {
972
        // No unlocker can be scanning the queue, so we can insert into the
973
        // middle of the queue.
974
        //
975
        // Within a skip chain, all waiters have the same priority, so we can
976
        // skip forward through the chains until we find one with a lower
977
        // priority than the waiter to be enqueued.
978
0
        PerThreadSynch *advance_to = head;    // next value of enqueue_after
979
0
        do {
980
0
          enqueue_after = advance_to;
981
          // (side-effect: optimizes skip chain)
982
0
          advance_to = Skip(enqueue_after->next);
983
0
        } while (s->priority <= advance_to->priority);
984
              // termination guaranteed because s->priority > head->priority
985
              // and head is the end of a skip chain
986
0
      } else if (waitp->how == kExclusive &&
987
0
                 Condition::GuaranteedEqual(waitp->cond, nullptr)) {
988
        // An unlocker could be scanning the queue, but we know it will recheck
989
        // the queue front for writers that have no condition, which is what s
990
        // is, so an insert at front is safe.
991
0
        enqueue_after = head;       // add after head, at front
992
0
      }
993
0
    }
994
0
#endif
995
0
    if (enqueue_after != nullptr) {
996
0
      s->next = enqueue_after->next;
997
0
      enqueue_after->next = s;
998
999
      // enqueue_after can be: head, Skip(...), or cur.
1000
      // The first two imply enqueue_after->skip == nullptr, and
1001
      // the last is used only if MuEquivalentWaiter(s, cur).
1002
      // We require this because clearing enqueue_after->skip
1003
      // is impossible; enqueue_after's predecessors might also
1004
      // incorrectly skip over s if we were to allow other
1005
      // insertion points.
1006
0
      ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
1007
0
                         MuEquivalentWaiter(enqueue_after, s),
1008
0
                     "Mutex Enqueue failure");
1009
1010
0
      if (enqueue_after != head && enqueue_after->may_skip &&
1011
0
          MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
1012
        // enqueue_after can skip to its new successor, s
1013
0
        enqueue_after->skip = enqueue_after->next;
1014
0
      }
1015
0
      if (MuEquivalentWaiter(s, s->next)) {  // s->may_skip is known to be true
1016
0
        s->skip = s->next;                // s may skip to its successor
1017
0
      }
1018
0
    } else {   // enqueue not done any other way, so
1019
               // we're inserting s at the back
1020
      // s will become new head; copy data from head into it
1021
0
      s->next = head->next;        // add s after head
1022
0
      head->next = s;
1023
0
      s->readers = head->readers;  // reader count is from previous head
1024
0
      s->maybe_unlocking = head->maybe_unlocking;  // same for unlock hint
1025
0
      if (head->may_skip && MuEquivalentWaiter(head, s)) {
1026
        // head now has successor; may skip
1027
0
        head->skip = s;
1028
0
      }
1029
0
      head = s;  // s is new head
1030
0
    }
1031
0
  }
1032
0
  s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
1033
0
  return head;
1034
0
}
1035
1036
// Dequeue the successor pw->next of thread pw from the Mutex waiter queue
1037
// whose last element is head.  The new head element is returned, or null
1038
// if the list is made empty.
1039
// Dequeue is called with both spinlock and Mutex held.
1040
0
static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
1041
0
  PerThreadSynch *w = pw->next;
1042
0
  pw->next = w->next;         // snip w out of list
1043
0
  if (head == w) {            // we removed the head
1044
0
    head = (pw == w) ? nullptr : pw;  // either emptied list, or pw is new head
1045
0
  } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
1046
    // pw can skip to its new successor
1047
0
    if (pw->next->skip !=
1048
0
        nullptr) {  // either skip to its successors skip target
1049
0
      pw->skip = pw->next->skip;
1050
0
    } else {                   // or to pw's successor
1051
0
      pw->skip = pw->next;
1052
0
    }
1053
0
  }
1054
0
  return head;
1055
0
}
1056
1057
// Traverse the elements [pw->next, head] of the circular list whose last element
1058
// is head.
1059
// Remove all elements with wake==true and place them in the
1060
// singly-linked list wake_list in the order found.   Assumes that
1061
// there is only one such element if the element has how == kExclusive.
1062
// Return the new head.
1063
static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
1064
                                          PerThreadSynch *pw,
1065
0
                                          PerThreadSynch **wake_tail) {
1066
0
  PerThreadSynch *orig_h = head;
1067
0
  PerThreadSynch *w = pw->next;
1068
0
  bool skipped = false;
1069
0
  do {
1070
0
    if (w->wake) {                    // remove this element
1071
0
      ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
1072
      // we're removing pw's successor so either pw->skip is zero or we should
1073
      // already have removed pw since if pw->skip!=null, pw has the same
1074
      // condition as w.
1075
0
      head = Dequeue(head, pw);
1076
0
      w->next = *wake_tail;           // keep list terminated
1077
0
      *wake_tail = w;                 // add w to wake_list;
1078
0
      wake_tail = &w->next;           // next addition to end
1079
0
      if (w->waitp->how == kExclusive) {  // wake at most 1 writer
1080
0
        break;
1081
0
      }
1082
0
    } else {                // not waking this one; skip
1083
0
      pw = Skip(w);       // skip as much as possible
1084
0
      skipped = true;
1085
0
    }
1086
0
    w = pw->next;
1087
    // We want to stop processing after we've considered the original head,
1088
    // orig_h.  We can't test for w==orig_h in the loop because w may skip over
1089
    // it; we are guaranteed only that w's predecessor will not skip over
1090
    // orig_h.  When we've considered orig_h, either we've processed it and
1091
    // removed it (so orig_h != head), or we considered it and skipped it (so
1092
    // skipped==true && pw == head because skipping from head always skips by
1093
    // just one, leaving pw pointing at head).  So we want to
1094
    // continue the loop with the negation of that expression.
1095
0
  } while (orig_h == head && (pw != head || !skipped));
1096
0
  return head;
1097
0
}
1098
1099
// Try to remove thread s from the list of waiters on this mutex.
1100
// Does nothing if s is not on the waiter list.
1101
0
void Mutex::TryRemove(PerThreadSynch *s) {
1102
0
  SchedulingGuard::ScopedDisable disable_rescheduling;
1103
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
1104
  // acquire spinlock & lock
1105
0
  if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
1106
0
      mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
1107
0
                                  std::memory_order_acquire,
1108
0
                                  std::memory_order_relaxed)) {
1109
0
    PerThreadSynch *h = GetPerThreadSynch(v);
1110
0
    if (h != nullptr) {
1111
0
      PerThreadSynch *pw = h;   // pw is w's predecessor
1112
0
      PerThreadSynch *w;
1113
0
      if ((w = pw->next) != s) {  // search for thread,
1114
0
        do {                      // processing at least one element
1115
          // If the current element isn't equivalent to the waiter to be
1116
          // removed, we can skip the entire chain.
1117
0
          if (!MuEquivalentWaiter(s, w)) {
1118
0
            pw = Skip(w);                // so skip all that won't match
1119
            // we don't have to worry about dangling skip fields
1120
            // in the threads we skipped; none can point to s
1121
            // because they are in a different equivalence class.
1122
0
          } else {          // seeking same condition
1123
0
            FixSkip(w, s);  // fix up any skip pointer from w to s
1124
0
            pw = w;
1125
0
          }
1126
          // don't search further if we found the thread, or we're about to
1127
          // process the first thread again.
1128
0
        } while ((w = pw->next) != s && pw != h);
1129
0
      }
1130
0
      if (w == s) {                 // found thread; remove it
1131
        // pw->skip may be non-zero here; the loop above ensured that
1132
        // no ancestor of s can skip to s, so removal is safe anyway.
1133
0
        h = Dequeue(h, pw);
1134
0
        s->next = nullptr;
1135
0
        s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
1136
0
      }
1137
0
    }
1138
0
    intptr_t nv;
1139
0
    do {                        // release spinlock and lock
1140
0
      v = mu_.load(std::memory_order_relaxed);
1141
0
      nv = v & (kMuDesig | kMuEvent);
1142
0
      if (h != nullptr) {
1143
0
        nv |= kMuWait | reinterpret_cast<intptr_t>(h);
1144
0
        h->readers = 0;            // we hold writer lock
1145
0
        h->maybe_unlocking = false;  // finished unlocking
1146
0
      }
1147
0
    } while (!mu_.compare_exchange_weak(v, nv,
1148
0
                                        std::memory_order_release,
1149
0
                                        std::memory_order_relaxed));
1150
0
  }
1151
0
}
1152
1153
// Wait until thread "s", which must be the current thread, is removed from
1154
// this mutex's waiter queue.  If "s->waitp->timeout" has a timeout, wake up
1155
// if the wait extends past the absolute time specified, even if "s" is still
1156
// on the mutex queue.  In this case, remove "s" from the queue before
1157
// returning.
1158
0
void Mutex::Block(PerThreadSynch *s) {
1159
0
  while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
1160
0
    if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
1161
      // After a timeout, we go into a spin loop until we remove ourselves
1162
      // from the queue, or someone else removes us.  We can't be sure to be
1163
      // able to remove ourselves in a single lock acquisition because this
1164
      // mutex may be held, and the holder has the right to read the centre
1165
      // of the waiter queue without holding the spinlock.
1166
0
      this->TryRemove(s);
1167
0
      int c = 0;
1168
0
      while (s->next != nullptr) {
1169
0
        c = synchronization_internal::MutexDelay(c, GENTLE);
1170
0
        this->TryRemove(s);
1171
0
      }
1172
0
      if (kDebugMode) {
1173
        // This ensures that we test the case that TryRemove() is called when s
1174
        // is not on the queue.
1175
0
        this->TryRemove(s);
1176
0
      }
1177
0
      s->waitp->timeout = KernelTimeout::Never();      // timeout is satisfied
1178
0
      s->waitp->cond = nullptr;  // condition no longer relevant for wakeups
1179
0
    }
1180
0
  }
1181
0
  ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors,
1182
0
                 "detected illegal recursion in Mutex code");
1183
0
  s->waitp = nullptr;
1184
0
}
1185
1186
// Wake thread w, and return the next thread in the list.
1187
0
PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
1188
0
  PerThreadSynch *next = w->next;
1189
0
  w->next = nullptr;
1190
0
  w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
1191
0
  IncrementSynchSem(this, w);
1192
1193
0
  return next;
1194
0
}
1195
1196
static GraphId GetGraphIdLocked(Mutex *mu)
1197
0
    ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
1198
0
  if (!deadlock_graph) {  // (re)create the deadlock graph.
1199
0
    deadlock_graph =
1200
0
        new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))
1201
0
            GraphCycles;
1202
0
  }
1203
0
  return deadlock_graph->GetId(mu);
1204
0
}
1205
1206
0
static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
1207
0
  deadlock_graph_mu.Lock();
1208
0
  GraphId id = GetGraphIdLocked(mu);
1209
0
  deadlock_graph_mu.Unlock();
1210
0
  return id;
1211
0
}
1212
1213
// Record a lock acquisition.  This is used in debug mode for deadlock
1214
// detection.  The held_locks pointer points to the relevant data
1215
// structure for each case.
1216
0
static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
1217
0
  int n = held_locks->n;
1218
0
  int i = 0;
1219
0
  while (i != n && held_locks->locks[i].id != id) {
1220
0
    i++;
1221
0
  }
1222
0
  if (i == n) {
1223
0
    if (n == ABSL_ARRAYSIZE(held_locks->locks)) {
1224
0
      held_locks->overflow = true;  // lost some data
1225
0
    } else {                        // we have room for lock
1226
0
      held_locks->locks[i].mu = mu;
1227
0
      held_locks->locks[i].count = 1;
1228
0
      held_locks->locks[i].id = id;
1229
0
      held_locks->n = n + 1;
1230
0
    }
1231
0
  } else {
1232
0
    held_locks->locks[i].count++;
1233
0
  }
1234
0
}
1235
1236
// Record a lock release.  Each call to LockEnter(mu, id, x) should be
1237
// eventually followed by a call to LockLeave(mu, id, x) by the same thread.
1238
// It does not process the event if it is not needed, i.e., when deadlock detection is
1239
// disabled.
1240
0
static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
1241
0
  int n = held_locks->n;
1242
0
  int i = 0;
1243
0
  while (i != n && held_locks->locks[i].id != id) {
1244
0
    i++;
1245
0
  }
1246
0
  if (i == n) {
1247
0
    if (!held_locks->overflow) {
1248
0
      // The deadlock id may have been reassigned after ForgetDeadlockInfo,
1249
0
      // but in that case mu should still be present.
1250
0
      i = 0;
1251
0
      while (i != n && held_locks->locks[i].mu != mu) {
1252
0
        i++;
1253
0
      }
1254
0
      if (i == n) {  // mu missing means releasing unheld lock
1255
0
        SynchEvent *mu_events = GetSynchEvent(mu);
1256
0
        ABSL_RAW_LOG(FATAL,
1257
0
                     "thread releasing lock it does not hold: %p %s; "
1258
0
                     ,
1259
0
                     static_cast<void *>(mu),
1260
0
                     mu_events == nullptr ? "" : mu_events->name);
1261
0
      }
1262
0
    }
1263
0
  } else if (held_locks->locks[i].count == 1) {
1264
0
    held_locks->n = n - 1;
1265
0
    held_locks->locks[i] = held_locks->locks[n - 1];
1266
0
    held_locks->locks[n - 1].id = InvalidGraphId();
1267
0
    held_locks->locks[n - 1].mu =
1268
0
        nullptr;  // clear mu to please the leak detector.
1269
0
  } else {
1270
0
    assert(held_locks->locks[i].count > 0);
1271
0
    held_locks->locks[i].count--;
1272
0
  }
1273
0
}
1274
1275
// Call LockEnter() if in debug mode and deadlock detection is enabled.
1276
0
static inline void DebugOnlyLockEnter(Mutex *mu) {
1277
0
  if (kDebugMode) {
1278
0
    if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1279
0
        OnDeadlockCycle::kIgnore) {
1280
0
      LockEnter(mu, GetGraphId(mu), Synch_GetAllLocks());
1281
0
    }
1282
0
  }
1283
0
}
1284
1285
// Call LockEnter() if in debug mode and deadlock detection is enabled.
1286
6
static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
1287
6
  if (kDebugMode) {
1288
0
    if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1289
0
        OnDeadlockCycle::kIgnore) {
1290
0
      LockEnter(mu, id, Synch_GetAllLocks());
1291
0
    }
1292
0
  }
1293
6
}
1294
1295
// Call LockLeave() if in debug mode and deadlock detection is enabled.
1296
6
static inline void DebugOnlyLockLeave(Mutex *mu) {
1297
6
  if (kDebugMode) {
1298
0
    if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1299
0
        OnDeadlockCycle::kIgnore) {
1300
0
      LockLeave(mu, GetGraphId(mu), Synch_GetAllLocks());
1301
0
    }
1302
0
  }
1303
6
}
1304
1305
static char *StackString(void **pcs, int n, char *buf, int maxlen,
1306
0
                         bool symbolize) {
1307
0
  static const int kSymLen = 200;
1308
0
  char sym[kSymLen];
1309
0
  int len = 0;
1310
0
  for (int i = 0; i != n; i++) {
1311
0
    if (len >= maxlen)
1312
0
      return buf;
1313
0
    size_t count = static_cast<size_t>(maxlen - len);
1314
0
    if (symbolize) {
1315
0
      if (!symbolizer(pcs[i], sym, kSymLen)) {
1316
0
        sym[0] = '\0';
1317
0
      }
1318
0
      snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? "\n" : ""), pcs[i],
1319
0
               sym);
1320
0
    } else {
1321
0
      snprintf(buf + len, count, " %p", pcs[i]);
1322
0
    }
1323
0
    len += strlen(&buf[len]);
1324
0
  }
1325
0
  return buf;
1326
0
}
1327
1328
0
static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
1329
0
  void *pcs[40];
1330
0
  return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
1331
0
                     maxlen, symbolize);
1332
0
}
1333
1334
namespace {
1335
enum { kMaxDeadlockPathLen = 10 };  // maximum length of a deadlock cycle;
1336
                                    // a path this long would be remarkable
1337
// Buffers required to report a deadlock.
1338
// We do not allocate them on the stack to avoid a large stack frame.
1339
struct DeadlockReportBuffers {
1340
  char buf[6100];
1341
  GraphId path[kMaxDeadlockPathLen];
1342
};
1343
1344
struct ScopedDeadlockReportBuffers {
1345
0
  ScopedDeadlockReportBuffers() {
1346
0
    b = reinterpret_cast<DeadlockReportBuffers *>(
1347
0
        base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
1348
0
  }
1349
0
  ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
1350
  DeadlockReportBuffers *b;
1351
};
1352
1353
// Helper to pass to GraphCycles::UpdateStackTrace.
1354
0
int GetStack(void** stack, int max_depth) {
1355
0
  return absl::GetStackTrace(stack, max_depth, 3);
1356
0
}
1357
}  // anonymous namespace
1358
1359
// Called in debug mode when a thread is about to acquire a lock in a way that
1360
// may block.
1361
0
static GraphId DeadlockCheck(Mutex *mu) {
1362
0
  if (synch_deadlock_detection.load(std::memory_order_acquire) ==
1363
0
      OnDeadlockCycle::kIgnore) {
1364
0
    return InvalidGraphId();
1365
0
  }
1366
0
1367
0
  SynchLocksHeld *all_locks = Synch_GetAllLocks();
1368
0
1369
0
  absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
1370
0
  const GraphId mu_id = GetGraphIdLocked(mu);
1371
0
1372
0
  if (all_locks->n == 0) {
1373
0
    // There are no other locks held. Return now so that we don't need to
1374
0
    // call GetSynchEvent(). This way we do not record the stack trace
1375
0
    // for this Mutex. It's ok, since if this Mutex is involved in a deadlock,
1376
0
    // it can't always be the first lock acquired by a thread.
1377
0
    return mu_id;
1378
0
  }
1379
0
1380
0
  // We prefer to keep stack traces that show a thread holding and acquiring
1381
0
  // as many locks as possible.  This increases the chances that a given edge
1382
0
  // in the acquires-before graph will be represented in the stack traces
1383
0
  // recorded for the locks.
1384
0
  deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);
1385
0
1386
0
  // For each other mutex already held by this thread:
1387
0
  for (int i = 0; i != all_locks->n; i++) {
1388
0
    const GraphId other_node_id = all_locks->locks[i].id;
1389
0
    const Mutex *other =
1390
0
        static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
1391
0
    if (other == nullptr) {
1392
0
      // Ignore stale lock
1393
0
      continue;
1394
0
    }
1395
0
1396
0
    // Add the acquired-before edge to the graph.
1397
0
    if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
1398
0
      ScopedDeadlockReportBuffers scoped_buffers;
1399
0
      DeadlockReportBuffers *b = scoped_buffers.b;
1400
0
      static int number_of_reported_deadlocks = 0;
1401
0
      number_of_reported_deadlocks++;
1402
0
      // Symbolize only the first 2 deadlock reports to avoid huge slowdowns.
1403
0
      bool symbolize = number_of_reported_deadlocks <= 2;
1404
0
      ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
1405
0
                   CurrentStackString(b->buf, sizeof (b->buf), symbolize));
1406
0
      size_t len = 0;
1407
0
      for (int j = 0; j != all_locks->n; j++) {
1408
0
        void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
1409
0
        if (pr != nullptr) {
1410
0
          snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
1411
0
          len += strlen(&b->buf[len]);
1412
0
        }
1413
0
      }
1414
0
      ABSL_RAW_LOG(ERROR,
1415
0
                   "Acquiring absl::Mutex %p while holding %s; a cycle in the "
1416
0
                   "historical lock ordering graph has been observed",
1417
0
                   static_cast<void *>(mu), b->buf);
1418
0
      ABSL_RAW_LOG(ERROR, "Cycle: ");
1419
0
      int path_len = deadlock_graph->FindPath(
1420
0
          mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
1421
0
      for (int j = 0; j != path_len; j++) {
1422
0
        GraphId id = b->path[j];
1423
0
        Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
1424
0
        if (path_mu == nullptr) continue;
1425
0
        void** stack;
1426
0
        int depth = deadlock_graph->GetStackTrace(id, &stack);
1427
0
        snprintf(b->buf, sizeof(b->buf),
1428
0
                 "mutex@%p stack: ", static_cast<void *>(path_mu));
1429
0
        StackString(stack, depth, b->buf + strlen(b->buf),
1430
0
                    static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
1431
0
                    symbolize);
1432
0
        ABSL_RAW_LOG(ERROR, "%s", b->buf);
1433
0
      }
1434
0
      if (synch_deadlock_detection.load(std::memory_order_acquire) ==
1435
0
          OnDeadlockCycle::kAbort) {
1436
0
        deadlock_graph_mu.Unlock();  // avoid deadlock in fatal sighandler
1437
0
        ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
1438
0
        return mu_id;
1439
0
      }
1440
0
      break;   // report at most one potential deadlock per acquisition
1441
0
    }
1442
0
  }
1443
0
1444
0
  return mu_id;
1445
0
}
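// The following is an editorial, standalone sketch (not part of mutex.cc) of
// the idea behind DeadlockCheck(): maintain a directed "acquired-before"
// graph between locks and report any edge whose insertion would close a
// cycle.  The real GraphCycles class does this incrementally; this toy
// version simply runs a DFS per insertion.  All names here are illustrative.
#include <map>
#include <set>
#include <vector>

class LockOrderGraph {
 public:
  // Returns false (a potential deadlock) if adding the edge from->to would
  // create a cycle; otherwise records the edge and returns true.
  bool InsertEdge(int from, int to) {
    if (Reachable(to, from)) return false;  // to -> ... -> from already exists
    edges_[from].insert(to);
    return true;
  }

 private:
  // Depth-first search: is dst reachable from src?
  bool Reachable(int src, int dst) {
    if (src == dst) return true;
    std::set<int> seen;
    std::vector<int> stack{src};
    while (!stack.empty()) {
      int n = stack.back();
      stack.pop_back();
      if (!seen.insert(n).second) continue;
      for (int next : edges_[n]) {
        if (next == dst) return true;
        stack.push_back(next);
      }
    }
    return false;
  }
  std::map<int, std::set<int>> edges_;
};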
1446
1447
// Invoke DeadlockCheck() iff we're in debug mode and
1448
// deadlock checking has been enabled.
1449
6
static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
1450
6
  if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
1451
0
                        OnDeadlockCycle::kIgnore) {
1452
0
    return DeadlockCheck(mu);
1453
6
  } else {
1454
6
    return InvalidGraphId();
1455
6
  }
1456
6
}
1457
1458
0
void Mutex::ForgetDeadlockInfo() {
1459
0
  if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
1460
0
                        OnDeadlockCycle::kIgnore) {
1461
0
    deadlock_graph_mu.Lock();
1462
0
    if (deadlock_graph != nullptr) {
1463
0
      deadlock_graph->RemoveNode(this);
1464
0
    }
1465
0
    deadlock_graph_mu.Unlock();
1466
0
  }
1467
0
}
1468
1469
0
void Mutex::AssertNotHeld() const {
1470
  // We have the data to allow this check only if in debug mode and deadlock
1471
  // detection is enabled.
1472
0
  if (kDebugMode &&
1473
0
      (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
1474
0
      synch_deadlock_detection.load(std::memory_order_acquire) !=
1475
0
          OnDeadlockCycle::kIgnore) {
1476
0
    GraphId id = GetGraphId(const_cast<Mutex *>(this));
1477
0
    SynchLocksHeld *locks = Synch_GetAllLocks();
1478
0
    for (int i = 0; i != locks->n; i++) {
1479
0
      if (locks->locks[i].id == id) {
1480
0
        SynchEvent *mu_events = GetSynchEvent(this);
1481
0
        ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
1482
0
                     static_cast<const void *>(this),
1483
0
                     (mu_events == nullptr ? "" : mu_events->name));
1484
0
      }
1485
0
    }
1486
0
  }
1487
0
}
1488
1489
// Attempt to acquire *mu, and return whether successful.  The implementation
1490
// may spin for a short while if the lock cannot be acquired immediately.
1491
0
static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
1492
0
  int c = GetMutexGlobals().spinloop_iterations;
1493
0
  do {  // do/while somewhat faster on AMD
1494
0
    intptr_t v = mu->load(std::memory_order_relaxed);
1495
0
    if ((v & (kMuReader|kMuEvent)) != 0) {
1496
0
      return false;  // a reader or tracing -> give up
1497
0
    } else if (((v & kMuWriter) == 0) &&  // no holder -> try to acquire
1498
0
               mu->compare_exchange_strong(v, kMuWriter | v,
1499
0
                                           std::memory_order_acquire,
1500
0
                                           std::memory_order_relaxed)) {
1501
0
      return true;
1502
0
    }
1503
0
  } while (--c > 0);
1504
0
  return false;
1505
0
}
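// Editorial, standalone sketch of the same spin-then-fall-back pattern used
// by TryAcquireWithSpinning(): poll an uncontended word a bounded number of
// times before letting the caller take a blocking slow path.  The flag type
// and iteration count here are illustrative, not the Mutex representation.
#include <atomic>

bool TryAcquireWithBoundedSpin(std::atomic<bool>& locked, int max_spins) {
  for (int i = 0; i < max_spins; ++i) {
    bool expected = false;
    // Cheap relaxed load first; only attempt the CAS when it looks free.
    if (!locked.load(std::memory_order_relaxed) &&
        locked.compare_exchange_strong(expected, true,
                                       std::memory_order_acquire,
                                       std::memory_order_relaxed)) {
      return true;   // acquired without blocking
    }
  }
  return false;      // caller should fall back to the blocking slow path
}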
1506
1507
6
void Mutex::Lock() {
1508
6
  ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1509
6
  GraphId id = DebugOnlyDeadlockCheck(this);
1510
6
  intptr_t v = mu_.load(std::memory_order_relaxed);
1511
  // try fast acquire, then spin loop
1512
6
  if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
1513
6
      !mu_.compare_exchange_strong(v, kMuWriter | v,
1514
6
                                   std::memory_order_acquire,
1515
6
                                   std::memory_order_relaxed)) {
1516
    // try spin acquire, then slow loop
1517
0
    if (!TryAcquireWithSpinning(&this->mu_)) {
1518
0
      this->LockSlow(kExclusive, nullptr, 0);
1519
0
    }
1520
0
  }
1521
6
  DebugOnlyLockEnter(this, id);
1522
6
  ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1523
6
}
1524
1525
0
void Mutex::ReaderLock() {
1526
0
  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1527
0
  GraphId id = DebugOnlyDeadlockCheck(this);
1528
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
1529
  // try fast acquire, then slow loop
1530
0
  if ((v & (kMuWriter | kMuWait | kMuEvent)) != 0 ||
1531
0
      !mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1532
0
                                   std::memory_order_acquire,
1533
0
                                   std::memory_order_relaxed)) {
1534
0
    this->LockSlow(kShared, nullptr, 0);
1535
0
  }
1536
0
  DebugOnlyLockEnter(this, id);
1537
0
  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1538
0
}
1539
1540
0
void Mutex::LockWhen(const Condition &cond) {
1541
0
  ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1542
0
  GraphId id = DebugOnlyDeadlockCheck(this);
1543
0
  this->LockSlow(kExclusive, &cond, 0);
1544
0
  DebugOnlyLockEnter(this, id);
1545
0
  ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1546
0
}
1547
1548
0
bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
1549
0
  return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
1550
0
}
1551
1552
0
bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
1553
0
  ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1554
0
  GraphId id = DebugOnlyDeadlockCheck(this);
1555
0
  bool res = LockSlowWithDeadline(kExclusive, &cond,
1556
0
                                  KernelTimeout(deadline), 0);
1557
0
  DebugOnlyLockEnter(this, id);
1558
0
  ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1559
0
  return res;
1560
0
}
1561
1562
0
void Mutex::ReaderLockWhen(const Condition &cond) {
1563
0
  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1564
0
  GraphId id = DebugOnlyDeadlockCheck(this);
1565
0
  this->LockSlow(kShared, &cond, 0);
1566
0
  DebugOnlyLockEnter(this, id);
1567
0
  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1568
0
}
1569
1570
bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
1571
0
                                      absl::Duration timeout) {
1572
0
  return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
1573
0
}
1574
1575
bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
1576
0
                                       absl::Time deadline) {
1577
0
  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1578
0
  GraphId id = DebugOnlyDeadlockCheck(this);
1579
0
  bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(deadline), 0);
1580
0
  DebugOnlyLockEnter(this, id);
1581
0
  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1582
0
  return res;
1583
0
}
1584
1585
0
void Mutex::Await(const Condition &cond) {
1586
0
  if (cond.Eval()) {    // condition already true; nothing to do
1587
0
    if (kDebugMode) {
1588
0
      this->AssertReaderHeld();
1589
0
    }
1590
0
  } else {              // normal case
1591
0
    ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
1592
0
                   "condition untrue on return from Await");
1593
0
  }
1594
0
}
1595
1596
0
bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
1597
0
  return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
1598
0
}
1599
1600
0
bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
1601
0
  if (cond.Eval()) {      // condition already true; nothing to do
1602
0
    if (kDebugMode) {
1603
0
      this->AssertReaderHeld();
1604
0
    }
1605
0
    return true;
1606
0
  }
1607
1608
0
  KernelTimeout t{deadline};
1609
0
  bool res = this->AwaitCommon(cond, t);
1610
0
  ABSL_RAW_CHECK(res || t.has_timeout(),
1611
0
                 "condition untrue on return from Await");
1612
0
  return res;
1613
0
}
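// Editorial usage sketch (not part of mutex.cc) of the Await() family shown
// above: Await() atomically releases the Mutex while blocked and reacquires
// it before returning, so the condition is always evaluated under the lock.
// The names "mu" and "ready" below are made up for illustration.
#include "absl/synchronization/mutex.h"

absl::Mutex mu;
bool ready = false;  // guarded by mu

void Waiter() {
  absl::MutexLock l(&mu);
  mu.Await(absl::Condition(&ready));  // blocks until ready == true
  // mu is held here and ready is known to be true.
}

void Signaler() {
  absl::MutexLock l(&mu);
  ready = true;  // the waiter is woken when mu is released
}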
1614
1615
0
bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
1616
0
  this->AssertReaderHeld();
1617
0
  MuHow how =
1618
0
      (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
1619
0
  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
1620
0
  SynchWaitParams waitp(
1621
0
      how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
1622
0
      nullptr /*no cv_word*/);
1623
0
  int flags = kMuHasBlocked;
1624
0
  if (!Condition::GuaranteedEqual(&cond, nullptr)) {
1625
0
    flags |= kMuIsCond;
1626
0
  }
1627
0
  this->UnlockSlow(&waitp);
1628
0
  this->Block(waitp.thread);
1629
0
  ABSL_TSAN_MUTEX_POST_UNLOCK(this, TsanFlags(how));
1630
0
  ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
1631
0
  this->LockSlowLoop(&waitp, flags);
1632
0
  bool res = waitp.cond != nullptr ||  // => cond known true from LockSlowLoop
1633
0
             EvalConditionAnnotated(&cond, this, true, false, how == kShared);
1634
0
  ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
1635
0
  return res;
1636
0
}
1637
1638
0
bool Mutex::TryLock() {
1639
0
  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
1640
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
1641
0
  if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 &&  // try fast acquire
1642
0
      mu_.compare_exchange_strong(v, kMuWriter | v,
1643
0
                                  std::memory_order_acquire,
1644
0
                                  std::memory_order_relaxed)) {
1645
0
    DebugOnlyLockEnter(this);
1646
0
    ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
1647
0
    return true;
1648
0
  }
1649
0
  if ((v & kMuEvent) != 0) {              // we're recording events
1650
0
    if ((v & kExclusive->slow_need_zero) == 0 &&  // try fast acquire
1651
0
        mu_.compare_exchange_strong(
1652
0
            v, (kExclusive->fast_or | v) + kExclusive->fast_add,
1653
0
            std::memory_order_acquire, std::memory_order_relaxed)) {
1654
0
      DebugOnlyLockEnter(this);
1655
0
      PostSynchEvent(this, SYNCH_EV_TRYLOCK_SUCCESS);
1656
0
      ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
1657
0
      return true;
1658
0
    } else {
1659
0
      PostSynchEvent(this, SYNCH_EV_TRYLOCK_FAILED);
1660
0
    }
1661
0
  }
1662
0
  ABSL_TSAN_MUTEX_POST_LOCK(
1663
0
      this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
1664
0
  return false;
1665
0
}
1666
1667
0
bool Mutex::ReaderTryLock() {
1668
0
  ABSL_TSAN_MUTEX_PRE_LOCK(this,
1669
0
                           __tsan_mutex_read_lock | __tsan_mutex_try_lock);
1670
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
1671
  // The while-loops (here and below) iterate only if the mutex word keeps
1672
  // changing (typically because the reader count changes) under the CAS.  We
1673
  // limit the number of attempts to avoid having to think about livelock.
1674
0
  int loop_limit = 5;
1675
0
  while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
1676
0
    if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1677
0
                                    std::memory_order_acquire,
1678
0
                                    std::memory_order_relaxed)) {
1679
0
      DebugOnlyLockEnter(this);
1680
0
      ABSL_TSAN_MUTEX_POST_LOCK(
1681
0
          this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
1682
0
      return true;
1683
0
    }
1684
0
    loop_limit--;
1685
0
    v = mu_.load(std::memory_order_relaxed);
1686
0
  }
1687
0
  if ((v & kMuEvent) != 0) {   // we're recording events
1688
0
    loop_limit = 5;
1689
0
    while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
1690
0
      if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1691
0
                                      std::memory_order_acquire,
1692
0
                                      std::memory_order_relaxed)) {
1693
0
        DebugOnlyLockEnter(this);
1694
0
        PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_SUCCESS);
1695
0
        ABSL_TSAN_MUTEX_POST_LOCK(
1696
0
            this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
1697
0
        return true;
1698
0
      }
1699
0
      loop_limit--;
1700
0
      v = mu_.load(std::memory_order_relaxed);
1701
0
    }
1702
0
    if ((v & kMuEvent) != 0) {
1703
0
      PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_FAILED);
1704
0
    }
1705
0
  }
1706
0
  ABSL_TSAN_MUTEX_POST_LOCK(this,
1707
0
                            __tsan_mutex_read_lock | __tsan_mutex_try_lock |
1708
0
                                __tsan_mutex_try_lock_failed,
1709
0
                            0);
1710
0
  return false;
1711
0
}
1712
1713
6
void Mutex::Unlock() {
1714
6
  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
1715
6
  DebugOnlyLockLeave(this);
1716
6
  intptr_t v = mu_.load(std::memory_order_relaxed);
1717
1718
6
  if (kDebugMode && ((v & (kMuWriter | kMuReader)) != kMuWriter)) {
1719
0
    ABSL_RAW_LOG(FATAL, "Mutex unlocked when destroyed or not locked: v=0x%x",
1720
0
                 static_cast<unsigned>(v));
1721
0
  }
1722
1723
  // should_try_cas is whether we'll try a compare-and-swap immediately.
1724
  // NOTE: optimized out when kDebugMode is false.
1725
6
  bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
1726
6
                          (v & (kMuWait | kMuDesig)) != kMuWait);
1727
  // But, we can use an alternate computation of it, that compilers
1728
  // currently don't find on their own.  When that changes, this function
1729
  // can be simplified.
1730
6
  intptr_t x = (v ^ (kMuWriter | kMuWait)) & (kMuWriter | kMuEvent);
1731
6
  intptr_t y = (v ^ (kMuWriter | kMuWait)) & (kMuWait | kMuDesig);
1732
  // Claim: "x == 0 && y > 0" is equivalent to should_try_cas.
1733
  // Also, because kMuWriter and kMuEvent exceed kMuDesig and kMuWait,
1734
  // all possible non-zero values for x exceed all possible values for y.
1735
  // Therefore, (x == 0 && y > 0) == (x < y).
1736
6
  if (kDebugMode && should_try_cas != (x < y)) {
1737
    // We would usually use PRIdPTR here, but it is not correctly implemented
1738
    // within the Android toolchain.
1739
0
    ABSL_RAW_LOG(FATAL, "internal logic error %llx %llx %llx\n",
1740
0
                 static_cast<long long>(v), static_cast<long long>(x),
1741
0
                 static_cast<long long>(y));
1742
0
  }
1743
6
  if (x < y &&
1744
6
      mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
1745
6
                                  std::memory_order_release,
1746
6
                                  std::memory_order_relaxed)) {
1747
    // fast writer release (writer with no waiters or with designated waker)
1748
6
  } else {
1749
0
    this->UnlockSlow(nullptr /*no waitp*/);  // take slow path
1750
0
  }
1751
6
  ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
1752
6
}
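// Editorial, standalone check of the claim above: using illustrative bit
// values chosen so that kMuWriter and kMuEvent exceed kMuDesig and kMuWait
// (the real constants are defined earlier in this file), a brute-force sweep
// confirms that should_try_cas == (x == 0 && y > 0) == (x < y).
#include <cassert>
#include <cstdint>

int main() {
  constexpr std::intptr_t kMuDesig = 0x02, kMuWait = 0x04, kMuWriter = 0x08,
                          kMuEvent = 0x10;
  for (std::intptr_t v = 0; v < 0x40; ++v) {  // every combination of low bits
    const bool should_try_cas = (v & (kMuEvent | kMuWriter)) == kMuWriter &&
                                (v & (kMuWait | kMuDesig)) != kMuWait;
    const std::intptr_t x = (v ^ (kMuWriter | kMuWait)) & (kMuWriter | kMuEvent);
    const std::intptr_t y = (v ^ (kMuWriter | kMuWait)) & (kMuWait | kMuDesig);
    assert(should_try_cas == (x == 0 && y > 0));
    assert(should_try_cas == (x < y));
  }
  return 0;
}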
1753
1754
// Requires v to represent a reader-locked state.
1755
0
static bool ExactlyOneReader(intptr_t v) {
1756
0
  assert((v & (kMuWriter|kMuReader)) == kMuReader);
1757
0
  assert((v & kMuHigh) != 0);
1758
  // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
1759
  // on some architectures the following generates slightly smaller code.
1760
  // It may be faster too.
1761
0
  constexpr intptr_t kMuMultipleWaitersMask = kMuHigh ^ kMuOne;
1762
0
  return (v & kMuMultipleWaitersMask) == 0;
1763
0
}
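// Editorial, standalone check of the equivalence claimed above, using the
// illustrative values kMuOne == 0x100 and kMuHigh == ~0xff (assumptions for
// this sketch; the real constants are defined earlier in this file).  Given
// the precondition that at least one reader is present, testing
// (v & (kMuHigh ^ kMuOne)) == 0 is the same as testing (v & kMuHigh) == kMuOne.
#include <cassert>
#include <cstdint>

int main() {
  constexpr std::intptr_t kMuOne = 0x100;
  constexpr std::intptr_t kMuHigh = ~std::intptr_t{0xff};
  for (std::intptr_t readers = 1; readers <= 64; ++readers) {
    const std::intptr_t v = readers * kMuOne;  // reader count in the high bits
    assert(((v & (kMuHigh ^ kMuOne)) == 0) == ((v & kMuHigh) == kMuOne));
  }
  return 0;
}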
1764
1765
0
void Mutex::ReaderUnlock() {
1766
0
  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
1767
0
  DebugOnlyLockLeave(this);
1768
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
1769
0
  assert((v & (kMuWriter|kMuReader)) == kMuReader);
1770
0
  if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
1771
    // fast reader release (reader with no waiters)
1772
0
    intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
1773
0
    if (mu_.compare_exchange_strong(v, v - clear,
1774
0
                                    std::memory_order_release,
1775
0
                                    std::memory_order_relaxed)) {
1776
0
      ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
1777
0
      return;
1778
0
    }
1779
0
  }
1780
0
  this->UnlockSlow(nullptr /*no waitp*/);  // take slow path
1781
0
  ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
1782
0
}
1783
1784
// Returns a mask that clears the designated waker flag in the mutex word if
1785
// this thread has blocked, and therefore may be the designated waker.
1786
0
static intptr_t ClearDesignatedWakerMask(int flag) {
1787
0
  assert(flag >= 0);
1788
0
  assert(flag <= 1);
1789
0
  switch (flag) {
1790
0
    case 0:  // not blocked
1791
0
      return ~static_cast<intptr_t>(0);
1792
0
    case 1:  // blocked; turn off the designated waker bit
1793
0
      return ~static_cast<intptr_t>(kMuDesig);
1794
0
  }
1795
0
  ABSL_UNREACHABLE();
1796
0
}
1797
1798
// Returns a mask that ignores the existence of waiting writers if a reader
1799
// that has already blocked once wakes up.
1800
0
static intptr_t IgnoreWaitingWritersMask(int flag) {
1801
0
  assert(flag >= 0);
1802
0
  assert(flag <= 1);
1803
0
  switch (flag) {
1804
0
    case 0:  // not blocked
1805
0
      return ~static_cast<intptr_t>(0);
1806
0
    case 1:  // blocked; pretend there are no waiting writers
1807
0
      return ~static_cast<intptr_t>(kMuWrWait);
1808
0
  }
1809
0
  ABSL_UNREACHABLE();
1810
0
}
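// Editorial, standalone sketch of the idiom behind the two mask helpers
// above: a 0/1 flag selects between an all-ones mask and a mask with a
// single bit removed, so a caller can conditionally clear that bit with one
// AND and no branch at the use site.  kBit is an illustrative constant.
#include <cassert>
#include <cstdint>

constexpr std::intptr_t kBit = 0x02;

constexpr std::intptr_t ClearBitIfBlockedMask(int blocked_flag) {
  return blocked_flag == 0 ? ~std::intptr_t{0} : ~kBit;
}

int main() {
  const std::intptr_t v = 0x08 | kBit;
  assert((v & ClearBitIfBlockedMask(0)) == v);            // flag 0: unchanged
  assert((v & ClearBitIfBlockedMask(1)) == (v & ~kBit));  // flag 1: bit cleared
  return 0;
}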
1811
1812
// Internal version of LockWhen().  See LockSlowWithDeadline()
1813
ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
1814
0
                                             int flags) {
1815
0
  ABSL_RAW_CHECK(
1816
0
      this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
1817
0
      "condition untrue on return from LockSlow");
1818
0
}
1819
1820
// Compute cond->Eval() and tell race detectors that we do it under mutex mu.
1821
static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
1822
                                          bool locking, bool trylock,
1823
0
                                          bool read_lock) {
1824
  // Delicate annotation dance.
1825
  // We are currently inside of a read/write lock/unlock operation.
1826
  // All memory accesses are ignored inside of mutex operations; additionally,
1827
  // for an unlock operation tsan considers that we've already released the mutex.
1828
0
  bool res = false;
1829
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
1830
  const uint32_t flags = read_lock ? __tsan_mutex_read_lock : 0;
1831
  const uint32_t tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
1832
#endif
1833
0
  if (locking) {
1834
    // For lock we pretend that we have finished the operation,
1835
    // evaluate the predicate, then unlock the mutex and start locking it again
1836
    // to match the annotation at the end of outer lock operation.
1837
    // Note: we can't simply do POST_LOCK, Eval, PRE_LOCK, because then tsan
1838
    // will think the lock acquisition is recursive which will trigger
1839
    // deadlock detector.
1840
0
    ABSL_TSAN_MUTEX_POST_LOCK(mu, tryflags, 0);
1841
0
    res = cond->Eval();
1842
    // There is no "try" version of Unlock, so use flags instead of tryflags.
1843
0
    ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
1844
0
    ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
1845
0
    ABSL_TSAN_MUTEX_PRE_LOCK(mu, tryflags);
1846
0
  } else {
1847
    // Similarly, for unlock we pretend that we have unlocked the mutex,
1848
    // lock the mutex, evaluate the predicate, and start unlocking it again
1849
    // to match the annotation at the end of outer unlock operation.
1850
0
    ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
1851
0
    ABSL_TSAN_MUTEX_PRE_LOCK(mu, flags);
1852
0
    ABSL_TSAN_MUTEX_POST_LOCK(mu, flags, 0);
1853
0
    res = cond->Eval();
1854
0
    ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
1855
0
  }
1856
  // Prevent unused param warnings in non-TSAN builds.
1857
0
  static_cast<void>(mu);
1858
0
  static_cast<void>(trylock);
1859
0
  static_cast<void>(read_lock);
1860
0
  return res;
1861
0
}
1862
1863
// Compute cond->Eval() hiding it from race detectors.
1864
// We are hiding it because inside of UnlockSlow we can evaluate a predicate
1865
// that was just added by a concurrent Lock operation; Lock adds the predicate
1866
// to the internal Mutex list without actually acquiring the Mutex
1867
// (it only acquires the internal spinlock, which is rightfully invisible for
1868
// tsan). As a result there is no tsan-visible synchronization between the
1869
// addition and this thread. So if we enabled race detection here,
1870
// it would race with the predicate initialization.
1871
0
static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
1872
  // Memory accesses are already ignored inside of lock/unlock operations,
1873
  // but synchronization operations are also ignored. When we evaluate the
1874
  // predicate we must ignore only memory accesses but not synchronization,
1875
  // because missed synchronization can lead to false reports later.
1876
  // So we "divert" (which un-ignores both memory accesses and synchronization)
1877
  // and then separately turn on ignores of memory accesses.
1878
0
  ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
1879
0
  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1880
0
  bool res = cond->Eval();
1881
0
  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
1882
0
  ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
1883
0
  static_cast<void>(mu);  // Prevent unused param warning in non-TSAN builds.
1884
0
  return res;
1885
0
}
1886
1887
// Internal equivalent of *LockWhenWithDeadline(), where
1888
//   "t" represents the absolute timeout; !t.has_timeout() means "forever".
1889
//   "how" is "kShared" (for ReaderLockWhen) or "kExclusive" (for LockWhen)
1890
// In flags, bits are ored together:
1891
// - kMuHasBlocked indicates that the client has already blocked on the call so
1892
//   the designated waker bit must be cleared and waiting writers should not
1893
//   obstruct this call
1894
// - kMuIsCond indicates that this is a conditional acquire (condition variable,
1895
//   Await,  LockWhen) so contention profiling should be suppressed.
1896
bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
1897
0
                                 KernelTimeout t, int flags) {
1898
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
1899
0
  bool unlock = false;
1900
0
  if ((v & how->fast_need_zero) == 0 &&  // try fast acquire
1901
0
      mu_.compare_exchange_strong(
1902
0
          v,
1903
0
          (how->fast_or |
1904
0
           (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
1905
0
              how->fast_add,
1906
0
          std::memory_order_acquire, std::memory_order_relaxed)) {
1907
0
    if (cond == nullptr ||
1908
0
        EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
1909
0
      return true;
1910
0
    }
1911
0
    unlock = true;
1912
0
  }
1913
0
  SynchWaitParams waitp(
1914
0
      how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
1915
0
      nullptr /*no cv_word*/);
1916
0
  if (!Condition::GuaranteedEqual(cond, nullptr)) {
1917
0
    flags |= kMuIsCond;
1918
0
  }
1919
0
  if (unlock) {
1920
0
    this->UnlockSlow(&waitp);
1921
0
    this->Block(waitp.thread);
1922
0
    flags |= kMuHasBlocked;
1923
0
  }
1924
0
  this->LockSlowLoop(&waitp, flags);
1925
0
  return waitp.cond != nullptr ||  // => cond known true from LockSlowLoop
1926
0
         cond == nullptr ||
1927
0
         EvalConditionAnnotated(cond, this, true, false, how == kShared);
1928
0
}
1929
1930
// RAW_CHECK_FMT() takes a condition, a printf-style format string, and
1931
// the printf-style argument list.   The format string must be a literal.
1932
// Arguments after the first are not evaluated unless the condition is false.
1933
#define RAW_CHECK_FMT(cond, ...)                                   \
1934
0
  do {                                                             \
1935
0
    if (ABSL_PREDICT_FALSE(!(cond))) {                             \
1936
0
      ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \
1937
0
    }                                                              \
1938
0
  } while (0)
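// Editorial usage sketch of the macro above (the event lookup and message
// below are hypothetical): because the format arguments sit inside the
// if-branch, they are evaluated, and the process aborts, only when the
// condition is false.
//
//   SynchEvent *e = GetSynchEvent(mu);
//   RAW_CHECK_FMT(e != nullptr, "missing SynchEvent for mutex %p",
//                 static_cast<void *>(mu));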
1939
1940
0
static void CheckForMutexCorruption(intptr_t v, const char* label) {
1941
  // Test for either of two situations that should not occur in v:
1942
  //   kMuWriter and kMuReader
1943
  //   kMuWrWait and !kMuWait
1944
0
  const uintptr_t w = static_cast<uintptr_t>(v ^ kMuWait);
1945
  // By flipping that bit, we can now test for:
1946
  //   kMuWriter and kMuReader in w
1947
  //   kMuWrWait and kMuWait in w
1948
  // We've chosen these two pairs of values so that they will overlap,
1949
  // respectively, when the word is left shifted by three.  This allows us to
1950
  // save a branch in the common (correct) case of them not being coincident.
1951
0
  static_assert(kMuReader << 3 == kMuWriter, "must match");
1952
0
  static_assert(kMuWait << 3 == kMuWrWait, "must match");
1953
0
  if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
1954
0
  RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
1955
0
                "%s: Mutex corrupt: both reader and writer lock held: %p",
1956
0
                label, reinterpret_cast<void *>(v));
1957
0
  RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
1958
0
                "%s: Mutex corrupt: waiting writer with no waiters: %p",
1959
0
                label, reinterpret_cast<void *>(v));
1960
0
  assert(false);
1961
0
}
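// Editorial, standalone check of the shift trick above, using illustrative
// constants that satisfy the two static_asserts (kMuReader << 3 == kMuWriter
// and kMuWait << 3 == kMuWrWait); the real values are defined earlier in
// this file.  The single masked AND fires exactly when one of the two
// corrupt combinations is present.
#include <cassert>
#include <cstdint>

int main() {
  constexpr std::uintptr_t kMuReader = 0x01, kMuWait = 0x04, kMuWriter = 0x08,
                           kMuWrWait = 0x20;
  for (std::uintptr_t v = 0; v < 0x40; ++v) {
    const bool corrupt =
        ((v & (kMuWriter | kMuReader)) == (kMuWriter | kMuReader)) ||
        ((v & (kMuWait | kMuWrWait)) == kMuWrWait);
    const std::uintptr_t w = v ^ kMuWait;
    const bool fast_test_fires = (w & (w << 3) & (kMuWriter | kMuWrWait)) != 0;
    assert(corrupt == fast_test_fires);
  }
  return 0;
}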
1962
1963
0
void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
1964
0
  SchedulingGuard::ScopedDisable disable_rescheduling;
1965
0
  int c = 0;
1966
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
1967
0
  if ((v & kMuEvent) != 0) {
1968
0
    PostSynchEvent(this,
1969
0
         waitp->how == kExclusive?  SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
1970
0
  }
1971
0
  ABSL_RAW_CHECK(
1972
0
      waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
1973
0
      "detected illegal recursion into Mutex code");
1974
0
  for (;;) {
1975
0
    v = mu_.load(std::memory_order_relaxed);
1976
0
    CheckForMutexCorruption(v, "Lock");
1977
0
    if ((v & waitp->how->slow_need_zero) == 0) {
1978
0
      if (mu_.compare_exchange_strong(
1979
0
              v,
1980
0
              (waitp->how->fast_or |
1981
0
               (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
1982
0
                  waitp->how->fast_add,
1983
0
              std::memory_order_acquire, std::memory_order_relaxed)) {
1984
0
        if (waitp->cond == nullptr ||
1985
0
            EvalConditionAnnotated(waitp->cond, this, true, false,
1986
0
                                   waitp->how == kShared)) {
1987
0
          break;  // we timed out, or condition true, so return
1988
0
        }
1989
0
        this->UnlockSlow(waitp);  // got lock but condition false
1990
0
        this->Block(waitp->thread);
1991
0
        flags |= kMuHasBlocked;
1992
0
        c = 0;
1993
0
      }
1994
0
    } else {                      // need to access waiter list
1995
0
      bool dowait = false;
1996
0
      if ((v & (kMuSpin|kMuWait)) == 0) {   // no waiters
1997
        // This thread tries to become the one and only waiter.
1998
0
        PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
1999
0
        intptr_t nv =
2000
0
            (v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
2001
0
            kMuWait;
2002
0
        ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
2003
0
        if (waitp->how == kExclusive && (v & kMuReader) != 0) {
2004
0
          nv |= kMuWrWait;
2005
0
        }
2006
0
        if (mu_.compare_exchange_strong(
2007
0
                v, reinterpret_cast<intptr_t>(new_h) | nv,
2008
0
                std::memory_order_release, std::memory_order_relaxed)) {
2009
0
          dowait = true;
2010
0
        } else {            // attempted Enqueue() failed
2011
          // zero out the waitp field set by Enqueue()
2012
0
          waitp->thread->waitp = nullptr;
2013
0
        }
2014
0
      } else if ((v & waitp->how->slow_inc_need_zero &
2015
0
                  IgnoreWaitingWritersMask(flags & kMuHasBlocked)) == 0) {
2016
        // This is a reader that needs to increment the reader count,
2017
        // but the count is currently held in the last waiter.
2018
0
        if (mu_.compare_exchange_strong(
2019
0
                v,
2020
0
                (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
2021
0
                    kMuSpin | kMuReader,
2022
0
                std::memory_order_acquire, std::memory_order_relaxed)) {
2023
0
          PerThreadSynch *h = GetPerThreadSynch(v);
2024
0
          h->readers += kMuOne;       // inc reader count in waiter
2025
0
          do {                        // release spinlock
2026
0
            v = mu_.load(std::memory_order_relaxed);
2027
0
          } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
2028
0
                                              std::memory_order_release,
2029
0
                                              std::memory_order_relaxed));
2030
0
          if (waitp->cond == nullptr ||
2031
0
              EvalConditionAnnotated(waitp->cond, this, true, false,
2032
0
                                     waitp->how == kShared)) {
2033
0
            break;  // we timed out, or condition true, so return
2034
0
          }
2035
0
          this->UnlockSlow(waitp);           // got lock but condition false
2036
0
          this->Block(waitp->thread);
2037
0
          flags |= kMuHasBlocked;
2038
0
          c = 0;
2039
0
        }
2040
0
      } else if ((v & kMuSpin) == 0 &&  // attempt to queue ourselves
2041
0
                 mu_.compare_exchange_strong(
2042
0
                     v,
2043
0
                     (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
2044
0
                         kMuSpin | kMuWait,
2045
0
                     std::memory_order_acquire, std::memory_order_relaxed)) {
2046
0
        PerThreadSynch *h = GetPerThreadSynch(v);
2047
0
        PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
2048
0
        intptr_t wr_wait = 0;
2049
0
        ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
2050
0
        if (waitp->how == kExclusive && (v & kMuReader) != 0) {
2051
0
          wr_wait = kMuWrWait;      // give priority to a waiting writer
2052
0
        }
2053
0
        do {                        // release spinlock
2054
0
          v = mu_.load(std::memory_order_relaxed);
2055
0
        } while (!mu_.compare_exchange_weak(
2056
0
            v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
2057
0
            reinterpret_cast<intptr_t>(new_h),
2058
0
            std::memory_order_release, std::memory_order_relaxed));
2059
0
        dowait = true;
2060
0
      }
2061
0
      if (dowait) {
2062
0
        this->Block(waitp->thread);  // wait until removed from list or timeout
2063
0
        flags |= kMuHasBlocked;
2064
0
        c = 0;
2065
0
      }
2066
0
    }
2067
0
    ABSL_RAW_CHECK(
2068
0
        waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
2069
0
        "detected illegal recursion into Mutex code");
2070
    // delay, then try again
2071
0
    c = synchronization_internal::MutexDelay(c, GENTLE);
2072
0
  }
2073
0
  ABSL_RAW_CHECK(
2074
0
      waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
2075
0
      "detected illegal recursion into Mutex code");
2076
0
  if ((v & kMuEvent) != 0) {
2077
0
    PostSynchEvent(this,
2078
0
                   waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
2079
0
                                      SYNCH_EV_READERLOCK_RETURNING);
2080
0
  }
2081
0
}
2082
2083
// Unlock this mutex, which is held by the current thread.
2084
// If waitp is non-zero, it must be the wait parameters for the current thread
2085
// which holds the lock but is not runnable because its condition is false
2086
// or it is in the process of blocking on a condition variable; it must requeue
2087
// itself on the mutex/condvar to wait for its condition to become true.
2088
0
ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
2089
0
  SchedulingGuard::ScopedDisable disable_rescheduling;
2090
0
  intptr_t v = mu_.load(std::memory_order_relaxed);
2091
0
  this->AssertReaderHeld();
2092
0
  CheckForMutexCorruption(v, "Unlock");
2093
0
  if ((v & kMuEvent) != 0) {
2094
0
    PostSynchEvent(this,
2095
0
                (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
2096
0
  }
2097
0
  int c = 0;
2098
  // the waiter under consideration to wake, or zero
2099
0
  PerThreadSynch *w = nullptr;
2100
  // the predecessor to w or zero
2101
0
  PerThreadSynch *pw = nullptr;
2102
  // head of the list searched previously, or zero
2103
0
  PerThreadSynch *old_h = nullptr;
2104
  // a condition that's known to be false.
2105
0
  const Condition *known_false = nullptr;
2106
0
  PerThreadSynch *wake_list = kPerThreadSynchNull;   // list of threads to wake
2107
0
  intptr_t wr_wait = 0;        // set to kMuWrWait if we wake a reader and a
2108
                               // later writer could have acquired the lock
2109
                               // (starvation avoidance)
2110
0
  ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
2111
0
                     waitp->thread->suppress_fatal_errors,
2112
0
                 "detected illegal recursion into Mutex code");
2113
  // This loop finds the threads to wake (wake_list), if any, and removes them
2114
  // the list of waiters.  In addition, it places waitp.thread on the queue of
2115
  // waiters if waitp is non-zero.
2116
0
  for (;;) {
2117
0
    v = mu_.load(std::memory_order_relaxed);
2118
0
    if ((v & kMuWriter) != 0 && (v & (kMuWait | kMuDesig)) != kMuWait &&
2119
0
        waitp == nullptr) {
2120
      // fast writer release (writer with no waiters or with designated waker)
2121
0
      if (mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
2122
0
                                      std::memory_order_release,
2123
0
                                      std::memory_order_relaxed)) {
2124
0
        return;
2125
0
      }
2126
0
    } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
2127
      // fast reader release (reader with no waiters)
2128
0
      intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
2129
0
      if (mu_.compare_exchange_strong(v, v - clear,
2130
0
                                      std::memory_order_release,
2131
0
                                      std::memory_order_relaxed)) {
2132
0
        return;
2133
0
      }
2134
0
    } else if ((v & kMuSpin) == 0 &&  // attempt to get spinlock
2135
0
               mu_.compare_exchange_strong(v, v | kMuSpin,
2136
0
                                           std::memory_order_acquire,
2137
0
                                           std::memory_order_relaxed)) {
2138
0
      if ((v & kMuWait) == 0) {       // no one to wake
2139
0
        intptr_t nv;
2140
0
        bool do_enqueue = true;  // always Enqueue() the first time
2141
0
        ABSL_RAW_CHECK(waitp != nullptr,
2142
0
                       "UnlockSlow is confused");  // about to sleep
2143
0
        do {    // must loop to release spinlock as reader count may change
2144
0
          v = mu_.load(std::memory_order_relaxed);
2145
          // decrement reader count if there are readers
2146
0
          intptr_t new_readers = (v >= kMuOne)?  v - kMuOne : v;
2147
0
          PerThreadSynch *new_h = nullptr;
2148
0
          if (do_enqueue) {
2149
            // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
2150
            // we must not retry here.  The initial attempt will always have
2151
            // succeeded; further attempts would enqueue us against *this due to
2152
            // Fer() handling.
2153
0
            do_enqueue = (waitp->cv_word == nullptr);
2154
0
            new_h = Enqueue(nullptr, waitp, new_readers, kMuIsCond);
2155
0
          }
2156
0
          intptr_t clear = kMuWrWait | kMuWriter;  // by default clear write bit
2157
0
          if ((v & kMuWriter) == 0 && ExactlyOneReader(v)) {  // last reader
2158
0
            clear = kMuWrWait | kMuReader;                    // clear read bit
2159
0
          }
2160
0
          nv = (v & kMuLow & ~clear & ~kMuSpin);
2161
0
          if (new_h != nullptr) {
2162
0
            nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2163
0
          } else {  // new_h could be nullptr if we queued ourselves on a
2164
                    // CondVar
2165
            // In that case, we must place the reader count back in the mutex
2166
            // word, as Enqueue() did not store it in the new waiter.
2167
0
            nv |= new_readers & kMuHigh;
2168
0
          }
2169
          // release spinlock & our lock; retry if reader-count changed
2170
          // (writer count cannot change since we hold lock)
2171
0
        } while (!mu_.compare_exchange_weak(v, nv,
2172
0
                                            std::memory_order_release,
2173
0
                                            std::memory_order_relaxed));
2174
0
        break;
2175
0
      }
2176
2177
      // There are waiters.
2178
      // Set h to the head of the circular waiter list.
2179
0
      PerThreadSynch *h = GetPerThreadSynch(v);
2180
0
      if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
2181
        // a reader but not the last
2182
0
        h->readers -= kMuOne;  // release our lock
2183
0
        intptr_t nv = v;       // normally just release spinlock
2184
0
        if (waitp != nullptr) {  // but waitp!=nullptr => must queue ourselves
2185
0
          PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
2186
0
          ABSL_RAW_CHECK(new_h != nullptr,
2187
0
                         "waiters disappeared during Enqueue()!");
2188
0
          nv &= kMuLow;
2189
0
          nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2190
0
        }
2191
0
        mu_.store(nv, std::memory_order_release);  // release spinlock
2192
        // can release with a store because there were waiters
2193
0
        break;
2194
0
      }
2195
2196
      // Either we didn't search before, or we marked the queue
2197
      // as "maybe_unlocking" and no one else should have changed it.
2198
0
      ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking,
2199
0
                     "Mutex queue changed beneath us");
2200
2201
      // The lock is becoming free, and there's a waiter
2202
0
      if (old_h != nullptr &&
2203
0
          !old_h->may_skip) {                  // we used old_h as a terminator
2204
0
        old_h->may_skip = true;                // allow old_h to skip once more
2205
0
        ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
2206
0
        if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
2207
0
          old_h->skip = old_h->next;  // old_h not head & can skip to successor
2208
0
        }
2209
0
      }
2210
0
      if (h->next->waitp->how == kExclusive &&
2211
0
          Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
2212
        // easy case: writer with no condition; no need to search
2213
0
        pw = h;                       // wake w, the successor of h (=pw)
2214
0
        w = h->next;
2215
0
        w->wake = true;
2216
        // We are waking up a writer.  This writer may be racing against
2217
        // an already awake reader for the lock.  We want the
2218
        // writer to usually win this race,
2219
        // because if it doesn't, we can potentially keep taking a reader
2220
        // perpetually and writers will starve.  Worse than
2221
        // that, this can also starve other readers if kMuWrWait gets set
2222
        // later.
2223
0
        wr_wait = kMuWrWait;
2224
0
      } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) {
2225
        // we found a waiter w to wake on a previous iteration and either it's
2226
        // a writer, or we've searched the entire list so we have all the
2227
        // readers.
2228
0
        if (pw == nullptr) {  // if w's predecessor is unknown, it must be h
2229
0
          pw = h;
2230
0
        }
2231
0
      } else {
2232
        // At this point we don't know all the waiters to wake, and the first
2233
        // waiter has a condition or is a reader.  We avoid searching over
2234
        // waiters we've searched on previous iterations by starting at
2235
        // old_h if it's set.  If old_h==h, there's no one to wake up at all.
2236
0
        if (old_h == h) {      // we've searched before, and nothing's new
2237
                               // so there's no one to wake.
2238
0
          intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
2239
0
          h->readers = 0;
2240
0
          h->maybe_unlocking = false;   // finished unlocking
2241
0
          if (waitp != nullptr) {       // we must queue ourselves and sleep
2242
0
            PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
2243
0
            nv &= kMuLow;
2244
0
            if (new_h != nullptr) {
2245
0
              nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2246
0
            }  // else new_h could be nullptr if we queued ourselves on a
2247
               // CondVar
2248
0
          }
2249
          // release spinlock & lock
2250
          // can release with a store because there were waiters
2251
0
          mu_.store(nv, std::memory_order_release);
2252
0
          break;
2253
0
        }
2254
2255
        // set up to walk the list
2256
0
        PerThreadSynch *w_walk;   // current waiter during list walk
2257
0
        PerThreadSynch *pw_walk;  // previous waiter during list walk
2258
0
        if (old_h != nullptr) {  // we've searched up to old_h before
2259
0
          pw_walk = old_h;
2260
0
          w_walk = old_h->next;
2261
0
        } else {            // no prior search, start at beginning
2262
0
          pw_walk =
2263
0
              nullptr;  // h->next's predecessor may change; don't record it
2264
0
          w_walk = h->next;
2265
0
        }
2266
2267
0
        h->may_skip = false;  // ensure we never skip past h in future searches
2268
                              // even if other waiters are queued after it.
2269
0
        ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head");
2270
2271
0
        h->maybe_unlocking = true;  // we're about to scan the waiter list
2272
                                    // without the spinlock held.
2273
                                    // Enqueue must be conservative about
2274
                                    // priority queuing.
2275
2276
        // We must release the spinlock to evaluate the conditions.
2277
0
        mu_.store(v, std::memory_order_release);  // release just spinlock
2278
        // can release with a store because there were waiters
2279
2280
        // h is the last waiter queued, and w_walk the first unsearched waiter.
2281
        // Without the spinlock, the locations mu_ and h->next may now change
2282
        // underneath us, but since we hold the lock itself, the only legal
2283
        // change is to add waiters between h and w_walk.  Therefore, it's safe
2284
        // to walk the path from w_walk to h inclusive. (TryRemove() can remove
2285
        // a waiter anywhere, but it acquires both the spinlock and the Mutex)
2286
2287
0
        old_h = h;        // remember we searched to here
2288
2289
        // Walk the path up to and including h looking for waiters we can wake.
2290
0
        while (pw_walk != h) {
2291
0
          w_walk->wake = false;
2292
0
          if (w_walk->waitp->cond ==
2293
0
                  nullptr ||  // no condition => vacuously true OR
2294
0
              (w_walk->waitp->cond != known_false &&
2295
               // this thread's condition is not known false, AND
2296
               //  is in fact true
2297
0
               EvalConditionIgnored(this, w_walk->waitp->cond))) {
2298
0
            if (w == nullptr) {
2299
0
              w_walk->wake = true;    // can wake this waiter
2300
0
              w = w_walk;
2301
0
              pw = pw_walk;
2302
0
              if (w_walk->waitp->how == kExclusive) {
2303
0
                wr_wait = kMuWrWait;
2304
0
                break;                // bail if waking this writer
2305
0
              }
2306
0
            } else if (w_walk->waitp->how == kShared) {  // wake if a reader
2307
0
              w_walk->wake = true;
2308
0
            } else {   // writer with true condition
2309
0
              wr_wait = kMuWrWait;
2310
0
            }
2311
0
          } else {                  // can't wake; condition false
2312
0
            known_false = w_walk->waitp->cond;  // remember last false condition
2313
0
          }
2314
0
          if (w_walk->wake) {   // we're waking reader w_walk
2315
0
            pw_walk = w_walk;   // don't skip similar waiters
2316
0
          } else {              // not waking; skip as much as possible
2317
0
            pw_walk = Skip(w_walk);
2318
0
          }
2319
          // If pw_walk == h, then the load of pw_walk->next can race with a
2320
          // concurrent write in Enqueue(). However, at the same time
2321
          // we do not need to do the load, because we will bail out
2322
          // from the loop anyway.
2323
0
          if (pw_walk != h) {
2324
0
            w_walk = pw_walk->next;
2325
0
          }
2326
0
        }
2327
2328
0
        continue;  // restart for(;;)-loop to wakeup w or to find more waiters
2329
0
      }
2330
0
      ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor");
2331
      // The first (and perhaps only) waiter we've chosen to wake is w, whose
2332
      // predecessor is pw.  If w is a reader, we must wake all the other
2333
      // waiters with wake==true as well.  We may also need to queue
2334
      // ourselves if waitp != null.  The spinlock and the lock are still
2335
      // held.
2336
2337
      // This traverses the list in [ pw->next, h ], where h is the head,
2338
      // removing all elements with wake==true and placing them in the
2339
      // singly-linked list wake_list.  Returns the new head.
2340
0
      h = DequeueAllWakeable(h, pw, &wake_list);
2341
2342
0
      intptr_t nv = (v & kMuEvent) | kMuDesig;
2343
                                             // assume no waiters left,
2344
                                             // set kMuDesig for INV1a
2345
2346
0
      if (waitp != nullptr) {  // we must queue ourselves and sleep
2347
0
        h = Enqueue(h, waitp, v, kMuIsCond);
2348
        // h is new last waiter; could be null if we queued ourselves on a
2349
        // CondVar
2350
0
      }
2351
2352
0
      ABSL_RAW_CHECK(wake_list != kPerThreadSynchNull,
2353
0
                     "unexpected empty wake list");
2354
2355
0
      if (h != nullptr) {  // there are waiters left
2356
0
        h->readers = 0;
2357
0
        h->maybe_unlocking = false;     // finished unlocking
2358
0
        nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
2359
0
      }
2360
2361
      // release both spinlock & lock
2362
      // can release with a store because there were waiters
2363
0
      mu_.store(nv, std::memory_order_release);
2364
0
      break;  // out of for(;;)-loop
2365
0
    }
2366
    // aggressive here; no one can proceed till we do
2367
0
    c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
2368
0
  }                            // end of for(;;)-loop
2369
2370
0
  if (wake_list != kPerThreadSynchNull) {
2371
0
    int64_t total_wait_cycles = 0;
2372
0
    int64_t max_wait_cycles = 0;
2373
0
    int64_t now = base_internal::CycleClock::Now();
2374
0
    do {
2375
      // Profile lock contention events only if the waiter was trying to acquire
2376
      // the lock, not waiting on a condition variable or Condition.
2377
0
      if (!wake_list->cond_waiter) {
2378
0
        int64_t cycles_waited =
2379
0
            (now - wake_list->waitp->contention_start_cycles);
2380
0
        total_wait_cycles += cycles_waited;
2381
0
        if (max_wait_cycles == 0) max_wait_cycles = cycles_waited;
2382
0
        wake_list->waitp->contention_start_cycles = now;
2383
0
        wake_list->waitp->should_submit_contention_data = true;
2384
0
      }
2385
0
      wake_list = Wakeup(wake_list);              // wake waiters
2386
0
    } while (wake_list != kPerThreadSynchNull);
2387
0
    if (total_wait_cycles > 0) {
2388
0
      mutex_tracer("slow release", this, total_wait_cycles);
2389
0
      ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
2390
0
      submit_profile_data(total_wait_cycles);
2391
0
      ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
2392
0
    }
2393
0
  }
2394
0
}
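// Editorial, standalone sketch (not the real DequeueAllWakeable()) of the
// list surgery performed above: walk a circular singly-linked list whose
// head h is the most recently queued waiter (h->next is the oldest), unlink
// every node marked wake == true, and collect the removed nodes in a
// singly-linked wake list.  The Node type and function name are illustrative.
struct Node {
  Node *next;
  bool wake;
};

// Returns the new head (nullptr if every node was removed); *wake_list
// receives the removed nodes.
Node *ExtractWakeable(Node *h, Node **wake_list) {
  *wake_list = nullptr;
  if (h == nullptr) return nullptr;
  Node *pred = h;          // predecessor of the node under inspection
  Node *cur = h->next;     // start at the oldest waiter
  Node *new_h = h;
  bool done = false;
  while (!done) {
    done = (cur == h);     // stop after inspecting the original head
    Node *next = cur->next;
    if (cur->wake) {
      pred->next = next;                           // unlink cur
      if (cur == new_h) new_h = (pred == cur) ? nullptr : pred;
      cur->next = *wake_list;                      // push onto the wake list
      *wake_list = cur;
    } else {
      pred = cur;          // keep cur; it becomes the next predecessor
    }
    cur = next;
  }
  return new_h;
}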
2395
2396
// Used by CondVar implementation to reacquire mutex after waking from
2397
// condition variable.  This routine is used instead of Lock() because the
2398
// waiting thread may have been moved from the condition variable queue to the
2399
// mutex queue without a wakeup, by Trans().  In that case, when the thread is
2400
// finally woken, the woken thread will believe it has been woken from the
2401
// condition variable (i.e. its PC will be in the CondVar code), when
2402
// in fact it has just been woken from the mutex.  Thus, it must enter the slow
2403
// path of the mutex in the same state as if it had just woken from the mutex.
2404
// That is, it must make sure to clear kMuDesig (INV1b).
2405
0
void Mutex::Trans(MuHow how) {
2406
0
  this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);
2407
0
}
2408
2409
// Used by CondVar implementation to effectively wake thread w from the
2410
// condition variable.  If this mutex is free, we simply wake the thread.
2411
// It will later acquire the mutex with high probability.  Otherwise, we
2412
// enqueue thread w on this mutex.
2413
0
void Mutex::Fer(PerThreadSynch *w) {
2414
0
  SchedulingGuard::ScopedDisable disable_rescheduling;
2415
0
  int c = 0;
2416
0
  ABSL_RAW_CHECK(w->waitp->cond == nullptr,
2417
0
                 "Mutex::Fer while waiting on Condition");
2418
0
  ABSL_RAW_CHECK(!w->waitp->timeout.has_timeout(),
2419
0
                 "Mutex::Fer while in timed wait");
2420
0
  ABSL_RAW_CHECK(w->waitp->cv_word == nullptr,
2421
0
                 "Mutex::Fer with pending CondVar queueing");
2422
0
  for (;;) {
2423
0
    intptr_t v = mu_.load(std::memory_order_relaxed);
2424
    // Note: we must not queue w if the mutex is unlocked (nobody would wake it).
2425
    // For example, we can have only kMuWait (conditional) or maybe
2426
    // kMuWait|kMuWrWait.
2427
    // conflicting != 0 implies that the waking thread cannot currently take
2428
    // the mutex, which in turn implies that someone else has it and can wake
2429
    // us if we queue.
2430
0
    const intptr_t conflicting =
2431
0
        kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader);
2432
0
    if ((v & conflicting) == 0) {
2433
0
      w->next = nullptr;
2434
0
      w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2435
0
      IncrementSynchSem(this, w);
2436
0
      return;
2437
0
    } else {
2438
0
      if ((v & (kMuSpin | kMuWait)) == 0) {     // no waiters
2439
        // This thread tries to become the one and only waiter.
2440
0
        PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
2441
0
        ABSL_RAW_CHECK(new_h != nullptr,
2442
0
                       "Enqueue failed");  // we must queue ourselves
2443
0
        if (mu_.compare_exchange_strong(
2444
0
                v, reinterpret_cast<intptr_t>(new_h) | (v & kMuLow) | kMuWait,
2445
0
                std::memory_order_release, std::memory_order_relaxed)) {
2446
0
          return;
2447
0
        }
2448
0
      } else if ((v & kMuSpin) == 0 &&
2449
0
                 mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
2450
0
        PerThreadSynch *h = GetPerThreadSynch(v);
2451
0
        PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
2452
0
        ABSL_RAW_CHECK(new_h != nullptr,
2453
0
                       "Enqueue failed");  // we must queue ourselves
2454
0
        do {
2455
0
          v = mu_.load(std::memory_order_relaxed);
2456
0
        } while (!mu_.compare_exchange_weak(
2457
0
            v,
2458
0
            (v & kMuLow & ~kMuSpin) | kMuWait |
2459
0
                reinterpret_cast<intptr_t>(new_h),
2460
0
            std::memory_order_release, std::memory_order_relaxed));
2461
0
        return;
2462
0
      }
2463
0
    }
2464
0
    c = synchronization_internal::MutexDelay(c, GENTLE);
2465
0
  }
2466
0
}
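// A minimal illustrative sketch (not part of mutex.cc itself): the publish
// loop above is the standard compare-exchange retry idiom, which recomputes
// the desired word from a fresh load until the exchange succeeds.  SetBits is
// a hypothetical name:
static void SetBits(std::atomic<intptr_t> *word, intptr_t bits) {
  intptr_t v = word->load(std::memory_order_relaxed);
  while (!word->compare_exchange_weak(v, v | bits,
                                      std::memory_order_release,
                                      std::memory_order_relaxed)) {
    // On failure, compare_exchange_weak reloads the current value into v;
    // simply retry with the recomputed desired word.
  }
}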
2467
2468
0
void Mutex::AssertHeld() const {
2469
0
  if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
2470
0
    SynchEvent *e = GetSynchEvent(this);
2471
0
    ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
2472
0
                 static_cast<const void *>(this),
2473
0
                 (e == nullptr ? "" : e->name));
2474
0
  }
2475
0
}
2476
2477
0
void Mutex::AssertReaderHeld() const {
2478
0
  if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
2479
0
    SynchEvent *e = GetSynchEvent(this);
2480
0
    ABSL_RAW_LOG(
2481
0
        FATAL, "thread should hold at least a read lock on Mutex %p %s",
2482
0
        static_cast<const void *>(this), (e == nullptr ? "" : e->name));
2483
0
  }
2484
0
}
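// A minimal illustrative sketch (not part of mutex.cc itself): the assertions
// above let a helper document and check its locking precondition.
// ExampleCounter is a hypothetical class:
class ExampleCounter {
 public:
  void IncrementLocked() {
    mu_.AssertHeld();  // dies with the FATAL message above if mu_ is not held
    ++count_;
  }

 private:
  absl::Mutex mu_;
  int64_t count_ = 0;  // guarded by mu_
};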
2485
2486
// -------------------------------- condition variables
2487
static const intptr_t kCvSpin = 0x0001L;   // spinlock protects waiter list
2488
static const intptr_t kCvEvent = 0x0002L;  // record events
2489
2490
static const intptr_t kCvLow = 0x0003L;  // low order bits of CV
2491
2492
// Hack to make constant values available to gdb pretty printer
2493
enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };
2494
2495
static_assert(PerThreadSynch::kAlignment > kCvLow,
2496
              "PerThreadSynch::kAlignment must be greater than kCvLow");
2497
2498
0
void CondVar::EnableDebugLog(const char *name) {
2499
0
  SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
2500
0
  e->log = true;
2501
0
  UnrefSynchEvent(e);
2502
0
}
2503
2504
0
CondVar::~CondVar() {
2505
0
  if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
2506
0
    ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
2507
0
  }
2508
0
}
2509
2510
2511
// Remove thread s from the list of waiters on this condition variable.
2512
0
void CondVar::Remove(PerThreadSynch *s) {
2513
0
  SchedulingGuard::ScopedDisable disable_rescheduling;
2514
0
  intptr_t v;
2515
0
  int c = 0;
2516
0
  for (v = cv_.load(std::memory_order_relaxed);;
2517
0
       v = cv_.load(std::memory_order_relaxed)) {
2518
0
    if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
2519
0
        cv_.compare_exchange_strong(v, v | kCvSpin,
2520
0
                                    std::memory_order_acquire,
2521
0
                                    std::memory_order_relaxed)) {
2522
0
      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2523
0
      if (h != nullptr) {
2524
0
        PerThreadSynch *w = h;
2525
0
        while (w->next != s && w->next != h) {  // search for thread
2526
0
          w = w->next;
2527
0
        }
2528
0
        if (w->next == s) {           // found thread; remove it
2529
0
          w->next = s->next;
2530
0
          if (h == s) {
2531
0
            h = (w == s) ? nullptr : w;
2532
0
          }
2533
0
          s->next = nullptr;
2534
0
          s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2535
0
        }
2536
0
      }
2537
                                      // release spinlock
2538
0
      cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
2539
0
                std::memory_order_release);
2540
0
      return;
2541
0
    } else {
2542
      // try again after a delay
2543
0
      c = synchronization_internal::MutexDelay(c, GENTLE);
2544
0
    }
2545
0
  }
2546
0
}
2547
2548
// Queue thread waitp->thread on condition variable word cv_word using
2549
// wait parameters waitp.
2550
// We split this into a separate routine, rather than simply doing it as part
2551
// of WaitCommon().  If we were to queue ourselves on the condition variable
2552
// before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
2553
// the logging code, or via a Condition function) and might potentially attempt
2554
// to block this thread.  That would be a problem if the thread were already on
2555
// a condition variable waiter queue.  Thus, we use the waitp->cv_word to tell
2556
// the unlock code to call CondVarEnqueue() to queue the thread on the condition
2557
// variable queue just before the mutex is to be unlocked, and (most
2558
// importantly) after any call to an external routine that might re-enter the
2559
// mutex code.
2560
0
static void CondVarEnqueue(SynchWaitParams *waitp) {
2561
  // This thread might be transferred to the Mutex queue by Fer() when
2562
  // we are woken.  To make sure that is what happens, Enqueue() doesn't
2563
  // call CondVarEnqueue() again but instead uses its normal code.  We
2564
  // must do this before we queue ourselves so that cv_word will be null
2565
  // when seen by the dequeuer, who may wish immediately to requeue
2566
  // this thread on another queue.
2567
0
  std::atomic<intptr_t> *cv_word = waitp->cv_word;
2568
0
  waitp->cv_word = nullptr;
2569
2570
0
  intptr_t v = cv_word->load(std::memory_order_relaxed);
2571
0
  int c = 0;
2572
0
  while ((v & kCvSpin) != 0 ||  // acquire spinlock
2573
0
         !cv_word->compare_exchange_weak(v, v | kCvSpin,
2574
0
                                         std::memory_order_acquire,
2575
0
                                         std::memory_order_relaxed)) {
2576
0
    c = synchronization_internal::MutexDelay(c, GENTLE);
2577
0
    v = cv_word->load(std::memory_order_relaxed);
2578
0
  }
2579
0
  ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
2580
0
  waitp->thread->waitp = waitp;      // prepare ourselves for waiting
2581
0
  PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2582
0
  if (h == nullptr) {  // add this thread to waiter list
2583
0
    waitp->thread->next = waitp->thread;
2584
0
  } else {
2585
0
    waitp->thread->next = h->next;
2586
0
    h->next = waitp->thread;
2587
0
  }
2588
0
  waitp->thread->state.store(PerThreadSynch::kQueued,
2589
0
                             std::memory_order_relaxed);
2590
0
  cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread),
2591
0
                 std::memory_order_release);
2592
0
}
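// A minimal illustrative sketch (not part of mutex.cc itself): the waiter
// list built above is a circular singly-linked list addressed by its newest
// element, so head->next is always the oldest waiter (the one Signal()
// removes first).  The insertion step in miniature (ExamplePush is a
// hypothetical name):
static PerThreadSynch *ExamplePush(PerThreadSynch *newest, PerThreadSynch *n) {
  if (newest == nullptr) {
    n->next = n;             // a ring containing only n
  } else {
    n->next = newest->next;  // n now points at the oldest waiter
    newest->next = n;
  }
  return n;  // n becomes the list handle, as in the store to cv_word above
}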
2593
2594
0
bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
2595
0
  bool rc = false;          // return value; true iff we timed out
2596
2597
0
  intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
2598
0
  Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
2599
0
  ABSL_TSAN_MUTEX_PRE_UNLOCK(mutex, TsanFlags(mutex_how));
2600
2601
  // maybe trace this call
2602
0
  intptr_t v = cv_.load(std::memory_order_relaxed);
2603
0
  cond_var_tracer("Wait", this);
2604
0
  if ((v & kCvEvent) != 0) {
2605
0
    PostSynchEvent(this, SYNCH_EV_WAIT);
2606
0
  }
2607
2608
  // Release mu and wait on condition variable.
2609
0
  SynchWaitParams waitp(mutex_how, nullptr, t, mutex,
2610
0
                        Synch_GetPerThreadAnnotated(mutex), &cv_);
2611
  // UnlockSlow() will call CondVarEnqueue() just before releasing the
2612
  // Mutex, thus queuing this thread on the condition variable.  See
2613
  // CondVarEnqueue() for the reasons.
2614
0
  mutex->UnlockSlow(&waitp);
2615
2616
  // wait for signal
2617
0
  while (waitp.thread->state.load(std::memory_order_acquire) ==
2618
0
         PerThreadSynch::kQueued) {
2619
0
    if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
2620
      // DecrementSynchSem returned due to timeout.
2621
      // Now we will either (1) remove ourselves from the wait list in Remove
2622
      // below, in which case Remove will set thread.state = kAvailable and
2623
      // we will not call DecrementSynchSem again; or (2) Signal/SignalAll
2624
      // has removed us concurrently and is calling Wakeup, which will set
2625
      // thread.state = kAvailable and post to the semaphore.
2626
      // It's important to reset the timeout for case (2) because otherwise
2627
      // we can live-lock in this loop since DecrementSynchSem will always
2628
      // return immediately due to timeout, but Signal/SignalAll may not
2629
      // have set thread.state = kAvailable yet (and may not be scheduled
2630
      // due to thread priorities or other scheduler artifacts).
2631
      // Note: this could also be resolved if Signal/SignalAll were to set
2632
      // thread.state = kAvailable while holding the wait list spin lock.
2633
      // But this can't be easily done for SignalAll since it grabs the whole
2634
      // wait list with a single compare-exchange and does not really grab
2635
      // the spin lock.
2636
0
      t = KernelTimeout::Never();
2637
0
      this->Remove(waitp.thread);
2638
0
      rc = true;
2639
0
    }
2640
0
  }
2641
2642
0
  ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");
2643
0
  waitp.thread->waitp = nullptr;  // cleanup
2644
2645
  // maybe trace this call
2646
0
  cond_var_tracer("Unwait", this);
2647
0
  if ((v & kCvEvent) != 0) {
2648
0
    PostSynchEvent(this, SYNCH_EV_WAIT_RETURNING);
2649
0
  }
2650
2651
  // From a synchronization point of view, Wait is an unlock of the mutex
2652
  // followed by a lock of the mutex.  We annotated the start of the unlock
2653
  // at the beginning of this function.  Now finish the unlock and annotate
2654
  // the lock of the mutex.  (Trans is effectively the lock.)
2655
0
  ABSL_TSAN_MUTEX_POST_UNLOCK(mutex, TsanFlags(mutex_how));
2656
0
  ABSL_TSAN_MUTEX_PRE_LOCK(mutex, TsanFlags(mutex_how));
2657
0
  mutex->Trans(mutex_how);  // Reacquire mutex
2658
0
  ABSL_TSAN_MUTEX_POST_LOCK(mutex, TsanFlags(mutex_how), 0);
2659
0
  return rc;
2660
0
}
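// A minimal illustrative sketch (not part of mutex.cc itself): WaitCommon()
// reports only whether the wait timed out, and a wait may return before the
// predicate holds, so callers re-check the predicate in a loop.  AwaitReady
// and ready are hypothetical names:
static bool AwaitReady(absl::Mutex *mu, absl::CondVar *cv, bool *ready,
                       absl::Time deadline) {
  mu->AssertHeld();  // *ready is guarded by *mu
  while (!*ready) {
    if (cv->WaitWithDeadline(mu, deadline)) {
      return *ready;  // timed out; the predicate may still have become true
    }
  }
  return true;
}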
2661
2662
0
bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
2663
0
  return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
2664
0
}
2665
2666
0
bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
2667
0
  return WaitCommon(mu, KernelTimeout(deadline));
2668
0
}
2669
2670
0
void CondVar::Wait(Mutex *mu) {
2671
0
  WaitCommon(mu, KernelTimeout::Never());
2672
0
}
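// A minimal illustrative sketch (not part of mutex.cc itself): the canonical
// client pattern served by Wait() is to block in a loop that re-checks a
// predicate, all under the Mutex.  ExampleEventCount is a hypothetical class:
class ExampleEventCount {
 public:
  void Post() {
    absl::MutexLock l(&mu_);
    ++pending_;
    cv_.Signal();  // one blocked Consume() may proceed
  }
  void Consume() {
    absl::MutexLock l(&mu_);
    while (pending_ == 0) cv_.Wait(&mu_);  // mu_ is released while blocked
    --pending_;
  }

 private:
  absl::Mutex mu_;
  absl::CondVar cv_;
  int pending_ = 0;  // guarded by mu_
};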
2673
2674
// Wake thread w.
2675
// If the wait was timed, or there is no Mutex associated with the wait
2676
// (w->waitp->cvmu == nullptr), w is woken directly on its semaphore.
2677
// Otherwise, w is transferred to the associated Mutex via Mutex::Fer().
2678
0
void CondVar::Wakeup(PerThreadSynch *w) {
2679
0
  if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
2680
    // The waiting thread only needs to observe "w->state == kAvailable" to be
2681
    // released, so we must cache "cvmu" before clearing "next".
2682
0
    Mutex *mu = w->waitp->cvmu;
2683
0
    w->next = nullptr;
2684
0
    w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2685
0
    Mutex::IncrementSynchSem(mu, w);
2686
0
  } else {
2687
0
    w->waitp->cvmu->Fer(w);
2688
0
  }
2689
0
}
2690
2691
0
void CondVar::Signal() {
2692
0
  SchedulingGuard::ScopedDisable disable_rescheduling;
2693
0
  ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
2694
0
  intptr_t v;
2695
0
  int c = 0;
2696
0
  for (v = cv_.load(std::memory_order_relaxed); v != 0;
2697
0
       v = cv_.load(std::memory_order_relaxed)) {
2698
0
    if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
2699
0
        cv_.compare_exchange_strong(v, v | kCvSpin,
2700
0
                                    std::memory_order_acquire,
2701
0
                                    std::memory_order_relaxed)) {
2702
0
      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2703
0
      PerThreadSynch *w = nullptr;
2704
0
      if (h != nullptr) {  // remove first waiter
2705
0
        w = h->next;
2706
0
        if (w == h) {
2707
0
          h = nullptr;
2708
0
        } else {
2709
0
          h->next = w->next;
2710
0
        }
2711
0
      }
2712
                                      // release spinlock
2713
0
      cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
2714
0
                std::memory_order_release);
2715
0
      if (w != nullptr) {
2716
0
        CondVar::Wakeup(w);                // wake waiter, if there was one
2717
0
        cond_var_tracer("Signal wakeup", this);
2718
0
      }
2719
0
      if ((v & kCvEvent) != 0) {
2720
0
        PostSynchEvent(this, SYNCH_EV_SIGNAL);
2721
0
      }
2722
0
      ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2723
0
      return;
2724
0
    } else {
2725
0
      c = synchronization_internal::MutexDelay(c, GENTLE);
2726
0
    }
2727
0
  }
2728
0
  ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2729
0
}
2730
2731
0
void CondVar::SignalAll() {
2732
0
  ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
2733
0
  intptr_t v;
2734
0
  int c = 0;
2735
0
  for (v = cv_.load(std::memory_order_relaxed); v != 0;
2736
0
       v = cv_.load(std::memory_order_relaxed)) {
2737
    // Empty the list if the spinlock is free.
2738
    // We do this by simply setting the list to empty using
2739
    // compare-and-swap.  We then have the entire list in our hands,
2740
    // which cannot be changing since we grabbed it while no one
2741
    // held the lock.
2742
0
    if ((v & kCvSpin) == 0 &&
2743
0
        cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
2744
0
                                    std::memory_order_relaxed)) {
2745
0
      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2746
0
      if (h != nullptr) {
2747
0
        PerThreadSynch *w;
2748
0
        PerThreadSynch *n = h->next;
2749
0
        do {                          // for every thread, wake it up
2750
0
          w = n;
2751
0
          n = n->next;
2752
0
          CondVar::Wakeup(w);
2753
0
        } while (w != h);
2754
0
        cond_var_tracer("SignalAll wakeup", this);
2755
0
      }
2756
0
      if ((v & kCvEvent) != 0) {
2757
0
        PostSynchEvent(this, SYNCH_EV_SIGNALALL);
2758
0
      }
2759
0
      ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2760
0
      return;
2761
0
    } else {
2762
      // try again after a delay
2763
0
      c = synchronization_internal::MutexDelay(c, GENTLE);
2764
0
    }
2765
0
  }
2766
0
  ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2767
0
}
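// A minimal illustrative sketch (not part of mutex.cc itself): Signal() suits
// one-waiter-per-event patterns such as ExampleEventCount above, while
// SignalAll() suits broadcast-style state changes where every waiter's
// predicate may now hold.  ExampleShutdown and shutting_down are hypothetical
// names:
static void ExampleShutdown(absl::Mutex *mu, absl::CondVar *cv,
                            bool *shutting_down) {
  absl::MutexLock l(mu);
  *shutting_down = true;  // *shutting_down is guarded by *mu
  cv->SignalAll();        // every waiter wakes and re-checks its predicate
}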
2768
2769
0
void ReleasableMutexLock::Release() {
2770
0
  ABSL_RAW_CHECK(this->mu_ != nullptr,
2771
0
                 "ReleasableMutexLock::Release may only be called once");
2772
0
  this->mu_->Unlock();
2773
0
  this->mu_ = nullptr;
2774
0
}
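// A minimal illustrative sketch (not part of mutex.cc itself): Release() is
// meant to drop the lock before scope exit, at most once; the guard's
// destructor then does nothing.  ExampleSnapshot and flag are hypothetical
// names:
static bool ExampleSnapshot(absl::Mutex *mu, const bool *flag) {
  absl::ReleasableMutexLock l(mu);
  bool copy = *flag;  // read shared state under the lock
  l.Release();        // drop the lock early; a second Release() would trip
                      // the ABSL_RAW_CHECK above
  return copy;        // from here on, *mu is no longer held
}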
2775
2776
#ifdef ABSL_HAVE_THREAD_SANITIZER
2777
extern "C" void __tsan_read1(void *addr);
2778
#else
2779
#define __tsan_read1(addr)  // do nothing if TSan not enabled
2780
#endif
2781
2782
// A function that just returns its argument, dereferenced
2783
0
static bool Dereference(void *arg) {
2784
  // ThreadSanitizer does not instrument this file for memory accesses.
2785
  // This function dereferences a user variable that can participate
2786
  // in a data race, so we need to manually tell TSan about this memory access.
2787
0
  __tsan_read1(arg);
2788
0
  return *(static_cast<bool *>(arg));
2789
0
}
2790
2791
ABSL_CONST_INIT const Condition Condition::kTrue;
2792
2793
Condition::Condition(bool (*func)(void *), void *arg)
2794
    : eval_(&CallVoidPtrFunction),
2795
0
      arg_(arg) {
2796
0
  static_assert(sizeof(&func) <= sizeof(callback_),
2797
0
                "An overlarge function pointer passed to Condition.");
2798
0
  StoreCallback(func);
2799
0
}
2800
2801
0
bool Condition::CallVoidPtrFunction(const Condition *c) {
2802
0
  using FunctionPointer = bool (*)(void *);
2803
0
  FunctionPointer function_pointer;
2804
0
  std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer));
2805
0
  return (*function_pointer)(c->arg_);
2806
0
}
2807
2808
Condition::Condition(const bool *cond)
2809
    : eval_(CallVoidPtrFunction),
2810
      // const_cast is safe since Dereference does not modify arg
2811
0
      arg_(const_cast<bool *>(cond)) {
2812
0
  using FunctionPointer = bool (*)(void *);
2813
0
  const FunctionPointer dereference = Dereference;
2814
0
  StoreCallback(dereference);
2815
0
}
2816
2817
0
bool Condition::Eval() const {
2818
  // eval_ == null for kTrue
2819
0
  return (this->eval_ == nullptr) || (*this->eval_)(this);
2820
0
}
2821
2822
0
bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
2823
  // kTrue logic.
2824
0
  if (a == nullptr || a->eval_ == nullptr) {
2825
0
    return b == nullptr || b->eval_ == nullptr;
2826
0
  } else if (b == nullptr || b->eval_ == nullptr) {
2827
0
    return false;
2828
0
  }
2829
  // Check equality of the representative fields.
2830
0
  return a->eval_ == b->eval_ && a->arg_ == b->arg_ &&
2831
0
         !memcmp(a->callback_, b->callback_, sizeof(a->callback_));
2832
0
}
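// A minimal illustrative sketch (not part of mutex.cc itself): the
// client-side view of the two constructors defined above.  ExampleIsPositive,
// ExampleAwait, value, and done are hypothetical names:
static bool ExampleIsPositive(void *arg) {
  return *static_cast<int *>(arg) > 0;
}
static void ExampleAwait(absl::Mutex *mu, int *value, bool *done) {
  mu->LockWhen(absl::Condition(ExampleIsPositive, value));  // function form
  mu->Unlock();
  mu->LockWhen(absl::Condition(done));  // const bool* form, via Dereference()
  mu->Unlock();
}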
2833
2834
ABSL_NAMESPACE_END
2835
}  // namespace absl