/src/abseil-cpp/absl/synchronization/mutex.cc
Line  | Count  | Source  | 
1  |  | // Copyright 2017 The Abseil Authors.  | 
2  |  | //  | 
3  |  | // Licensed under the Apache License, Version 2.0 (the "License");  | 
4  |  | // you may not use this file except in compliance with the License.  | 
5  |  | // You may obtain a copy of the License at  | 
6  |  | //  | 
7  |  | //      https://www.apache.org/licenses/LICENSE-2.0  | 
8  |  | //  | 
9  |  | // Unless required by applicable law or agreed to in writing, software  | 
10  |  | // distributed under the License is distributed on an "AS IS" BASIS,  | 
11  |  | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  | 
12  |  | // See the License for the specific language governing permissions and  | 
13  |  | // limitations under the License.  | 
14  |  |  | 
15  |  | #include "absl/synchronization/mutex.h"  | 
16  |  |  | 
17  |  | #ifdef _WIN32  | 
18  |  | #include <windows.h>  | 
19  |  | #ifdef ERROR  | 
20  |  | #undef ERROR  | 
21  |  | #endif  | 
22  |  | #else  | 
23  |  | #include <fcntl.h>  | 
24  |  | #include <pthread.h>  | 
25  |  | #include <sched.h>  | 
26  |  | #include <sys/time.h>  | 
27  |  | #endif  | 
28  |  |  | 
29  |  | #include <assert.h>  | 
30  |  | #include <errno.h>  | 
31  |  | #include <stdio.h>  | 
32  |  | #include <stdlib.h>  | 
33  |  | #include <string.h>  | 
34  |  | #include <time.h>  | 
35  |  |  | 
36  |  | #include <algorithm>  | 
37  |  | #include <atomic>  | 
38  |  | #include <cstddef>  | 
39  |  | #include <cstdlib>  | 
40  |  | #include <cstring>  | 
41  |  | #include <thread>  // NOLINT(build/c++11)  | 
42  |  |  | 
43  |  | #include "absl/base/attributes.h"  | 
44  |  | #include "absl/base/call_once.h"  | 
45  |  | #include "absl/base/config.h"  | 
46  |  | #include "absl/base/dynamic_annotations.h"  | 
47  |  | #include "absl/base/internal/atomic_hook.h"  | 
48  |  | #include "absl/base/internal/cycleclock.h"  | 
49  |  | #include "absl/base/internal/hide_ptr.h"  | 
50  |  | #include "absl/base/internal/low_level_alloc.h"  | 
51  |  | #include "absl/base/internal/raw_logging.h"  | 
52  |  | #include "absl/base/internal/spinlock.h"  | 
53  |  | #include "absl/base/internal/sysinfo.h"  | 
54  |  | #include "absl/base/internal/thread_identity.h"  | 
55  |  | #include "absl/base/internal/tsan_mutex_interface.h"  | 
56  |  | #include "absl/base/optimization.h"  | 
57  |  | #include "absl/debugging/stacktrace.h"  | 
58  |  | #include "absl/debugging/symbolize.h"  | 
59  |  | #include "absl/synchronization/internal/graphcycles.h"  | 
60  |  | #include "absl/synchronization/internal/per_thread_sem.h"  | 
61  |  | #include "absl/time/time.h"  | 
62  |  |  | 
63  |  | using absl::base_internal::CurrentThreadIdentityIfPresent;  | 
64  |  | using absl::base_internal::CycleClock;  | 
65  |  | using absl::base_internal::PerThreadSynch;  | 
66  |  | using absl::base_internal::SchedulingGuard;  | 
67  |  | using absl::base_internal::ThreadIdentity;  | 
68  |  | using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;  | 
69  |  | using absl::synchronization_internal::GraphCycles;  | 
70  |  | using absl::synchronization_internal::GraphId;  | 
71  |  | using absl::synchronization_internal::InvalidGraphId;  | 
72  |  | using absl::synchronization_internal::KernelTimeout;  | 
73  |  | using absl::synchronization_internal::PerThreadSem;  | 
74  |  |  | 
75  |  | extern "C" { | 
76  | 0  | ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() { | 
77  | 0  |   std::this_thread::yield();  | 
78  | 0  | }  | 
79  |  | }  // extern "C"  | 
80  |  |  | 
81  |  | namespace absl { | 
82  |  | ABSL_NAMESPACE_BEGIN  | 
83  |  |  | 
84  |  | namespace { | 
85  |  |  | 
86  |  | #if defined(ABSL_HAVE_THREAD_SANITIZER)  | 
87  |  | constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;  | 
88  |  | #else  | 
89  |  | constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;  | 
90  |  | #endif  | 
91  |  |  | 
92  |  | ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(  | 
93  |  |     kDeadlockDetectionDefault);  | 
94  |  | ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);  | 
95  |  |  | 
96  |  | ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES  | 
97  |  | absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>  | 
98  |  |     submit_profile_data;  | 
99  |  | ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(  | 
100  |  |     const char* msg, const void* obj, int64_t wait_cycles)>  | 
101  |  |     mutex_tracer;  | 
102  |  | ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES  | 
103  |  | absl::base_internal::AtomicHook<void (*)(const char* msg, const void* cv)>  | 
104  |  |     cond_var_tracer;  | 
105  |  |  | 
106  |  | }  // namespace  | 
107  |  |  | 
108  |  | static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,  | 
109  |  |                                           bool locking, bool trylock,  | 
110  |  |                                           bool read_lock);  | 
111  |  |  | 
112  | 0  | void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) { | 
113  | 0  |   submit_profile_data.Store(fn);  | 
114  | 0  | }  | 
115  |  |  | 
116  |  | void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,  | 
117  | 0  |                                     int64_t wait_cycles)) { | 
118  | 0  |   mutex_tracer.Store(fn);  | 
119  | 0  | }  | 
120  |  |  | 
121  | 0  | void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv)) { | 
122  | 0  |   cond_var_tracer.Store(fn);  | 
123  | 0  | }  | 
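  |  | // Illustrative only (not part of mutex.cc): a sketch of how the registration  | 
  |  | // hooks above might be used from application code.  The callback below is  | 
  |  | // hypothetical.  | 
  |  | //  | 
  |  | //   void MyMutexProfiler(int64_t wait_cycles) {  | 
  |  | //     // Record how long a contended acquisition waited (CycleClock cycles).  | 
  |  | //   }  | 
  |  | //   absl::RegisterMutexProfiler(&MyMutexProfiler);  | 
  |  | //   absl::RegisterMutexTracer(+[](const char* msg, const void* obj,  | 
  |  | //                                 int64_t wait_cycles) { /* trace event */ });  | 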
124  |  |  | 
125  |  | namespace { | 
126  |  | // Represents the strategy for spin and yield.  | 
127  |  | // See the comment in GetMutexGlobals() for more information.  | 
128  |  | enum DelayMode { AGGRESSIVE, GENTLE }; | 
129  |  |  | 
130  |  | struct ABSL_CACHELINE_ALIGNED MutexGlobals { | 
131  |  |   absl::once_flag once;  | 
132  |  |   int spinloop_iterations = 0;  | 
133  |  |   int32_t mutex_sleep_spins[2] = {}; | 
134  |  |   absl::Duration mutex_sleep_time;  | 
135  |  | };  | 
136  |  |  | 
137  | 0  | absl::Duration MeasureTimeToYield() { | 
138  | 0  |   absl::Time before = absl::Now();  | 
139  | 0  |   ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();  | 
140  | 0  |   return absl::Now() - before;  | 
141  | 0  | }  | 
142  |  |  | 
143  | 0  | const MutexGlobals& GetMutexGlobals() { | 
144  | 0  |   ABSL_CONST_INIT static MutexGlobals data;  | 
145  | 0  |   absl::base_internal::LowLevelCallOnce(&data.once, [&]() { | 
146  | 0  |     if (absl::base_internal::NumCPUs() > 1) { | 
147  |  |       // If this is multiprocessor, allow spinning. If the mode is  | 
148  |  |       // aggressive then spin many times before yielding. If the mode is  | 
149  |  |       // gentle then spin only a few times before yielding. Aggressive spinning  | 
150  |  |       // is used to ensure that an Unlock() call, which must get the spin lock  | 
151  |  | // for any thread to make progress, gets it without undue delay.  | 
152  | 0  |       data.spinloop_iterations = 1500;  | 
153  | 0  |       data.mutex_sleep_spins[AGGRESSIVE] = 5000;  | 
154  | 0  |       data.mutex_sleep_spins[GENTLE] = 250;  | 
155  | 0  |       data.mutex_sleep_time = absl::Microseconds(10);  | 
156  | 0  |     } else { | 
157  |  | // If this is a uniprocessor, only yield/sleep. Real-time threads are often  | 
158  |  |       // unable to yield, so the sleep time needs to be long enough to keep  | 
159  |  |       // the calling thread asleep until scheduling happens.  | 
160  | 0  |       data.spinloop_iterations = 0;  | 
161  | 0  |       data.mutex_sleep_spins[AGGRESSIVE] = 0;  | 
162  | 0  |       data.mutex_sleep_spins[GENTLE] = 0;  | 
163  | 0  |       data.mutex_sleep_time = MeasureTimeToYield() * 5;  | 
164  | 0  |       data.mutex_sleep_time =  | 
165  | 0  |           std::min(data.mutex_sleep_time, absl::Milliseconds(1));  | 
166  | 0  |       data.mutex_sleep_time =  | 
167  | 0  |           std::max(data.mutex_sleep_time, absl::Microseconds(10));  | 
168  | 0  |     }  | 
169  | 0  |   });  | 
170  | 0  |   return data;  | 
171  | 0  | }  | 
172  |  | }  // namespace  | 
173  |  |  | 
174  |  | namespace synchronization_internal { | 
175  |  | // Returns the Mutex delay on iteration `c` depending on the given `mode`.  | 
176  |  | // The returned value should be used as `c` for the next call to `MutexDelay`.  | 
177  | 0  | int MutexDelay(int32_t c, int mode) { | 
178  | 0  |   const int32_t limit = GetMutexGlobals().mutex_sleep_spins[mode];  | 
179  | 0  |   const absl::Duration sleep_time = GetMutexGlobals().mutex_sleep_time;  | 
180  | 0  |   if (c < limit) { | 
181  |  |     // Spin.  | 
182  | 0  |     c++;  | 
183  | 0  |   } else { | 
184  | 0  |     SchedulingGuard::ScopedEnable enable_rescheduling;  | 
185  | 0  |     ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);  | 
186  | 0  |     if (c == limit) { | 
187  |  |       // Yield once.  | 
188  | 0  |       ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();  | 
189  | 0  |       c++;  | 
190  | 0  |     } else { | 
191  |  |       // Then wait.  | 
192  | 0  |       absl::SleepFor(sleep_time);  | 
193  | 0  |       c = 0;  | 
194  | 0  |     }  | 
195  | 0  |     ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);  | 
196  | 0  |   }  | 
197  | 0  |   return c;  | 
198  | 0  | }  | 
199  |  | }  // namespace synchronization_internal  | 
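  |  | // Illustrative only: MutexDelay() is meant to be called in a backoff loop,  | 
  |  | // feeding its return value back in as `c` so the thread first spins, then  | 
  |  | // yields once, then sleeps.  The lock-probing step below is hypothetical.  | 
  |  | //  | 
  |  | //   int c = 0;  | 
  |  | //   while (!TryAcquireSomething()) {  // hypothetical probe  | 
  |  | //     c = synchronization_internal::MutexDelay(c, GENTLE);  | 
  |  | //   }  | 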
200  |  |  | 
201  |  | // --------------------------Generic atomic ops  | 
202  |  | // Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to  | 
203  |  | // "*pv | bits" if necessary.  Wait until (*pv & wait_until_clear)==0  | 
204  |  | // before making any change.  | 
205  |  | // This is used to set flags in mutex and condition variable words.  | 
206  |  | static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,  | 
207  | 0  |                           intptr_t wait_until_clear) { | 
208  | 0  |   intptr_t v;  | 
209  | 0  |   do { | 
210  | 0  |     v = pv->load(std::memory_order_relaxed);  | 
211  | 0  |   } while ((v & bits) != bits &&  | 
212  | 0  |            ((v & wait_until_clear) != 0 ||  | 
213  | 0  |             !pv->compare_exchange_weak(v, v | bits, std::memory_order_release,  | 
214  | 0  |                                        std::memory_order_relaxed)));  | 
215  | 0  | }  | 
216  |  |  | 
217  |  | // Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to  | 
218  |  | // "*pv & ~bits" if necessary.  Wait until (*pv & wait_until_clear)==0  | 
219  |  | // before making any change.  | 
220  |  | // This is used to unset flags in mutex and condition variable words.  | 
221  |  | static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,  | 
222  | 0  |                             intptr_t wait_until_clear) { | 
223  | 0  |   intptr_t v;  | 
224  | 0  |   do { | 
225  | 0  |     v = pv->load(std::memory_order_relaxed);  | 
226  | 0  |   } while ((v & bits) != 0 &&  | 
227  | 0  |            ((v & wait_until_clear) != 0 ||  | 
228  | 0  |             !pv->compare_exchange_weak(v, v & ~bits, std::memory_order_release,  | 
229  | 0  |                                        std::memory_order_relaxed)));  | 
230  | 0  | }  | 
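  |  | // Illustrative only: EnsureSynchEvent() and ForgetSynchEvent() below use these  | 
  |  | // helpers to toggle the kMuEvent flag in a mutex word while the kMuSpin  | 
  |  | // lockbit is clear.  A sketch:  | 
  |  | //  | 
  |  | //   AtomicSetBits(&mu_, kMuEvent, kMuSpin);    // start recording events  | 
  |  | //   AtomicClearBits(&mu_, kMuEvent, kMuSpin);  // stop recording events  | 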
231  |  |  | 
232  |  | //------------------------------------------------------------------  | 
233  |  |  | 
234  |  | // Data for doing deadlock detection.  | 
235  |  | ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(  | 
236  |  |     absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);  | 
237  |  |  | 
238  |  | // Graph used to detect deadlocks.  | 
239  |  | ABSL_CONST_INIT static GraphCycles* deadlock_graph  | 
240  |  |     ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);  | 
241  |  |  | 
242  |  | //------------------------------------------------------------------  | 
243  |  | // An event mechanism for debugging mutex use.  | 
244  |  | // It also allows mutexes to be given names for those who can't handle  | 
245  |  | // addresses, and instead like to give their data structures names like  | 
246  |  | // "Henry", "Fido", or "Rupert IV, King of Yondavia".  | 
247  |  |  | 
248  |  | namespace {  // to prevent name pollution | 
249  |  | enum {       // Mutex and CondVar events passed as "ev" to PostSynchEvent | 
250  |  |              // Mutex events  | 
251  |  |   SYNCH_EV_TRYLOCK_SUCCESS,  | 
252  |  |   SYNCH_EV_TRYLOCK_FAILED,  | 
253  |  |   SYNCH_EV_READERTRYLOCK_SUCCESS,  | 
254  |  |   SYNCH_EV_READERTRYLOCK_FAILED,  | 
255  |  |   SYNCH_EV_LOCK,  | 
256  |  |   SYNCH_EV_LOCK_RETURNING,  | 
257  |  |   SYNCH_EV_READERLOCK,  | 
258  |  |   SYNCH_EV_READERLOCK_RETURNING,  | 
259  |  |   SYNCH_EV_UNLOCK,  | 
260  |  |   SYNCH_EV_READERUNLOCK,  | 
261  |  |  | 
262  |  |   // CondVar events  | 
263  |  |   SYNCH_EV_WAIT,  | 
264  |  |   SYNCH_EV_WAIT_RETURNING,  | 
265  |  |   SYNCH_EV_SIGNAL,  | 
266  |  |   SYNCH_EV_SIGNALALL,  | 
267  |  | };  | 
268  |  |  | 
269  |  | enum {                    // Event flags | 
270  |  |   SYNCH_F_R = 0x01,       // reader event  | 
271  |  |   SYNCH_F_LCK = 0x02,     // PostSynchEvent called with mutex held  | 
272  |  |   SYNCH_F_TRY = 0x04,     // TryLock or ReaderTryLock  | 
273  |  |   SYNCH_F_UNLOCK = 0x08,  // Unlock or ReaderUnlock  | 
274  |  |  | 
275  |  |   SYNCH_F_LCK_W = SYNCH_F_LCK,  | 
276  |  |   SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,  | 
277  |  | };  | 
278  |  | }  // anonymous namespace  | 
279  |  |  | 
280  |  | // Properties of the events.  | 
281  |  | static const struct { | 
282  |  |   int flags;  | 
283  |  |   const char* msg;  | 
284  |  | } event_properties[] = { | 
285  |  |     {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "}, | 
286  |  |     {0, "TryLock failed "}, | 
287  |  |     {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "}, | 
288  |  |     {0, "ReaderTryLock failed "}, | 
289  |  |     {0, "Lock blocking "}, | 
290  |  |     {SYNCH_F_LCK_W, "Lock returning "}, | 
291  |  |     {0, "ReaderLock blocking "}, | 
292  |  |     {SYNCH_F_LCK_R, "ReaderLock returning "}, | 
293  |  |     {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "}, | 
294  |  |     {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "}, | 
295  |  |     {0, "Wait on "}, | 
296  |  |     {0, "Wait unblocked "}, | 
297  |  |     {0, "Signal on "}, | 
298  |  |     {0, "SignalAll on "}, | 
299  |  | };  | 
300  |  |  | 
301  |  | ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(  | 
302  |  |     absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);  | 
303  |  |  | 
304  |  | // Hash table size; should be prime > 2.  | 
305  |  | // Can't be too small, as it's used for deadlock detection information.  | 
306  |  | static constexpr uint32_t kNSynchEvent = 1031;  | 
307  |  |  | 
308  |  | static struct SynchEvent {  // this is a trivial hash table for the events | 
309  |  |   // struct is freed when refcount reaches 0  | 
310  |  |   int refcount ABSL_GUARDED_BY(synch_event_mu);  | 
311  |  |  | 
312  |  | // buckets have linear, 0-terminated chains  | 
313  |  |   SynchEvent* next ABSL_GUARDED_BY(synch_event_mu);  | 
314  |  |  | 
315  |  |   // Constant after initialization  | 
316  |  |   uintptr_t masked_addr;  // object at this address is called "name"  | 
317  |  |  | 
318  |  |   // No explicit synchronization used.  Instead we assume that the  | 
319  |  |   // client who enables/disables invariants/logging on a Mutex does so  | 
320  |  |   // while the Mutex is not being concurrently accessed by others.  | 
321  |  |   void (*invariant)(void* arg);  // called on each event  | 
322  |  |   void* arg;                     // first arg to (*invariant)()  | 
323  |  |   bool log;                      // logging turned on  | 
324  |  |  | 
325  |  |   // Constant after initialization  | 
326  |  |   char name[1];  // actually longer---NUL-terminated string  | 
327  |  | }* synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);  | 
328  |  |  | 
329  |  | // Ensure that the object at "addr" has a SynchEvent struct associated with it,  | 
330  |  | // set "bits" in the word there (waiting until lockbit is clear before doing  | 
331  |  | // so), and return a refcounted reference that will remain valid until  | 
332  |  | // UnrefSynchEvent() is called.  If a new SynchEvent is allocated,  | 
333  |  | // the string name is copied into it.  | 
334  |  | // When used with a mutex, the caller should also ensure that kMuEvent  | 
335  |  | // is set in the mutex word, and similarly for condition variables and kCVEvent.  | 
336  |  | static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,  | 
337  |  |                                     const char* name, intptr_t bits,  | 
338  | 0  |                                     intptr_t lockbit) { | 
339  | 0  |   uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;  | 
340  | 0  |   SynchEvent* e;  | 
341  |  | // first look for an existing SynchEvent struct.  | 
342  | 0  |   synch_event_mu.Lock();  | 
343  | 0  |   for (e = synch_event[h];  | 
344  | 0  |        e != nullptr && e->masked_addr != base_internal::HidePtr(addr);  | 
345  | 0  |        e = e->next) { | 
346  | 0  |   }  | 
347  | 0  |   if (e == nullptr) {  // no SynchEvent struct found; make one. | 
348  | 0  |     if (name == nullptr) { | 
349  | 0  |       name = "";  | 
350  | 0  |     }  | 
351  | 0  |     size_t l = strlen(name);  | 
352  | 0  |     e = reinterpret_cast<SynchEvent*>(  | 
353  | 0  |         base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));  | 
354  | 0  |     e->refcount = 2;  // one for return value, one for linked list  | 
355  | 0  |     e->masked_addr = base_internal::HidePtr(addr);  | 
356  | 0  |     e->invariant = nullptr;  | 
357  | 0  |     e->arg = nullptr;  | 
358  | 0  |     e->log = false;  | 
359  | 0  |     strcpy(e->name, name);  // NOLINT(runtime/printf)  | 
360  | 0  |     e->next = synch_event[h];  | 
361  | 0  |     AtomicSetBits(addr, bits, lockbit);  | 
362  | 0  |     synch_event[h] = e;  | 
363  | 0  |   } else { | 
364  | 0  |     e->refcount++;  // for return value  | 
365  | 0  |   }  | 
366  | 0  |   synch_event_mu.Unlock();  | 
367  | 0  |   return e;  | 
368  | 0  | }  | 
369  |  |  | 
370  |  | // Deallocate the SynchEvent *e, whose refcount has fallen to zero.  | 
371  | 0  | static void DeleteSynchEvent(SynchEvent* e) { | 
372  | 0  |   base_internal::LowLevelAlloc::Free(e);  | 
373  | 0  | }  | 
374  |  |  | 
375  |  | // Decrement the reference count of *e, or do nothing if e==null.  | 
376  | 0  | static void UnrefSynchEvent(SynchEvent* e) { | 
377  | 0  |   if (e != nullptr) { | 
378  | 0  |     synch_event_mu.Lock();  | 
379  | 0  |     bool del = (--(e->refcount) == 0);  | 
380  | 0  |     synch_event_mu.Unlock();  | 
381  | 0  |     if (del) { | 
382  | 0  |       DeleteSynchEvent(e);  | 
383  | 0  |     }  | 
384  | 0  |   }  | 
385  | 0  | }  | 
386  |  |  | 
387  |  | // Forget the mapping from the object (Mutex or CondVar) at address addr  | 
388  |  | // to SynchEvent object, and clear "bits" in its word (waiting until lockbit  | 
389  |  | // is clear before doing so).  | 
390  |  | static void ForgetSynchEvent(std::atomic<intptr_t>* addr, intptr_t bits,  | 
391  | 0  |                              intptr_t lockbit) { | 
392  | 0  |   uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;  | 
393  | 0  |   SynchEvent** pe;  | 
394  | 0  |   SynchEvent* e;  | 
395  | 0  |   synch_event_mu.Lock();  | 
396  | 0  |   for (pe = &synch_event[h];  | 
397  | 0  |        (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);  | 
398  | 0  |        pe = &e->next) { | 
399  | 0  |   }  | 
400  | 0  |   bool del = false;  | 
401  | 0  |   if (e != nullptr) { | 
402  | 0  |     *pe = e->next;  | 
403  | 0  |     del = (--(e->refcount) == 0);  | 
404  | 0  |   }  | 
405  | 0  |   AtomicClearBits(addr, bits, lockbit);  | 
406  | 0  |   synch_event_mu.Unlock();  | 
407  | 0  |   if (del) { | 
408  | 0  |     DeleteSynchEvent(e);  | 
409  | 0  |   }  | 
410  | 0  | }  | 
411  |  |  | 
412  |  | // Return a refcounted reference to the SynchEvent of the object at address  | 
413  |  | // "addr", if any.  The pointer returned is valid until the UnrefSynchEvent() is  | 
414  |  | // called.  | 
415  | 0  | static SynchEvent* GetSynchEvent(const void* addr) { | 
416  | 0  |   uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;  | 
417  | 0  |   SynchEvent* e;  | 
418  | 0  |   synch_event_mu.Lock();  | 
419  | 0  |   for (e = synch_event[h];  | 
420  | 0  |        e != nullptr && e->masked_addr != base_internal::HidePtr(addr);  | 
421  | 0  |        e = e->next) { | 
422  | 0  |   }  | 
423  | 0  |   if (e != nullptr) { | 
424  | 0  |     e->refcount++;  | 
425  | 0  |   }  | 
426  | 0  |   synch_event_mu.Unlock();  | 
427  | 0  |   return e;  | 
428  | 0  | }  | 
429  |  |  | 
430  |  | // Called when an event "ev" occurs on a Mutex or CondVar "obj",  | 
431  |  | // if event recording is on.  | 
432  | 0  | static void PostSynchEvent(void* obj, int ev) { | 
433  | 0  |   SynchEvent* e = GetSynchEvent(obj);  | 
434  |  |   // logging is on if event recording is on and either there's no event struct,  | 
435  |  |   // or it explicitly says to log  | 
436  | 0  |   if (e == nullptr || e->log) { | 
437  | 0  |     void* pcs[40];  | 
438  | 0  |     int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);  | 
439  |  |     // A buffer with enough space for the ASCII for all the PCs, even on a  | 
440  |  |     // 64-bit machine.  | 
441  | 0  |     char buffer[ABSL_ARRAYSIZE(pcs) * 24];  | 
442  | 0  |     int pos = snprintf(buffer, sizeof(buffer), " @");  | 
443  | 0  |     for (int i = 0; i != n; i++) { | 
444  | 0  |       int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos),  | 
445  | 0  |                        " %p", pcs[i]);  | 
446  | 0  |       if (b < 0 ||  | 
447  | 0  |           static_cast<size_t>(b) >= sizeof(buffer) - static_cast<size_t>(pos)) { | 
448  | 0  |         break;  | 
449  | 0  |       }  | 
450  | 0  |       pos += b;  | 
451  | 0  |     }  | 
452  | 0  |     ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,  | 
453  | 0  |                  (e == nullptr ? "" : e->name), buffer);  | 
454  | 0  |   }  | 
455  | 0  |   const int flags = event_properties[ev].flags;  | 
456  | 0  |   if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) { | 
457  |  |     // Calling the invariant as is causes problems under ThreadSanitizer.  | 
458  |  |     // We are currently inside of Mutex Lock/Unlock and are ignoring all  | 
459  |  |     // memory accesses and synchronization. If the invariant transitively  | 
460  |  |     // synchronizes something else and we ignore the synchronization, we will  | 
461  |  |     // get false positive race reports later.  | 
462  |  |     // Reuse EvalConditionAnnotated to properly call into user code.  | 
463  | 0  |     struct local { | 
464  | 0  |       static bool pred(SynchEvent* ev) { | 
465  | 0  |         (*ev->invariant)(ev->arg);  | 
466  | 0  |         return false;  | 
467  | 0  |       }  | 
468  | 0  |     };  | 
469  | 0  |     Condition cond(&local::pred, e);  | 
470  | 0  |     Mutex* mu = static_cast<Mutex*>(obj);  | 
471  | 0  |     const bool locking = (flags & SYNCH_F_UNLOCK) == 0;  | 
472  | 0  |     const bool trylock = (flags & SYNCH_F_TRY) != 0;  | 
473  | 0  |     const bool read_lock = (flags & SYNCH_F_R) != 0;  | 
474  | 0  |     EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);  | 
475  | 0  |   }  | 
476  | 0  |   UnrefSynchEvent(e);  | 
477  | 0  | }  | 
478  |  |  | 
479  |  | //------------------------------------------------------------------  | 
480  |  |  | 
481  |  | // The SynchWaitParams struct encapsulates the way in which a thread is waiting:  | 
482  |  | // whether it has a timeout, the condition, exclusive/shared, and whether a  | 
483  |  | // condition variable wait has an associated Mutex (as opposed to another  | 
484  |  | // type of lock).  It also points to the PerThreadSynch struct of its thread.  | 
485  |  | // cv_word tells Enqueue() to enqueue on a CondVar using CondVarEnqueue().  | 
486  |  | //  | 
487  |  | // This structure is held on the stack rather than directly in  | 
488  |  | // PerThreadSynch because a thread can be waiting on multiple Mutexes if,  | 
489  |  | // while waiting on one Mutex, the implementation calls a client callback  | 
490  |  | // (such as a Condition function) that acquires another Mutex. We don't  | 
491  |  | // strictly need to allow this, but programmers become confused if we do not  | 
492  |  | // allow them to use functions such as LOG() within Condition functions.  The  | 
493  |  | // PerThreadSynch struct points at the most recent SynchWaitParams struct when  | 
494  |  | // the thread is on a Mutex's waiter queue.  | 
495  |  | struct SynchWaitParams { | 
496  |  |   SynchWaitParams(Mutex::MuHow how_arg, const Condition* cond_arg,  | 
497  |  |                   KernelTimeout timeout_arg, Mutex* cvmu_arg,  | 
498  |  |                   PerThreadSynch* thread_arg,  | 
499  |  |                   std::atomic<intptr_t>* cv_word_arg)  | 
500  |  |       : how(how_arg),  | 
501  |  |         cond(cond_arg),  | 
502  |  |         timeout(timeout_arg),  | 
503  |  |         cvmu(cvmu_arg),  | 
504  |  |         thread(thread_arg),  | 
505  |  |         cv_word(cv_word_arg),  | 
506  |  |         contention_start_cycles(CycleClock::Now()),  | 
507  | 0  |         should_submit_contention_data(false) {} | 
508  |  |  | 
509  |  |   const Mutex::MuHow how;  // How this thread needs to wait.  | 
510  |  |   const Condition* cond;   // The condition that this thread is waiting for.  | 
511  |  |                            // In Mutex, this field is set to zero if a timeout  | 
512  |  |                            // expires.  | 
513  |  |   KernelTimeout timeout;  // timeout expiry---absolute time  | 
514  |  |                           // In Mutex, this field is set to zero if a timeout  | 
515  |  |                           // expires.  | 
516  |  |   Mutex* const cvmu;      // used for transfer from cond var to mutex  | 
517  |  |   PerThreadSynch* const thread;  // thread that is waiting  | 
518  |  |  | 
519  |  |   // If not null, thread should be enqueued on the CondVar whose state  | 
520  |  |   // word is cv_word instead of queueing normally on the Mutex.  | 
521  |  |   std::atomic<intptr_t>* cv_word;  | 
522  |  |  | 
523  |  |   int64_t contention_start_cycles;  // Time (in cycles) when this thread started  | 
524  |  |                                     // to contend for the mutex.  | 
525  |  |   bool should_submit_contention_data;  | 
526  |  | };  | 
527  |  |  | 
528  |  | struct SynchLocksHeld { | 
529  |  |   int n;          // number of valid entries in locks[]  | 
530  |  |   bool overflow;  // true iff we overflowed the array at some point  | 
531  |  |   struct { | 
532  |  |     Mutex* mu;      // lock acquired  | 
533  |  |     int32_t count;  // times acquired  | 
534  |  |     GraphId id;     // deadlock_graph id of acquired lock  | 
535  |  |   } locks[40];  | 
536  |  |   // If a thread overfills the array during deadlock detection, we  | 
537  |  |   // continue, discarding information as needed.  If no overflow has  | 
538  |  |   // taken place, we can provide more error checking, such as  | 
539  |  |   // detecting when a thread releases a lock it does not hold.  | 
540  |  | };  | 
541  |  |  | 
542  |  | // A sentinel value in lists that is not 0.  | 
543  |  | // A 0 value is used to mean "not on a list".  | 
544  |  | static PerThreadSynch* const kPerThreadSynchNull =  | 
545  |  |     reinterpret_cast<PerThreadSynch*>(1);  | 
546  |  |  | 
547  | 2  | static SynchLocksHeld* LocksHeldAlloc() { | 
548  | 2  |   SynchLocksHeld* ret = reinterpret_cast<SynchLocksHeld*>(  | 
549  | 2  |       base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));  | 
550  | 2  |   ret->n = 0;  | 
551  | 2  |   ret->overflow = false;  | 
552  | 2  |   return ret;  | 
553  | 2  | }  | 
554  |  |  | 
555  |  | // Return the PerThreadSynch-struct for this thread.  | 
556  | 9.72M  | static PerThreadSynch* Synch_GetPerThread() { | 
557  | 9.72M  |   ThreadIdentity* identity = GetOrCreateCurrentThreadIdentity();  | 
558  | 9.72M  |   return &identity->per_thread_synch;  | 
559  | 9.72M  | }  | 
560  |  |  | 
561  | 0  | static PerThreadSynch* Synch_GetPerThreadAnnotated(Mutex* mu) { | 
562  | 0  |   if (mu) { | 
563  | 0  |     ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);  | 
564  | 0  |   }  | 
565  | 0  |   PerThreadSynch* w = Synch_GetPerThread();  | 
566  | 0  |   if (mu) { | 
567  | 0  |     ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);  | 
568  | 0  |   }  | 
569  | 0  |   return w;  | 
570  | 0  | }  | 
571  |  |  | 
572  | 9.72M  | static SynchLocksHeld* Synch_GetAllLocks() { | 
573  | 9.72M  |   PerThreadSynch* s = Synch_GetPerThread();  | 
574  | 9.72M  |   if (s->all_locks == nullptr) { | 
575  | 2  |     s->all_locks = LocksHeldAlloc();  // Freed by ReclaimThreadIdentity.  | 
576  | 2  |   }  | 
577  | 9.72M  |   return s->all_locks;  | 
578  | 9.72M  | }  | 
579  |  |  | 
580  |  | // Post on "w"'s associated PerThreadSem.  | 
581  | 0  | void Mutex::IncrementSynchSem(Mutex* mu, PerThreadSynch* w) { | 
582  | 0  |   static_cast<void>(mu);  // Prevent unused param warning in non-TSAN builds.  | 
583  | 0  |   ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);  | 
584  |  |   // We miss synchronization around passing PerThreadSynch between threads  | 
585  |  |   // since it happens inside of the Mutex code, so we need to ignore all  | 
586  |  |   // accesses to the object.  | 
587  | 0  |   ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();  | 
588  | 0  |   PerThreadSem::Post(w->thread_identity());  | 
589  | 0  |   ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();  | 
590  | 0  |   ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);  | 
591  | 0  | }  | 
592  |  |  | 
593  |  | // Wait on "w"'s associated PerThreadSem; returns false if timeout expired.  | 
594  | 0  | bool Mutex::DecrementSynchSem(Mutex* mu, PerThreadSynch* w, KernelTimeout t) { | 
595  | 0  |   static_cast<void>(mu);  // Prevent unused param warning in non-TSAN builds.  | 
596  | 0  |   ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);  | 
597  | 0  |   assert(w == Synch_GetPerThread());  | 
598  | 0  |   static_cast<void>(w);  | 
599  | 0  |   bool res = PerThreadSem::Wait(t);  | 
600  | 0  |   ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);  | 
601  | 0  |   return res;  | 
602  | 0  | }  | 
603  |  |  | 
604  |  | // We're in a fatal signal handler that hopes to use Mutex and to get  | 
605  |  | // lucky by not deadlocking.  We try to improve its chances of success  | 
606  |  | // by effectively disabling some of the consistency checks.  This will  | 
607  |  | // prevent certain ABSL_RAW_CHECK() statements from being triggered when  | 
608  |  | // re-entry is detected.  The ABSL_RAW_CHECK() statements are those in the  | 
609  |  | // Mutex code checking that the "waitp" field has not been reused.  | 
610  | 0  | void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() { | 
611  |  |   // Fix the per-thread state only if it exists.  | 
612  | 0  |   ThreadIdentity* identity = CurrentThreadIdentityIfPresent();  | 
613  | 0  |   if (identity != nullptr) { | 
614  | 0  |     identity->per_thread_synch.suppress_fatal_errors = true;  | 
615  | 0  |   }  | 
616  |  |   // Don't do deadlock detection when we are already failing.  | 
617  | 0  |   synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,  | 
618  | 0  |                                  std::memory_order_release);  | 
619  | 0  | }  | 
620  |  |  | 
621  |  | // --------------------------Mutexes  | 
622  |  |  | 
623  |  | // In the layout below, the msb of the bottom byte is currently unused.  Also,  | 
624  |  | // the following constraints were considered in choosing the layout:  | 
625  |  | //  o Both the debug allocator's "uninitialized" and "freed" patterns (0xab and  | 
626  |  | //    0xcd) are illegal: reader and writer lock both held.  | 
627  |  | //  o kMuWriter and kMuEvent should exceed kMuDesig and kMuWait, to enable the  | 
628  |  | //    bit-twiddling trick in Mutex::Unlock().  | 
629  |  | //  o kMuWriter / kMuReader == kMuWrWait / kMuWait,  | 
630  |  | //    to enable the bit-twiddling trick in CheckForMutexCorruption().  | 
631  |  | static const intptr_t kMuReader = 0x0001L;  // a reader holds the lock  | 
632  |  | // There's a designated waker.  | 
633  |  | // INVARIANT1:  there's a thread that was blocked on the mutex, is  | 
634  |  | // no longer, yet has not yet acquired the mutex.  If there's a  | 
635  |  | // designated waker, all threads can avoid taking the slow path in  | 
636  |  | // unlock because the designated waker will subsequently acquire  | 
637  |  | // the lock and wake someone.  To maintain INVARIANT1 the bit is  | 
638  |  | // set when a thread is unblocked (INV1a), and threads that were  | 
639  |  | // unblocked reset the bit when they either acquire or re-block (INV1b).  | 
640  |  | static const intptr_t kMuDesig = 0x0002L;  | 
641  |  | static const intptr_t kMuWait = 0x0004L;    // threads are waiting  | 
642  |  | static const intptr_t kMuWriter = 0x0008L;  // a writer holds the lock  | 
643  |  | static const intptr_t kMuEvent = 0x0010L;   // record this mutex's events  | 
644  |  | // Runnable writer is waiting for a reader.  | 
645  |  | // If set, new readers will not lock the mutex to avoid writer starvation.  | 
646  |  | // Note: if a reader has higher priority than the writer, it will still lock  | 
647  |  | // the mutex ahead of the waiting writer, but in a very inefficient manner:  | 
648  |  | // the reader will first queue itself and block, but then the last unlocking  | 
649  |  | // reader will wake it.  | 
650  |  | static const intptr_t kMuWrWait = 0x0020L;  | 
651  |  | static const intptr_t kMuSpin = 0x0040L;  // spinlock protects wait list  | 
652  |  | static const intptr_t kMuLow = 0x00ffL;   // mask all mutex bits  | 
653  |  | static const intptr_t kMuHigh = ~kMuLow;  // mask pointer/reader count  | 
654  |  |  | 
655  |  | // Hack to make constant values available to gdb pretty printer  | 
656  |  | enum { | 
657  |  |   kGdbMuSpin = kMuSpin,  | 
658  |  |   kGdbMuEvent = kMuEvent,  | 
659  |  |   kGdbMuWait = kMuWait,  | 
660  |  |   kGdbMuWriter = kMuWriter,  | 
661  |  |   kGdbMuDesig = kMuDesig,  | 
662  |  |   kGdbMuWrWait = kMuWrWait,  | 
663  |  |   kGdbMuReader = kMuReader,  | 
664  |  |   kGdbMuLow = kMuLow,  | 
665  |  | };  | 
666  |  |  | 
667  |  | // kMuWrWait implies kMuWait.  | 
668  |  | // kMuReader and kMuWriter are mutually exclusive.  | 
669  |  | // If kMuReader is zero, there are no readers.  | 
670  |  | // Otherwise, if kMuWait is zero, the high order bits contain a count of the  | 
671  |  | // number of readers.  Otherwise, the reader count is held in  | 
672  |  | // PerThreadSynch::readers of the most recently queued waiter, again in the  | 
673  |  | // bits above kMuLow.  | 
674  |  | static const intptr_t kMuOne = 0x0100;  // a count of one reader  | 
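  |  | // Illustrative only: a sketch of how a mutex word decomposes under the layout  | 
  |  | // above (this is not actual Mutex code):  | 
  |  | //  | 
  |  | //   intptr_t v = mu_.load(std::memory_order_relaxed);  | 
  |  | //   bool write_held = (v & kMuWriter) != 0;  | 
  |  | //   if ((v & kMuWait) == 0) {  | 
  |  | //     intptr_t readers = (v & kMuHigh) / kMuOne;  // reader count in high bits  | 
  |  | //   } else {  | 
  |  | //     // With waiters, kMuHigh instead holds the PerThreadSynch* queue head,  | 
  |  | //     // and the reader count moves to PerThreadSynch::readers of the last  | 
  |  | //     // queued waiter.  | 
  |  | //   }  | 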
675  |  |  | 
676  |  | // flags passed to Enqueue and LockSlow{,WithTimeout,Loop} | 
677  |  | static const int kMuHasBlocked = 0x01;  // already blocked (MUST == 1)  | 
678  |  | static const int kMuIsCond = 0x02;      // conditional waiter (CV or Condition)  | 
679  |  | static const int kMuIsFer = 0x04;       // wait morphing from a CondVar  | 
680  |  |  | 
681  |  | static_assert(PerThreadSynch::kAlignment > kMuLow,  | 
682  |  |               "PerThreadSynch::kAlignment must be greater than kMuLow");  | 
683  |  |  | 
684  |  | // This struct contains various bitmasks to be used in  | 
685  |  | // acquiring and releasing a mutex in a particular mode.  | 
686  |  | struct MuHowS { | 
687  |  |   // if all the bits in fast_need_zero are zero, the lock can be acquired by  | 
688  |  |   // adding fast_add and oring fast_or.  The bit kMuDesig should be reset iff  | 
689  |  |   // this is the designated waker.  | 
690  |  |   intptr_t fast_need_zero;  | 
691  |  |   intptr_t fast_or;  | 
692  |  |   intptr_t fast_add;  | 
693  |  |  | 
694  |  |   intptr_t slow_need_zero;  // fast_need_zero with events (e.g. logging)  | 
695  |  |  | 
696  |  |   intptr_t slow_inc_need_zero;  // if all the bits in slow_inc_need_zero are  | 
697  |  |                                 // zero, a reader can acquire a read share by  | 
698  |  |                                 // setting the reader bit and incrementing  | 
699  |  |                                 // the reader count (in last waiter since  | 
700  |  |                                 // we're now slow-path).  kMuWrWait may  | 
701  |  |                                 // be ignored if we already waited once.  | 
702  |  | };  | 
703  |  |  | 
704  |  | static const MuHowS kSharedS = { | 
705  |  |     // shared or read lock  | 
706  |  |     kMuWriter | kMuWait | kMuEvent,   // fast_need_zero  | 
707  |  |     kMuReader,                        // fast_or  | 
708  |  |     kMuOne,                           // fast_add  | 
709  |  |     kMuWriter | kMuWait,              // slow_need_zero  | 
710  |  |     kMuSpin | kMuWriter | kMuWrWait,  // slow_inc_need_zero  | 
711  |  | };  | 
712  |  | static const MuHowS kExclusiveS = { | 
713  |  |     // exclusive or write lock  | 
714  |  |     kMuWriter | kMuReader | kMuEvent,  // fast_need_zero  | 
715  |  |     kMuWriter,                         // fast_or  | 
716  |  |     0,                                 // fast_add  | 
717  |  |     kMuWriter | kMuReader,             // slow_need_zero  | 
718  |  |     ~static_cast<intptr_t>(0),         // slow_inc_need_zero  | 
719  |  | };  | 
720  |  | static const Mutex::MuHow kShared = &kSharedS;        // shared lock  | 
721  |  | static const Mutex::MuHow kExclusive = &kExclusiveS;  // exclusive lock  | 
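  |  | // Illustrative only: the fast-path acquisition that the MuHowS comment above  | 
  |  | // describes amounts to the following sketch (not the actual Lock() /  | 
  |  | // ReaderLock() code, which inlines the constants):  | 
  |  | //  | 
  |  | //   const Mutex::MuHow how = kShared;  // or kExclusive  | 
  |  | //   intptr_t v = mu_.load(std::memory_order_relaxed);  | 
  |  | //   if ((v & how->fast_need_zero) == 0 &&  | 
  |  | //       mu_.compare_exchange_strong(v, (v + how->fast_add) | how->fast_or,  | 
  |  | //                                   std::memory_order_acquire,  | 
  |  | //                                   std::memory_order_relaxed)) {  | 
  |  | //     // acquired on the fast path; otherwise fall through to the slow path  | 
  |  | //   }  | 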
722  |  |  | 
723  |  | #ifdef NDEBUG  | 
724  |  | static constexpr bool kDebugMode = false;  | 
725  |  | #else  | 
726  |  | static constexpr bool kDebugMode = true;  | 
727  |  | #endif  | 
728  |  |  | 
729  |  | #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE  | 
730  |  | static unsigned TsanFlags(Mutex::MuHow how) { | 
731  |  |   return how == kShared ? __tsan_mutex_read_lock : 0;  | 
732  |  | }  | 
733  |  | #endif  | 
734  |  |  | 
735  | 0  | static bool DebugOnlyIsExiting() { | 
736  | 0  |   return false;  | 
737  | 0  | }  | 
738  |  |  | 
739  | 0  | Mutex::~Mutex() { | 
740  | 0  |   intptr_t v = mu_.load(std::memory_order_relaxed);  | 
741  | 0  |   if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) { | 
742  | 0  |     ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);  | 
743  | 0  |   }  | 
744  | 0  |   if (kDebugMode) { | 
745  | 0  |     this->ForgetDeadlockInfo();  | 
746  | 0  |   }  | 
747  | 0  |   ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);  | 
748  | 0  | }  | 
749  |  |  | 
750  | 0  | void Mutex::EnableDebugLog(const char* name) { | 
751  | 0  |   SynchEvent* e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);  | 
752  | 0  |   e->log = true;  | 
753  | 0  |   UnrefSynchEvent(e);  | 
754  | 0  | }  | 
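  |  | // Illustrative only: per-mutex event logging is typically enabled like this  | 
  |  | // (the mutex and its name are hypothetical):  | 
  |  | //  | 
  |  | //   absl::Mutex mu;  | 
  |  | //   mu.EnableDebugLog("work_queue_mu");  | 
  |  | //   // Later Lock()/Unlock()/TryLock() calls on `mu` are reported through  | 
  |  | //   // ABSL_RAW_LOG(INFO, ...) in PostSynchEvent() above.  | 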
755  |  |  | 
756  | 0  | void EnableMutexInvariantDebugging(bool enabled) { | 
757  | 0  |   synch_check_invariants.store(enabled, std::memory_order_release);  | 
758  | 0  | }  | 
759  |  |  | 
760  | 0  | void Mutex::EnableInvariantDebugging(void (*invariant)(void*), void* arg) { | 
761  | 0  |   if (synch_check_invariants.load(std::memory_order_acquire) &&  | 
762  | 0  |       invariant != nullptr) { | 
763  | 0  |     SynchEvent* e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);  | 
764  | 0  |     e->invariant = invariant;  | 
765  | 0  |     e->arg = arg;  | 
766  | 0  |     UnrefSynchEvent(e);  | 
767  | 0  |   }  | 
768  | 0  | }  | 
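  |  | // Illustrative only: registration above is gated on the global flag, so a  | 
  |  | // caller enables invariant checking first and then registers a per-mutex  | 
  |  | // checker (the checker and guarded structure below are hypothetical):  | 
  |  | //  | 
  |  | //   static void CheckQueueInvariant(void* arg) {  | 
  |  | //     auto* q = static_cast<WorkQueue*>(arg);  // hypothetical guarded data  | 
  |  | //     ABSL_RAW_CHECK(q->pending >= 0, "corrupt work queue");  | 
  |  | //   }  | 
  |  | //  | 
  |  | //   absl::EnableMutexInvariantDebugging(true);  | 
  |  | //   mu.EnableInvariantDebugging(&CheckQueueInvariant, &queue);  | 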
769  |  |  | 
770  | 0  | void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) { | 
771  | 0  |   synch_deadlock_detection.store(mode, std::memory_order_release);  | 
772  | 0  | }  | 
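  |  | // Illustrative only: the detection policy can be relaxed at runtime, e.g. to  | 
  |  | // report potential deadlock cycles without aborting the process:  | 
  |  | //  | 
  |  | //   absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);  | 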
773  |  |  | 
774  |  | // Return true iff threads x and y are part of the same equivalence  | 
775  |  | // class of waiters. An equivalence class is defined as the set of  | 
776  |  | // waiters with the same condition, type of lock, and thread priority.  | 
777  |  | //  | 
778  |  | // Requires that x and y be waiting on the same Mutex queue.  | 
779  | 0  | static bool MuEquivalentWaiter(PerThreadSynch* x, PerThreadSynch* y) { | 
780  | 0  |   return x->waitp->how == y->waitp->how && x->priority == y->priority &&  | 
781  | 0  |          Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);  | 
782  | 0  | }  | 
783  |  |  | 
784  |  | // Given the contents of a mutex word containing a PerThreadSynch pointer,  | 
785  |  | // return the pointer.  | 
786  | 0  | static inline PerThreadSynch* GetPerThreadSynch(intptr_t v) { | 
787  | 0  |   return reinterpret_cast<PerThreadSynch*>(v & kMuHigh);  | 
788  | 0  | }  | 
789  |  |  | 
790  |  | // The next several routines maintain the per-thread next and skip fields  | 
791  |  | // used in the Mutex waiter queue.  | 
792  |  | // The queue is a circular singly-linked list, of which the "head" is the  | 
793  |  | // last element, and head->next is the first element.  | 
794  |  | // The skip field has the invariant:  | 
795  |  | //   For thread x, x->skip is one of:  | 
796  |  | //     - invalid (iff x is not in a Mutex wait queue),  | 
797  |  | //     - null, or  | 
798  |  | //     - a pointer to a distinct thread waiting later in the same Mutex queue  | 
799  |  | //       such that all threads in [x, x->skip] have the same condition, priority  | 
800  |  | //       and lock type (MuEquivalentWaiter() is true for all pairs in [x,  | 
801  |  | //       x->skip]).  | 
802  |  | // In addition, if x->skip is valid, (x->may_skip || x->skip == null)  | 
803  |  | //  | 
804  |  | // By the spec of MuEquivalentWaiter(), it is not necessary when removing the  | 
805  |  | // first runnable thread y from the front of a Mutex queue to adjust the skip  | 
806  |  | // field of another thread x because if x->skip==y, x->skip must (have) become  | 
807  |  | // invalid before y is removed.  The function TryRemove can remove a specified  | 
808  |  | // thread from an arbitrary position in the queue whether runnable or not, so  | 
809  |  | // it fixes up skip fields that would otherwise be left dangling.  | 
810  |  | // The statement  | 
811  |  | //     if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; } | 
812  |  | // maintains the invariant provided x is not the last waiter in a Mutex queue  | 
813  |  | // The statement  | 
814  |  | //          if (x->skip != null) { x->skip = x->skip->skip; } | 
815  |  | // maintains the invariant.  | 
816  |  |  | 
817  |  | // Returns the last thread y in a mutex waiter queue such that all threads in  | 
818  |  | // [x, y] inclusive share the same condition.  Sets skip fields of some threads  | 
819  |  | // in that range to optimize future evaluation of Skip() on x values in  | 
820  |  | // the range.  Requires thread x is in a mutex waiter queue.  | 
821  |  | // The locking is unusual.  Skip() is called under these conditions:  | 
822  |  | //   - spinlock is held in call from Enqueue(), with maybe_unlocking == false  | 
823  |  | //   - Mutex is held in call from UnlockSlow() by last unlocker, with  | 
824  |  | //     maybe_unlocking == true  | 
825  |  | //   - both Mutex and spinlock are held in call from DequeueAllWakeable() (from  | 
826  |  | //     UnlockSlow()) and TryRemove()  | 
827  |  | // These cases are mutually exclusive, so Skip() never runs concurrently  | 
828  |  | // with itself on the same Mutex.   The skip chain is used in these other places  | 
829  |  | // that cannot occur concurrently:  | 
830  |  | //   - FixSkip() (from TryRemove()) (spinlock and Mutex are held)  | 
831  |  | //   - Dequeue() (with spinlock and Mutex held)  | 
832  |  | //   - UnlockSlow() (with spinlock and Mutex held)  | 
833  |  | // A more complex case is Enqueue()  | 
834  |  | //   - Enqueue() (with spinlock held and maybe_unlocking == false)  | 
835  |  | //               This is the first case in which Skip is called, above.  | 
836  |  | //   - Enqueue() (without spinlock held; but queue is empty and being freshly  | 
837  |  | //                formed)  | 
838  |  | //   - Enqueue() (with spinlock held and maybe_unlocking == true)  | 
839  |  | // The first case has mutual exclusion, and the second isolation through  | 
840  |  | // working on an otherwise unreachable data structure.  | 
841  |  | // In the last case, Enqueue() is required to change no skip/next pointers  | 
842  |  | // except those in the added node and the former "head" node.  This implies  | 
843  |  | // that the new node is added after head, and so must be the new head or the  | 
844  |  | // new front of the queue.  | 
845  | 0  | static PerThreadSynch* Skip(PerThreadSynch* x) { | 
846  | 0  |   PerThreadSynch* x0 = nullptr;  | 
847  | 0  |   PerThreadSynch* x1 = x;  | 
848  | 0  |   PerThreadSynch* x2 = x->skip;  | 
849  | 0  |   if (x2 != nullptr) { | 
850  |  | // Each iteration attempts to advance the sequence (x0,x1,x2) to the next  | 
851  |  | // sequence such that x1 == x0->skip && x2 == x1->skip.  | 
852  | 0  |     while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) { | 
853  | 0  |       x0->skip = x2;  // short-circuit skip from x0 to x2  | 
854  | 0  |     }  | 
855  | 0  |     x->skip = x1;  // short-circuit skip from x to result  | 
856  | 0  |   }  | 
857  | 0  |   return x1;  | 
858  | 0  | }  | 
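  |  | // Illustrative only: given equivalent waiters w1 -> w2 -> w3 -> w4 with  | 
  |  | // w1->skip == w2, w2->skip == w4, and w4->skip == nullptr, Skip(w1) returns w4  | 
  |  | // and short-circuits w1->skip to w4, so a later traversal jumps the whole  | 
  |  | // equivalent run in one step.  | 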
859  |  |  | 
860  |  | // "ancestor" appears before "to_be_removed" in the same Mutex waiter queue.  | 
861  |  | // The latter is going to be removed out of order, because of a timeout.  | 
862  |  | // Check whether "ancestor" has a skip field pointing to "to_be_removed",  | 
863  |  | // and fix it if it does.  | 
864  | 0  | static void FixSkip(PerThreadSynch* ancestor, PerThreadSynch* to_be_removed) { | 
865  | 0  |   if (ancestor->skip == to_be_removed) {  // ancestor->skip left dangling | 
866  | 0  |     if (to_be_removed->skip != nullptr) { | 
867  | 0  |       ancestor->skip = to_be_removed->skip;  // can skip past to_be_removed  | 
868  | 0  |     } else if (ancestor->next != to_be_removed) {  // they are not adjacent | 
869  | 0  |       ancestor->skip = ancestor->next;             // can skip one past ancestor  | 
870  | 0  |     } else { | 
871  | 0  |       ancestor->skip = nullptr;  // can't skip at all  | 
872  | 0  |     }  | 
873  | 0  |   }  | 
874  | 0  | }  | 
875  |  |  | 
876  |  | static void CondVarEnqueue(SynchWaitParams* waitp);  | 
877  |  |  | 
878  |  | // Enqueue thread "waitp->thread" on a waiter queue.  | 
879  |  | // Called with mutex spinlock held if head != nullptr  | 
880  |  | // If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is  | 
881  |  | // idempotent; it alters no state associated with the existing (empty)  | 
882  |  | // queue.  | 
883  |  | //  | 
884  |  | // If waitp->cv_word == nullptr, queue the thread at either the front or  | 
885  |  | // the end (according to its priority) of the circular mutex waiter queue whose  | 
886  |  | // head is "head", and return the new head.  mu is the previous mutex state,  | 
887  |  | // which contains the reader count (perhaps adjusted for the operation in  | 
888  |  | // progress) if the list was empty and a read lock held, and the holder hint if  | 
889  |  | // the list was empty and a write lock held.  (flags & kMuIsCond) indicates  | 
890  |  | // whether this thread was transferred from a CondVar or is waiting for a  | 
891  |  | // non-trivial condition.  In this case, Enqueue() never returns nullptr.  | 
892  |  | //  | 
893  |  | // If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is  | 
894  |  | // returned. This mechanism is used by CondVar to queue a thread on the  | 
895  |  | // condition variable queue instead of the mutex queue in implementing Wait().  | 
896  |  | // In this case, Enqueue() can return nullptr (if head==nullptr).  | 
897  |  | static PerThreadSynch* Enqueue(PerThreadSynch* head, SynchWaitParams* waitp,  | 
898  | 0  |                                intptr_t mu, int flags) { | 
899  |  |   // If we have been given a cv_word, call CondVarEnqueue() and return  | 
900  |  |   // the previous head of the Mutex waiter queue.  | 
901  | 0  |   if (waitp->cv_word != nullptr) { | 
902  | 0  |     CondVarEnqueue(waitp);  | 
903  | 0  |     return head;  | 
904  | 0  |   }  | 
905  |  |  | 
906  | 0  |   PerThreadSynch* s = waitp->thread;  | 
907  | 0  |   ABSL_RAW_CHECK(  | 
908  | 0  |       s->waitp == nullptr ||    // normal case  | 
909  | 0  |           s->waitp == waitp ||  // Fer()---transfer from condition variable  | 
910  | 0  |           s->suppress_fatal_errors,  | 
911  | 0  |       "detected illegal recursion into Mutex code");  | 
912  | 0  |   s->waitp = waitp;  | 
913  | 0  |   s->skip = nullptr;   // maintain skip invariant (see above)  | 
914  | 0  |   s->may_skip = true;  // always true on entering queue  | 
915  | 0  |   s->wake = false;     // not being woken  | 
916  | 0  |   s->cond_waiter = ((flags & kMuIsCond) != 0);  | 
917  | 0  | #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM  | 
918  | 0  |   if ((flags & kMuIsFer) == 0) { | 
919  | 0  |     assert(s == Synch_GetPerThread());  | 
920  | 0  |     int64_t now_cycles = CycleClock::Now();  | 
921  | 0  |     if (s->next_priority_read_cycles < now_cycles) { | 
922  |  |       // Every so often, update our idea of the thread's priority.  | 
923  |  |       // pthread_getschedparam() is 5% of the block/wakeup time;  | 
924  |  |       // CycleClock::Now() is 0.5%.  | 
925  | 0  |       int policy;  | 
926  | 0  |       struct sched_param param;  | 
927  | 0  |       const int err = pthread_getschedparam(pthread_self(), &policy, ¶m);  | 
928  | 0  |       if (err != 0) { | 
929  | 0  |         ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);  | 
930  | 0  |       } else { | 
931  | 0  |         s->priority = param.sched_priority;  | 
932  | 0  |         s->next_priority_read_cycles =  | 
933  | 0  |             now_cycles + static_cast<int64_t>(CycleClock::Frequency());  | 
934  | 0  |       }  | 
935  | 0  |     }  | 
936  | 0  |   }  | 
937  | 0  | #endif  | 
938  | 0  |   if (head == nullptr) {         // s is the only waiter | 
939  | 0  |     s->next = s;                 // it's the only entry in the cycle  | 
940  | 0  |     s->readers = mu;             // reader count is from mu word  | 
941  | 0  |     s->maybe_unlocking = false;  // no one is searching an empty list  | 
942  | 0  |     head = s;                    // s is new head  | 
943  | 0  |   } else { | 
944  | 0  |     PerThreadSynch* enqueue_after = nullptr;  // we'll put s after this element  | 
945  | 0  | #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM  | 
946  | 0  |     if (s->priority > head->priority) {  // s's priority is above head's | 
947  |  |       // try to put s in priority-fifo order, or failing that at the front.  | 
948  | 0  |       if (!head->maybe_unlocking) { | 
949  |  |         // No unlocker can be scanning the queue, so we can insert into the  | 
950  |  |         // middle of the queue.  | 
951  |  |         //  | 
952  |  |         // Within a skip chain, all waiters have the same priority, so we can  | 
953  |  |         // skip forward through the chains until we find one with a lower  | 
954  |  |         // priority than the waiter to be enqueued.  | 
955  | 0  |         PerThreadSynch* advance_to = head;  // next value of enqueue_after  | 
956  | 0  |         do { | 
957  | 0  |           enqueue_after = advance_to;  | 
958  |  |           // (side-effect: optimizes skip chain)  | 
959  | 0  |           advance_to = Skip(enqueue_after->next);  | 
960  | 0  |         } while (s->priority <= advance_to->priority);  | 
961  |  |         // termination guaranteed because s->priority > head->priority  | 
962  |  |         // and head is the end of a skip chain  | 
963  | 0  |       } else if (waitp->how == kExclusive && waitp->cond == nullptr) { | 
964  |  |         // An unlocker could be scanning the queue, but we know it will recheck  | 
965  |  |         // the queue front for writers that have no condition, which is what s  | 
966  |  |         // is, so an insert at front is safe.  | 
967  | 0  |         enqueue_after = head;  // add after head, at front  | 
968  | 0  |       }  | 
969  | 0  |     }  | 
970  | 0  | #endif  | 
971  | 0  |     if (enqueue_after != nullptr) { | 
972  | 0  |       s->next = enqueue_after->next;  | 
973  | 0  |       enqueue_after->next = s;  | 
974  |  |  | 
975  |  |       // enqueue_after can be: head, Skip(...), or cur.  | 
976  |  |       // The first two imply enqueue_after->skip == nullptr, and  | 
977  |  |       // the last is used only if MuEquivalentWaiter(s, cur).  | 
978  |  |       // We require this because clearing enqueue_after->skip  | 
979  |  |       // is impossible; enqueue_after's predecessors might also  | 
980  |  |       // incorrectly skip over s if we were to allow other  | 
981  |  |       // insertion points.  | 
982  | 0  |       ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||  | 
983  | 0  |                          MuEquivalentWaiter(enqueue_after, s),  | 
984  | 0  |                      "Mutex Enqueue failure");  | 
985  |  |  | 
986  | 0  |       if (enqueue_after != head && enqueue_after->may_skip &&  | 
987  | 0  |           MuEquivalentWaiter(enqueue_after, enqueue_after->next)) { | 
988  |  |         // enqueue_after can skip to its new successor, s  | 
989  | 0  |         enqueue_after->skip = enqueue_after->next;  | 
990  | 0  |       }  | 
991  | 0  |       if (MuEquivalentWaiter(s, s->next)) {  // s->may_skip is known to be true | 
992  | 0  |         s->skip = s->next;                   // s may skip to its successor  | 
993  | 0  |       }  | 
994  | 0  |     } else {  // enqueue not done any other way, so | 
995  |  |               // we're inserting s at the back  | 
996  |  |       // s will become new head; copy data from head into it  | 
997  | 0  |       s->next = head->next;  // add s after head  | 
998  | 0  |       head->next = s;  | 
999  | 0  |       s->readers = head->readers;  // reader count is from previous head  | 
1000  | 0  |       s->maybe_unlocking = head->maybe_unlocking;  // same for unlock hint  | 
1001  | 0  |       if (head->may_skip && MuEquivalentWaiter(head, s)) { | 
1002  |  |         // head now has successor; may skip  | 
1003  | 0  |         head->skip = s;  | 
1004  | 0  |       }  | 
1005  | 0  |       head = s;  // s is new head  | 
1006  | 0  |     }  | 
1007  | 0  |   }  | 
1008  | 0  |   s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);  | 
1009  | 0  |   return head;  | 
1010  | 0  | }  | 
1011  |  |  | 
1012  |  | // Dequeue the successor pw->next of thread pw from the Mutex waiter queue  | 
1013  |  | // whose last element is head.  The new head element is returned, or null  | 
1014  |  | // if the list is made empty.  | 
1015  |  | // Dequeue is called with both spinlock and Mutex held.  | 
1016  | 0  | static PerThreadSynch* Dequeue(PerThreadSynch* head, PerThreadSynch* pw) { | 
1017  | 0  |   PerThreadSynch* w = pw->next;  | 
1018  | 0  |   pw->next = w->next;                 // snip w out of list  | 
1019  | 0  |   if (head == w) {                    // we removed the head | 
1020  | 0  |     head = (pw == w) ? nullptr : pw;  // either emptied list, or pw is new head  | 
1021  | 0  |   } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) { | 
1022  |  |     // pw can skip to its new successor  | 
1023  | 0  |     if (pw->next->skip !=  | 
1024  | 0  |         nullptr) {  // either skip to its successors skip target | 
1025  | 0  |       pw->skip = pw->next->skip;  | 
1026  | 0  |     } else {  // or to pw's successor | 
1027  | 0  |       pw->skip = pw->next;  | 
1028  | 0  |     }  | 
1029  | 0  |   }  | 
1030  | 0  |   return head;  | 
1031  | 0  | }  | 
1032  |  |  | 
1033  |  | // Traverse the elements [ pw->next, h] of the circular list whose last element  | 
1034  |  | // is head.  | 
1035  |  | // Remove all elements with wake==true and place them in the  | 
1036  |  | // singly-linked list wake_list in the order found.   Assumes that  | 
1037  |  | // there is only one such element if the element has how == kExclusive.  | 
1038  |  | // Return the new head.  | 
1039  |  | static PerThreadSynch* DequeueAllWakeable(PerThreadSynch* head,  | 
1040  |  |                                           PerThreadSynch* pw,  | 
1041  | 0  |                                           PerThreadSynch** wake_tail) { | 
1042  | 0  |   PerThreadSynch* orig_h = head;  | 
1043  | 0  |   PerThreadSynch* w = pw->next;  | 
1044  | 0  |   bool skipped = false;  | 
1045  | 0  |   do { | 
1046  | 0  |     if (w->wake) {  // remove this element | 
1047  | 0  |       ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");  | 
1048  |  |       // We're removing pw's successor, so pw->skip must be null: if  | 
1049  |  |       // pw->skip != null, pw would share w's condition and would itself  | 
1050  |  |       // already have been removed.  | 
1051  | 0  |       head = Dequeue(head, pw);  | 
1052  | 0  |       w->next = *wake_tail;               // keep list terminated  | 
1053  | 0  |       *wake_tail = w;                     // add w to wake_list;  | 
1054  | 0  |       wake_tail = &w->next;               // next addition to end  | 
1055  | 0  |       if (w->waitp->how == kExclusive) {  // wake at most 1 writer | 
1056  | 0  |         break;  | 
1057  | 0  |       }  | 
1058  | 0  |     } else {         // not waking this one; skip | 
1059  | 0  |       pw = Skip(w);  // skip as much as possible  | 
1060  | 0  |       skipped = true;  | 
1061  | 0  |     }  | 
1062  | 0  |     w = pw->next;  | 
1063  |  |     // We want to stop processing after we've considered the original head,  | 
1064  |  |     // orig_h.  We can't test for w==orig_h in the loop because w may skip over  | 
1065  |  |     // it; we are guaranteed only that w's predecessor will not skip over  | 
1066  |  |     // orig_h.  When we've considered orig_h, either we've processed it and  | 
1067  |  |     // removed it (so orig_h != head), or we considered it and skipped it (so  | 
1068  |  |     // skipped==true && pw == head because skipping from head always skips by  | 
1069  |  |     // just one, leaving pw pointing at head).  So we want to  | 
1070  |  |     // continue the loop with the negation of that expression.  | 
1071  | 0  |   } while (orig_h == head && (pw != head || !skipped));  | 
1072  | 0  |   return head;  | 
1073  | 0  | }  | 
1074  |  |  | 
1075  |  | // Try to remove thread s from the list of waiters on this mutex.  | 
1076  |  | // Does nothing if s is not on the waiter list.  | 
1077  | 0  | void Mutex::TryRemove(PerThreadSynch* s) { | 
1078  | 0  |   SchedulingGuard::ScopedDisable disable_rescheduling;  | 
1079  | 0  |   intptr_t v = mu_.load(std::memory_order_relaxed);  | 
1080  |  |   // acquire spinlock & lock  | 
1081  | 0  |   if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&  | 
1082  | 0  |       mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,  | 
1083  | 0  |                                   std::memory_order_acquire,  | 
1084  | 0  |                                   std::memory_order_relaxed)) { | 
1085  | 0  |     PerThreadSynch* h = GetPerThreadSynch(v);  | 
1086  | 0  |     if (h != nullptr) { | 
1087  | 0  |       PerThreadSynch* pw = h;  // pw is w's predecessor  | 
1088  | 0  |       PerThreadSynch* w;  | 
1089  | 0  |       if ((w = pw->next) != s) {  // search for thread, | 
1090  | 0  |         do {                      // processing at least one element | 
1091  |  |           // If the current element isn't equivalent to the waiter to be  | 
1092  |  |           // removed, we can skip the entire chain.  | 
1093  | 0  |           if (!MuEquivalentWaiter(s, w)) { | 
1094  | 0  |             pw = Skip(w);  // so skip all that won't match  | 
1095  |  |             // we don't have to worry about dangling skip fields  | 
1096  |  |             // in the threads we skipped; none can point to s  | 
1097  |  |             // because they are in a different equivalence class.  | 
1098  | 0  |           } else {          // seeking same condition | 
1099  | 0  |             FixSkip(w, s);  // fix up any skip pointer from w to s  | 
1100  | 0  |             pw = w;  | 
1101  | 0  |           }  | 
1102  |  |           // don't search further if we found the thread, or we're about to  | 
1103  |  |           // process the first thread again.  | 
1104  | 0  |         } while ((w = pw->next) != s && pw != h);  | 
1105  | 0  |       }  | 
1106  | 0  |       if (w == s) {  // found thread; remove it | 
1107  |  |         // pw->skip may be non-zero here; the loop above ensured that  | 
1108  |  |         // no ancestor of s can skip to s, so removal is safe anyway.  | 
1109  | 0  |         h = Dequeue(h, pw);  | 
1110  | 0  |         s->next = nullptr;  | 
1111  | 0  |         s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);  | 
1112  | 0  |       }  | 
1113  | 0  |     }  | 
1114  | 0  |     intptr_t nv;  | 
1115  | 0  |     do {  // release spinlock and lock | 
1116  | 0  |       v = mu_.load(std::memory_order_relaxed);  | 
1117  | 0  |       nv = v & (kMuDesig | kMuEvent);  | 
1118  | 0  |       if (h != nullptr) { | 
1119  | 0  |         nv |= kMuWait | reinterpret_cast<intptr_t>(h);  | 
1120  | 0  |         h->readers = 0;              // we hold writer lock  | 
1121  | 0  |         h->maybe_unlocking = false;  // finished unlocking  | 
1122  | 0  |       }  | 
1123  | 0  |     } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,  | 
1124  | 0  |                                         std::memory_order_relaxed));  | 
1125  | 0  |   }  | 
1126  | 0  | }  | 
1127  |  |  | 
1128  |  | // Wait until thread "s", which must be the current thread, is removed from  | 
1129  |  | // this mutex's waiter queue.  If "s->waitp->timeout" has a timeout, wake up  | 
1130  |  | // when the wait extends past the absolute time specified, even if "s" is  | 
1131  |  | // still on the mutex queue.  In that case, remove "s" from the queue before  | 
1132  |  | // returning.  | 
1133  | 0  | void Mutex::Block(PerThreadSynch* s) { | 
1134  | 0  |   while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) { | 
1135  | 0  |     if (!DecrementSynchSem(this, s, s->waitp->timeout)) { | 
1136  |  |       // After a timeout, we go into a spin loop until we remove ourselves  | 
1137  |  |       // from the queue, or someone else removes us.  We can't be sure to be  | 
1138  |  |       // able to remove ourselves in a single lock acquisition because this  | 
1139  |  |       // mutex may be held, and the holder has the right to read the centre  | 
1140  |  |       // of the waiter queue without holding the spinlock.  | 
1141  | 0  |       this->TryRemove(s);  | 
1142  | 0  |       int c = 0;  | 
1143  | 0  |       while (s->next != nullptr) { | 
1144  | 0  |         c = synchronization_internal::MutexDelay(c, GENTLE);  | 
1145  | 0  |         this->TryRemove(s);  | 
1146  | 0  |       }  | 
1147  | 0  |       if (kDebugMode) { | 
1148  |  |         // This ensures that we test the case that TryRemove() is called when s  | 
1149  |  |         // is not on the queue.  | 
1150  | 0  |         this->TryRemove(s);  | 
1151  | 0  |       }  | 
1152  | 0  |       s->waitp->timeout = KernelTimeout::Never();  // timeout is satisfied  | 
1153  | 0  |       s->waitp->cond = nullptr;  // condition no longer relevant for wakeups  | 
1154  | 0  |     }  | 
1155  | 0  |   }  | 
1156  | 0  |   ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors,  | 
1157  | 0  |                  "detected illegal recursion in Mutex code");  | 
1158  | 0  |   s->waitp = nullptr;  | 
1159  | 0  | }  | 
1160  |  |  | 
1161  |  | // Wake thread w, and return the next thread in the list.  | 
1162  | 0  | PerThreadSynch* Mutex::Wakeup(PerThreadSynch* w) { | 
1163  | 0  |   PerThreadSynch* next = w->next;  | 
1164  | 0  |   w->next = nullptr;  | 
1165  | 0  |   w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);  | 
1166  | 0  |   IncrementSynchSem(this, w);  | 
1167  |  |  | 
1168  | 0  |   return next;  | 
1169  | 0  | }  | 
1170  |  |  | 
1171  |  | static GraphId GetGraphIdLocked(Mutex* mu)  | 
1172  | 6.48M  |     ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) { | 
1173  | 6.48M  |   if (!deadlock_graph) {  // (re)create the deadlock graph. | 
1174  | 2  |     deadlock_graph =  | 
1175  | 2  |         new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))  | 
1176  | 2  |             GraphCycles;  | 
1177  | 2  |   }  | 
1178  | 6.48M  |   return deadlock_graph->GetId(mu);  | 
1179  | 6.48M  | }  | 
1180  |  |  | 
1181  | 3.24M  | static GraphId GetGraphId(Mutex* mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) { | 
1182  | 3.24M  |   deadlock_graph_mu.Lock();  | 
1183  | 3.24M  |   GraphId id = GetGraphIdLocked(mu);  | 
1184  | 3.24M  |   deadlock_graph_mu.Unlock();  | 
1185  | 3.24M  |   return id;  | 
1186  | 3.24M  | }  | 
1187  |  |  | 
1188  |  | // Record a lock acquisition.  This is used in debug mode for deadlock  | 
1189  |  | // detection.  The held_locks pointer points to the relevant data  | 
1190  |  | // structure for each case.  | 
1191  | 3.24M  | static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) { | 
1192  | 3.24M  |   int n = held_locks->n;  | 
1193  | 3.24M  |   int i = 0;  | 
1194  | 3.24M  |   while (i != n && held_locks->locks[i].id != id) { | 
1195  | 0  |     i++;  | 
1196  | 0  |   }  | 
1197  | 3.24M  |   if (i == n) { | 
1198  | 3.24M  |     if (n == ABSL_ARRAYSIZE(held_locks->locks)) { | 
1199  | 0  |       held_locks->overflow = true;  // lost some data  | 
1200  | 3.24M  |     } else {                        // we have room for lock | 
1201  | 3.24M  |       held_locks->locks[i].mu = mu;  | 
1202  | 3.24M  |       held_locks->locks[i].count = 1;  | 
1203  | 3.24M  |       held_locks->locks[i].id = id;  | 
1204  | 3.24M  |       held_locks->n = n + 1;  | 
1205  | 3.24M  |     }  | 
1206  | 3.24M  |   } else { | 
1207  | 0  |     held_locks->locks[i].count++;  | 
1208  | 0  |   }  | 
1209  | 3.24M  | }  | 
1210  |  |  | 
1211  |  | // Record a lock release.  Each call to LockEnter(mu, id, x) should be  | 
1212  |  | // eventually followed by a call to LockLeave(mu, id, x) by the same thread.  | 
1213  |  | // It does not process the event if it is not needed, i.e. when deadlock  | 
1214  |  | // detection is disabled.  | 
1215  | 3.24M  | static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) { | 
1216  | 3.24M  |   int n = held_locks->n;  | 
1217  | 3.24M  |   int i = 0;  | 
1218  | 3.24M  |   while (i != n && held_locks->locks[i].id != id) { | 
1219  | 0  |     i++;  | 
1220  | 0  |   }  | 
1221  | 3.24M  |   if (i == n) { | 
1222  | 0  |     if (!held_locks->overflow) { | 
1223  |  |       // The deadlock id may have been reassigned after ForgetDeadlockInfo,  | 
1224  |  |       // but in that case mu should still be present.  | 
1225  | 0  |       i = 0;  | 
1226  | 0  |       while (i != n && held_locks->locks[i].mu != mu) { | 
1227  | 0  |         i++;  | 
1228  | 0  |       }  | 
1229  | 0  |       if (i == n) {  // mu missing means releasing unheld lock | 
1230  | 0  |         SynchEvent* mu_events = GetSynchEvent(mu);  | 
1231  | 0  |         ABSL_RAW_LOG(FATAL,  | 
1232  | 0  |                      "thread releasing lock it does not hold: "  | 
1233  | 0  |                      "%p %s; ",  | 
1234  | 0  |                      static_cast<void*>(mu),  | 
1235  | 0  |                      mu_events == nullptr ? "" : mu_events->name);  | 
1236  | 0  |       }  | 
1237  | 0  |     }  | 
1238  | 3.24M  |   } else if (held_locks->locks[i].count == 1) { | 
1239  | 3.24M  |     held_locks->n = n - 1;  | 
1240  | 3.24M  |     held_locks->locks[i] = held_locks->locks[n - 1];  | 
1241  | 3.24M  |     held_locks->locks[n - 1].id = InvalidGraphId();  | 
1242  | 3.24M  |     held_locks->locks[n - 1].mu =  | 
1243  | 3.24M  |         nullptr;  // clear mu to please the leak detector.  | 
1244  | 3.24M  |   } else { | 
1245  | 0  |     assert(held_locks->locks[i].count > 0);  | 
1246  | 0  |     held_locks->locks[i].count--;  | 
1247  | 0  |   }  | 
1248  | 3.24M  | }  | 
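
LockEnter() and LockLeave() together maintain a small fixed-size per-thread table of (graph id, count) entries plus an overflow flag, using swap-with-last removal. A simplified standalone sketch of that bookkeeping, with a hypothetical HeldLocks type standing in for SynchLocksHeld:

    // Simplified sketch of per-thread held-lock bookkeeping (hypothetical
    // HeldLocks type, not Abseil's SynchLocksHeld).
    #include <cstdint>

    struct HeldLocks {
      static constexpr int kCapacity = 20;
      struct Entry { uintptr_t id; int count; };
      Entry locks[kCapacity];
      int n = 0;
      bool overflow = false;
    };

    void RecordAcquire(HeldLocks* held, uintptr_t id) {
      for (int i = 0; i != held->n; i++) {
        if (held->locks[i].id == id) {   // re-acquisition of a known lock
          held->locks[i].count++;
          return;
        }
      }
      if (held->n == HeldLocks::kCapacity) {
        held->overflow = true;           // table full; lose this entry
      } else {
        held->locks[held->n++] = {id, 1};
      }
    }

    void RecordRelease(HeldLocks* held, uintptr_t id) {
      for (int i = 0; i != held->n; i++) {
        if (held->locks[i].id == id) {
          if (--held->locks[i].count == 0) {
            held->locks[i] = held->locks[--held->n];  // swap-with-last removal
          }
          return;
        }
      }
      // Not found: either the table overflowed earlier, or the caller is
      // releasing a lock it does not hold.
    }
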
1249  |  |  | 
1250  |  | // Call LockEnter() if in debug mode and deadlock detection is enabled.  | 
1251  | 0  | static inline void DebugOnlyLockEnter(Mutex* mu) { | 
1252  | 0  |   if (kDebugMode) { | 
1253  | 0  |     if (synch_deadlock_detection.load(std::memory_order_acquire) !=  | 
1254  | 0  |         OnDeadlockCycle::kIgnore) { | 
1255  | 0  |       LockEnter(mu, GetGraphId(mu), Synch_GetAllLocks());  | 
1256  | 0  |     }  | 
1257  | 0  |   }  | 
1258  | 0  | }  | 
1259  |  |  | 
1260  |  | // Call LockEnter() if in debug mode and deadlock detection is enabled.  | 
1261  | 3.24M  | static inline void DebugOnlyLockEnter(Mutex* mu, GraphId id) { | 
1262  | 3.24M  |   if (kDebugMode) { | 
1263  | 3.24M  |     if (synch_deadlock_detection.load(std::memory_order_acquire) !=  | 
1264  | 3.24M  |         OnDeadlockCycle::kIgnore) { | 
1265  | 3.24M  |       LockEnter(mu, id, Synch_GetAllLocks());  | 
1266  | 3.24M  |     }  | 
1267  | 3.24M  |   }  | 
1268  | 3.24M  | }  | 
1269  |  |  | 
1270  |  | // Call LockLeave() if in debug mode and deadlock detection is enabled.  | 
1271  | 3.24M  | static inline void DebugOnlyLockLeave(Mutex* mu) { | 
1272  | 3.24M  |   if (kDebugMode) { | 
1273  | 3.24M  |     if (synch_deadlock_detection.load(std::memory_order_acquire) !=  | 
1274  | 3.24M  |         OnDeadlockCycle::kIgnore) { | 
1275  | 3.24M  |       LockLeave(mu, GetGraphId(mu), Synch_GetAllLocks());  | 
1276  | 3.24M  |     }  | 
1277  | 3.24M  |   }  | 
1278  | 3.24M  | }  | 
1279  |  |  | 
1280  |  | static char* StackString(void** pcs, int n, char* buf, int maxlen,  | 
1281  | 0  |                          bool symbolize) { | 
1282  | 0  |   static constexpr int kSymLen = 200;  | 
1283  | 0  |   char sym[kSymLen];  | 
1284  | 0  |   int len = 0;  | 
1285  | 0  |   for (int i = 0; i != n; i++) { | 
1286  | 0  |     if (len >= maxlen)  | 
1287  | 0  |       return buf;  | 
1288  | 0  |     size_t count = static_cast<size_t>(maxlen - len);  | 
1289  | 0  |     if (symbolize) { | 
1290  | 0  |       if (!absl::Symbolize(pcs[i], sym, kSymLen)) { | 
1291  | 0  |         sym[0] = '\0';  | 
1292  | 0  |       }  | 
1293  | 0  |       snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? "\n" : ""), pcs[i],  | 
1294  | 0  |                sym);  | 
1295  | 0  |     } else { | 
1296  | 0  |       snprintf(buf + len, count, " %p", pcs[i]);  | 
1297  | 0  |     }  | 
1298  | 0  |     len += strlen(&buf[len]);  | 
1299  | 0  |   }  | 
1300  | 0  |   return buf;  | 
1301  | 0  | }  | 
1302  |  |  | 
1303  | 0  | static char* CurrentStackString(char* buf, int maxlen, bool symbolize) { | 
1304  | 0  |   void* pcs[40];  | 
1305  | 0  |   return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,  | 
1306  | 0  |                      maxlen, symbolize);  | 
1307  | 0  | }  | 
1308  |  |  | 
1309  |  | namespace { | 
1310  |  | enum { | 
1311  |  |   kMaxDeadlockPathLen = 10  | 
1312  |  | };  // maximum length of a deadlock cycle;  | 
1313  |  |     // a path this long would be remarkable  | 
1314  |  | // Buffers required to report a deadlock.  | 
1315  |  | // We do not allocate them on the stack to avoid a large stack frame.  | 
1316  |  | struct DeadlockReportBuffers { | 
1317  |  |   char buf[6100];  | 
1318  |  |   GraphId path[kMaxDeadlockPathLen];  | 
1319  |  | };  | 
1320  |  |  | 
1321  |  | struct ScopedDeadlockReportBuffers { | 
1322  | 0  |   ScopedDeadlockReportBuffers() { | 
1323  | 0  |     b = reinterpret_cast<DeadlockReportBuffers*>(  | 
1324  | 0  |         base_internal::LowLevelAlloc::Alloc(sizeof(*b)));  | 
1325  | 0  |   }  | 
1326  | 0  |   ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); } | 
1327  |  |   DeadlockReportBuffers* b;  | 
1328  |  | };  | 
1329  |  |  | 
1330  |  | // Helper to pass to GraphCycles::UpdateStackTrace.  | 
1331  | 0  | int GetStack(void** stack, int max_depth) { | 
1332  | 0  |   return absl::GetStackTrace(stack, max_depth, 3);  | 
1333  | 0  | }  | 
1334  |  | }  // anonymous namespace  | 
1335  |  |  | 
1336  |  | // Called in debug mode when a thread is about to acquire a lock in a way that  | 
1337  |  | // may block.  | 
1338  | 3.24M  | static GraphId DeadlockCheck(Mutex* mu) { | 
1339  | 3.24M  |   if (synch_deadlock_detection.load(std::memory_order_acquire) ==  | 
1340  | 3.24M  |       OnDeadlockCycle::kIgnore) { | 
1341  | 0  |     return InvalidGraphId();  | 
1342  | 0  |   }  | 
1343  |  |  | 
1344  | 3.24M  |   SynchLocksHeld* all_locks = Synch_GetAllLocks();  | 
1345  |  |  | 
1346  | 3.24M  |   absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);  | 
1347  | 3.24M  |   const GraphId mu_id = GetGraphIdLocked(mu);  | 
1348  |  |  | 
1349  | 3.24M  |   if (all_locks->n == 0) { | 
1350  |  |     // There are no other locks held. Return now so that we don't need to  | 
1351  |  |     // call GetSynchEvent(). This way we do not record the stack trace  | 
1352  |  |     // for this Mutex. It's ok, since if this Mutex is involved in a deadlock,  | 
1353  |  |     // it can't always be the first lock acquired by a thread.  | 
1354  | 3.24M  |     return mu_id;  | 
1355  | 3.24M  |   }  | 
1356  |  |  | 
1357  |  |   // We prefer to keep stack traces that show a thread holding and acquiring  | 
1358  |  |   // as many locks as possible.  This increases the chances that a given edge  | 
1359  |  |   // in the acquires-before graph will be represented in the stack traces  | 
1360  |  |   // recorded for the locks.  | 
1361  | 0  |   deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);  | 
1362  |  |  | 
1363  |  |   // For each other mutex already held by this thread:  | 
1364  | 0  |   for (int i = 0; i != all_locks->n; i++) { | 
1365  | 0  |     const GraphId other_node_id = all_locks->locks[i].id;  | 
1366  | 0  |     const Mutex* other =  | 
1367  | 0  |         static_cast<const Mutex*>(deadlock_graph->Ptr(other_node_id));  | 
1368  | 0  |     if (other == nullptr) { | 
1369  |  |       // Ignore stale lock  | 
1370  | 0  |       continue;  | 
1371  | 0  |     }  | 
1372  |  |  | 
1373  |  |     // Add the acquired-before edge to the graph.  | 
1374  | 0  |     if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) { | 
1375  | 0  |       ScopedDeadlockReportBuffers scoped_buffers;  | 
1376  | 0  |       DeadlockReportBuffers* b = scoped_buffers.b;  | 
1377  | 0  |       static int number_of_reported_deadlocks = 0;  | 
1378  | 0  |       number_of_reported_deadlocks++;  | 
1379  |  |       // Symbolize only the first two deadlock reports to avoid huge slowdowns.  | 
1380  | 0  |       bool symbolize = number_of_reported_deadlocks <= 2;  | 
1381  | 0  |       ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",  | 
1382  | 0  |                    CurrentStackString(b->buf, sizeof (b->buf), symbolize));  | 
1383  | 0  |       size_t len = 0;  | 
1384  | 0  |       for (int j = 0; j != all_locks->n; j++) { | 
1385  | 0  |         void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);  | 
1386  | 0  |         if (pr != nullptr) { | 
1387  | 0  |           snprintf(b->buf + len, sizeof(b->buf) - len, " %p", pr);  | 
1388  | 0  |           len += strlen(&b->buf[len]);  | 
1389  | 0  |         }  | 
1390  | 0  |       }  | 
1391  | 0  |       ABSL_RAW_LOG(ERROR,  | 
1392  | 0  |                    "Acquiring absl::Mutex %p while holding %s; a cycle in the "  | 
1393  | 0  |                    "historical lock ordering graph has been observed",  | 
1394  | 0  |                    static_cast<void*>(mu), b->buf);  | 
1395  | 0  |       ABSL_RAW_LOG(ERROR, "Cycle: ");  | 
1396  | 0  |       int path_len = deadlock_graph->FindPath(mu_id, other_node_id,  | 
1397  | 0  |                                               ABSL_ARRAYSIZE(b->path), b->path);  | 
1398  | 0  |       for (int j = 0; j != path_len && j != ABSL_ARRAYSIZE(b->path); j++) { | 
1399  | 0  |         GraphId id = b->path[j];  | 
1400  | 0  |         Mutex* path_mu = static_cast<Mutex*>(deadlock_graph->Ptr(id));  | 
1401  | 0  |         if (path_mu == nullptr) continue;  | 
1402  | 0  |         void** stack;  | 
1403  | 0  |         int depth = deadlock_graph->GetStackTrace(id, &stack);  | 
1404  | 0  |         snprintf(b->buf, sizeof(b->buf),  | 
1405  | 0  |                  "mutex@%p stack: ", static_cast<void*>(path_mu));  | 
1406  | 0  |         StackString(stack, depth, b->buf + strlen(b->buf),  | 
1407  | 0  |                     static_cast<int>(sizeof(b->buf) - strlen(b->buf)),  | 
1408  | 0  |                     symbolize);  | 
1409  | 0  |         ABSL_RAW_LOG(ERROR, "%s", b->buf);  | 
1410  | 0  |       }  | 
1411  | 0  |       if (path_len > static_cast<int>(ABSL_ARRAYSIZE(b->path))) { | 
1412  | 0  |         ABSL_RAW_LOG(ERROR, "(long cycle; list truncated)");  | 
1413  | 0  |       }  | 
1414  | 0  |       if (synch_deadlock_detection.load(std::memory_order_acquire) ==  | 
1415  | 0  |           OnDeadlockCycle::kAbort) { | 
1416  | 0  |         deadlock_graph_mu.Unlock();  // avoid deadlock in fatal sighandler  | 
1417  | 0  |         ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");  | 
1418  | 0  |         return mu_id;  | 
1419  | 0  |       }  | 
1420  | 0  |       break;  // report at most one potential deadlock per acquisition  | 
1421  | 0  |     }  | 
1422  | 0  |   }  | 
1423  |  |  | 
1424  | 0  |   return mu_id;  | 
1425  | 0  | }  | 
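
DeadlockCheck() is what emits the "Potential Mutex deadlock" report when inserting an acquired-before edge closes a cycle. From user code the behaviour is chosen with the public absl::SetMutexDeadlockDetectionMode() API, and the check only runs when kDebugMode is true (typically builds without NDEBUG). A usage sketch that acquires two mutexes in opposite orders on two sequential threads, the kind of history that can close a cycle in the graph; the exact report output is not guaranteed:

    // Usage sketch: enable deadlock reporting, then create an inconsistent
    // lock order.  The threads run one after the other, so no real deadlock
    // occurs; only the historical ordering graph gains a cycle.
    #include <thread>
    #include "absl/synchronization/mutex.h"

    absl::Mutex mu_a;
    absl::Mutex mu_b;

    int main() {
      absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);

      std::thread t1([] {           // records the edge mu_a -> mu_b
        absl::MutexLock a(&mu_a);
        absl::MutexLock b(&mu_b);
      });
      t1.join();

      std::thread t2([] {           // opposite order: the edge mu_b -> mu_a
        absl::MutexLock b(&mu_b);   // closes a cycle and is reported
        absl::MutexLock a(&mu_a);
      });
      t2.join();
      return 0;
    }
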
1426  |  |  | 
1427  |  | // Invoke DeadlockCheck() iff we're in debug mode and  | 
1428  |  | // deadlock checking has been enabled.  | 
1429  | 3.24M  | static inline GraphId DebugOnlyDeadlockCheck(Mutex* mu) { | 
1430  | 3.24M  |   if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=  | 
1431  | 3.24M  |                         OnDeadlockCycle::kIgnore) { | 
1432  | 3.24M  |     return DeadlockCheck(mu);  | 
1433  | 3.24M  |   } else { | 
1434  | 0  |     return InvalidGraphId();  | 
1435  | 0  |   }  | 
1436  | 3.24M  | }  | 
1437  |  |  | 
1438  | 0  | void Mutex::ForgetDeadlockInfo() { | 
1439  | 0  |   if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=  | 
1440  | 0  |                         OnDeadlockCycle::kIgnore) { | 
1441  | 0  |     deadlock_graph_mu.Lock();  | 
1442  | 0  |     if (deadlock_graph != nullptr) { | 
1443  | 0  |       deadlock_graph->RemoveNode(this);  | 
1444  | 0  |     }  | 
1445  | 0  |     deadlock_graph_mu.Unlock();  | 
1446  | 0  |   }  | 
1447  | 0  | }  | 
1448  |  |  | 
1449  | 0  | void Mutex::AssertNotHeld() const { | 
1450  |  |   // We have the data to allow this check only if in debug mode and deadlock  | 
1451  |  |   // detection is enabled.  | 
1452  | 0  |   if (kDebugMode &&  | 
1453  | 0  |       (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&  | 
1454  | 0  |       synch_deadlock_detection.load(std::memory_order_acquire) !=  | 
1455  | 0  |           OnDeadlockCycle::kIgnore) { | 
1456  | 0  |     GraphId id = GetGraphId(const_cast<Mutex*>(this));  | 
1457  | 0  |     SynchLocksHeld* locks = Synch_GetAllLocks();  | 
1458  | 0  |     for (int i = 0; i != locks->n; i++) { | 
1459  | 0  |       if (locks->locks[i].id == id) { | 
1460  | 0  |         SynchEvent* mu_events = GetSynchEvent(this);  | 
1461  | 0  |         ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",  | 
1462  | 0  |                      static_cast<const void*>(this),  | 
1463  | 0  |                      (mu_events == nullptr ? "" : mu_events->name));  | 
1464  | 0  |       }  | 
1465  | 0  |     }  | 
1466  | 0  |   }  | 
1467  | 0  | }  | 
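
As the code above shows, AssertNotHeld() consults the per-thread held-locks table, so it only fires in debug builds with deadlock detection enabled; in other builds it is effectively a no-op. A typical use is to guard against re-entrant locking at an API boundary (hypothetical class below):

    // Usage sketch: assert a lock is NOT held before acquiring it.
    #include "absl/synchronization/mutex.h"

    class Counter {
     public:
      void Increment() {
        mu_.AssertNotHeld();      // catches accidental re-entry in debug builds
        absl::MutexLock l(&mu_);
        ++value_;
      }

     private:
      absl::Mutex mu_;
      int value_ = 0;
    };
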
1468  |  |  | 
1469  |  | // Attempt to acquire *mu, and return whether successful.  The implementation  | 
1470  |  | // may spin for a short while if the lock cannot be acquired immediately.  | 
1471  | 0  | static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) { | 
1472  | 0  |   int c = GetMutexGlobals().spinloop_iterations;  | 
1473  | 0  |   do {  // do/while somewhat faster on AMD | 
1474  | 0  |     intptr_t v = mu->load(std::memory_order_relaxed);  | 
1475  | 0  |     if ((v & (kMuReader | kMuEvent)) != 0) { | 
1476  | 0  |       return false;                       // a reader or tracing -> give up  | 
1477  | 0  |     } else if (((v & kMuWriter) == 0) &&  // no holder -> try to acquire  | 
1478  | 0  |                mu->compare_exchange_strong(v, kMuWriter | v,  | 
1479  | 0  |                                            std::memory_order_acquire,  | 
1480  | 0  |                                            std::memory_order_relaxed)) { | 
1481  | 0  |       return true;  | 
1482  | 0  |     }  | 
1483  | 0  |   } while (--c > 0);  | 
1484  | 0  |   return false;  | 
1485  | 0  | }  | 
1486  |  |  | 
1487  | 61  | void Mutex::Lock() { | 
1488  | 61  |   ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);  | 
1489  | 61  |   GraphId id = DebugOnlyDeadlockCheck(this);  | 
1490  | 61  |   intptr_t v = mu_.load(std::memory_order_relaxed);  | 
1491  |  |   // try fast acquire, then spin loop  | 
1492  | 61  |   if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||  | 
1493  | 61  |       !mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,  | 
1494  | 61  |                                    std::memory_order_relaxed)) { | 
1495  |  |     // try spin acquire, then slow loop  | 
1496  | 0  |     if (!TryAcquireWithSpinning(&this->mu_)) { | 
1497  | 0  |       this->LockSlow(kExclusive, nullptr, 0);  | 
1498  | 0  |     }  | 
1499  | 0  |   }  | 
1500  | 61  |   DebugOnlyLockEnter(this, id);  | 
1501  | 61  |   ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);  | 
1502  | 61  | }  | 
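
The common case above is a single compare-and-swap. Most callers do not invoke Lock()/Unlock() by name but go through the RAII wrapper absl::MutexLock, which makes exactly these calls. A usage sketch with a hypothetical class:

    // Usage sketch: exclusive locking through the RAII wrapper.
    #include <string>
    #include "absl/synchronization/mutex.h"

    class NameRegistry {
     public:
      void Set(std::string name) {
        absl::MutexLock l(&mu_);   // Lock() here, Unlock() in ~MutexLock
        name_ = std::move(name);
      }

      std::string Get() const {
        absl::MutexLock l(&mu_);
        return name_;
      }

     private:
      mutable absl::Mutex mu_;
      std::string name_;
    };
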
1503  |  |  | 
1504  | 3.24M  | void Mutex::ReaderLock() { | 
1505  | 3.24M  |   ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);  | 
1506  | 3.24M  |   GraphId id = DebugOnlyDeadlockCheck(this);  | 
1507  | 3.24M  |   intptr_t v = mu_.load(std::memory_order_relaxed);  | 
1508  | 3.24M  |   for (;;) { | 
1509  |  |     // If there are non-readers holding the lock, use the slow loop.  | 
1510  | 3.24M  |     if (ABSL_PREDICT_FALSE((v & (kMuWriter | kMuWait | kMuEvent)) != 0)) { | 
1511  | 0  |       this->LockSlow(kShared, nullptr, 0);  | 
1512  | 0  |       break;  | 
1513  | 0  |     }  | 
1514  |  |     // We can avoid the loop and only use the CAS when the lock is free or  | 
1515  |  |     // only held by readers.  | 
1516  | 3.24M  |     if (ABSL_PREDICT_TRUE(mu_.compare_exchange_strong(  | 
1517  | 3.24M  |             v, (kMuReader | v) + kMuOne, std::memory_order_acquire,  | 
1518  | 3.24M  |             std::memory_order_relaxed))) { | 
1519  | 3.24M  |       break;  | 
1520  | 3.24M  |     }  | 
1521  | 3.24M  |   }  | 
1522  | 3.24M  |   DebugOnlyLockEnter(this, id);  | 
1523  | 3.24M  |   ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);  | 
1524  | 3.24M  | }  | 
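
ReaderLock() adds kMuOne to the reader count kept in the high bits of the mutex word. Its RAII counterpart is absl::ReaderMutexLock, used for read-mostly data; a usage sketch with a hypothetical class:

    // Usage sketch: shared (reader) locking for read-mostly state.
    #include <cstddef>
    #include <vector>
    #include "absl/synchronization/mutex.h"

    class Samples {
     public:
      void Add(int x) {
        absl::MutexLock l(&mu_);        // writers need exclusive access
        data_.push_back(x);
      }

      std::size_t Size() const {
        absl::ReaderMutexLock l(&mu_);  // many readers may hold this at once
        return data_.size();
      }

     private:
      mutable absl::Mutex mu_;
      std::vector<int> data_;
    };
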
1525  |  |  | 
1526  |  | bool Mutex::LockWhenCommon(const Condition& cond,  | 
1527  |  |                            synchronization_internal::KernelTimeout t,  | 
1528  | 0  |                            bool write) { | 
1529  | 0  |   MuHow how = write ? kExclusive : kShared;  | 
1530  | 0  |   ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));  | 
1531  | 0  |   GraphId id = DebugOnlyDeadlockCheck(this);  | 
1532  | 0  |   bool res = LockSlowWithDeadline(how, &cond, t, 0);  | 
1533  | 0  |   DebugOnlyLockEnter(this, id);  | 
1534  | 0  |   ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);  | 
1535  | 0  |   return res;  | 
1536  | 0  | }  | 
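
LockWhenCommon() backs the public LockWhen()/ReaderLockWhen() family: acquire the mutex only once a predicate over the protected state holds, with no separate condition-variable signalling. A usage sketch with a hypothetical queue class:

    // Usage sketch: block until a condition over the protected state is true.
    #include <deque>
    #include "absl/synchronization/mutex.h"

    class BlockingQueue {
     public:
      void Push(int x) {
        absl::MutexLock l(&mu_);
        items_.push_back(x);
      }

      int Pop() {
        // Blocks until the predicate is true, then returns with mu_ held.
        mu_.LockWhen(absl::Condition(
            +[](std::deque<int>* d) { return !d->empty(); }, &items_));
        int x = items_.front();
        items_.pop_front();
        mu_.Unlock();
        return x;
      }

     private:
      absl::Mutex mu_;
      std::deque<int> items_;
    };
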
1537  |  |  | 
1538  | 0  | bool Mutex::AwaitCommon(const Condition& cond, KernelTimeout t) { | 
1539  | 0  |   if (kDebugMode) { | 
1540  | 0  |     this->AssertReaderHeld();  | 
1541  | 0  |   }  | 
1542  | 0  |   if (cond.Eval()) {  // condition already true; nothing to do | 
1543  | 0  |     return true;  | 
1544  | 0  |   }  | 
1545  | 0  |   MuHow how =  | 
1546  | 0  |       (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;  | 
1547  | 0  |   ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));  | 
1548  | 0  |   SynchWaitParams waitp(how, &cond, t, nullptr /*no cvmu*/,  | 
1549  | 0  |                         Synch_GetPerThreadAnnotated(this),  | 
1550  | 0  |                         nullptr /*no cv_word*/);  | 
1551  | 0  |   this->UnlockSlow(&waitp);  | 
1552  | 0  |   this->Block(waitp.thread);  | 
1553  | 0  |   ABSL_TSAN_MUTEX_POST_UNLOCK(this, TsanFlags(how));  | 
1554  | 0  |   ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));  | 
1555  | 0  |   this->LockSlowLoop(&waitp, kMuHasBlocked | kMuIsCond);  | 
1556  | 0  |   bool res = waitp.cond != nullptr ||  // => cond known true from LockSlowLoop  | 
1557  | 0  |              EvalConditionAnnotated(&cond, this, true, false, how == kShared);  | 
1558  | 0  |   ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);  | 
1559  | 0  |   ABSL_RAW_CHECK(res || t.has_timeout(),  | 
1560  | 0  |                  "condition untrue on return from Await");  | 
1561  | 0  |   return res;  | 
1562  | 0  | }  | 
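
AwaitCommon() releases the mutex via UnlockSlow(), blocks, and then reacquires it in the same mode, so the caller's critical section simply resumes once the condition holds; there is no explicit signal call, because waiters' conditions are re-evaluated on unlock. The public entry point is Mutex::Await(); a usage sketch with hypothetical types:

    // Usage sketch: wait for a condition while already holding the mutex.
    #include "absl/synchronization/mutex.h"

    struct SharedState {
      absl::Mutex mu;
      bool ready = false;
      int payload = 0;
    };

    int WaitForPayload(SharedState* s) {
      absl::MutexLock l(&s->mu);
      // Atomically releases s->mu, blocks until ready is true, reacquires it.
      s->mu.Await(absl::Condition(&s->ready));
      return s->payload;
    }

    void Publish(SharedState* s, int value) {
      absl::MutexLock l(&s->mu);
      s->payload = value;
      s->ready = true;   // no signal needed; the condition is re-checked on unlock
    }
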
1563  |  |  | 
1564  | 0  | bool Mutex::TryLock() { | 
1565  | 0  |   ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);  | 
1566  | 0  |   intptr_t v = mu_.load(std::memory_order_relaxed);  | 
1567  |  |   // Try fast acquire.  | 
1568  | 0  |   if (ABSL_PREDICT_TRUE((v & (kMuWriter | kMuReader | kMuEvent)) == 0)) { | 
1569  | 0  |     if (ABSL_PREDICT_TRUE(mu_.compare_exchange_strong(  | 
1570  | 0  |             v, kMuWriter | v, std::memory_order_acquire,  | 
1571  | 0  |             std::memory_order_relaxed))) { | 
1572  | 0  |       DebugOnlyLockEnter(this);  | 
1573  | 0  |       ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);  | 
1574  | 0  |       return true;  | 
1575  | 0  |     }  | 
1576  | 0  |   } else if (ABSL_PREDICT_FALSE((v & kMuEvent) != 0)) { | 
1577  |  |     // We're recording events.  | 
1578  | 0  |     return TryLockSlow();  | 
1579  | 0  |   }  | 
1580  | 0  |   ABSL_TSAN_MUTEX_POST_LOCK(  | 
1581  | 0  |       this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);  | 
1582  | 0  |   return false;  | 
1583  | 0  | }  | 
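
TryLock() never blocks, and unlike Lock() it does not run the deadlock check; on success it only records lock entry. A sketch of an opportunistic, best-effort update (hypothetical class):

    // Usage sketch: opportunistic locking with TryLock().
    #include "absl/synchronization/mutex.h"

    class Stats {
     public:
      // Best-effort update: skip the sample if the lock is contended.
      bool TryRecord(int sample) {
        if (!mu_.TryLock()) return false;
        last_sample_ = sample;
        mu_.Unlock();
        return true;
      }

     private:
      absl::Mutex mu_;
      int last_sample_ = 0;
    };
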
1584  |  |  | 
1585  | 0  | ABSL_ATTRIBUTE_NOINLINE bool Mutex::TryLockSlow() { | 
1586  | 0  |   intptr_t v = mu_.load(std::memory_order_relaxed);  | 
1587  | 0  |   if ((v & kExclusive->slow_need_zero) == 0 &&  // try fast acquire  | 
1588  | 0  |       mu_.compare_exchange_strong(  | 
1589  | 0  |           v, (kExclusive->fast_or | v) + kExclusive->fast_add,  | 
1590  | 0  |           std::memory_order_acquire, std::memory_order_relaxed)) { | 
1591  | 0  |     DebugOnlyLockEnter(this);  | 
1592  | 0  |     PostSynchEvent(this, SYNCH_EV_TRYLOCK_SUCCESS);  | 
1593  | 0  |     ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);  | 
1594  | 0  |     return true;  | 
1595  | 0  |   }  | 
1596  | 0  |   PostSynchEvent(this, SYNCH_EV_TRYLOCK_FAILED);  | 
1597  | 0  |   ABSL_TSAN_MUTEX_POST_LOCK(  | 
1598  | 0  |       this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);  | 
1599  | 0  |   return false;  | 
1600  | 0  | }  | 
1601  |  |  | 
1602  | 0  | bool Mutex::ReaderTryLock() { | 
1603  | 0  |   ABSL_TSAN_MUTEX_PRE_LOCK(this,  | 
1604  | 0  |                            __tsan_mutex_read_lock | __tsan_mutex_try_lock);  | 
1605  | 0  |   intptr_t v = mu_.load(std::memory_order_relaxed);  | 
1606  |  |   // Clang tends to unroll the loop when compiling with optimization.  | 
1607  |  |   // But in this case it just unnecessarily increases code size.  | 
1608  |  |   // If CAS is failing due to contention, the jump cost is negligible.  | 
1609  | 0  | #if defined(__clang__)  | 
1610  | 0  | #pragma nounroll  | 
1611  | 0  | #endif  | 
1612  |  |   // The while-loops (here and below) iterate only if the mutex word keeps  | 
1613  |  |   // changing (typically because the reader count changes) under the CAS.  | 
1614  |  |   // We limit the number of attempts to avoid having to think about livelock.  | 
1615  | 0  |   for (int loop_limit = 5; loop_limit != 0; loop_limit--) { | 
1616  | 0  |     if (ABSL_PREDICT_FALSE((v & (kMuWriter | kMuWait | kMuEvent)) != 0)) { | 
1617  | 0  |       break;  | 
1618  | 0  |     }  | 
1619  | 0  |     if (ABSL_PREDICT_TRUE(mu_.compare_exchange_strong(  | 
1620  | 0  |             v, (kMuReader | v) + kMuOne, std::memory_order_acquire,  | 
1621  | 0  |             std::memory_order_relaxed))) { | 
1622  | 0  |       DebugOnlyLockEnter(this);  | 
1623  | 0  |       ABSL_TSAN_MUTEX_POST_LOCK(  | 
1624  | 0  |           this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);  | 
1625  | 0  |       return true;  | 
1626  | 0  |     }  | 
1627  | 0  |   }  | 
1628  | 0  |   if (ABSL_PREDICT_TRUE((v & kMuEvent) == 0)) { | 
1629  | 0  |     ABSL_TSAN_MUTEX_POST_LOCK(this,  | 
1630  | 0  |                               __tsan_mutex_read_lock | __tsan_mutex_try_lock |  | 
1631  | 0  |                                   __tsan_mutex_try_lock_failed,  | 
1632  | 0  |                               0);  | 
1633  | 0  |     return false;  | 
1634  | 0  |   }  | 
1635  |  |   // we're recording events  | 
1636  | 0  |   return ReaderTryLockSlow();  | 
1637  | 0  | }  | 
1638  |  |  | 
1639  | 0  | ABSL_ATTRIBUTE_NOINLINE bool Mutex::ReaderTryLockSlow() { | 
1640  | 0  |   intptr_t v = mu_.load(std::memory_order_relaxed);  | 
1641  | 0  | #if defined(__clang__)  | 
1642  | 0  | #pragma nounroll  | 
1643  | 0  | #endif  | 
1644  | 0  |   for (int loop_limit = 5; loop_limit != 0; loop_limit--) { | 
1645  | 0  |     if ((v & kShared->slow_need_zero) == 0 &&  | 
1646  | 0  |         mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,  | 
1647  | 0  |                                     std::memory_order_acquire,  | 
1648  | 0  |                                     std::memory_order_relaxed)) { | 
1649  | 0  |       DebugOnlyLockEnter(this);  | 
1650  | 0  |       PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_SUCCESS);  | 
1651  | 0  |       ABSL_TSAN_MUTEX_POST_LOCK(  | 
1652  | 0  |           this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);  | 
1653  | 0  |       return true;  | 
1654  | 0  |     }  | 
1655  | 0  |   }  | 
1656  | 0  |   PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_FAILED);  | 
1657  | 0  |   ABSL_TSAN_MUTEX_POST_LOCK(this,  | 
1658  | 0  |                             __tsan_mutex_read_lock | __tsan_mutex_try_lock |  | 
1659  | 0  |                                 __tsan_mutex_try_lock_failed,  | 
1660  | 0  |                             0);  | 
1661  | 0  |   return false;  | 
1662  | 0  | }  | 
1663  |  |  | 
1664  | 61  | void Mutex::Unlock() { | 
1665  | 61  |   ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);  | 
1666  | 61  |   DebugOnlyLockLeave(this);  | 
1667  | 61  |   intptr_t v = mu_.load(std::memory_order_relaxed);  | 
1668  |  |  | 
1669  | 61  |   if (kDebugMode && ((v & (kMuWriter | kMuReader)) != kMuWriter)) { | 
1670  | 0  |     ABSL_RAW_LOG(FATAL, "Mutex unlocked when destroyed or not locked: v=0x%x",  | 
1671  | 0  |                  static_cast<unsigned>(v));  | 
1672  | 0  |   }  | 
1673  |  |  | 
1674  |  |   // should_try_cas is whether we'll try a compare-and-swap immediately.  | 
1675  |  |   // NOTE: optimized out when kDebugMode is false.  | 
1676  | 61  |   bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&  | 
1677  | 61  |                          (v & (kMuWait | kMuDesig)) != kMuWait);  | 
1678  |  |   // But, we can use an alternate computation of it, that compilers  | 
1679  |  |   // currently don't find on their own.  When that changes, this function  | 
1680  |  |   // can be simplified.  | 
1681  | 61  |   intptr_t x = (v ^ (kMuWriter | kMuWait)) & (kMuWriter | kMuEvent);  | 
1682  | 61  |   intptr_t y = (v ^ (kMuWriter | kMuWait)) & (kMuWait | kMuDesig);  | 
1683  |  |   // Claim: "x == 0 && y > 0" is equivalent to should_try_cas.  | 
1684  |  |   // Also, because kMuWriter and kMuEvent exceed kMuDesig and kMuWait,  | 
1685  |  |   // all possible non-zero values for x exceed all possible values for y.  | 
1686  |  |   // Therefore, (x == 0 && y > 0) == (x < y).  | 
1687  | 61  |   if (kDebugMode && should_try_cas != (x < y)) { | 
1688  |  |     // We would usually use PRIdPTR here, but it is not correctly implemented  | 
1689  |  |     // within the Android toolchain.  | 
1690  | 0  |     ABSL_RAW_LOG(FATAL, "internal logic error %llx %llx %llx\n",  | 
1691  | 0  |                  static_cast<long long>(v), static_cast<long long>(x),  | 
1692  | 0  |                  static_cast<long long>(y));  | 
1693  | 0  |   }  | 
1694  | 61  |   if (x < y && mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),  | 
1695  | 61  |                                            std::memory_order_release,  | 
1696  | 61  |                                            std::memory_order_relaxed)) { | 
1697  |  |     // fast writer release (writer with no waiters or with designated waker)  | 
1698  | 61  |   } else { | 
1699  | 0  |     this->UnlockSlow(nullptr /*no waitp*/);  // take slow path  | 
1700  | 0  |   }  | 
1701  | 61  |   ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);  | 
1702  | 61  | }  | 
1703  |  |  | 
1704  |  | // Requires v to represent a reader-locked state.  | 
1705  | 3.24M  | static bool ExactlyOneReader(intptr_t v) { | 
1706  | 3.24M  |   assert((v & (kMuWriter | kMuReader)) == kMuReader);  | 
1707  | 0  |   assert((v & kMuHigh) != 0);  | 
1708  |  |   // The more straightforward "(v & kMuHigh) == kMuOne" also works, but  | 
1709  |  |   // on some architectures the following generates slightly smaller code.  | 
1710  |  |   // It may be faster too.  | 
1711  | 0  |   constexpr intptr_t kMuMultipleWaitersMask = kMuHigh ^ kMuOne;  | 
1712  | 3.24M  |   return (v & kMuMultipleWaitersMask) == 0;  | 
1713  | 3.24M  | }  | 
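
The trick in ExactlyOneReader(): with the reader count stored in multiples of kMuOne in the high field, and at least one reader present, clearing the kMuOne bit from the high field leaves zero exactly when the count is one. A standalone sketch using a hypothetical bit layout (kOne = 0x100, kHigh = ~0xff; not necessarily the real kMuOne/kMuHigh values):

    // Sketch of the "exactly one reader" test with a hypothetical bit layout.
    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kOne = 0x100;                          // one reader (hypothetical)
    constexpr intptr_t kHigh = ~static_cast<intptr_t>(0xff);  // count field (hypothetical)

    // Precondition: at least one reader, i.e. (v & kHigh) != 0, just as
    // ExactlyOneReader() asserts.
    bool ExactlyOne(intptr_t v) {
      // count == 1  <=>  the only high-field bit set is kOne.
      return (v & (kHigh ^ kOne)) == 0;
    }

    int main() {
      intptr_t low_flags = 0x05;                  // arbitrary low-bit state
      assert(ExactlyOne(low_flags + 1 * kOne));   // one reader
      assert(!ExactlyOne(low_flags + 2 * kOne));  // two readers
      assert(!ExactlyOne(low_flags + 7 * kOne));  // several readers
      return 0;
    }
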
1714  |  |  | 
1715  | 3.24M  | void Mutex::ReaderUnlock() { | 
1716  | 3.24M  |   ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);  | 
1717  | 3.24M  |   DebugOnlyLockLeave(this);  | 
1718  | 3.24M  |   intptr_t v = mu_.load(std::memory_order_relaxed);  | 
1719  | 3.24M  |   assert((v & (kMuWriter | kMuReader)) == kMuReader);  | 
1720  | 3.24M  |   for (;;) { | 
1721  | 3.24M  |     if (ABSL_PREDICT_FALSE((v & (kMuReader | kMuWait | kMuEvent)) !=  | 
1722  | 3.24M  |                            kMuReader)) { | 
1723  | 0  |       this->UnlockSlow(nullptr /*no waitp*/);  // take slow path  | 
1724  | 0  |       break;  | 
1725  | 0  |     }  | 
1726  |  |     // fast reader release (reader with no waiters)  | 
1727  | 3.24M  |     intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;  | 
1728  | 3.24M  |     if (ABSL_PREDICT_TRUE(  | 
1729  | 3.24M  |             mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,  | 
1730  | 3.24M  |                                         std::memory_order_relaxed))) { | 
1731  | 3.24M  |       break;  | 
1732  | 3.24M  |     }  | 
1733  | 3.24M  |   }  | 
1734  | 3.24M  |   ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);  | 
1735  | 3.24M  | }  | 
1736  |  |  | 
1737  |  | // Returns a mask used to clear the designated-waker flag in the mutex word  | 
1738  |  | // if this thread has blocked, and therefore may be the designated waker.  | 
1739  | 0  | static intptr_t ClearDesignatedWakerMask(int flag) { | 
1740  | 0  |   assert(flag >= 0);  | 
1741  | 0  |   assert(flag <= 1);  | 
1742  | 0  |   switch (flag) { | 
1743  | 0  |     case 0:  // not blocked  | 
1744  | 0  |       return ~static_cast<intptr_t>(0);  | 
1745  | 0  |     case 1:  // blocked; turn off the designated waker bit  | 
1746  | 0  |       return ~static_cast<intptr_t>(kMuDesig);  | 
1747  | 0  |   }  | 
1748  | 0  |   ABSL_UNREACHABLE();  | 
1749  | 0  | }  | 
1750  |  |  | 
1751  |  | // Returns a mask used to conditionally ignore the existence of waiting  | 
1752  |  | // writers when a reader that has already blocked once wakes up.  | 
1753  | 0  | static intptr_t IgnoreWaitingWritersMask(int flag) { | 
1754  | 0  |   assert(flag >= 0);  | 
1755  | 0  |   assert(flag <= 1);  | 
1756  | 0  |   switch (flag) { | 
1757  | 0  |     case 0:  // not blocked  | 
1758  | 0  |       return ~static_cast<intptr_t>(0);  | 
1759  | 0  |     case 1:  // blocked; pretend there are no waiting writers  | 
1760  | 0  |       return ~static_cast<intptr_t>(kMuWrWait);  | 
1761  | 0  |   }  | 
1762  | 0  |   ABSL_UNREACHABLE();  | 
1763  | 0  | }  | 
1764  |  |  | 
1765  |  | // Internal version of LockWhen().  See LockSlowWithDeadline()  | 
1766  |  | ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition* cond,  | 
1767  | 0  |                                              int flags) { | 
1768  | 0  |   ABSL_RAW_CHECK(  | 
1769  | 0  |       this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),  | 
1770  | 0  |       "condition untrue on return from LockSlow");  | 
1771  | 0  | }  | 
1772  |  |  | 
1773  |  | // Compute cond->Eval() and tell race detectors that we do it under mutex mu.  | 
1774  |  | static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,  | 
1775  |  |                                           bool locking, bool trylock,  | 
1776  | 0  |                                           bool read_lock) { | 
1777  |  |   // Delicate annotation dance.  | 
1778  |  |   // We are currently inside of read/write lock/unlock operation.  | 
1779  |  |   // All memory accesses are ignored inside of mutex operations + for unlock  | 
1780  |  |   // operation tsan considers that we've already released the mutex.  | 
1781  | 0  |   bool res = false;  | 
1782  |  | #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE  | 
1783  |  |   const uint32_t flags = read_lock ? __tsan_mutex_read_lock : 0;  | 
1784  |  |   const uint32_t tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);  | 
1785  |  | #endif  | 
1786  | 0  |   if (locking) { | 
1787  |  |     // For lock we pretend that we have finished the operation,  | 
1788  |  |     // evaluate the predicate, then unlock the mutex and start locking it again  | 
1789  |  |     // to match the annotation at the end of outer lock operation.  | 
1790  |  |     // Note: we can't simply do POST_LOCK, Eval, PRE_LOCK, because then tsan  | 
1791  |  |     // will think the lock acquisition is recursive which will trigger  | 
1792  |  |     // deadlock detector.  | 
1793  | 0  |     ABSL_TSAN_MUTEX_POST_LOCK(mu, tryflags, 0);  | 
1794  | 0  |     res = cond->Eval();  | 
1795  |  |     // There is no "try" version of Unlock, so use flags instead of tryflags.  | 
1796  | 0  |     ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);  | 
1797  | 0  |     ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);  | 
1798  | 0  |     ABSL_TSAN_MUTEX_PRE_LOCK(mu, tryflags);  | 
1799  | 0  |   } else { | 
1800  |  |     // Similarly, for unlock we pretend that we have unlocked the mutex,  | 
1801  |  |     // lock the mutex, evaluate the predicate, and start unlocking it again  | 
1802  |  |     // to match the annotation at the end of outer unlock operation.  | 
1803  | 0  |     ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);  | 
1804  | 0  |     ABSL_TSAN_MUTEX_PRE_LOCK(mu, flags);  | 
1805  | 0  |     ABSL_TSAN_MUTEX_POST_LOCK(mu, flags, 0);  | 
1806  | 0  |     res = cond->Eval();  | 
1807  | 0  |     ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);  | 
1808  | 0  |   }  | 
1809  |  |   // Prevent unused param warnings in non-TSAN builds.  | 
1810  | 0  |   static_cast<void>(mu);  | 
1811  | 0  |   static_cast<void>(trylock);  | 
1812  | 0  |   static_cast<void>(read_lock);  | 
1813  | 0  |   return res;  | 
1814  | 0  | }  | 
1815  |  |  | 
1816  |  | // Compute cond->Eval() hiding it from race detectors.  | 
1817  |  | // We are hiding it because inside of UnlockSlow we can evaluate a predicate  | 
1818  |  | // that was just added by a concurrent Lock operation; Lock adds the predicate  | 
1819  |  | // to the internal Mutex list without actually acquiring the Mutex  | 
1820  |  | // (it only acquires the internal spinlock, which is rightfully invisible for  | 
1821  |  | // tsan). As a result there is no tsan-visible synchronization between the  | 
1822  |  | // addition and this thread. So if we would enable race detection here,  | 
1823  |  | // it would race with the predicate initialization.  | 
1824  | 0  | static inline bool EvalConditionIgnored(Mutex* mu, const Condition* cond) { | 
1825  |  |   // Memory accesses are already ignored inside of lock/unlock operations,  | 
1826  |  |   // but synchronization operations are also ignored. When we evaluate the  | 
1827  |  |   // predicate we must ignore only memory accesses but not synchronization,  | 
1828  |  |   // because missed synchronization can lead to false reports later.  | 
1829  |  |   // So we "divert" (which un-ignores both memory accesses and synchronization)  | 
1830  |  |   // and then separately turn on ignores of memory accesses.  | 
1831  | 0  |   ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);  | 
1832  | 0  |   ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();  | 
1833  | 0  |   bool res = cond->Eval();  | 
1834  | 0  |   ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();  | 
1835  | 0  |   ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);  | 
1836  | 0  |   static_cast<void>(mu);  // Prevent unused param warning in non-TSAN builds.  | 
1837  | 0  |   return res;  | 
1838  | 0  | }  | 
1839  |  |  | 
1840  |  | // Internal equivalent of *LockWhenWithDeadline(), where  | 
1841  |  | //   "t" represents the absolute timeout; !t.has_timeout() means "forever".  | 
1842  |  | //   "how" is "kShared" (for ReaderLockWhen) or "kExclusive" (for LockWhen)  | 
1843  |  | // In flags, bits are ored together:  | 
1844  |  | // - kMuHasBlocked indicates that the client has already blocked on the call so  | 
1845  |  | //   the designated waker bit must be cleared and waiting writers should not  | 
1846  |  | //   obstruct this call  | 
1847  |  | // - kMuIsCond indicates that this is a conditional acquire (condition variable,  | 
1848  |  | //   Await,  LockWhen) so contention profiling should be suppressed.  | 
1849  |  | bool Mutex::LockSlowWithDeadline(MuHow how, const Condition* cond,  | 
1850  | 0  |                                  KernelTimeout t, int flags) { | 
1851  | 0  |   intptr_t v = mu_.load(std::memory_order_relaxed);  | 
1852  | 0  |   bool unlock = false;  | 
1853  | 0  |   if ((v & how->fast_need_zero) == 0 &&  // try fast acquire  | 
1854  | 0  |       mu_.compare_exchange_strong(  | 
1855  | 0  |           v,  | 
1856  | 0  |           (how->fast_or |  | 
1857  | 0  |            (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +  | 
1858  | 0  |               how->fast_add,  | 
1859  | 0  |           std::memory_order_acquire, std::memory_order_relaxed)) { | 
1860  | 0  |     if (cond == nullptr ||  | 
1861  | 0  |         EvalConditionAnnotated(cond, this, true, false, how == kShared)) { | 
1862  | 0  |       return true;  | 
1863  | 0  |     }  | 
1864  | 0  |     unlock = true;  | 
1865  | 0  |   }  | 
1866  | 0  |   SynchWaitParams waitp(how, cond, t, nullptr /*no cvmu*/,  | 
1867  | 0  |                         Synch_GetPerThreadAnnotated(this),  | 
1868  | 0  |                         nullptr /*no cv_word*/);  | 
1869  | 0  |   if (cond != nullptr) { | 
1870  | 0  |     flags |= kMuIsCond;  | 
1871  | 0  |   }  | 
1872  | 0  |   if (unlock) { | 
1873  | 0  |     this->UnlockSlow(&waitp);  | 
1874  | 0  |     this->Block(waitp.thread);  | 
1875  | 0  |     flags |= kMuHasBlocked;  | 
1876  | 0  |   }  | 
1877  | 0  |   this->LockSlowLoop(&waitp, flags);  | 
1878  | 0  |   return waitp.cond != nullptr ||  // => cond known true from LockSlowLoop  | 
1879  | 0  |          cond == nullptr ||  | 
1880  | 0  |          EvalConditionAnnotated(cond, this, true, false, how == kShared);  | 
1881  | 0  | }  | 
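
LockSlowWithDeadline() is the engine behind the timed public calls, with KernelTimeout::Never() as the "forever" case. A usage sketch of the timed variant LockWhenWithTimeout(), which returns false on timeout but holds the mutex on return either way (hypothetical Work type):

    // Usage sketch: timed condition waits built on LockSlowWithDeadline().
    #include "absl/synchronization/mutex.h"
    #include "absl/time/time.h"

    struct Work {
      absl::Mutex mu;
      bool done = false;
    };

    // Returns true if the work finished within `timeout`, false otherwise.
    // Either way the mutex is held when LockWhenWithTimeout returns and must
    // be released.
    bool WaitForCompletion(Work* w, absl::Duration timeout) {
      bool finished =
          w->mu.LockWhenWithTimeout(absl::Condition(&w->done), timeout);
      w->mu.Unlock();
      return finished;
    }
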
1882  |  |  | 
1883  |  | // RAW_CHECK_FMT() takes a condition, a printf-style format string, and  | 
1884  |  | // the printf-style argument list.   The format string must be a literal.  | 
1885  |  | // Arguments after the first are not evaluated unless the condition is true.  | 
1886  |  | #define RAW_CHECK_FMT(cond, ...)                                   \  | 
1887  | 0  |   do {                                                             \ | 
1888  | 0  |     if (ABSL_PREDICT_FALSE(!(cond))) {                             \ | 
1889  | 0  |       ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \  | 
1890  | 0  |     }                                                              \  | 
1891  | 0  |   } while (0)  | 
1892  |  |  | 
1893  | 0  | static void CheckForMutexCorruption(intptr_t v, const char* label) { | 
1894  |  |   // Test for either of two situations that should not occur in v:  | 
1895  |  |   //   kMuWriter and kMuReader  | 
1896  |  |   //   kMuWrWait and !kMuWait  | 
1897  | 0  |   const uintptr_t w = static_cast<uintptr_t>(v ^ kMuWait);  | 
1898  |  |   // By flipping that bit, we can now test for:  | 
1899  |  |   //   kMuWriter and kMuReader in w  | 
1900  |  |   //   kMuWrWait and kMuWait in w  | 
1901  |  |   // We've chosen these two pairs of values to be so that they will overlap,  | 
1902  |  |   // respectively, when the word is left shifted by three.  This allows us to  | 
1903  |  |   // save a branch in the common (correct) case of them not being coincident.  | 
1904  | 0  |   static_assert(kMuReader << 3 == kMuWriter, "must match");  | 
1905  | 0  |   static_assert(kMuWait << 3 == kMuWrWait, "must match");  | 
1906  | 0  |   if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;  | 
1907  | 0  |   RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),  | 
1908  | 0  |                 "%s: Mutex corrupt: both reader and writer lock held: %p",  | 
1909  | 0  |                 label, reinterpret_cast<void*>(v));  | 
1910  | 0  |   RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,  | 
1911  | 0  |                 "%s: Mutex corrupt: waiting writer with no waiters: %p", label,  | 
1912  | 0  |                 reinterpret_cast<void*>(v));  | 
1913  | 0  |   assert(false);  | 
1914  | 0  | }  | 
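
The single-branch fast path in CheckForMutexCorruption() relies on the layout guaranteed by the two static_asserts: after flipping kMuWait in v, both forbidden states become "a bit and the bit three places above it are both set", which w & (w << 3) detects in one step. A standalone sketch with hypothetical constants that satisfy the same relations (kReader = 0x01, kWait = 0x04, hence kWriter = 0x08, kWrWait = 0x20; not necessarily the real values):

    // Sketch of the corruption check's shift trick with hypothetical bit
    // values chosen so that kReader << 3 == kWriter and kWait << 3 == kWrWait.
    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kReader = 0x01, kWait = 0x04;
    constexpr uintptr_t kWriter = 0x08, kWrWait = 0x20;
    static_assert(kReader << 3 == kWriter, "layout");
    static_assert(kWait << 3 == kWrWait, "layout");

    // True iff v encodes one of the impossible states:
    //   both kReader and kWriter set, or kWrWait set without kWait.
    bool LooksCorrupt(uintptr_t v) {
      const uintptr_t w = v ^ kWait;  // "kWrWait without kWait" becomes "both set" in w
      return (w & (w << 3) & (kWriter | kWrWait)) != 0;
    }

    int main() {
      assert(!LooksCorrupt(kWriter));           // plain writer lock: fine
      assert(!LooksCorrupt(kReader));           // plain reader lock: fine
      assert(!LooksCorrupt(kWait | kWrWait));   // waiting writer with waiters: fine
      assert(LooksCorrupt(kReader | kWriter));  // reader and writer together: corrupt
      assert(LooksCorrupt(kWrWait));            // waiting writer but no waiters: corrupt
      return 0;
    }
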
1915  |  |  | 
1916  | 0  | void Mutex::LockSlowLoop(SynchWaitParams* waitp, int flags) { | 
1917  | 0  |   SchedulingGuard::ScopedDisable disable_rescheduling;  | 
1918  | 0  |   int c = 0;  | 
1919  | 0  |   intptr_t v = mu_.load(std::memory_order_relaxed);  | 
1920  | 0  |   if ((v & kMuEvent) != 0) { | 
1921  | 0  |     PostSynchEvent(  | 
1922  | 0  |         this, waitp->how == kExclusive ? SYNCH_EV_LOCK : SYNCH_EV_READERLOCK);  | 
1923  | 0  |   }  | 
1924  | 0  |   ABSL_RAW_CHECK(  | 
1925  | 0  |       waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,  | 
1926  | 0  |       "detected illegal recursion into Mutex code");  | 
1927  | 0  |   for (;;) { | 
1928  | 0  |     v = mu_.load(std::memory_order_relaxed);  | 
1929  | 0  |     CheckForMutexCorruption(v, "Lock");  | 
1930  | 0  |     if ((v & waitp->how->slow_need_zero) == 0) { | 
1931  | 0  |       if (mu_.compare_exchange_strong(  | 
1932  | 0  |               v,  | 
1933  | 0  |               (waitp->how->fast_or |  | 
1934  | 0  |                (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +  | 
1935  | 0  |                   waitp->how->fast_add,  | 
1936  | 0  |               std::memory_order_acquire, std::memory_order_relaxed)) { | 
1937  | 0  |         if (waitp->cond == nullptr ||  | 
1938  | 0  |             EvalConditionAnnotated(waitp->cond, this, true, false,  | 
1939  | 0  |                                    waitp->how == kShared)) { | 
1940  | 0  |           break;  // we timed out, or condition true, so return  | 
1941  | 0  |         }  | 
1942  | 0  |         this->UnlockSlow(waitp);  // got lock but condition false  | 
1943  | 0  |         this->Block(waitp->thread);  | 
1944  | 0  |         flags |= kMuHasBlocked;  | 
1945  | 0  |         c = 0;  | 
1946  | 0  |       }  | 
1947  | 0  |     } else {  // need to access waiter list | 
1948  | 0  |       bool dowait = false;  | 
1949  | 0  |       if ((v & (kMuSpin | kMuWait)) == 0) {  // no waiters | 
1950  |  |         // This thread tries to become the one and only waiter.  | 
1951  | 0  |         PerThreadSynch* new_h = Enqueue(nullptr, waitp, v, flags);  | 
1952  | 0  |         intptr_t nv =  | 
1953  | 0  |             (v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |  | 
1954  | 0  |             kMuWait;  | 
1955  | 0  |         ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");  | 
1956  | 0  |         if (waitp->how == kExclusive && (v & kMuReader) != 0) { | 
1957  | 0  |           nv |= kMuWrWait;  | 
1958  | 0  |         }  | 
1959  | 0  |         if (mu_.compare_exchange_strong(  | 
1960  | 0  |                 v, reinterpret_cast<intptr_t>(new_h) | nv,  | 
1961  | 0  |                 std::memory_order_release, std::memory_order_relaxed)) { | 
1962  | 0  |           dowait = true;  | 
1963  | 0  |         } else {  // attempted Enqueue() failed | 
1964  |  |           // zero out the waitp field set by Enqueue()  | 
1965  | 0  |           waitp->thread->waitp = nullptr;  | 
1966  | 0  |         }  | 
1967  | 0  |       } else if ((v & waitp->how->slow_inc_need_zero &  | 
1968  | 0  |                   IgnoreWaitingWritersMask(flags & kMuHasBlocked)) == 0) { | 
1969  |  |         // This is a reader that needs to increment the reader count,  | 
1970  |  |         // but the count is currently held in the last waiter.  | 
1971  | 0  |         if (mu_.compare_exchange_strong(  | 
1972  | 0  |                 v,  | 
1973  | 0  |                 (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |  | 
1974  | 0  |                     kMuSpin | kMuReader,  | 
1975  | 0  |                 std::memory_order_acquire, std::memory_order_relaxed)) { | 
1976  | 0  |           PerThreadSynch* h = GetPerThreadSynch(v);  | 
1977  | 0  |           h->readers += kMuOne;  // inc reader count in waiter  | 
1978  | 0  |           do {                   // release spinlock | 
1979  | 0  |             v = mu_.load(std::memory_order_relaxed);  | 
1980  | 0  |           } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,  | 
1981  | 0  |                                               std::memory_order_release,  | 
1982  | 0  |                                               std::memory_order_relaxed));  | 
1983  | 0  |           if (waitp->cond == nullptr ||  | 
1984  | 0  |               EvalConditionAnnotated(waitp->cond, this, true, false,  | 
1985  | 0  |                                      waitp->how == kShared)) { | 
1986  | 0  |             break;  // we timed out, or condition true, so return  | 
1987  | 0  |           }  | 
1988  | 0  |           this->UnlockSlow(waitp);  // got lock but condition false  | 
1989  | 0  |           this->Block(waitp->thread);  | 
1990  | 0  |           flags |= kMuHasBlocked;  | 
1991  | 0  |           c = 0;  | 
1992  | 0  |         }  | 
1993  | 0  |       } else if ((v & kMuSpin) == 0 &&  // attempt to queue ourselves  | 
1994  | 0  |                  mu_.compare_exchange_strong(  | 
1995  | 0  |                      v,  | 
1996  | 0  |                      (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |  | 
1997  | 0  |                          kMuSpin | kMuWait,  | 
1998  | 0  |                      std::memory_order_acquire, std::memory_order_relaxed)) { | 
1999  | 0  |         PerThreadSynch* h = GetPerThreadSynch(v);  | 
2000  | 0  |         PerThreadSynch* new_h = Enqueue(h, waitp, v, flags);  | 
2001  | 0  |         intptr_t wr_wait = 0;  | 
2002  | 0  |         ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");  | 
2003  | 0  |         if (waitp->how == kExclusive && (v & kMuReader) != 0) { | 
2004  | 0  |           wr_wait = kMuWrWait;  // give priority to a waiting writer  | 
2005  | 0  |         }  | 
2006  | 0  |         do {  // release spinlock | 
2007  | 0  |           v = mu_.load(std::memory_order_relaxed);  | 
2008  | 0  |         } while (!mu_.compare_exchange_weak(  | 
2009  | 0  |             v,  | 
2010  | 0  |             (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |  | 
2011  | 0  |                 reinterpret_cast<intptr_t>(new_h),  | 
2012  | 0  |             std::memory_order_release, std::memory_order_relaxed));  | 
2013  | 0  |         dowait = true;  | 
2014  | 0  |       }  | 
2015  | 0  |       if (dowait) { | 
2016  | 0  |         this->Block(waitp->thread);  // wait until removed from list or timeout  | 
2017  | 0  |         flags |= kMuHasBlocked;  | 
2018  | 0  |         c = 0;  | 
2019  | 0  |       }  | 
2020  | 0  |     }  | 
2021  | 0  |     ABSL_RAW_CHECK(  | 
2022  | 0  |         waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,  | 
2023  | 0  |         "detected illegal recursion into Mutex code");  | 
2024  |  |     // delay, then try again  | 
2025  | 0  |     c = synchronization_internal::MutexDelay(c, GENTLE);  | 
2026  | 0  |   }  | 
2027  | 0  |   ABSL_RAW_CHECK(  | 
2028  | 0  |       waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,  | 
2029  | 0  |       "detected illegal recursion into Mutex code");  | 
2030  | 0  |   if ((v & kMuEvent) != 0) { | 
2031  | 0  |     PostSynchEvent(this, waitp->how == kExclusive  | 
2032  | 0  |                              ? SYNCH_EV_LOCK_RETURNING  | 
2033  | 0  |                              : SYNCH_EV_READERLOCK_RETURNING);  | 
2034  | 0  |   }  | 
2035  | 0  | }  | 
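// Illustrative caller-side sketch: the slow path above ultimately serves the
// public conditional/timed acquisition entry points such as
// Mutex::LockWhenWithTimeout().  Queue, WaitForItem and has_item below are
// hypothetical names, not part of this file.
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"

struct Queue {
  absl::Mutex mu;
  int pending = 0;  // guarded by mu
};

// Returns true if an item became available within one second.
bool WaitForItem(Queue* q) {
  // A captureless lambda decays to bool (*)(Queue*), matching the
  // Condition(bool (*)(T*), T*) constructor.
  auto has_item = +[](Queue* qq) { return qq->pending > 0; };
  bool ok = q->mu.LockWhenWithTimeout(absl::Condition(has_item, q),
                                      absl::Seconds(1));
  if (ok) --q->pending;
  q->mu.Unlock();  // the lock is held on return even if the timeout expired
  return ok;
}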
2036  |  |  | 
2037  |  | // Unlock this mutex, which is held by the current thread.  | 
2038  |  | // If waitp is non-zero, it must be the wait parameters for the current thread  | 
2039  |  | // which holds the lock but is not runnable because its condition is false  | 
2040  |  | // or it is in the process of blocking on a condition variable; it must requeue  | 
2041  |  | // itself on the mutex/condvar to wait for its condition to become true.  | 
2042  | 0  | ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams* waitp) { | 
2043  | 0  |   SchedulingGuard::ScopedDisable disable_rescheduling;  | 
2044  | 0  |   intptr_t v = mu_.load(std::memory_order_relaxed);  | 
2045  | 0  |   this->AssertReaderHeld();  | 
2046  | 0  |   CheckForMutexCorruption(v, "Unlock");  | 
2047  | 0  |   if ((v & kMuEvent) != 0) { | 
2048  | 0  |     PostSynchEvent(  | 
2049  | 0  |         this, (v & kMuWriter) != 0 ? SYNCH_EV_UNLOCK : SYNCH_EV_READERUNLOCK);  | 
2050  | 0  |   }  | 
2051  | 0  |   int c = 0;  | 
2052  |  |   // the waiter under consideration to wake, or zero  | 
2053  | 0  |   PerThreadSynch* w = nullptr;  | 
2054  |  |   // the predecessor to w or zero  | 
2055  | 0  |   PerThreadSynch* pw = nullptr;  | 
2056  |  |   // head of the list searched previously, or zero  | 
2057  | 0  |   PerThreadSynch* old_h = nullptr;  | 
2058  |  |   // kPerThreadSynchNull is the end-of-list sentinel for wake_list.  | 
2059  | 0  |   PerThreadSynch* wake_list = kPerThreadSynchNull;  // list of threads to wake  | 
2060  | 0  |   intptr_t wr_wait = 0;  // set to kMuWrWait if we wake a reader and a  | 
2061  |  |                          // later writer could have acquired the lock  | 
2062  |  |                          // (starvation avoidance)  | 
2063  | 0  |   ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||  | 
2064  | 0  |                      waitp->thread->suppress_fatal_errors,  | 
2065  | 0  |                  "detected illegal recursion into Mutex code");  | 
2066  |  |   // This loop finds the threads to wake (wake_list), if any, and removes them  | 
2067  |  |   // from the list of waiters.  In addition, it places waitp->thread on the  | 
2068  |  |   // queue of waiters if waitp is non-zero.  | 
2069  | 0  |   for (;;) { | 
2070  | 0  |     v = mu_.load(std::memory_order_relaxed);  | 
2071  | 0  |     if ((v & kMuWriter) != 0 && (v & (kMuWait | kMuDesig)) != kMuWait &&  | 
2072  | 0  |         waitp == nullptr) { | 
2073  |  |       // fast writer release (writer with no waiters or with designated waker)  | 
2074  | 0  |       if (mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),  | 
2075  | 0  |                                       std::memory_order_release,  | 
2076  | 0  |                                       std::memory_order_relaxed)) { | 
2077  | 0  |         return;  | 
2078  | 0  |       }  | 
2079  | 0  |     } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) { | 
2080  |  |       // fast reader release (reader with no waiters)  | 
2081  | 0  |       intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;  | 
2082  | 0  |       if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,  | 
2083  | 0  |                                       std::memory_order_relaxed)) { | 
2084  | 0  |         return;  | 
2085  | 0  |       }  | 
2086  | 0  |     } else if ((v & kMuSpin) == 0 &&  // attempt to get spinlock  | 
2087  | 0  |                mu_.compare_exchange_strong(v, v | kMuSpin,  | 
2088  | 0  |                                            std::memory_order_acquire,  | 
2089  | 0  |                                            std::memory_order_relaxed)) { | 
2090  | 0  |       if ((v & kMuWait) == 0) {  // no one to wake | 
2091  | 0  |         intptr_t nv;  | 
2092  | 0  |         bool do_enqueue = true;  // always Enqueue() the first time  | 
2093  | 0  |         ABSL_RAW_CHECK(waitp != nullptr,  | 
2094  | 0  |                        "UnlockSlow is confused");  // about to sleep  | 
2095  | 0  |         do {  // must loop to release spinlock as reader count may change | 
2096  | 0  |           v = mu_.load(std::memory_order_relaxed);  | 
2097  |  |           // decrement reader count if there are readers  | 
2098  | 0  |           intptr_t new_readers = (v >= kMuOne) ? v - kMuOne : v;  | 
2099  | 0  |           PerThreadSynch* new_h = nullptr;  | 
2100  | 0  |           if (do_enqueue) { | 
2101  |  |             // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then  | 
2102  |  |             // we must not retry here.  The initial attempt will always have  | 
2103  |  |             // succeeded; further attempts would enqueue us against *this due to  | 
2104  |  |             // Fer() handling.  | 
2105  | 0  |             do_enqueue = (waitp->cv_word == nullptr);  | 
2106  | 0  |             new_h = Enqueue(nullptr, waitp, new_readers, kMuIsCond);  | 
2107  | 0  |           }  | 
2108  | 0  |           intptr_t clear = kMuWrWait | kMuWriter;  // by default clear write bit  | 
2109  | 0  |           if ((v & kMuWriter) == 0 && ExactlyOneReader(v)) {  // last reader | 
2110  | 0  |             clear = kMuWrWait | kMuReader;                    // clear read bit  | 
2111  | 0  |           }  | 
2112  | 0  |           nv = (v & kMuLow & ~clear & ~kMuSpin);  | 
2113  | 0  |           if (new_h != nullptr) { | 
2114  | 0  |             nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);  | 
2115  | 0  |           } else {  // new_h could be nullptr if we queued ourselves on a | 
2116  |  |                     // CondVar  | 
2117  |  |             // In that case, we must place the reader count back in the mutex  | 
2118  |  |             // word, as Enqueue() did not store it in the new waiter.  | 
2119  | 0  |             nv |= new_readers & kMuHigh;  | 
2120  | 0  |           }  | 
2121  |  |           // release spinlock & our lock; retry if reader-count changed  | 
2122  |  |           // (writer count cannot change since we hold lock)  | 
2123  | 0  |         } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,  | 
2124  | 0  |                                             std::memory_order_relaxed));  | 
2125  | 0  |         break;  | 
2126  | 0  |       }  | 
2127  |  |  | 
2128  |  |       // There are waiters.  | 
2129  |  |       // Set h to the head of the circular waiter list.  | 
2130  | 0  |       PerThreadSynch* h = GetPerThreadSynch(v);  | 
2131  | 0  |       if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) { | 
2132  |  |         // a reader but not the last  | 
2133  | 0  |         h->readers -= kMuOne;    // release our lock  | 
2134  | 0  |         intptr_t nv = v;         // normally just release spinlock  | 
2135  | 0  |         if (waitp != nullptr) {  // but waitp!=nullptr => must queue ourselves | 
2136  | 0  |           PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);  | 
2137  | 0  |           ABSL_RAW_CHECK(new_h != nullptr,  | 
2138  | 0  |                          "waiters disappeared during Enqueue()!");  | 
2139  | 0  |           nv &= kMuLow;  | 
2140  | 0  |           nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);  | 
2141  | 0  |         }  | 
2142  | 0  |         mu_.store(nv, std::memory_order_release);  // release spinlock  | 
2143  |  |         // can release with a store because there were waiters  | 
2144  | 0  |         break;  | 
2145  | 0  |       }  | 
2146  |  |  | 
2147  |  |       // Either we didn't search before, or we marked the queue  | 
2148  |  |       // as "maybe_unlocking" and no one else should have changed it.  | 
2149  | 0  |       ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking,  | 
2150  | 0  |                      "Mutex queue changed beneath us");  | 
2151  |  |  | 
2152  |  |       // The lock is becoming free, and there's a waiter  | 
2153  | 0  |       if (old_h != nullptr &&  | 
2154  | 0  |           !old_h->may_skip) {    // we used old_h as a terminator | 
2155  | 0  |         old_h->may_skip = true;  // allow old_h to skip once more  | 
2156  | 0  |         ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");  | 
2157  | 0  |         if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) { | 
2158  | 0  |           old_h->skip = old_h->next;  // old_h not head & can skip to successor  | 
2159  | 0  |         }  | 
2160  | 0  |       }  | 
2161  | 0  |       if (h->next->waitp->how == kExclusive &&  | 
2162  | 0  |           h->next->waitp->cond == nullptr) { | 
2163  |  |         // easy case: writer with no condition; no need to search  | 
2164  | 0  |         pw = h;  // wake w, the successor of h (=pw)  | 
2165  | 0  |         w = h->next;  | 
2166  | 0  |         w->wake = true;  | 
2167  |  |         // We are waking up a writer.  This writer may be racing against  | 
2168  |  |         // an already awake reader for the lock.  We want the  | 
2169  |  |         // writer to usually win this race,  | 
2170  |  |         // because if it doesn't, we can potentially keep taking a reader  | 
2171  |  |         // perpetually and writers will starve.  Worse than  | 
2172  |  |         // that, this can also starve other readers if kMuWrWait gets set  | 
2173  |  |         // later.  | 
2174  | 0  |         wr_wait = kMuWrWait;  | 
2175  | 0  |       } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) { | 
2176  |  |         // we found a waiter w to wake on a previous iteration and either it's  | 
2177  |  |         // a writer, or we've searched the entire list so we have all the  | 
2178  |  |         // readers.  | 
2179  | 0  |         if (pw == nullptr) {  // if w's predecessor is unknown, it must be h | 
2180  | 0  |           pw = h;  | 
2181  | 0  |         }  | 
2182  | 0  |       } else { | 
2183  |  |         // At this point we don't know all the waiters to wake, and the first  | 
2184  |  |         // waiter has a condition or is a reader.  We avoid searching over  | 
2185  |  |         // waiters we've searched on previous iterations by starting at  | 
2186  |  |         // old_h if it's set.  If old_h==h, there's no one to wake up at all.  | 
2187  | 0  |         if (old_h == h) {  // we've searched before, and nothing's new | 
2188  |  |                            // so there's no one to wake.  | 
2189  | 0  |           intptr_t nv = (v & ~(kMuReader | kMuWriter | kMuWrWait));  | 
2190  | 0  |           h->readers = 0;  | 
2191  | 0  |           h->maybe_unlocking = false;  // finished unlocking  | 
2192  | 0  |           if (waitp != nullptr) {      // we must queue ourselves and sleep | 
2193  | 0  |             PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);  | 
2194  | 0  |             nv &= kMuLow;  | 
2195  | 0  |             if (new_h != nullptr) { | 
2196  | 0  |               nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);  | 
2197  | 0  |             }  // else new_h could be nullptr if we queued ourselves on a  | 
2198  |  |                // CondVar  | 
2199  | 0  |           }  | 
2200  |  |           // release spinlock & lock  | 
2201  |  |           // can release with a store because there were waiters  | 
2202  | 0  |           mu_.store(nv, std::memory_order_release);  | 
2203  | 0  |           break;  | 
2204  | 0  |         }  | 
2205  |  |  | 
2206  |  |         // set up to walk the list  | 
2207  | 0  |         PerThreadSynch* w_walk;   // current waiter during list walk  | 
2208  | 0  |         PerThreadSynch* pw_walk;  // previous waiter during list walk  | 
2209  | 0  |         if (old_h != nullptr) {  // we've searched up to old_h before | 
2210  | 0  |           pw_walk = old_h;  | 
2211  | 0  |           w_walk = old_h->next;  | 
2212  | 0  |         } else {  // no prior search, start at beginning | 
2213  | 0  |           pw_walk =  | 
2214  | 0  |               nullptr;  // h->next's predecessor may change; don't record it  | 
2215  | 0  |           w_walk = h->next;  | 
2216  | 0  |         }  | 
2217  |  |  | 
2218  | 0  |         h->may_skip = false;  // ensure we never skip past h in future searches  | 
2219  |  |                               // even if other waiters are queued after it.  | 
2220  | 0  |         ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head");  | 
2221  |  |  | 
2222  | 0  |         h->maybe_unlocking = true;  // we're about to scan the waiter list  | 
2223  |  |                                     // without the spinlock held.  | 
2224  |  |                                     // Enqueue must be conservative about  | 
2225  |  |                                     // priority queuing.  | 
2226  |  |  | 
2227  |  |         // We must release the spinlock to evaluate the conditions.  | 
2228  | 0  |         mu_.store(v, std::memory_order_release);  // release just spinlock  | 
2229  |  |         // can release with a store because there were waiters  | 
2230  |  |  | 
2231  |  |         // h is the last waiter queued, and w_walk the first unsearched waiter.  | 
2232  |  |         // Without the spinlock, the locations mu_ and h->next may now change  | 
2233  |  |         // underneath us, but since we hold the lock itself, the only legal  | 
2234  |  |         // change is to add waiters between h and w_walk.  Therefore, it's safe  | 
2235  |  |         // to walk the path from w_walk to h inclusive. (TryRemove() can remove  | 
2236  |  |         // a waiter anywhere, but it acquires both the spinlock and the Mutex)  | 
2237  |  |  | 
2238  | 0  |         old_h = h;  // remember we searched to here  | 
2239  |  |  | 
2240  |  |         // Walk the path up to and including h looking for waiters we can wake.  | 
2241  | 0  |         while (pw_walk != h) { | 
2242  | 0  |           w_walk->wake = false;  | 
2243  | 0  |           if (w_walk->waitp->cond ==  | 
2244  | 0  |                   nullptr ||  // no condition => vacuously true OR  | 
2245  |  |                               // this thread's condition is true  | 
2246  | 0  |               EvalConditionIgnored(this, w_walk->waitp->cond)) { | 
2247  | 0  |             if (w == nullptr) { | 
2248  | 0  |               w_walk->wake = true;  // can wake this waiter  | 
2249  | 0  |               w = w_walk;  | 
2250  | 0  |               pw = pw_walk;  | 
2251  | 0  |               if (w_walk->waitp->how == kExclusive) { | 
2252  | 0  |                 wr_wait = kMuWrWait;  | 
2253  | 0  |                 break;  // bail if waking this writer  | 
2254  | 0  |               }  | 
2255  | 0  |             } else if (w_walk->waitp->how == kShared) {  // wake if a reader | 
2256  | 0  |               w_walk->wake = true;  | 
2257  | 0  |             } else {  // writer with true condition | 
2258  | 0  |               wr_wait = kMuWrWait;  | 
2259  | 0  |             }  | 
2260  | 0  |           }  | 
2261  | 0  |           if (w_walk->wake) {  // we're waking reader w_walk | 
2262  | 0  |             pw_walk = w_walk;  // don't skip similar waiters  | 
2263  | 0  |           } else {             // not waking; skip as much as possible | 
2264  | 0  |             pw_walk = Skip(w_walk);  | 
2265  | 0  |           }  | 
2266  |  |           // If pw_walk == h, then load of pw_walk->next can race with  | 
2267  |  |           // concurrent write in Enqueue(). However, at the same time  | 
2268  |  |           // we do not need to do the load, because we will bail out  | 
2269  |  |           // from the loop anyway.  | 
2270  | 0  |           if (pw_walk != h) { | 
2271  | 0  |             w_walk = pw_walk->next;  | 
2272  | 0  |           }  | 
2273  | 0  |         }  | 
2274  |  |  | 
2275  | 0  |         continue;  // restart for(;;)-loop to wake up w or to find more waiters  | 
2276  | 0  |       }  | 
2277  | 0  |       ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor");  | 
2278  |  |       // The first (and perhaps only) waiter we've chosen to wake is w, whose  | 
2279  |  |       // predecessor is pw.  If w is a reader, we must wake all the other  | 
2280  |  |       // waiters with wake==true as well.  We may also need to queue  | 
2281  |  |       // ourselves if waitp != null.  The spinlock and the lock are still  | 
2282  |  |       // held.  | 
2283  |  |  | 
2284  |  |       // This traverses the list in [ pw->next, h ], where h is the head,  | 
2285  |  |       // removing all elements with wake==true and placing them in the  | 
2286  |  |       // singly-linked list wake_list.  Returns the new head.  | 
2287  | 0  |       h = DequeueAllWakeable(h, pw, &wake_list);  | 
2288  |  |  | 
2289  | 0  |       intptr_t nv = (v & kMuEvent) | kMuDesig;  | 
2290  |  |       // assume no waiters left,  | 
2291  |  |       // set kMuDesig for INV1a  | 
2292  |  |  | 
2293  | 0  |       if (waitp != nullptr) {  // we must queue ourselves and sleep | 
2294  | 0  |         h = Enqueue(h, waitp, v, kMuIsCond);  | 
2295  |  |         // h is new last waiter; could be null if we queued ourselves on a  | 
2296  |  |         // CondVar  | 
2297  | 0  |       }  | 
2298  |  |  | 
2299  | 0  |       ABSL_RAW_CHECK(wake_list != kPerThreadSynchNull,  | 
2300  | 0  |                      "unexpected empty wake list");  | 
2301  |  |  | 
2302  | 0  |       if (h != nullptr) {  // there are waiters left | 
2303  | 0  |         h->readers = 0;  | 
2304  | 0  |         h->maybe_unlocking = false;  // finished unlocking  | 
2305  | 0  |         nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);  | 
2306  | 0  |       }  | 
2307  |  |  | 
2308  |  |       // release both spinlock & lock  | 
2309  |  |       // can release with a store because there were waiters  | 
2310  | 0  |       mu_.store(nv, std::memory_order_release);  | 
2311  | 0  |       break;  // out of for(;;)-loop  | 
2312  | 0  |     }  | 
2313  |  |     // aggressive here; no one can proceed till we do  | 
2314  | 0  |     c = synchronization_internal::MutexDelay(c, AGGRESSIVE);  | 
2315  | 0  |   }  // end of for(;;)-loop  | 
2316  |  |  | 
2317  | 0  |   if (wake_list != kPerThreadSynchNull) { | 
2318  | 0  |     int64_t total_wait_cycles = 0;  | 
2319  | 0  |     int64_t max_wait_cycles = 0;  | 
2320  | 0  |     int64_t now = CycleClock::Now();  | 
2321  | 0  |     do { | 
2322  |  |       // Profile lock contention events only if the waiter was trying to acquire  | 
2323  |  |       // the lock, not waiting on a condition variable or Condition.  | 
2324  | 0  |       if (!wake_list->cond_waiter) { | 
2325  | 0  |         int64_t cycles_waited =  | 
2326  | 0  |             (now - wake_list->waitp->contention_start_cycles);  | 
2327  | 0  |         total_wait_cycles += cycles_waited;  | 
2328  | 0  |         if (max_wait_cycles == 0) max_wait_cycles = cycles_waited;  | 
2329  | 0  |         wake_list->waitp->contention_start_cycles = now;  | 
2330  | 0  |         wake_list->waitp->should_submit_contention_data = true;  | 
2331  | 0  |       }  | 
2332  | 0  |       wake_list = Wakeup(wake_list);  // wake waiters  | 
2333  | 0  |     } while (wake_list != kPerThreadSynchNull);  | 
2334  | 0  |     if (total_wait_cycles > 0) { | 
2335  | 0  |       mutex_tracer("slow release", this, total_wait_cycles); | 
2336  | 0  |       ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);  | 
2337  | 0  |       submit_profile_data(total_wait_cycles);  | 
2338  | 0  |       ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);  | 
2339  | 0  |     }  | 
2340  | 0  |   }  | 
2341  | 0  | }  | 
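// Illustrative sketch of consuming the contention data submitted above,
// assuming the RegisterMutexProfiler() hook declared in
// absl/synchronization/mutex.h; RecordContention and InstallMutexProfiler are
// hypothetical names.
#include <atomic>
#include <cstdint>
#include "absl/synchronization/mutex.h"

namespace {
std::atomic<int64_t> g_total_contention_cycles{0};

// Receives the total_wait_cycles value passed to submit_profile_data() above.
void RecordContention(int64_t wait_cycles) {
  g_total_contention_cycles.fetch_add(wait_cycles, std::memory_order_relaxed);
}
}  // namespace

void InstallMutexProfiler() {
  absl::RegisterMutexProfiler(RecordContention);  // typically called once at startup
}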
2342  |  |  | 
2343  |  | // Used by CondVar implementation to reacquire mutex after waking from  | 
2344  |  | // condition variable.  This routine is used instead of Lock() because the  | 
2345  |  | // waiting thread may have been moved from the condition variable queue to the  | 
2346  |  | // mutex queue without a wakeup, by Trans().  In that case, when the thread is  | 
2347  |  | // finally woken, the woken thread will believe it has been woken from the  | 
2348  |  | // condition variable (i.e. its PC will be in the CondVar code), when  | 
2349  |  | // in fact it has just been woken from the mutex.  Thus, it must enter the slow  | 
2350  |  | // path of the mutex in the same state as if it had just woken from the mutex.  | 
2351  |  | // That is, it must ensure that kMuDesig is cleared (INV1b).  | 
2352  | 0  | void Mutex::Trans(MuHow how) { | 
2353  | 0  |   this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);  | 
2354  | 0  | }  | 
2355  |  |  | 
2356  |  | // Used by CondVar implementation to effectively wake thread w from the  | 
2357  |  | // condition variable.  If this mutex is free, we simply wake the thread.  | 
2358  |  | // It will later acquire the mutex with high probability.  Otherwise, we  | 
2359  |  | // enqueue thread w on this mutex.  | 
2360  | 0  | void Mutex::Fer(PerThreadSynch* w) { | 
2361  | 0  |   SchedulingGuard::ScopedDisable disable_rescheduling;  | 
2362  | 0  |   int c = 0;  | 
2363  | 0  |   ABSL_RAW_CHECK(w->waitp->cond == nullptr,  | 
2364  | 0  |                  "Mutex::Fer while waiting on Condition");  | 
2365  | 0  |   ABSL_RAW_CHECK(w->waitp->cv_word == nullptr,  | 
2366  | 0  |                  "Mutex::Fer with pending CondVar queueing");  | 
2367  |  |   // The CondVar timeout is not relevant for the Mutex wait.  | 
2368  | 0  |   w->waitp->timeout = {}; | 
2369  | 0  |   for (;;) { | 
2370  | 0  |     intptr_t v = mu_.load(std::memory_order_relaxed);  | 
2371  |  |     // Note: must not queue if the mutex is unlocked (nobody will wake it).  | 
2372  |  |     // For example, we can have only kMuWait (conditional) or maybe  | 
2373  |  |     // kMuWait|kMuWrWait.  | 
2374  |  |     // conflicting != 0 implies that the waking thread cannot currently take  | 
2375  |  |     // the mutex, which in turn implies that someone else has it and can wake  | 
2376  |  |     // us if we queue.  | 
2377  | 0  |     const intptr_t conflicting =  | 
2378  | 0  |         kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader);  | 
2379  | 0  |     if ((v & conflicting) == 0) { | 
2380  | 0  |       w->next = nullptr;  | 
2381  | 0  |       w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);  | 
2382  | 0  |       IncrementSynchSem(this, w);  | 
2383  | 0  |       return;  | 
2384  | 0  |     } else { | 
2385  | 0  |       if ((v & (kMuSpin | kMuWait)) == 0) {  // no waiters | 
2386  |  |         // This thread tries to become the one and only waiter.  | 
2387  | 0  |         PerThreadSynch* new_h =  | 
2388  | 0  |             Enqueue(nullptr, w->waitp, v, kMuIsCond | kMuIsFer);  | 
2389  | 0  |         ABSL_RAW_CHECK(new_h != nullptr,  | 
2390  | 0  |                        "Enqueue failed");  // we must queue ourselves  | 
2391  | 0  |         if (mu_.compare_exchange_strong(  | 
2392  | 0  |                 v, reinterpret_cast<intptr_t>(new_h) | (v & kMuLow) | kMuWait,  | 
2393  | 0  |                 std::memory_order_release, std::memory_order_relaxed)) { | 
2394  | 0  |           return;  | 
2395  | 0  |         }  | 
2396  | 0  |       } else if ((v & kMuSpin) == 0 &&  | 
2397  | 0  |                  mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) { | 
2398  | 0  |         PerThreadSynch* h = GetPerThreadSynch(v);  | 
2399  | 0  |         PerThreadSynch* new_h = Enqueue(h, w->waitp, v, kMuIsCond | kMuIsFer);  | 
2400  | 0  |         ABSL_RAW_CHECK(new_h != nullptr,  | 
2401  | 0  |                        "Enqueue failed");  // we must queue ourselves  | 
2402  | 0  |         do { | 
2403  | 0  |           v = mu_.load(std::memory_order_relaxed);  | 
2404  | 0  |         } while (!mu_.compare_exchange_weak(  | 
2405  | 0  |             v,  | 
2406  | 0  |             (v & kMuLow & ~kMuSpin) | kMuWait |  | 
2407  | 0  |                 reinterpret_cast<intptr_t>(new_h),  | 
2408  | 0  |             std::memory_order_release, std::memory_order_relaxed));  | 
2409  | 0  |         return;  | 
2410  | 0  |       }  | 
2411  | 0  |     }  | 
2412  | 0  |     c = synchronization_internal::MutexDelay(c, GENTLE);  | 
2413  | 0  |   }  | 
2414  | 0  | }  | 
2415  |  |  | 
2416  | 0  | void Mutex::AssertHeld() const { | 
2417  | 0  |   if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) { | 
2418  | 0  |     SynchEvent* e = GetSynchEvent(this);  | 
2419  | 0  |     ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",  | 
2420  | 0  |                  static_cast<const void*>(this), (e == nullptr ? "" : e->name));  | 
2421  | 0  |   }  | 
2422  | 0  | }  | 
2423  |  |  | 
2424  | 0  | void Mutex::AssertReaderHeld() const { | 
2425  | 0  |   if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) { | 
2426  | 0  |     SynchEvent* e = GetSynchEvent(this);  | 
2427  | 0  |     ABSL_RAW_LOG(FATAL,  | 
2428  | 0  |                  "thread should hold at least a read lock on Mutex %p %s",  | 
2429  | 0  |                  static_cast<const void*>(this), (e == nullptr ? "" : e->name));  | 
2430  | 0  |   }  | 
2431  | 0  | }  | 
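// Illustrative sketch of the assertions above used to check a locking
// precondition at run time; Counter and its members are hypothetical names.
#include "absl/synchronization/mutex.h"

class Counter {
 public:
  void Increment() {
    absl::MutexLock lock(&mu_);
    IncrementLocked();
  }

 private:
  // Pre: caller holds mu_ in write mode; otherwise AssertHeld() logs FATAL.
  void IncrementLocked() {
    mu_.AssertHeld();
    ++value_;
  }

  absl::Mutex mu_;
  int value_ = 0;  // guarded by mu_
};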
2432  |  |  | 
2433  |  | // -------------------------------- condition variables  | 
2434  |  | static const intptr_t kCvSpin = 0x0001L;   // spinlock protects waiter list  | 
2435  |  | static const intptr_t kCvEvent = 0x0002L;  // record events  | 
2436  |  |  | 
2437  |  | static const intptr_t kCvLow = 0x0003L;  // low order bits of CV  | 
2438  |  |  | 
2439  |  | // Hack to make constant values available to gdb pretty printer  | 
2440  |  | enum { | 
2441  |  |   kGdbCvSpin = kCvSpin,  | 
2442  |  |   kGdbCvEvent = kCvEvent,  | 
2443  |  |   kGdbCvLow = kCvLow,  | 
2444  |  | };  | 
2445  |  |  | 
2446  |  | static_assert(PerThreadSynch::kAlignment > kCvLow,  | 
2447  |  |               "PerThreadSynch::kAlignment must be greater than kCvLow");  | 
2448  |  |  | 
2449  | 0  | void CondVar::EnableDebugLog(const char* name) { | 
2450  | 0  |   SynchEvent* e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);  | 
2451  | 0  |   e->log = true;  | 
2452  | 0  |   UnrefSynchEvent(e);  | 
2453  | 0  | }  | 
2454  |  |  | 
2455  | 0  | CondVar::~CondVar() { | 
2456  | 0  |   if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) { | 
2457  | 0  |     ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);  | 
2458  | 0  |   }  | 
2459  | 0  | }  | 
2460  |  |  | 
2461  |  | // Remove thread s from the list of waiters on this condition variable.  | 
2462  | 0  | void CondVar::Remove(PerThreadSynch* s) { | 
2463  | 0  |   SchedulingGuard::ScopedDisable disable_rescheduling;  | 
2464  | 0  |   intptr_t v;  | 
2465  | 0  |   int c = 0;  | 
2466  | 0  |   for (v = cv_.load(std::memory_order_relaxed);;  | 
2467  | 0  |        v = cv_.load(std::memory_order_relaxed)) { | 
2468  | 0  |     if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock  | 
2469  | 0  |         cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,  | 
2470  | 0  |                                     std::memory_order_relaxed)) { | 
2471  | 0  |       PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);  | 
2472  | 0  |       if (h != nullptr) { | 
2473  | 0  |         PerThreadSynch* w = h;  | 
2474  | 0  |         while (w->next != s && w->next != h) {  // search for thread | 
2475  | 0  |           w = w->next;  | 
2476  | 0  |         }  | 
2477  | 0  |         if (w->next == s) {  // found thread; remove it | 
2478  | 0  |           w->next = s->next;  | 
2479  | 0  |           if (h == s) { | 
2480  | 0  |             h = (w == s) ? nullptr : w;  | 
2481  | 0  |           }  | 
2482  | 0  |           s->next = nullptr;  | 
2483  | 0  |           s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);  | 
2484  | 0  |         }  | 
2485  | 0  |       }  | 
2486  |  |       // release spinlock  | 
2487  | 0  |       cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),  | 
2488  | 0  |                 std::memory_order_release);  | 
2489  | 0  |       return;  | 
2490  | 0  |     } else { | 
2491  |  |       // try again after a delay  | 
2492  | 0  |       c = synchronization_internal::MutexDelay(c, GENTLE);  | 
2493  | 0  |     }  | 
2494  | 0  |   }  | 
2495  | 0  | }  | 
2496  |  |  | 
2497  |  | // Queue thread waitp->thread on condition variable word cv_word using  | 
2498  |  | // wait parameters waitp.  | 
2499  |  | // We split this into a separate routine, rather than simply doing it as part  | 
2500  |  | // of WaitCommon().  If we were to queue ourselves on the condition variable  | 
2501  |  | // before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via  | 
2502  |  | // the logging code, or via a Condition function) and might potentially attempt  | 
2503  |  | // to block this thread.  That would be a problem if the thread were already on  | 
2504  |  | // a condition variable waiter queue.  Thus, we use the waitp->cv_word to tell  | 
2505  |  | // the unlock code to call CondVarEnqueue() to queue the thread on the condition  | 
2506  |  | // variable queue just before the mutex is to be unlocked, and (most  | 
2507  |  | // importantly) after any call to an external routine that might re-enter the  | 
2508  |  | // mutex code.  | 
2509  | 0  | static void CondVarEnqueue(SynchWaitParams* waitp) { | 
2510  |  |   // This thread might be transferred to the Mutex queue by Fer() when  | 
2511  |  |   // we are woken.  To make sure that is what happens, Enqueue() doesn't  | 
2512  |  |   // call CondVarEnqueue() again but instead uses its normal code.  We  | 
2513  |  |   // must do this before we queue ourselves so that cv_word will be null  | 
2514  |  |   // when seen by the dequeuer, who may wish immediately to requeue  | 
2515  |  |   // this thread on another queue.  | 
2516  | 0  |   std::atomic<intptr_t>* cv_word = waitp->cv_word;  | 
2517  | 0  |   waitp->cv_word = nullptr;  | 
2518  |  |  | 
2519  | 0  |   intptr_t v = cv_word->load(std::memory_order_relaxed);  | 
2520  | 0  |   int c = 0;  | 
2521  | 0  |   while ((v & kCvSpin) != 0 ||  // acquire spinlock  | 
2522  | 0  |          !cv_word->compare_exchange_weak(v, v | kCvSpin,  | 
2523  | 0  |                                          std::memory_order_acquire,  | 
2524  | 0  |                                          std::memory_order_relaxed)) { | 
2525  | 0  |     c = synchronization_internal::MutexDelay(c, GENTLE);  | 
2526  | 0  |     v = cv_word->load(std::memory_order_relaxed);  | 
2527  | 0  |   }  | 
2528  | 0  |   ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");  | 
2529  | 0  |   waitp->thread->waitp = waitp;  // prepare ourselves for waiting  | 
2530  | 0  |   PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);  | 
2531  | 0  |   if (h == nullptr) {  // add this thread to waiter list | 
2532  | 0  |     waitp->thread->next = waitp->thread;  | 
2533  | 0  |   } else { | 
2534  | 0  |     waitp->thread->next = h->next;  | 
2535  | 0  |     h->next = waitp->thread;  | 
2536  | 0  |   }  | 
2537  | 0  |   waitp->thread->state.store(PerThreadSynch::kQueued,  | 
2538  | 0  |                              std::memory_order_relaxed);  | 
2539  | 0  |   cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread),  | 
2540  | 0  |                  std::memory_order_release);  | 
2541  | 0  | }  | 
2542  |  |  | 
2543  | 0  | bool CondVar::WaitCommon(Mutex* mutex, KernelTimeout t) { | 
2544  | 0  |   bool rc = false;  // return value; true iff we timed out  | 
2545  |  |  | 
2546  | 0  |   intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);  | 
2547  | 0  |   Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;  | 
2548  | 0  |   ABSL_TSAN_MUTEX_PRE_UNLOCK(mutex, TsanFlags(mutex_how));  | 
2549  |  |  | 
2550  |  |   // maybe trace this call  | 
2551  | 0  |   intptr_t v = cv_.load(std::memory_order_relaxed);  | 
2552  | 0  |   cond_var_tracer("Wait", this); | 
2553  | 0  |   if ((v & kCvEvent) != 0) { | 
2554  | 0  |     PostSynchEvent(this, SYNCH_EV_WAIT);  | 
2555  | 0  |   }  | 
2556  |  |  | 
2557  |  |   // Release mu and wait on condition variable.  | 
2558  | 0  |   SynchWaitParams waitp(mutex_how, nullptr, t, mutex,  | 
2559  | 0  |                         Synch_GetPerThreadAnnotated(mutex), &cv_);  | 
2560  |  |   // UnlockSlow() will call CondVarEnqueue() just before releasing the  | 
2561  |  |   // Mutex, thus queuing this thread on the condition variable.  See  | 
2562  |  |   // CondVarEnqueue() for the reasons.  | 
2563  | 0  |   mutex->UnlockSlow(&waitp);  | 
2564  |  |  | 
2565  |  |   // wait for signal  | 
2566  | 0  |   while (waitp.thread->state.load(std::memory_order_acquire) ==  | 
2567  | 0  |          PerThreadSynch::kQueued) { | 
2568  | 0  |     if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) { | 
2569  |  |       // DecrementSynchSem returned due to timeout.  | 
2570  |  |       // Now we will either (1) remove ourselves from the wait list in Remove  | 
2571  |  |       // below, in which case Remove will set thread.state = kAvailable and  | 
2572  |  |       // we will not call DecrementSynchSem again; or (2) Signal/SignalAll  | 
2573  |  |       // has removed us concurrently and is calling Wakeup, which will set  | 
2574  |  |       // thread.state = kAvailable and post to the semaphore.  | 
2575  |  |       // It's important to reset the timeout for the case (2) because otherwise  | 
2576  |  |       // we can live-lock in this loop since DecrementSynchSem will always  | 
2577  |  |       // return immediately due to timeout, but Signal/SignalAll may not have  | 
2578  |  |       // set thread.state = kAvailable yet (and may not be scheduled, due to  | 
2579  |  |       // thread priorities or other scheduler artifacts).  | 
2580  |  |       // Note this could also be resolved if Signal/SignalAll were to set  | 
2581  |  |       // thread.state = kAvailable while holding the wait list spin lock.  | 
2582  |  |       // But this can't be easily done for SignalAll since it grabs the whole  | 
2583  |  |       // wait list with a single compare-exchange and does not really grab  | 
2584  |  |       // the spin lock.  | 
2585  | 0  |       t = KernelTimeout::Never();  | 
2586  | 0  |       this->Remove(waitp.thread);  | 
2587  | 0  |       rc = true;  | 
2588  | 0  |     }  | 
2589  | 0  |   }  | 
2590  |  |  | 
2591  | 0  |   ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");  | 
2592  | 0  |   waitp.thread->waitp = nullptr;  // cleanup  | 
2593  |  |  | 
2594  |  |   // maybe trace this call  | 
2595  | 0  |   cond_var_tracer("Unwait", this); | 
2596  | 0  |   if ((v & kCvEvent) != 0) { | 
2597  | 0  |     PostSynchEvent(this, SYNCH_EV_WAIT_RETURNING);  | 
2598  | 0  |   }  | 
2599  |  |  | 
2600  |  |   // From synchronization point of view Wait is unlock of the mutex followed  | 
2601  |  |   // by lock of the mutex. We've annotated start of unlock in the beginning  | 
2602  |  |   // of the function. Now, finish unlock and annotate lock of the mutex.  | 
2603  |  |   // (Trans is effectively lock).  | 
2604  | 0  |   ABSL_TSAN_MUTEX_POST_UNLOCK(mutex, TsanFlags(mutex_how));  | 
2605  | 0  |   ABSL_TSAN_MUTEX_PRE_LOCK(mutex, TsanFlags(mutex_how));  | 
2606  | 0  |   mutex->Trans(mutex_how);  // Reacquire mutex  | 
2607  | 0  |   ABSL_TSAN_MUTEX_POST_LOCK(mutex, TsanFlags(mutex_how), 0);  | 
2608  | 0  |   return rc;  | 
2609  | 0  | }  | 
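// Illustrative caller-side sketch: the bool computed above (true iff the wait
// timed out) surfaces through the public timed-wait entry points such as
// CondVar::WaitWithDeadline().  g_mu, g_cv, g_ready and WaitReady are
// hypothetical names.
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"

absl::Mutex g_mu;
absl::CondVar g_cv;
bool g_ready = false;  // guarded by g_mu

// Returns the value of g_ready observed when it became true or when the
// deadline expired.
bool WaitReady(absl::Duration timeout) {
  absl::MutexLock lock(&g_mu);
  const absl::Time deadline = absl::Now() + timeout;
  while (!g_ready) {
    if (g_cv.WaitWithDeadline(&g_mu, deadline)) {
      break;  // timed out; fall through and report the current state
    }
  }
  return g_ready;
}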
2610  |  |  | 
2611  | 0  | void CondVar::Signal() { | 
2612  | 0  |   SchedulingGuard::ScopedDisable disable_rescheduling;  | 
2613  | 0  |   ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);  | 
2614  | 0  |   intptr_t v;  | 
2615  | 0  |   int c = 0;  | 
2616  | 0  |   for (v = cv_.load(std::memory_order_relaxed); v != 0;  | 
2617  | 0  |        v = cv_.load(std::memory_order_relaxed)) { | 
2618  | 0  |     if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock  | 
2619  | 0  |         cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,  | 
2620  | 0  |                                     std::memory_order_relaxed)) { | 
2621  | 0  |       PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);  | 
2622  | 0  |       PerThreadSynch* w = nullptr;  | 
2623  | 0  |       if (h != nullptr) {  // remove first waiter | 
2624  | 0  |         w = h->next;  | 
2625  | 0  |         if (w == h) { | 
2626  | 0  |           h = nullptr;  | 
2627  | 0  |         } else { | 
2628  | 0  |           h->next = w->next;  | 
2629  | 0  |         }  | 
2630  | 0  |       }  | 
2631  |  |       // release spinlock  | 
2632  | 0  |       cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),  | 
2633  | 0  |                 std::memory_order_release);  | 
2634  | 0  |       if (w != nullptr) { | 
2635  | 0  |         w->waitp->cvmu->Fer(w);  // wake waiter, if there was one  | 
2636  | 0  |         cond_var_tracer("Signal wakeup", this); | 
2637  | 0  |       }  | 
2638  | 0  |       if ((v & kCvEvent) != 0) { | 
2639  | 0  |         PostSynchEvent(this, SYNCH_EV_SIGNAL);  | 
2640  | 0  |       }  | 
2641  | 0  |       ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);  | 
2642  | 0  |       return;  | 
2643  | 0  |     } else { | 
2644  | 0  |       c = synchronization_internal::MutexDelay(c, GENTLE);  | 
2645  | 0  |     }  | 
2646  | 0  |   }  | 
2647  | 0  |   ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);  | 
2648  | 0  | }  | 
2649  |  |  | 
2650  | 0  | void CondVar::SignalAll() { | 
2651  | 0  |   ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);  | 
2652  | 0  |   intptr_t v;  | 
2653  | 0  |   int c = 0;  | 
2654  | 0  |   for (v = cv_.load(std::memory_order_relaxed); v != 0;  | 
2655  | 0  |        v = cv_.load(std::memory_order_relaxed)) { | 
2656  |  |     // empty the list if spinlock free  | 
2657  |  |     // We do this by simply setting the list to empty using  | 
2658  |  |     // compare and swap.   We then have the entire list in our hands,  | 
2659  |  |     // which cannot be changing since we grabbed it while no one  | 
2660  |  |     // held the lock.  | 
2661  | 0  |     if ((v & kCvSpin) == 0 &&  | 
2662  | 0  |         cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,  | 
2663  | 0  |                                     std::memory_order_relaxed)) { | 
2664  | 0  |       PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);  | 
2665  | 0  |       if (h != nullptr) { | 
2666  | 0  |         PerThreadSynch* w;  | 
2667  | 0  |         PerThreadSynch* n = h->next;  | 
2668  | 0  |         do {  // for every thread, wake it up | 
2669  | 0  |           w = n;  | 
2670  | 0  |           n = n->next;  | 
2671  | 0  |           w->waitp->cvmu->Fer(w);  | 
2672  | 0  |         } while (w != h);  | 
2673  | 0  |         cond_var_tracer("SignalAll wakeup", this); | 
2674  | 0  |       }  | 
2675  | 0  |       if ((v & kCvEvent) != 0) { | 
2676  | 0  |         PostSynchEvent(this, SYNCH_EV_SIGNALALL);  | 
2677  | 0  |       }  | 
2678  | 0  |       ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);  | 
2679  | 0  |       return;  | 
2680  | 0  |     } else { | 
2681  |  |       // try again after a delay  | 
2682  | 0  |       c = synchronization_internal::MutexDelay(c, GENTLE);  | 
2683  | 0  |     }  | 
2684  | 0  |   }  | 
2685  | 0  |   ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);  | 
2686  | 0  | }  | 
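// Illustrative caller-side sketch of the Wait/Signal protocol implemented by
// WaitCommon(), Signal() and SignalAll() above; Channel, Push and Pop are
// hypothetical names.
#include <deque>
#include "absl/synchronization/mutex.h"

class Channel {
 public:
  void Push(int v) {
    absl::MutexLock lock(&mu_);
    items_.push_back(v);
    cv_.Signal();  // wakes one waiter; the handoff goes through Mutex::Fer()
  }

  int Pop() {
    absl::MutexLock lock(&mu_);
    while (items_.empty()) {  // re-check: wakeups may be spurious
      cv_.Wait(&mu_);         // atomically releases mu_ and blocks
    }
    int v = items_.front();
    items_.pop_front();
    return v;
  }

 private:
  absl::Mutex mu_;
  absl::CondVar cv_;
  std::deque<int> items_;  // guarded by mu_
};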
2687  |  |  | 
2688  | 0  | void ReleasableMutexLock::Release() { | 
2689  | 0  |   ABSL_RAW_CHECK(this->mu_ != nullptr,  | 
2690  | 0  |                  "ReleasableMutexLock::Release may only be called once");  | 
2691  | 0  |   this->mu_->Unlock();  | 
2692  | 0  |   this->mu_ = nullptr;  | 
2693  | 0  | }  | 
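// Illustrative sketch of the early-release pattern Release() supports;
// g_work_mu, g_has_work and DrainIfWorkPending are hypothetical names.
#include "absl/synchronization/mutex.h"

absl::Mutex g_work_mu;
bool g_has_work = false;  // guarded by g_work_mu

void DrainIfWorkPending() {
  absl::ReleasableMutexLock lock(&g_work_mu);
  if (!g_has_work) {
    return;  // destructor unlocks g_work_mu
  }
  g_has_work = false;
  lock.Release();  // drop the lock early; the destructor will not unlock again
  // ... long-running work that does not need g_work_mu ...
}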
2694  |  |  | 
2695  |  | #ifdef ABSL_HAVE_THREAD_SANITIZER  | 
2696  |  | extern "C" void __tsan_read1(void* addr);  | 
2697  |  | #else  | 
2698  |  | #define __tsan_read1(addr)  // do nothing if TSan not enabled  | 
2699  |  | #endif  | 
2700  |  |  | 
2701  |  | // A function that just returns its argument, dereferenced  | 
2702  | 0  | static bool Dereference(void* arg) { | 
2703  |  |   // ThreadSanitizer does not instrument this file for memory accesses.  | 
2704  |  |   // This function dereferences a user variable that can participate  | 
2705  |  |   // in a data race, so we need to manually tell TSan about this memory access.  | 
2706  | 0  |   __tsan_read1(arg);  | 
2707  | 0  |   return *(static_cast<bool*>(arg));  | 
2708  | 0  | }  | 
2709  |  |  | 
2710  |  | ABSL_CONST_INIT const Condition Condition::kTrue;  | 
2711  |  |  | 
2712  |  | Condition::Condition(bool (*func)(void*), void* arg)  | 
2713  | 0  |     : eval_(&CallVoidPtrFunction), arg_(arg) { | 
2714  | 0  |   static_assert(sizeof(&func) <= sizeof(callback_),  | 
2715  | 0  |                 "An overlarge function pointer passed to Condition.");  | 
2716  | 0  |   StoreCallback(func);  | 
2717  | 0  | }  | 
2718  |  |  | 
2719  | 0  | bool Condition::CallVoidPtrFunction(const Condition* c) { | 
2720  | 0  |   using FunctionPointer = bool (*)(void*);  | 
2721  | 0  |   FunctionPointer function_pointer;  | 
2722  | 0  |   std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer));  | 
2723  | 0  |   return (*function_pointer)(c->arg_);  | 
2724  | 0  | }  | 
2725  |  |  | 
2726  |  | Condition::Condition(const bool* cond)  | 
2727  |  |     : eval_(CallVoidPtrFunction),  | 
2728  |  |       // const_cast is safe since Dereference does not modify arg  | 
2729  | 0  |       arg_(const_cast<bool*>(cond)) { | 
2730  | 0  |   using FunctionPointer = bool (*)(void*);  | 
2731  | 0  |   const FunctionPointer dereference = Dereference;  | 
2732  | 0  |   StoreCallback(dereference);  | 
2733  | 0  | }  | 
2734  |  |  | 
2735  | 0  | bool Condition::Eval() const { return (*this->eval_)(this); } | 
2736  |  |  | 
2737  | 0  | bool Condition::GuaranteedEqual(const Condition* a, const Condition* b) { | 
2738  | 0  |   if (a == nullptr || b == nullptr) { | 
2739  | 0  |     return a == b;  | 
2740  | 0  |   }  | 
2741  |  |   // Check equality of the representative fields.  | 
2742  | 0  |   return a->eval_ == b->eval_ && a->arg_ == b->arg_ &&  | 
2743  | 0  |          !memcmp(a->callback_, b->callback_, sizeof(a->callback_));  | 
2744  | 0  | }  | 
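// Illustrative sketch of the two Condition forms constructed above (the
// bool (*)(void*) callback and the const bool* wrapper that goes through
// Dereference()), used with Mutex::Await(); g_cond_mu, g_done, g_pending,
// NoWorkPending and WaitForCompletion are hypothetical names.
#include "absl/synchronization/mutex.h"

absl::Mutex g_cond_mu;
bool g_done = false;  // guarded by g_cond_mu
int g_pending = 0;    // guarded by g_cond_mu

bool NoWorkPending(void* arg) {  // matches Condition(bool (*)(void*), void*)
  return *static_cast<int*>(arg) == 0;
}

void WaitForCompletion() {
  absl::MutexLock lock(&g_cond_mu);
  // Form 1: Condition wrapping a const bool*.
  g_cond_mu.Await(absl::Condition(&g_done));
  // Form 2: Condition wrapping a function pointer and argument.
  g_cond_mu.Await(absl::Condition(NoWorkPending, &g_pending));
}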
2745  |  |  | 
2746  |  | ABSL_NAMESPACE_END  | 
2747  |  | }  // namespace absl  |