/src/capnproto/c++/src/kj/mutex.h
Line | Count | Source |
1 | | // Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors |
2 | | // Licensed under the MIT License: |
3 | | // |
4 | | // Permission is hereby granted, free of charge, to any person obtaining a copy |
5 | | // of this software and associated documentation files (the "Software"), to deal |
6 | | // in the Software without restriction, including without limitation the rights |
7 | | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
8 | | // copies of the Software, and to permit persons to whom the Software is |
9 | | // furnished to do so, subject to the following conditions: |
10 | | // |
11 | | // The above copyright notice and this permission notice shall be included in |
12 | | // all copies or substantial portions of the Software. |
13 | | // |
14 | | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
17 | | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
18 | | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
19 | | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
20 | | // THE SOFTWARE. |
21 | | |
22 | | #pragma once |
23 | | |
24 | | #include "memory.h" |
25 | | #include <inttypes.h> |
26 | | #include "time.h" |
27 | | #include "source-location.h" |
28 | | |
29 | | KJ_BEGIN_HEADER |
30 | | |
31 | | #if __linux__ && !defined(KJ_USE_FUTEX) |
32 | | #define KJ_USE_FUTEX 1 |
33 | | #endif |
34 | | |
35 | | #if !KJ_USE_FUTEX && !_WIN32 && !__CYGWIN__ |
36 | | // We fall back to pthreads when we don't have a better platform-specific primitive. pthreads |
37 | | // mutexes are bloated, though, so we like to avoid them. Hence on Linux we use futex(), and on |
38 | | // Windows we use SRW locks and friends. On Cygwin we prefer the Win32 primitives both because they |
39 | | // are more efficient and because I ran into problems with Cygwin's implementation of RW locks |
40 | | // seeming to allow multiple threads to lock the same mutex (but I didn't investigate very |
41 | | // closely). |
42 | | // |
43 | | // TODO(someday): Write efficient low-level locking primitives for other platforms. |
44 | | #include <pthread.h> |
45 | | #endif |
46 | | |
47 | | namespace kj { |
48 | | |
49 | | class Exception; |
50 | | |
51 | | using LockSourceLocation = NoopSourceLocation; |
52 | | using LockSourceLocationArg = NoopSourceLocation; |
53 | | |
54 | | // ======================================================================================= |
55 | | // Private details -- public interfaces follow below. |
56 | | |
57 | | namespace _ { // private |
58 | | class Mutex { |
59 | | // Internal implementation details. See `MutexGuarded<T>`. |
60 | | |
61 | | struct Waiter; |
62 | | |
63 | | public: |
64 | | Mutex(); |
65 | | ~Mutex(); |
66 | | KJ_DISALLOW_COPY_AND_MOVE(Mutex); |
67 | | |
68 | | enum Exclusivity { |
69 | | EXCLUSIVE, |
70 | | SHARED |
71 | | }; |
72 | | |
73 | | bool lock(Exclusivity exclusivity, Maybe<Duration> timeout, LockSourceLocationArg location); |
74 | | void unlock(Exclusivity exclusivity, Waiter* waiterToSkip = nullptr); |
75 | | |
76 | | void assertLockedByCaller(Exclusivity exclusivity) const; |
77 | | // In debug mode, assert that the mutex is locked by the calling thread, or if that is |
78 | | // non-trivial, assert that the mutex is locked (which should be good enough to catch problems |
79 | | // in unit tests). In non-debug builds, do nothing. |
80 | | |
81 | | class Predicate { |
82 | | public: |
83 | | virtual bool check() = 0; |
84 | | }; |
85 | | |
86 | | void wait(Predicate& predicate, Maybe<Duration> timeout, LockSourceLocationArg location); |
87 | | // If predicate.check() returns false, unlock the mutex until predicate.check() returns true, or |
88 | | // when the timeout (if any) expires. The mutex is always re-locked when this returns regardless |
89 | | // of whether the timeout expired, and including if it throws. |
90 | | // |
91 | | // Requires that the mutex is already exclusively locked before calling. |
92 | | |
93 | | void induceSpuriousWakeupForTest(); |
94 | | // Utility method for mutex-test.c++ which causes a spurious thread wakeup on all threads that |
95 | | // are waiting for a wait() condition. Assuming correct implementation, all those threads |
96 | | // should immediately go back to sleep. |
97 | | |
98 | | #if KJ_USE_FUTEX |
99 | | uint numReadersWaitingForTest() const; |
100 | | // The number of reader locks that are currently blocked on this lock (must be called while |
101 | | // holding the writer lock). This is really only a utility method for mutex-test.c++ so it can |
102 | | // validate certain invariants. |
103 | | #endif |
104 | | |
105 | | private: |
106 | | #if KJ_USE_FUTEX |
107 | | uint futex; |
108 | | // bit 31 (msb) = set if exclusive lock held |
109 | | // bit 30 (msb) = set if threads are waiting for exclusive lock |
110 | | // bits 0-29 = count of readers; If an exclusive lock is held, this is the count of threads |
111 | | // waiting for a read lock, otherwise it is the count of threads that currently hold a read |
112 | | // lock. |
113 | | |
114 | | #ifdef KJ_CONTENTION_WARNING_THRESHOLD |
115 | | bool printContendedReader = false; |
116 | | #endif |
117 | | |
118 | | static constexpr uint EXCLUSIVE_HELD = 1u << 31; |
119 | | static constexpr uint EXCLUSIVE_REQUESTED = 1u << 30; |
120 | | static constexpr uint SHARED_COUNT_MASK = EXCLUSIVE_REQUESTED - 1; |
121 | | |
122 | | #elif _WIN32 || __CYGWIN__ |
123 | | uintptr_t srwLock; // Actually an SRWLOCK, but don't want to #include <windows.h> in header. |
124 | | |
125 | | #else |
126 | | mutable pthread_rwlock_t mutex; |
127 | | #endif |
128 | | |
129 | | struct Waiter { |
130 | | kj::Maybe<Waiter&> next; |
131 | | kj::Maybe<Waiter&>* prev; |
132 | | Predicate& predicate; |
133 | | Maybe<Own<Exception>> exception; |
134 | | #if KJ_USE_FUTEX |
135 | | uint futex; |
136 | | bool hasTimeout; |
137 | | #elif _WIN32 || __CYGWIN__ |
138 | | uintptr_t condvar; |
139 | | // Actually CONDITION_VARIABLE, but don't want to #include <windows.h> in header. |
140 | | #else |
141 | | pthread_cond_t condvar; |
142 | | |
143 | | pthread_mutex_t stupidMutex; |
144 | | // pthread condvars are only compatible with basic pthread mutexes, not rwlocks, for no |
145 | | // particularly good reason. To work around this, we need an extra mutex per condvar. |
146 | | #endif |
147 | | }; |
148 | | |
149 | | kj::Maybe<Waiter&> waitersHead = kj::none; |
150 | | kj::Maybe<Waiter&>* waitersTail = &waitersHead; |
151 | | // linked list of waiters; can only modify under lock |
152 | | |
153 | | inline void addWaiter(Waiter& waiter); |
154 | | inline void removeWaiter(Waiter& waiter); |
155 | | bool checkPredicate(Waiter& waiter); |
156 | | #if _WIN32 || __CYGWIN__ |
157 | | void wakeReadyWaiter(Waiter* waiterToSkip); |
158 | | #endif |
159 | | }; |
160 | | |
161 | | class Once { |
162 | | // Internal implementation details. See `Lazy<T>`. |
163 | | |
164 | | public: |
165 | | #if KJ_USE_FUTEX |
166 | | inline Once(bool startInitialized = false) |
167 | 0 | : futex(startInitialized ? INITIALIZED : UNINITIALIZED) {} |
168 | | #else |
169 | | Once(bool startInitialized = false); |
170 | | ~Once(); |
171 | | #endif |
172 | | KJ_DISALLOW_COPY_AND_MOVE(Once); |
173 | | |
174 | | class Initializer { |
175 | | public: |
176 | | virtual void run() = 0; |
177 | | }; |
178 | | |
179 | | void runOnce(Initializer& init, LockSourceLocationArg location); |
180 | | |
181 | | #if _WIN32 || __CYGWIN__ // TODO(perf): Can we make this inline on win32 somehow? |
182 | | bool isInitialized() noexcept; |
183 | | |
184 | | #else |
185 | 0 | inline bool isInitialized() noexcept { |
186 | 0 | // Fast path check to see if runOnce() would simply return immediately. |
187 | 0 | #if KJ_USE_FUTEX |
188 | 0 | return __atomic_load_n(&futex, __ATOMIC_ACQUIRE) == INITIALIZED; |
189 | 0 | #else |
190 | 0 | return __atomic_load_n(&state, __ATOMIC_ACQUIRE) == INITIALIZED; |
191 | 0 | #endif |
192 | 0 | } |
193 | | #endif |
194 | | |
195 | | void reset(); |
196 | | // Returns the state from initialized to uninitialized. It is an error to call this when |
197 | | // not already initialized, or when runOnce() or isInitialized() might be called concurrently in |
198 | | // another thread. |
199 | | |
200 | | private: |
201 | | #if KJ_USE_FUTEX |
202 | | uint futex; |
203 | | |
204 | | enum State { |
205 | | UNINITIALIZED, |
206 | | INITIALIZING, |
207 | | INITIALIZING_WITH_WAITERS, |
208 | | INITIALIZED |
209 | | }; |
210 | | |
211 | | #elif _WIN32 || __CYGWIN__ |
212 | | uintptr_t initOnce; // Actually an INIT_ONCE, but don't want to #include <windows.h> in header. |
213 | | |
214 | | #else |
215 | | enum State { |
216 | | UNINITIALIZED, |
217 | | INITIALIZED |
218 | | }; |
219 | | State state; |
220 | | pthread_mutex_t mutex; |
221 | | #endif |
222 | | }; |
223 | | |
224 | | } // namespace _ (private) |
225 | | |
226 | | // ======================================================================================= |
227 | | // Public interface |
228 | | |
229 | | template <typename T> |
230 | | class Locked { |
231 | | // Return type for `MutexGuarded<T>::lock()`. `Locked<T>` provides access to the bounded object |
232 | | // and unlocks the mutex when it goes out of scope. |
233 | | |
234 | | public: |
235 | | KJ_DISALLOW_COPY(Locked); |
236 | 0 | inline Locked(): mutex(nullptr), ptr(nullptr) {} |
237 | | inline Locked(Locked&& other): mutex(other.mutex), ptr(other.ptr) { |
238 | | other.mutex = nullptr; |
239 | | other.ptr = nullptr; |
240 | | } |
241 | 867 | inline ~Locked() { |
242 | 867 | if (mutex != nullptr) mutex->unlock(isConst<T>() ? _::Mutex::SHARED : _::Mutex::EXCLUSIVE); |
243 | 867 | } kj::Locked<kj::Maybe<kj::HashMap<unsigned int, kj::Own<capnp::_::SegmentReader, decltype(nullptr)> > > >::~Locked() Line | Count | Source | 241 | 867 | inline ~Locked() { | 242 | 867 | if (mutex != nullptr) mutex->unlock(isConst<T>() ? _::Mutex::SHARED : _::Mutex::EXCLUSIVE); | 243 | 867 | } |
Unexecuted instantiation: kj::Locked<std::__1::deque<kj::_::FiberStack*, std::__1::allocator<kj::_::FiberStack*> > const>::~Locked() Unexecuted instantiation: kj::Locked<std::__1::deque<kj::_::FiberStack*, std::__1::allocator<kj::_::FiberStack*> > >::~Locked() Unexecuted instantiation: kj::Locked<kj::Executor::Impl::State>::~Locked() Unexecuted instantiation: kj::Locked<kj::Executor::Impl::State const>::~Locked() |
244 | | |
245 | 0 | inline Locked& operator=(Locked&& other) { |
246 | 0 | if (mutex != nullptr) mutex->unlock(isConst<T>() ? _::Mutex::SHARED : _::Mutex::EXCLUSIVE); |
247 | 0 | mutex = other.mutex; |
248 | 0 | ptr = other.ptr; |
249 | 0 | other.mutex = nullptr; |
250 | 0 | other.ptr = nullptr; |
251 | 0 | return *this; |
252 | 0 | } |
253 | | |
254 | | inline void release() { |
255 | | if (mutex != nullptr) mutex->unlock(isConst<T>() ? _::Mutex::SHARED : _::Mutex::EXCLUSIVE); |
256 | | mutex = nullptr; |
257 | | ptr = nullptr; |
258 | | } |
259 | | |
260 | 110 | inline T* operator->() { return ptr; }kj::Locked<kj::Maybe<kj::HashMap<unsigned int, kj::Own<capnp::_::SegmentReader, decltype(nullptr)> > > >::operator->() Line | Count | Source | 260 | 110 | inline T* operator->() { return ptr; } |
Unexecuted instantiation: kj::Locked<std::__1::deque<kj::_::FiberStack*, std::__1::allocator<kj::_::FiberStack*> > const>::operator->() Unexecuted instantiation: kj::Locked<std::__1::deque<kj::_::FiberStack*, std::__1::allocator<kj::_::FiberStack*> > >::operator->() Unexecuted instantiation: kj::Locked<kj::Executor::Impl::State>::operator->() Unexecuted instantiation: kj::Locked<kj::Executor::Impl::State const>::operator->() |
261 | | inline const T* operator->() const { return ptr; } |
262 | 1.09k | inline T& operator*() { return *ptr; }kj::Locked<kj::Maybe<kj::HashMap<unsigned int, kj::Own<capnp::_::SegmentReader, decltype(nullptr)> > > >::operator*() Line | Count | Source | 262 | 1.09k | inline T& operator*() { return *ptr; } |
Unexecuted instantiation: kj::Locked<std::__1::deque<kj::_::FiberStack*, std::__1::allocator<kj::_::FiberStack*> > >::operator*() |
263 | | inline const T& operator*() const { return *ptr; } |
264 | | inline T* get() { return ptr; } |
265 | | inline const T* get() const { return ptr; } |
266 | | inline operator T*() { return ptr; } |
267 | | inline operator const T*() const { return ptr; } |
268 | | |
269 | | template <typename Cond> |
270 | | void wait(Cond&& condition, Maybe<Duration> timeout = kj::none, |
271 | 0 | LockSourceLocationArg location = {}) { |
272 | | // Unlocks the lock until `condition(state)` evaluates true (where `state` is type `const T&` |
273 | | // referencing the object protected by the lock). |
274 | | |
275 | | // We can't wait on a shared lock because the internal bookkeeping needed for a wait requires |
276 | | // the protection of an exclusive lock. |
277 | 0 | static_assert(!isConst<T>(), "cannot wait() on shared lock"); |
278 | |
|
279 | 0 | struct PredicateImpl final: public _::Mutex::Predicate { |
280 | 0 | bool check() override { |
281 | 0 | return condition(value); |
282 | 0 | } Unexecuted instantiation: async.c++:kj::Locked<kj::Executor::Impl::State>::wait<kj::_::XThreadEvent::ensureDoneOrCanceled()::$_0>(kj::_::XThreadEvent::ensureDoneOrCanceled()::$_0&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation)::PredicateImpl::check() Unexecuted instantiation: async.c++:kj::Locked<kj::Executor::Impl::State>::wait<kj::_::XThreadEvent::ensureDoneOrCanceled()::$_3>(kj::_::XThreadEvent::ensureDoneOrCanceled()::$_3&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation)::PredicateImpl::check() Unexecuted instantiation: async.c++:kj::Locked<kj::Executor::Impl::State>::wait<kj::_::XThreadEvent::ensureDoneOrCanceled()::$_4>(kj::_::XThreadEvent::ensureDoneOrCanceled()::$_4&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation)::PredicateImpl::check() Unexecuted instantiation: async.c++:kj::Locked<kj::Executor::Impl::State>::wait<kj::_::XThreadPaf::destroy()::$_0>(kj::_::XThreadPaf::destroy()::$_0&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation)::PredicateImpl::check() Unexecuted instantiation: async.c++:kj::Locked<kj::Executor::Impl::State>::wait<kj::Executor::send(kj::_::XThreadEvent&, bool) const::$_1>(kj::Executor::send(kj::_::XThreadEvent&, bool) const::$_1&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation)::PredicateImpl::check() Unexecuted instantiation: async.c++:kj::Locked<kj::Executor::Impl::State>::wait<kj::Executor::wait()::$_1>(kj::Executor::wait()::$_1&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation)::PredicateImpl::check() |
283 | |
|
284 | 0 | Cond&& condition; |
285 | 0 | const T& value; |
286 | |
|
287 | 0 | PredicateImpl(Cond&& condition, const T& value) |
288 | 0 | : condition(kj::fwd<Cond>(condition)), value(value) {}Unexecuted instantiation: async.c++:kj::Locked<kj::Executor::Impl::State>::wait<kj::_::XThreadEvent::ensureDoneOrCanceled()::$_0>(kj::_::XThreadEvent::ensureDoneOrCanceled()::$_0&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation)::PredicateImpl::PredicateImpl(kj::_::XThreadEvent::ensureDoneOrCanceled()::$_0&&, kj::Executor::Impl::State const&) Unexecuted instantiation: async.c++:kj::Locked<kj::Executor::Impl::State>::wait<kj::_::XThreadEvent::ensureDoneOrCanceled()::$_3>(kj::_::XThreadEvent::ensureDoneOrCanceled()::$_3&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation)::PredicateImpl::PredicateImpl(kj::_::XThreadEvent::ensureDoneOrCanceled()::$_3&&, kj::Executor::Impl::State const&) Unexecuted instantiation: async.c++:kj::Locked<kj::Executor::Impl::State>::wait<kj::_::XThreadEvent::ensureDoneOrCanceled()::$_4>(kj::_::XThreadEvent::ensureDoneOrCanceled()::$_4&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation)::PredicateImpl::PredicateImpl(kj::_::XThreadEvent::ensureDoneOrCanceled()::$_4&&, kj::Executor::Impl::State const&) Unexecuted instantiation: async.c++:kj::Locked<kj::Executor::Impl::State>::wait<kj::_::XThreadPaf::destroy()::$_0>(kj::_::XThreadPaf::destroy()::$_0&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation)::PredicateImpl::PredicateImpl(kj::_::XThreadPaf::destroy()::$_0&&, kj::Executor::Impl::State const&) Unexecuted instantiation: async.c++:kj::Locked<kj::Executor::Impl::State>::wait<kj::Executor::send(kj::_::XThreadEvent&, bool) const::$_1>(kj::Executor::send(kj::_::XThreadEvent&, bool) const::$_1&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation)::PredicateImpl::PredicateImpl(kj::Executor::send(kj::_::XThreadEvent&, bool) const::$_1&&, kj::Executor::Impl::State const&) Unexecuted instantiation: 
async.c++:kj::Locked<kj::Executor::Impl::State>::wait<kj::Executor::wait()::$_1>(kj::Executor::wait()::$_1&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation)::PredicateImpl::PredicateImpl(kj::Executor::wait()::$_1&&, kj::Executor::Impl::State const&) |
289 | 0 | }; |
290 | |
|
291 | 0 | PredicateImpl impl(kj::fwd<Cond>(condition), *ptr); |
292 | 0 | mutex->wait(impl, timeout, location); |
293 | 0 | } Unexecuted instantiation: async.c++:void kj::Locked<kj::Executor::Impl::State>::wait<kj::_::XThreadEvent::ensureDoneOrCanceled()::$_0>(kj::_::XThreadEvent::ensureDoneOrCanceled()::$_0&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation) Unexecuted instantiation: async.c++:void kj::Locked<kj::Executor::Impl::State>::wait<kj::_::XThreadEvent::ensureDoneOrCanceled()::$_3>(kj::_::XThreadEvent::ensureDoneOrCanceled()::$_3&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation) Unexecuted instantiation: async.c++:void kj::Locked<kj::Executor::Impl::State>::wait<kj::_::XThreadEvent::ensureDoneOrCanceled()::$_4>(kj::_::XThreadEvent::ensureDoneOrCanceled()::$_4&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation) Unexecuted instantiation: async.c++:void kj::Locked<kj::Executor::Impl::State>::wait<kj::_::XThreadPaf::destroy()::$_0>(kj::_::XThreadPaf::destroy()::$_0&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation) Unexecuted instantiation: async.c++:void kj::Locked<kj::Executor::Impl::State>::wait<kj::Executor::send(kj::_::XThreadEvent&, bool) const::$_1>(kj::Executor::send(kj::_::XThreadEvent&, bool) const::$_1&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation) Unexecuted instantiation: async.c++:void kj::Locked<kj::Executor::Impl::State>::wait<kj::Executor::wait()::$_1>(kj::Executor::wait()::$_1&&, kj::Maybe<kj::Quantity<long, kj::_::NanosecondLabel> >, kj::NoopSourceLocation) |
294 | | |
295 | | private: |
296 | | _::Mutex* mutex; |
297 | | T* ptr; |
298 | | |
299 | 867 | inline Locked(_::Mutex& mutex, T& value): mutex(&mutex), ptr(&value) {}kj::Locked<kj::Maybe<kj::HashMap<unsigned int, kj::Own<capnp::_::SegmentReader, decltype(nullptr)> > > >::Locked(kj::_::Mutex&, kj::Maybe<kj::HashMap<unsigned int, kj::Own<capnp::_::SegmentReader, decltype(nullptr)> > >&) Line | Count | Source | 299 | 867 | inline Locked(_::Mutex& mutex, T& value): mutex(&mutex), ptr(&value) {} |
Unexecuted instantiation: kj::Locked<std::__1::deque<kj::_::FiberStack*, std::__1::allocator<kj::_::FiberStack*> > const>::Locked(kj::_::Mutex&, std::__1::deque<kj::_::FiberStack*, std::__1::allocator<kj::_::FiberStack*> > const&) Unexecuted instantiation: kj::Locked<std::__1::deque<kj::_::FiberStack*, std::__1::allocator<kj::_::FiberStack*> > >::Locked(kj::_::Mutex&, std::__1::deque<kj::_::FiberStack*, std::__1::allocator<kj::_::FiberStack*> >&) Unexecuted instantiation: kj::Locked<kj::Executor::Impl::State>::Locked(kj::_::Mutex&, kj::Executor::Impl::State&) Unexecuted instantiation: kj::Locked<kj::Executor::Impl::State const>::Locked(kj::_::Mutex&, kj::Executor::Impl::State const&) |
300 | | |
301 | | template <typename U> |
302 | | friend class MutexGuarded; |
303 | | template <typename U> |
304 | | friend class ExternalMutexGuarded; |
305 | | |
306 | | #if KJ_MUTEX_TEST |
307 | | public: |
308 | | #endif |
309 | | void induceSpuriousWakeupForTest() { mutex->induceSpuriousWakeupForTest(); } |
310 | | // Utility method for mutex-test.c++ which causes a spurious thread wakeup on all threads that |
311 | | // are waiting for a when() condition. Assuming correct implementation, all those threads should |
312 | | // immediately go back to sleep. |
313 | | }; |
314 | | |
315 | | template <typename T> |
316 | | class MutexGuarded { |
317 | | // An object of type T, bounded by a mutex. In order to access the object, you must lock it. |
318 | | // |
319 | | // Write locks are not "recursive" -- trying to lock again in a thread that already holds a lock |
320 | | // will deadlock. Recursive write locks are usually a sign of bad design. |
321 | | // |
322 | | // Unfortunately, **READ LOCKS ARE NOT RECURSIVE** either. Common sense says they should be. |
323 | | // But on many operating systems (BSD, OSX), recursively read-locking a pthread_rwlock is |
324 | | // actually unsafe. The problem is that writers are "prioritized" over readers, so a read lock |
325 | | // request will block if any write lock requests are outstanding. So, if thread A takes a read |
326 | | // lock, thread B requests a write lock (and starts waiting), and then thread A tries to take |
327 | | // another read lock recursively, the result is deadlock. |
328 | | |
329 | | public: |
330 | | template <typename... Params> |
331 | | explicit MutexGuarded(Params&&... params); |
332 | | // Initialize the mutex-bounded object by passing the given parameters to its constructor. |
333 | | |
334 | | Locked<T> lockExclusive(LockSourceLocationArg location = {}) const; |
335 | | // Exclusively locks the object and returns it. The returned `Locked<T>` can be passed by |
336 | | // move, similar to `Own<T>`. |
337 | | // |
338 | | // This method is declared `const` in accordance with KJ style rules which say that constness |
339 | | // should be used to indicate thread-safety. It is safe to share a const pointer between threads, |
340 | | // but it is not safe to share a mutable pointer. Since the whole point of MutexGuarded is to |
341 | | // be shared between threads, its methods should be const, even though locking it produces a |
342 | | // non-const pointer to the contained object. |
343 | | |
344 | | Locked<const T> lockShared(LockSourceLocationArg location = {}) const; |
345 | | // Lock the value for shared access. Multiple shared locks can be taken concurrently, but cannot |
346 | | // be held at the same time as a non-shared lock. |
347 | | |
348 | | Maybe<Locked<T>> lockExclusiveWithTimeout(Duration timeout, |
349 | | LockSourceLocationArg location = {}) const; |
350 | | // Attempts to exclusively lock the object. If the timeout elapses before the lock is acquired, |
351 | | // this returns null. |
352 | | |
353 | | Maybe<Locked<const T>> lockSharedWithTimeout(Duration timeout, |
354 | | LockSourceLocationArg location = {}) const; |
355 | | // Attempts to lock the value for shared access. If the timeout elapses before the lock is acquired, |
356 | | // this returns null. |
357 | | |
358 | | inline const T& getWithoutLock() const { return value; } |
359 | 0 | inline T& getWithoutLock() { return value; } |
360 | | // Escape hatch for cases where some external factor guarantees that it's safe to get the |
361 | | // value. You should treat these like const_cast -- be highly suspicious of any use. |
362 | | |
363 | | inline const T& getAlreadyLockedShared() const; |
364 | | inline T& getAlreadyLockedShared(); |
365 | | inline T& getAlreadyLockedExclusive() const; |
366 | | // Like `getWithoutLock()`, but asserts that the lock is already held by the calling thread. |
367 | | |
368 | | template <typename Cond, typename Func> |
369 | | auto when(Cond&& condition, Func&& callback, Maybe<Duration> timeout = kj::none, |
370 | | LockSourceLocationArg location = {}) const |
371 | 0 | -> decltype(callback(instance<T&>())) { |
372 | | // Waits until condition(state) returns true, then calls callback(state) under lock. |
373 | | // |
374 | | // `condition`, when called, receives as its parameter a const reference to the state, which is |
375 | | // locked (either shared or exclusive). `callback` receives a mutable reference, which is |
376 | | // exclusively locked. |
377 | | // |
378 | | // `condition()` may be called multiple times, from multiple threads, while waiting for the |
379 | | // condition to become true. It may even return true once, but then be called more times. |
380 | | // It is guaranteed, though, that at the time `callback()` is finally called, `condition()` |
381 | | // would currently return true (assuming it is a pure function of the guarded data). |
382 | | // |
383 | | // If `timeout` is specified, then after the given amount of time, the callback will be called |
384 | | // regardless of whether the condition is true. In this case, when `callback()` is called, |
385 | | // `condition()` may in fact evaluate false, but *only* if the timeout was reached. |
386 | | // |
387 | | // TODO(cleanup): lock->wait() is a better interface. Can we deprecate this one? |
388 | |
|
389 | 0 | auto lock = lockExclusive(); |
390 | 0 | lock.wait(kj::fwd<Cond>(condition), timeout, location); |
391 | 0 | return callback(value); |
392 | 0 | } |
393 | | |
394 | | private: |
395 | | mutable _::Mutex mutex; |
396 | | mutable T value; |
397 | | }; |
398 | | |
399 | | template <typename T> |
400 | | class MutexGuarded<const T> { |
401 | | // MutexGuarded cannot guard a const type. This would be pointless anyway, and would complicate |
402 | | // the implementation of Locked<T>, which uses constness to decide what kind of lock it holds. |
403 | | static_assert(sizeof(T) < 0, "MutexGuarded's type cannot be const."); |
404 | | }; |
405 | | |
406 | | template <typename T> |
407 | | class ExternalMutexGuarded { |
408 | | // Holds a value that can only be manipulated while some other mutex is locked. |
409 | | // |
410 | | // The ExternalMutexGuarded<T> lives *outside* the scope of any lock on the mutex, but ensures |
411 | | // that the value it holds can only be accessed under lock by forcing the caller to present a |
412 | | // lock before accessing the value. |
413 | | // |
414 | | // Additionally, ExternalMutexGuarded<T>'s destructor will take an exclusive lock on the mutex |
415 | | // while destroying the held value, unless the value has been release()ed before hand. |
416 | | // |
417 | | // The type T must have the following properties (which probably all movable types satisfy): |
418 | | // - T is movable. |
419 | | // - Immediately after any of the following has happened, T's destructor is effectively a no-op |
420 | | // (hence certainly not requiring locks): |
421 | | // - The value has been default-constructed. |
422 | | // - The value has been initialized by-move from a default-constructed T. |
423 | | // - The value has been moved away. |
424 | | // - If ExternalMutexGuarded<T> is ever moved, then T must have a move constructor and move |
425 | | // assignment operator that do not follow any pointers, therefore do not need to take a lock. |
426 | | public: |
427 | | ExternalMutexGuarded(LockSourceLocationArg location = {}) |
428 | | : location(location) {} |
429 | | |
430 | | template <typename U, typename... Params> |
431 | | ExternalMutexGuarded(Locked<U> lock, Params&&... params, LockSourceLocationArg location = {}) |
432 | | : mutex(lock.mutex), |
433 | | value(kj::fwd<Params>(params)...), |
434 | | location(location) {} |
435 | | // Construct the value in-place. This constructor requires passing ownership of the lock into |
436 | | // the constructor. Normally this should be a lock that you take on the line calling the |
437 | | // constructor, like: |
438 | | // |
439 | | // ExternalMutexGuarded<T> foo(someMutexGuarded.lockExclusive()); |
440 | | // |
441 | | // The reason this constructor does not accept an lvalue reference to an existing lock is because |
442 | | // this would be deadlock-prone: If an exception were thrown immediately after the constructor |
443 | | // completed, then the destructor would deadlock, because the lock would still be held. An |
444 | | // ExternalMutexGuarded must live outside the scope of any locks to avoid such a deadlock. |
445 | | |
446 | | ~ExternalMutexGuarded() noexcept(false) { |
447 | | if (mutex != nullptr) { |
448 | | mutex->lock(_::Mutex::EXCLUSIVE, kj::none, location); |
449 | | KJ_DEFER(mutex->unlock(_::Mutex::EXCLUSIVE)); |
450 | | value = T(); |
451 | | } |
452 | | } |
453 | | |
454 | | ExternalMutexGuarded(ExternalMutexGuarded&& other) |
455 | | : mutex(other.mutex), value(kj::mv(other.value)), location(other.location) { |
456 | | other.mutex = nullptr; |
457 | | } |
458 | | ExternalMutexGuarded& operator=(ExternalMutexGuarded&& other) { |
459 | | mutex = other.mutex; |
460 | | value = kj::mv(other.value); |
461 | | location = other.location; |
462 | | other.mutex = nullptr; |
463 | | return *this; |
464 | | } |
465 | | |
466 | | template <typename U> |
467 | | void set(Locked<U>& lock, T&& newValue) { |
468 | | KJ_IREQUIRE(mutex == nullptr); |
469 | | mutex = lock.mutex; |
470 | | value = kj::mv(newValue); |
471 | | } |
472 | | |
473 | | template <typename U> |
474 | | T& get(Locked<U>& lock) { |
475 | | KJ_IREQUIRE(lock.mutex == mutex); |
476 | | return value; |
477 | | } |
478 | | |
479 | | template <typename U> |
480 | | const T& get(Locked<const U>& lock) const { |
481 | | KJ_IREQUIRE(lock.mutex == mutex); |
482 | | return value; |
483 | | } |
484 | | |
485 | | template <typename U> |
486 | | T release(Locked<U>& lock) { |
487 | | // Release (move away) the value. This allows the destructor to skip locking the mutex. |
488 | | KJ_IREQUIRE(lock.mutex == mutex); |
489 | | T result = kj::mv(value); |
490 | | mutex = nullptr; |
491 | | return result; |
492 | | } |
493 | | |
494 | | private: |
495 | | _::Mutex* mutex = nullptr; |
496 | | T value; |
497 | | KJ_NO_UNIQUE_ADDRESS LockSourceLocation location; |
498 | | // When built against C++20 (or clang >= 9.0), the overhead of this is elided. Otherwise this |
499 | | // struct will be 1 byte larger than it would otherwise be. |
500 | | }; |
501 | | |
502 | | template <typename T> |
503 | | class Lazy { |
504 | | // A lazily-initialized value. |
505 | | |
506 | | public: |
507 | | template <typename Func> |
508 | | T& get(Func&& init, LockSourceLocationArg location = {}); |
509 | | template <typename Func> |
510 | | const T& get(Func&& init, LockSourceLocationArg location = {}) const; |
511 | | // The first thread to call get() will invoke the given init function to construct the value. |
512 | | // Other threads will block until construction completes, then return the same value. |
513 | | // |
514 | | // `init` is a functor(typically a lambda) which takes `SpaceFor<T>&` as its parameter and returns |
515 | | // `Own<T>`. If `init` throws an exception, the exception is propagated out of that thread's |
516 | | // call to `get()`, and subsequent calls behave as if `get()` hadn't been called at all yet -- |
517 | | // in other words, subsequent calls retry initialization until it succeeds. |
518 | | |
519 | | private: |
520 | | mutable _::Once once; |
521 | | mutable SpaceFor<T> space; |
522 | | mutable Own<T> value; |
523 | | |
524 | | template <typename Func> |
525 | | class InitImpl; |
526 | | }; |
527 | | |
528 | | // ======================================================================================= |
529 | | // Inline implementation details |
530 | | |
template <typename T>
template <typename... Params>
inline MutexGuarded<T>::MutexGuarded(Params&&... params)
    // Forward all constructor arguments directly to the guarded value.
    : value(kj::fwd<Params>(params)...) {}
535 | | |
template <typename T>
inline Locked<T> MutexGuarded<T>::lockExclusive(LockSourceLocationArg location)
    const {
  // Block until the mutex is held exclusively, then hand out a Locked<T>
  // whose destructor releases the lock.
  mutex.lock(_::Mutex::EXCLUSIVE, kj::none, location);
  return Locked<T>(mutex, value);
}
542 | | |
template <typename T>
inline Locked<const T> MutexGuarded<T>::lockShared(LockSourceLocationArg location) const {
  // Block until a shared (read) lock is held; the value is exposed as const
  // since other readers may hold the lock concurrently.
  mutex.lock(_::Mutex::SHARED, kj::none, location);
  return Locked<const T>(mutex, value);
}
548 | | |
549 | | template <typename T> |
550 | | inline Maybe<Locked<T>> MutexGuarded<T>::lockExclusiveWithTimeout(Duration timeout, |
551 | | LockSourceLocationArg location) const { |
552 | | if (mutex.lock(_::Mutex::EXCLUSIVE, timeout, location)) { |
553 | | return Locked<T>(mutex, value); |
554 | | } else { |
555 | | return kj::none; |
556 | | } |
557 | | } |
558 | | |
559 | | template <typename T> |
560 | | inline Maybe<Locked<const T>> MutexGuarded<T>::lockSharedWithTimeout(Duration timeout, |
561 | | LockSourceLocationArg location) const { |
562 | | if (mutex.lock(_::Mutex::SHARED, timeout, location)) { |
563 | | return Locked<const T>(mutex, value); |
564 | | } else { |
565 | | return kj::none; |
566 | | } |
567 | | } |
568 | | |
template <typename T>
inline const T& MutexGuarded<T>::getAlreadyLockedShared() const {
  // Access the value without acquiring the lock; the caller must already hold
  // at least a shared lock. This precondition is only checked in debug builds.
#ifdef KJ_DEBUG
  mutex.assertLockedByCaller(_::Mutex::SHARED);
#endif
  return value;
}
template <typename T>
inline T& MutexGuarded<T>::getAlreadyLockedShared() {
  // Non-const overload; caller must already hold at least a shared lock.
  // Only checked in debug builds.
#ifdef KJ_DEBUG
  mutex.assertLockedByCaller(_::Mutex::SHARED);
#endif
  return value;
}
template <typename T>
inline T& MutexGuarded<T>::getAlreadyLockedExclusive() const {
  // Access the value mutably without acquiring the lock; the caller must
  // already hold the lock exclusively (checked only in debug builds).
#ifdef KJ_DEBUG
  mutex.assertLockedByCaller(_::Mutex::EXCLUSIVE);
#endif
  // const_cast is safe here: holding the exclusive lock guarantees no other
  // thread can be observing the value.
  return const_cast<T&>(value);
}
590 | | |
template <typename T>
template <typename Func>
class Lazy<T>::InitImpl: public _::Once::Initializer {
  // Adapts the user's init functor to the _::Once::Initializer interface so
  // that _::Once can invoke it for one-time construction.
public:
  inline InitImpl(const Lazy<T>& lazy, Func&& func): lazy(lazy), func(kj::fwd<Func>(func)) {}

  void run() override {
    // Construct the value into the Lazy's storage and record ownership in its
    // mutable `value` member.
    lazy.value = func(lazy.space);
  }

private:
  const Lazy<T>& lazy;
  Func func;
};
605 | | |
template <typename T>
template <typename Func>
inline T& Lazy<T>::get(Func&& init, LockSourceLocationArg location) {
  // The isInitialized() check is a fast path that avoids constructing InitImpl
  // once the value exists; runOnce() itself is responsible for ensuring only
  // one thread runs the initializer while others block (per the class docs,
  // a failed initializer leaves the Lazy uninitialized so later calls retry).
  if (!once.isInitialized()) {
    InitImpl<Func> initImpl(*this, kj::fwd<Func>(init));
    once.runOnce(initImpl, location);
  }
  return *value;
}
615 | | |
template <typename T>
template <typename Func>
inline const T& Lazy<T>::get(Func&& init, LockSourceLocationArg location) const {
  // Const overload; identical to the non-const version (initialization still
  // happens lazily thanks to the mutable members), but returns const T&.
  if (!once.isInitialized()) {
    InitImpl<Func> initImpl(*this, kj::fwd<Func>(init));
    once.runOnce(initImpl, location);
  }
  return *value;
}
625 | | |
626 | | } // namespace kj |
627 | | |
628 | | KJ_END_HEADER |