/src/Fast-DDS/src/cpp/utils/shared_mutex.hpp
Line | Count | Source |
1 | | /* |
2 | | Copyright Howard Hinnant 2007-2010. Distributed under the Boost |
3 | | Software License, Version 1.0. (see http://www.boost.org/LICENSE_1_0.txt) |
4 | | The original implementation has been modified to support the POSIX priorities: |
5 | | |
6 | | PTHREAD_RWLOCK_PREFER_READER_NP |
7 | | This is the default. A thread may hold multiple read |
8 | | locks; that is, read locks are recursive. According to |
9 | | The Single Unix Specification, the behavior is unspecified |
10 | | when a reader tries to place a lock, and there is no write |
11 | | lock but writers are waiting. Giving preference to the |
12 | | reader, as is set by PTHREAD_RWLOCK_PREFER_READER_NP, |
13 | | implies that the reader will receive the requested lock, |
14 | | even if a writer is waiting. As long as there are |
15 | | readers, the writer will be starved. |
16 | | |
17 | | PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP |
18 | | Setting the lock kind to this avoids writer starvation as |
19 | | long as any read locking is not done in a recursive |
20 | | fashion. |
21 | | |
22 | | The C++ Standard has not yet (as of C++20) imposed any requirements on the shared_mutex implementation, thus |
23 | | each platform has made its own choice: |
24 | | Windows & Boost default to PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP. |
25 | | Linux & Mac default to PTHREAD_RWLOCK_PREFER_READER_NP. |
26 | | */ |
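As a quick orientation, the sketch below (not part of the original source; the include path is an assumption) shows how the two lock kinds described above select the specializations defined later in this header:

    // Minimal sketch: pick a flavor explicitly through the shared_mutex_type enum.
    #include "shared_mutex.hpp"   // assumed include path

    int main()
    {
        using namespace eprosima::detail;

        // Reader-preferring: shared locks may recurse, writers can starve.
        shared_mutex<shared_mutex_type::PTHREAD_RWLOCK_PREFER_READER_NP> rd_pref;

        // Writer-preferring: avoids writer starvation, shared locks must not recurse.
        shared_mutex<shared_mutex_type::PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP> wr_pref;

        rd_pref.lock_shared();
        rd_pref.unlock_shared();

        wr_pref.lock();
        wr_pref.unlock();
        return 0;
    }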
27 | | |
28 | | /** |
29 | | * @file shared_mutex.hpp |
30 | | */ |
31 | | |
32 | | #ifndef _UTILS_SHARED_MUTEX_HPP_ |
33 | | #define _UTILS_SHARED_MUTEX_HPP_ |
34 | | |
35 | | #include <climits> |
36 | | #include <condition_variable> |
37 | | #include <map> |
38 | | #include <mutex> |
39 | | #include <system_error> |
40 | | #include <thread> |
41 | | |
42 | | namespace eprosima { |
43 | | namespace detail { |
44 | | |
45 | | // Mimic the POSIX read-write lock kind constants |
46 | | enum class shared_mutex_type |
47 | | { |
48 | | PTHREAD_RWLOCK_PREFER_READER_NP, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP |
49 | | }; |
50 | | |
51 | | class shared_mutex_base |
52 | | { |
53 | | |
54 | | protected: |
55 | | |
56 | | typedef std::mutex mutex_t; |
57 | | typedef std::condition_variable cond_t; |
58 | | typedef unsigned count_t; |
59 | | |
60 | | mutex_t mut_; |
61 | | cond_t gate1_; // blocked readers and writers wait here |
62 | | count_t state_; // MSB: writer flag; remaining bits: number of readers |
63 | | |
64 | | static const count_t write_entered_ = 1U << (sizeof(count_t) * CHAR_BIT - 1); // writer flag (MSB) |
65 | | static const count_t n_readers_ = ~write_entered_; // mask covering the reader count |
66 | | |
67 | | public: |
68 | | |
69 | | shared_mutex_base() |
70 | 1.75k | : state_(0) |
71 | 1.75k | { |
72 | 1.75k | } |
73 | | |
74 | | ~shared_mutex_base() |
75 | 1.75k | { |
76 | 1.75k | std::lock_guard<mutex_t> _(mut_); |
77 | 1.75k | } |
78 | | |
79 | | shared_mutex_base( |
80 | | const shared_mutex_base&) = delete; |
81 | | shared_mutex_base& operator =( |
82 | | const shared_mutex_base&) = delete; |
83 | | |
84 | | // Exclusive ownership |
85 | | |
86 | | bool try_lock() |
87 | 0 | { |
88 | 0 | std::lock_guard<mutex_t> _(mut_); |
89 | 0 | if (state_ == 0) |
90 | 0 | { |
91 | 0 | state_ = write_entered_; |
92 | 0 | return true; |
93 | 0 | } |
94 | 0 | return false; |
95 | 0 | } |
96 | | |
97 | | void unlock() |
98 | 7.39k | { |
99 | 7.39k | std::lock_guard<mutex_t> _(mut_); |
100 | 7.39k | state_ = 0; |
101 | 7.39k | gate1_.notify_all(); |
102 | 7.39k | } |
103 | | |
104 | | // Shared ownership |
105 | | |
106 | | void lock_shared() |
107 | 15.4k | { |
108 | 15.4k | std::unique_lock<mutex_t> lk(mut_); |
109 | 15.4k | while ((state_ & write_entered_) || (state_ & n_readers_) == n_readers_) |
110 | 0 | { |
111 | 0 | gate1_.wait(lk); |
112 | 0 | } |
113 | 15.4k | count_t num_readers = (state_ & n_readers_) + 1; |
114 | 15.4k | state_ &= ~n_readers_; |
115 | 15.4k | state_ |= num_readers; |
116 | 15.4k | } |
117 | | |
118 | | bool try_lock_shared() |
119 | 0 | { |
120 | 0 | std::lock_guard<mutex_t> _(mut_); |
121 | 0 | count_t num_readers = state_ & n_readers_; |
122 | 0 | if (!(state_ & write_entered_) && num_readers != n_readers_) |
123 | 0 | { |
124 | 0 | ++num_readers; |
125 | 0 | state_ &= ~n_readers_; |
126 | 0 | state_ |= num_readers; |
127 | 0 | return true; |
128 | 0 | } |
129 | 0 | return false; |
130 | 0 | } |
131 | | |
132 | | }; |
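The base class packs the whole lock state into one unsigned integer: write_entered_ is the most significant bit and marks an exclusive owner (or, in the writer-preferring flavor, a writer that has entered), while n_readers_ masks the remaining bits that count shared owners. A standalone sketch of that encoding (illustrative, not part of the file):

    #include <cassert>
    #include <climits>

    int main()
    {
        typedef unsigned count_t;
        const count_t write_entered = 1U << (sizeof(count_t) * CHAR_BIT - 1); // MSB
        const count_t n_readers = ~write_entered;                             // low bits

        count_t state = 0;
        state = (state & ~n_readers) | 3;        // register three readers in the low bits
        assert((state & n_readers) == 3);        // reader count
        assert((state & write_entered) == 0);    // no exclusive owner yet

        state |= write_entered;                  // a writer enters
        assert((state & write_entered) != 0);
        assert((state & n_readers) == 3);        // the two fields do not interfere
        return 0;
    }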
133 | | |
134 | | template<shared_mutex_type> |
135 | | class shared_mutex; |
136 | | |
137 | | // Original Hinnant implementation, which prioritizes writers |
138 | | |
139 | | template<> |
140 | | class shared_mutex<shared_mutex_type::PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP> |
141 | | : public shared_mutex_base |
142 | | { |
143 | | cond_t gate2_; |
144 | | |
145 | | public: |
146 | | |
147 | | void lock() |
148 | 0 | { |
149 | 0 | std::unique_lock<mutex_t> lk(mut_); |
150 | 0 | while (state_ & write_entered_) |
151 | 0 | { |
152 | 0 | gate1_.wait(lk); |
153 | 0 | } |
154 | 0 | state_ |= write_entered_; |
155 | 0 | while (state_ & n_readers_) |
156 | 0 | { |
157 | 0 | gate2_.wait(lk); |
158 | 0 | } |
159 | 0 | } |
160 | | |
161 | | void unlock_shared() |
162 | 0 | { |
163 | 0 | std::lock_guard<mutex_t> _(mut_); |
164 | 0 | count_t num_readers = (state_ & n_readers_) - 1; |
165 | 0 | state_ &= ~n_readers_; |
166 | 0 | state_ |= num_readers; |
167 | 0 | if (state_ & write_entered_) |
168 | 0 | { |
169 | 0 | if (num_readers == 0) |
170 | 0 | { |
171 | 0 | gate2_.notify_one(); |
172 | 0 | } |
173 | 0 | } |
174 | 0 | else if (num_readers == n_readers_ - 1) |
175 | 0 | { |
176 | 0 | gate1_.notify_one(); |
177 | 0 | } |
178 | 0 | } |
179 | | |
180 | | }; |
181 | | |
182 | | // Implementation that does not block readers on behalf of waiting writers |
183 | | |
184 | | template<> |
185 | | class shared_mutex<shared_mutex_type::PTHREAD_RWLOCK_PREFER_READER_NP> |
186 | | : public shared_mutex_base |
187 | | { |
188 | | count_t writer_waiting_ = 0; |
189 | | |
190 | | public: |
191 | | |
192 | | void lock() |
193 | 7.39k | { |
194 | 7.39k | std::unique_lock<mutex_t> lk(mut_); |
195 | 7.39k | ++writer_waiting_; |
196 | 7.39k | while (state_ & n_readers_ || state_ & write_entered_) |
197 | 0 | { |
198 | 0 | gate1_.wait(lk); |
199 | 0 | } |
200 | 7.39k | state_ |= write_entered_; |
201 | 7.39k | --writer_waiting_; |
202 | 7.39k | } |
203 | | |
204 | | void unlock_shared() |
205 | 15.4k | { |
206 | 15.4k | std::lock_guard<mutex_t> _(mut_); |
207 | 15.4k | count_t num_readers = (state_ & n_readers_) - 1; |
208 | 15.4k | state_ &= ~n_readers_; |
209 | 15.4k | state_ |= num_readers; |
210 | | |
211 | 15.4k | if ((writer_waiting_ && num_readers == 0) |
212 | 15.4k | || (num_readers == n_readers_ - 1)) |
213 | 0 | { |
214 | 0 | gate1_.notify_one(); |
215 | 0 | } |
216 | 15.4k | } |
217 | | |
218 | | }; |
219 | | |
220 | | // Debug wrapper class that records lock ownership for inspection from a debugger |
221 | | template<class sm> |
222 | | class debug_wrapper : public sm |
223 | | { |
224 | | std::mutex wm_; |
225 | | // Identity of the exclusive owner if any |
226 | | std::thread::id exclusive_owner_ = {}; |
227 | | // key_type thread_id, mapped_type number of locks |
228 | | std::map<std::thread::id, unsigned int> shared_owners_; |
229 | | |
230 | | public: |
231 | | |
232 | | ~debug_wrapper() |
233 | | { |
234 | | std::lock_guard<std::mutex> _(wm_); |
235 | | } |
236 | | |
237 | | // Exclusive ownership |
238 | | |
239 | | void lock() |
240 | | { |
241 | | sm::lock(); |
242 | | std::lock_guard<std::mutex> _(wm_); |
243 | | exclusive_owner_ = std::this_thread::get_id(); |
244 | | } |
245 | | |
246 | | bool try_lock() |
247 | | { |
248 | | bool res = sm::try_lock(); |
249 | | std::lock_guard<std::mutex> _(wm_); |
250 | | if (res) |
251 | | { |
252 | | exclusive_owner_ = std::this_thread::get_id(); |
253 | | } |
254 | | return res; |
255 | | } |
256 | | |
257 | | void unlock() |
258 | | { |
259 | | sm::unlock(); |
260 | | std::lock_guard<std::mutex> _(wm_); |
261 | | exclusive_owner_ = std::thread::id(); |
262 | | } |
263 | | |
264 | | // Shared ownership |
265 | | |
266 | | void lock_shared() |
267 | | { |
268 | | sm::lock_shared(); |
269 | | std::lock_guard<std::mutex> _(wm_); |
270 | | ++shared_owners_[std::this_thread::get_id()]; |
271 | | } |
272 | | |
273 | | bool try_lock_shared() |
274 | | { |
275 | | bool res = sm::try_lock_shared(); |
276 | | std::lock_guard<std::mutex> _(wm_); |
277 | | if (res) |
278 | | { |
279 | | ++shared_owners_[std::this_thread::get_id()]; |
280 | | } |
281 | | return res; |
282 | | } |
283 | | |
284 | | void unlock_shared() |
285 | | { |
286 | | sm::unlock_shared(); |
287 | | std::lock_guard<std::mutex> _(wm_); |
288 | | auto owner = shared_owners_.find(std::this_thread::get_id()); |
289 | | if ( owner != shared_owners_.end() && 0 == --owner->second ) |
290 | | { |
291 | | shared_owners_.erase(owner); |
292 | | } |
293 | | } |
294 | | |
295 | | }; |
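A usage sketch of the wrapper above. It behaves exactly like the wrapped mutex; its value is that exclusive_owner_ and shared_owners_ can be watched from a debugger to see which threads currently hold the lock. The alias name and include path below are illustrative assumptions:

    #include "shared_mutex.hpp"   // assumed include path

    using dbg_shared_mutex = eprosima::detail::debug_wrapper<
            eprosima::detail::shared_mutex<
                    eprosima::detail::shared_mutex_type::PTHREAD_RWLOCK_PREFER_READER_NP>>;

    void debug_wrapper_example()
    {
        dbg_shared_mutex m;

        m.lock_shared();     // this thread is added to shared_owners_
        m.unlock_shared();   // ...and removed again

        m.lock();            // exclusive_owner_ becomes std::this_thread::get_id()
        m.unlock();          // exclusive_owner_ is reset to a default-constructed id
    }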
296 | | |
297 | | } // namespace detail |
298 | | } // namespace eprosima |
299 | | |
300 | | #if defined(__has_include) && __has_include(<version>) |
301 | | # include <version> |
302 | | #endif // if defined(__has_include) && __has_include(<version>) |
303 | | |
304 | | // Detect if the shared_lock feature is available |
305 | | #if defined(__has_include) && __has_include(<version>) && !defined(__cpp_lib_shared_mutex) || \ |
306 | | /* fallback detection when <version> is not available */ \ |
307 | | ( !(defined(__has_include) && __has_include(<version>)) && \ |
308 | | !(defined(HAVE_CXX17) && HAVE_CXX17) && __cplusplus < 201703 ) |
309 | | |
310 | | namespace eprosima { |
311 | | |
312 | | template <class Mutex> |
313 | | class shared_lock |
314 | | { |
315 | | public: |
316 | | |
317 | | typedef Mutex mutex_type; |
318 | | |
319 | | private: |
320 | | |
321 | | mutex_type* m_; |
322 | | bool owns_; |
323 | | |
324 | | struct __nat |
325 | | { |
326 | | int _; |
327 | | }; |
328 | | |
329 | | public: |
330 | | |
331 | | shared_lock() |
332 | | : m_(nullptr) |
333 | | , owns_(false) |
334 | | { |
335 | | } |
336 | | |
337 | | explicit shared_lock( |
338 | | mutex_type& m) |
339 | 15.4k | : m_(&m) |
340 | 15.4k | , owns_(true) |
341 | 15.4k | { |
342 | 15.4k | m_->lock_shared(); |
343 | 15.4k | } |
344 | | |
345 | | shared_lock( |
346 | | mutex_type& m, |
347 | | std::defer_lock_t) |
348 | | : m_(&m) |
349 | | , owns_(false) |
350 | | { |
351 | | } |
352 | | |
353 | | shared_lock( |
354 | | mutex_type& m, |
355 | | std::try_to_lock_t) |
356 | | : m_(&m) |
357 | | , owns_(m.try_lock_shared()) |
358 | | { |
359 | | } |
360 | | |
361 | | shared_lock( |
362 | | mutex_type& m, |
363 | | std::adopt_lock_t) |
364 | | : m_(&m) |
365 | | , owns_(true) |
366 | | { |
367 | | } |
368 | | |
369 | | template <class Clock, class Duration> |
370 | | shared_lock( |
371 | | mutex_type& m, |
372 | | const std::chrono::time_point<Clock, Duration>& abs_time) |
373 | | : m_(&m) |
374 | | , owns_(m.try_lock_shared_until(abs_time)) |
375 | | { |
376 | | } |
377 | | |
378 | | template <class Rep, class Period> |
379 | | shared_lock( |
380 | | mutex_type& m, |
381 | | const std::chrono::duration<Rep, Period>& rel_time) |
382 | | : m_(&m) |
383 | | , owns_(m.try_lock_shared_for(rel_time)) |
384 | | { |
385 | | } |
386 | | |
387 | | ~shared_lock() |
388 | 15.4k | { |
389 | 15.4k | if (owns_) |
390 | 15.4k | { |
391 | 15.4k | m_->unlock_shared(); |
392 | 15.4k | } |
393 | 15.4k | } |
394 | | |
395 | | shared_lock( |
396 | | shared_lock const&) = delete; |
397 | | shared_lock& operator =( |
398 | | shared_lock const&) = delete; |
399 | | |
400 | | shared_lock( |
401 | | shared_lock&& sl) |
402 | | : m_(sl.m_) |
403 | | , owns_(sl.owns_) |
404 | | { |
405 | | sl.m_ = nullptr; sl.owns_ = false; |
406 | | } |
407 | | |
408 | | shared_lock& operator =( |
409 | | shared_lock&& sl) |
410 | | { |
411 | | if (owns_) |
412 | | { |
413 | | m_->unlock_shared(); |
414 | | } |
415 | | m_ = sl.m_; |
416 | | owns_ = sl.owns_; |
417 | | sl.m_ = nullptr; |
418 | | sl.owns_ = false; |
419 | | return *this; |
420 | | } |
421 | | |
422 | | explicit shared_lock( |
423 | | std::unique_lock<mutex_type>&& ul) |
424 | | : m_(ul.mutex()) |
425 | | , owns_(ul.owns_lock()) |
426 | | { |
427 | | if (owns_) |
428 | | { |
429 | | m_->unlock_and_lock_shared(); |
430 | | } |
431 | | ul.release(); |
432 | | } |
433 | | |
434 | | void lock(); |
435 | | bool try_lock(); |
436 | | template <class Rep, class Period> |
437 | | bool try_lock_for( |
438 | | const std::chrono::duration<Rep, Period>& rel_time) |
439 | | { |
440 | | return try_lock_until(std::chrono::steady_clock::now() + rel_time); |
441 | | } |
442 | | |
443 | | template <class Clock, class Duration> |
444 | | bool |
445 | | try_lock_until( |
446 | | const std::chrono::time_point<Clock, Duration>& abs_time); |
447 | | void unlock(); |
448 | | |
449 | | void swap( |
450 | | shared_lock&& u) |
451 | | { |
452 | | std::swap(m_, u.m_); |
453 | | std::swap(owns_, u.owns_); |
454 | | } |
455 | | |
456 | | mutex_type* release() |
457 | | { |
458 | | mutex_type* r = m_; |
459 | | m_ = nullptr; |
460 | | owns_ = false; |
461 | | return r; |
462 | | } |
463 | | |
464 | | bool owns_lock() const |
465 | | { |
466 | | return owns_; |
467 | | } |
468 | | |
469 | | operator int __nat::* () const { |
470 | | return owns_ ? &__nat::_ : 0; |
471 | | } |
472 | | mutex_type* mutex() const |
473 | | { |
474 | | return m_; |
475 | | } |
476 | | |
477 | | }; |
478 | | |
479 | | template <class Mutex> |
480 | | void |
481 | | shared_lock<Mutex>::lock() |
482 | | { |
483 | | if (m_ == nullptr) |
484 | | { |
485 | | throw std::system_error(std::error_code(EPERM, std::system_category()), |
486 | | "shared_lock::lock: references null mutex"); |
487 | | } |
488 | | if (owns_) |
489 | | { |
490 | | throw std::system_error(std::error_code(EDEADLK, std::system_category()), |
491 | | "shared_lock::lock: already locked"); |
492 | | } |
493 | | m_->lock_shared(); |
494 | | owns_ = true; |
495 | | } |
496 | | |
497 | | template <class Mutex> |
498 | | bool |
499 | | shared_lock<Mutex>::try_lock() |
500 | | { |
501 | | if (m_ == nullptr) |
502 | | { |
503 | | throw std::system_error(std::error_code(EPERM, std::system_category()), |
504 | | "shared_lock::try_lock: references null mutex"); |
505 | | } |
506 | | if (owns_) |
507 | | { |
508 | | throw std::system_error(std::error_code(EDEADLK, std::system_category()), |
509 | | "shared_lock::try_lock: already locked"); |
510 | | } |
511 | | owns_ = m_->try_lock_shared(); |
512 | | return owns_; |
513 | | } |
514 | | |
515 | | template <class Mutex> |
516 | | template <class Clock, class Duration> |
517 | | bool |
518 | | shared_lock<Mutex>::try_lock_until( |
519 | | const std::chrono::time_point<Clock, Duration>& abs_time) |
520 | | { |
521 | | if (m_ == nullptr) |
522 | | { |
523 | | throw std::system_error(std::error_code(EPERM, std::system_category()), |
524 | | "shared_lock::try_lock_until: references null mutex"); |
525 | | } |
526 | | if (owns_) |
527 | | { |
528 | | throw std::system_error(std::error_code(EDEADLK, std::system_category()), |
529 | | "shared_lock::try_lock_until: already locked"); |
530 | | } |
531 | | owns_ = m_->try_lock_shared_until(abs_time); |
532 | | return owns_; |
533 | | } |
534 | | |
535 | | template <class Mutex> |
536 | | void |
537 | | shared_lock<Mutex>::unlock() |
538 | | { |
539 | | if (!owns_) |
540 | | { |
541 | | throw std::system_error(std::error_code(EPERM, std::system_category()), |
542 | | "shared_lock::unlock: not locked"); |
543 | | } |
544 | | m_->unlock_shared(); |
545 | | owns_ = false; |
546 | | } |
547 | | |
548 | | template <class Mutex> |
549 | | inline |
550 | | void |
551 | | swap( |
552 | | shared_lock<Mutex>& x, |
553 | | shared_lock<Mutex>& y) |
554 | | { |
555 | | x.swap(y); |
556 | | } |
557 | | |
558 | | } //namespace eprosima |
559 | | |
560 | | #else // fallback to STL |
561 | | |
562 | | #include <shared_mutex> |
563 | | |
564 | | namespace eprosima { |
565 | | |
566 | | using std::shared_lock; |
567 | | using std::swap; |
568 | | |
569 | | } //namespace eprosima |
570 | | |
571 | | #endif // shared_lock selection |
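Whichever branch was selected above, eprosima::shared_lock offers the usual RAII shared-ownership interface. A hedged usage sketch (the struct and its member names are hypothetical; the mutex type is the reader-preferring specialization defined earlier in this header, which is assumed to be included):

    #include <mutex>
    #include <string>

    struct guarded_name
    {
        using mutex_t = eprosima::detail::shared_mutex<
                eprosima::detail::shared_mutex_type::PTHREAD_RWLOCK_PREFER_READER_NP>;

        mutex_t mtx;
        std::string value;

        std::string read()
        {
            eprosima::shared_lock<mutex_t> lock(mtx);   // shared (reader) ownership
            return value;
        }

        void write(const std::string& v)
        {
            std::lock_guard<mutex_t> lock(mtx);         // exclusive (writer) ownership
            value = v;
        }
    };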
572 | | |
573 | | #ifndef USE_THIRDPARTY_SHARED_MUTEX |
574 | | # if defined(_MSC_VER) && _MSVC_LANG < 202302L |
575 | | # pragma message("warning: USE_THIRDPARTY_SHARED_MUTEX not defined. By default use framework version.") |
576 | | # else |
577 | | # warning "USE_THIRDPARTY_SHARED_MUTEX not defined. By default use framework version." |
578 | | # endif // if defined(_MSC_VER) && _MSVC_LANG < 202302L |
579 | | # define USE_THIRDPARTY_SHARED_MUTEX 0 |
580 | | #endif // ifndef USE_THIRDPARTY_SHARED_MUTEX |
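As the block above implies, builds are expected to set USE_THIRDPARTY_SHARED_MUTEX explicitly, typically as a project-wide compile definition. A sketch of forcing the in-repo implementation from a single translation unit (include path is an assumption):

    // Setting the macro to 1 before inclusion makes the #if below choose the
    // custom shared_mutex even when the standard library provides one.
    #define USE_THIRDPARTY_SHARED_MUTEX 1
    #include "shared_mutex.hpp"   // assumed include path

    eprosima::shared_mutex forced_custom_mutex;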
581 | | |
582 | | // Detect if the shared_mutex feature is available or if the user forces the third-party implementation |
583 | | #if defined(__has_include) && __has_include(<version>) && !defined(__cpp_lib_shared_mutex) || \ |
584 | | /* allow users to ignore shared_mutex framework implementation */ \ |
585 | | (~USE_THIRDPARTY_SHARED_MUTEX + 1) || \ |
586 | | /* fallback detection when <version> is not available */ \ |
587 | | ( !(defined(__has_include) && __has_include(<version>)) && \ |
588 | | !(defined(HAVE_CXX17) && HAVE_CXX17) && __cplusplus < 201703 ) |
589 | | |
590 | | /* |
591 | | Fast DDS defaults to PTHREAD_RWLOCK_PREFER_READER_NP for two main reasons: |
592 | | |
593 | | - It allows reader-side recursiveness. If we have two threads (T1, T2) and |
594 | | call S a shared lock and E an exclusive one: |
595 | | |
596 | | T1: S -> S |
597 | | T2: E |
598 | | |
599 | | PTHREAD_RWLOCK_PREFER_READER_NP will never deadlock. The S locks are not |
600 | | influenced by the E locks. |
601 | | |
602 | | PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP will deadlock if T2 takes E |
603 | | before T1 takes S twice. That happens because: |
604 | | + T1's second S will wait for E (the writer is prioritized) |
605 | | + E will wait for T1's first S lock (the writer needs exclusive access) |
606 | | + T1's first S cannot be unlocked because T1 is blocked on the second S. |
607 | | |
608 | | Thus, shared_mutex<PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP> is |
609 | | non-recursive. |
610 | | |
611 | | - It prevents ABBA deadlocks with other mutexes. If we have three threads |
612 | | (Ti) and P is an ordinary mutex: |
613 | | |
614 | | T1: P -> S |
615 | | T2: S -> P |
616 | | T3: E |
617 | | |
618 | | PTHREAD_RWLOCK_PREFER_READER_NP will never deadlock. The S locks are not |
619 | | influenced by the E locks. Starvation issues can be managed in the user |
620 | | code. |
621 | | |
622 | | PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP will deadlock if T3 takes E |
623 | | before T1 takes S. That happens because: |
624 | | + T1's S will wait for E (the writer is prioritized) |
625 | | + E will wait for T2's S lock (the writer needs exclusive access) |
626 | | + T2's S cannot be unlocked because T2 is blocked on P (owned by T1). |
627 | | |
628 | | Thus, shared_mutex<PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP> must be |
629 | | managed like an ordinary mutex in the deadlock sense. |
630 | | */ |
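The first scenario above ("T1: S -> S, T2: E") can be exercised directly against the reader-preferring alias defined just below; a minimal sketch (illustrative, assuming this header is included):

    #include <thread>

    void reader_recursion_example()
    {
        eprosima::shared_mutex sm;   // reader-preferring flavor selected below

        std::thread t1([&sm]()
                {
                    sm.lock_shared();
                    sm.lock_shared();    // recursive shared lock: safe with this flavor
                    sm.unlock_shared();
                    sm.unlock_shared();
                });

        std::thread t2([&sm]()
                {
                    sm.lock();           // exclusive lock waits until every reader has left
                    sm.unlock();
                });

        t1.join();
        t2.join();
    }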
631 | | |
632 | | namespace eprosima { |
633 | | |
634 | | #ifdef NDEBUG |
635 | | using shared_mutex = detail::shared_mutex<detail::shared_mutex_type::PTHREAD_RWLOCK_PREFER_READER_NP>; |
636 | | #else |
637 | | using shared_mutex = |
638 | | detail::debug_wrapper<detail::shared_mutex<detail::shared_mutex_type::PTHREAD_RWLOCK_PREFER_READER_NP>>; |
639 | | #endif // NDEBUG |
640 | | |
641 | | } //namespace eprosima |
642 | | |
643 | | #else // fallback to STL |
644 | | |
645 | | #include <shared_mutex> |
646 | | |
647 | | namespace eprosima { |
648 | | |
649 | | using std::shared_mutex; |
650 | | |
651 | | } //namespace eprosima |
652 | | |
653 | | #endif // shared_mutex selection |
654 | | |
655 | | #endif // _UTILS_SHARED_MUTEX_HPP_ |