// /src/icu/icu4c/source/common/umutex.h
// © 2016 and later: Unicode, Inc. and others.
// License & terms of use: http://www.unicode.org/copyright.html
/*
**********************************************************************
*   Copyright (C) 1997-2015, International Business Machines
*   Corporation and others.  All Rights Reserved.
**********************************************************************
*
*  File UMUTEX.H
*
*  Modification History:
*
*   Date        Name        Description
*   04/02/97    aliu        Creation.
*   04/07/99    srl         rewrite - C interface, multiple mutices
*   05/13/99    stephen     Changed to umutex (from cmutex)
******************************************************************************
*/

#ifndef UMUTEX_H
#define UMUTEX_H

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <type_traits>

#include "unicode/utypes.h"
#include "unicode/uclean.h"
#include "unicode/uobject.h"

#include "putilimp.h"

#if defined(U_USER_ATOMICS_H) || defined(U_USER_MUTEX_H)
// Support for including an alternate implementation of atomic & mutex operations has been withdrawn.
// See issue ICU-20185.
#error U_USER_ATOMICS and U_USER_MUTEX_H are not supported
#endif

// Export an explicit template instantiation of std::atomic<int32_t>.
// When building DLLs for Windows this is required as it is used as a data member of the exported SharedObject class.
// See digitlst.h, pluralaffix.h, datefmt.h, and others for similar examples.
//
// Similar story for std::atomic<std::mutex *>, and the exported UMutex class.
#if U_PF_WINDOWS <= U_PLATFORM && U_PLATFORM <= U_PF_CYGWIN && !defined(U_IN_DOXYGEN)
#if defined(__clang__) || defined(_MSC_VER)
#if defined(__clang__)
// Suppress the warning that the explicit instantiation after explicit specialization has no effect.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winstantiation-after-specialization"
#endif
template struct U_COMMON_API std::atomic<int32_t>;
template struct U_COMMON_API std::atomic<std::mutex *>;
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
#elif defined(__GNUC__)
// For GCC this class is already exported/visible, so no need for U_COMMON_API.
template struct std::atomic<int32_t>;
template struct std::atomic<std::mutex *>;
#endif
#endif


U_NAMESPACE_BEGIN

/****************************************************************************
 *
 *   Low Level Atomic Operations, ICU wrappers for C++ std::atomic.
 *
 ****************************************************************************/

typedef std::atomic<int32_t> u_atomic_int32_t;

inline int32_t umtx_loadAcquire(u_atomic_int32_t &var) {
    return var.load(std::memory_order_acquire);
}

inline void umtx_storeRelease(u_atomic_int32_t &var, int32_t val) {
    var.store(val, std::memory_order_release);
}

inline int32_t umtx_atomic_inc(u_atomic_int32_t *var) {
    return var->fetch_add(1) + 1;
}

inline int32_t umtx_atomic_dec(u_atomic_int32_t *var) {
    return var->fetch_sub(1) - 1;
}
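
// Illustrative sketch (not part of the ICU sources): a writer publishes data with
// umtx_storeRelease() and readers check the flag with umtx_loadAcquire(); the
// release/acquire pairing guarantees that writes made before the store are visible
// to any reader that observes the flag. gDataReady and gValue are hypothetical names.
//
//     static u_atomic_int32_t gDataReady {0};
//     static int32_t gValue = 0;
//
//     void writer() {
//         gValue = 42;                        // plain write, ordered before the release store
//         umtx_storeRelease(gDataReady, 1);   // publish
//     }
//
//     void reader() {
//         if (umtx_loadAcquire(gDataReady) == 1) {
//             // gValue is guaranteed to read as 42 here.
//         }
//     }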


/*************************************************************************************************
 *
 *  UInitOnce Definitions.
 *
 *************************************************************************************************/

struct U_COMMON_API UInitOnce {
    u_atomic_int32_t   fState {0};   // 0 = not initialized, 1 = initialization in progress,
                                     // 2 = initialization complete.
    UErrorCode       fErrCode {U_ZERO_ERROR};
    void reset() {fState = 0;}
    UBool isReset() {return umtx_loadAcquire(fState) == 0;}
// Note: isReset() is used by service registration code.
//       Thread safety of this usage needs review.
};

U_COMMON_API UBool U_EXPORT2 umtx_initImplPreInit(UInitOnce &);
U_COMMON_API void  U_EXPORT2 umtx_initImplPostInit(UInitOnce &);

template<class T> void umtx_initOnce(UInitOnce &uio, T *obj, void (U_CALLCONV T::*fp)()) {
    if (umtx_loadAcquire(uio.fState) == 2) {
        return;
    }
    if (umtx_initImplPreInit(uio)) {
        (obj->*fp)();
        umtx_initImplPostInit(uio);
    }
}
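
// Illustrative sketch (not part of the ICU sources): one-time initialization through
// the member-function-pointer variant above. MyService and initData are hypothetical
// names; this assumes U_CALLCONV expands to nothing, as it does on most platforms.
//
//     class MyService {
//       public:
//         void ensureInit() { umtx_initOnce(fInitOnce, this, &MyService::initData); }
//       private:
//         void initData();          // runs at most once, on the first calling thread
//         UInitOnce fInitOnce {};
//     };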


// umtx_initOnce variant for plain functions, or static class functions.
//            No context parameter.
inline void umtx_initOnce(UInitOnce &uio, void (U_CALLCONV *fp)()) {
    if (umtx_loadAcquire(uio.fState) == 2) {
        return;
    }
    if (umtx_initImplPreInit(uio)) {
        (*fp)();
        umtx_initImplPostInit(uio);
    }
}

// umtx_initOnce variant for plain functions, or static class functions.
//            With ErrorCode, No context parameter.
inline void umtx_initOnce(UInitOnce &uio, void (U_CALLCONV *fp)(UErrorCode &), UErrorCode &errCode) {
    if (U_FAILURE(errCode)) {
        return;
    }
    if (umtx_loadAcquire(uio.fState) != 2 && umtx_initImplPreInit(uio)) {
        // We run the initialization.
        (*fp)(errCode);
        uio.fErrCode = errCode;
        umtx_initImplPostInit(uio);
    } else {
        // Someone else already ran the initialization.
        if (U_FAILURE(uio.fErrCode)) {
            errCode = uio.fErrCode;
        }
    }
}

// umtx_initOnce variant for plain functions, or static class functions,
//            with a context parameter.
template<class T> void umtx_initOnce(UInitOnce &uio, void (U_CALLCONV *fp)(T), T context) {
    if (umtx_loadAcquire(uio.fState) == 2) {
        return;
    }
    if (umtx_initImplPreInit(uio)) {
        (*fp)(context);
        umtx_initImplPostInit(uio);
    }
}

// umtx_initOnce variant for plain functions, or static class functions,
//            with a context parameter and an error code.
template<class T> void umtx_initOnce(UInitOnce &uio, void (U_CALLCONV *fp)(T, UErrorCode &), T context, UErrorCode &errCode) {
    if (U_FAILURE(errCode)) {
        return;
    }
    if (umtx_loadAcquire(uio.fState) != 2 && umtx_initImplPreInit(uio)) {
        // We run the initialization.
        (*fp)(context, errCode);
        uio.fErrCode = errCode;
        umtx_initImplPostInit(uio);
    } else {
        // Someone else already ran the initialization.
        if (U_FAILURE(uio.fErrCode)) {
            errCode = uio.fErrCode;
        }
    }
}
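
// Illustrative sketch (not part of the ICU sources): the usual lazy-initialization
// pattern with the error-code variant. SomeCache, gCache, gCacheInitOnce, and
// initCache are hypothetical names. If the first run fails, the error stored in
// fErrCode is replayed to every later caller.
//
//     static UInitOnce gCacheInitOnce {};
//     static SomeCache *gCache = nullptr;
//
//     static void U_CALLCONV initCache(UErrorCode &status) {
//         gCache = new SomeCache(status);    // executed by exactly one thread
//     }
//
//     SomeCache *getCache(UErrorCode &status) {
//         umtx_initOnce(gCacheInitOnce, &initCache, status);
//         return U_FAILURE(status) ? nullptr : gCache;
//     }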

// UMutex should be constexpr-constructible, so that no initialization code
// is run during startup.
// This works on all C++ libraries except MS VS before VS2019.
#if (defined(_CPPLIB_VER) && !defined(_MSVC_STL_VERSION)) || \
        (defined(_MSVC_STL_VERSION) && _MSVC_STL_VERSION < 142)
    // (VS std lib older than VS2017) || (VS std lib version < VS2019)
# define UMUTEX_CONSTEXPR
#else
# define UMUTEX_CONSTEXPR constexpr
#endif

/**
 * UMutex - ICU Mutex class.
 *
 * This is the preferred Mutex class for use within ICU implementation code.
 * It is a thin wrapper over C++ std::mutex, with these additions:
 *    - Static instances are safe, not triggering static construction or destruction,
 *      and avoiding the associated order-of-construction and order-of-destruction issues.
 *    - Plumbed into u_cleanup() for destructing the underlying std::mutex,
 *      which frees any OS level resources it may be holding.
 *
 * Limitations:
 *    - Static or global instances only. Cannot be heap allocated. Cannot appear as a
 *      member of another class.
 *    - No condition variables or other advanced features. If needed, you will need to use
 *      std::mutex and std::condition_variable directly. For an example, see unifiedcache.cpp.
 *
 * Typical Usage:
 *    static UMutex myMutex;
 *
 *    {
 *        Mutex lock(myMutex);
 *        ...    // Do stuff that is protected by myMutex.
 *    }         // myMutex is released when lock goes out of scope.
 */

class U_COMMON_API UMutex {
public:
    UMUTEX_CONSTEXPR UMutex() {}
    ~UMutex() = default;

    UMutex(const UMutex &other) = delete;
    UMutex &operator =(const UMutex &other) = delete;
    void *operator new(size_t) = delete;

    // requirements for C++ BasicLockable, allows UMutex to work with std::lock_guard
    void lock() {
        std::mutex *m = fMutex.load(std::memory_order_acquire);
        if (m == nullptr) { m = getMutex(); }
        m->lock();
    }
    // A relaxed load suffices here: unlock() is only valid after a successful
    // lock() by the same thread, which already published fMutex.
    void unlock() { fMutex.load(std::memory_order_relaxed)->unlock(); }

    static void cleanup();

private:
    alignas(std::mutex) char fStorage[sizeof(std::mutex)] {};
    std::atomic<std::mutex *> fMutex { nullptr };

    /** All initialized UMutexes are kept in a linked list, so that they can be found,
     *  and the underlying std::mutex destructed, by u_cleanup().
     */
    UMutex *fListLink { nullptr };
    static UMutex *gListHead;

    /** Out-of-line function to lazily initialize a UMutex on first use.
     *  Initial fast check is inline, in lock(). The returned value is never nullptr.
     */
    std::mutex *getMutex();
};
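
// Illustrative sketch (not part of the ICU sources): because UMutex satisfies
// BasicLockable, it also works with the standard RAII guards. gCounterMutex and
// gCounter are hypothetical names.
//
//     static UMutex gCounterMutex;
//     static int32_t gCounter = 0;
//
//     void incrementCounter() {
//         std::lock_guard<UMutex> lock(gCounterMutex);   // calls UMutex::lock()
//         ++gCounter;                                    // protected region
//     }                                                  // unlock() runs when lock is destroyed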


/* Lock a mutex.
 * @param mutex The given mutex to be locked.  Pass NULL to specify
 *              the global ICU mutex.  Recursive locks are an error
 *              and may cause a deadlock on some platforms.
 */
U_CAPI void U_EXPORT2 umtx_lock(UMutex* mutex);

/* Unlock a mutex.
 * @param mutex The given mutex to be unlocked.  Pass NULL to specify
 *              the global ICU mutex.
 */
U_CAPI void U_EXPORT2 umtx_unlock(UMutex* mutex);
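
// Illustrative sketch (not part of the ICU sources): guarding a critical section
// with the C-style API. gRegistryMutex and addToRegistry are hypothetical names;
// passing nullptr instead would select the global ICU mutex.
//
//     static UMutex gRegistryMutex;
//
//     void addToRegistry(void *item) {
//         umtx_lock(&gRegistryMutex);
//         // ... mutate shared state; do not lock recursively ...
//         umtx_unlock(&gRegistryMutex);
//     }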


U_NAMESPACE_END

#endif /* UMUTEX_H */
/*eof*/