/src/icu/icu4c/source/common/umutex.h
Line | Count | Source |
1 | | // © 2016 and later: Unicode, Inc. and others. |
2 | | // License & terms of use: http://www.unicode.org/copyright.html |
3 | | /* |
4 | | ********************************************************************** |
5 | | * Copyright (C) 1997-2015, International Business Machines |
6 | | * Corporation and others. All Rights Reserved. |
7 | | ********************************************************************** |
8 | | * |
9 | | * File UMUTEX.H |
10 | | * |
11 | | * Modification History: |
12 | | * |
13 | | * Date Name Description |
14 | | * 04/02/97 aliu Creation. |
15 | | * 04/07/99 srl rewrite - C interface, multiple mutexes |
16 | | * 05/13/99 stephen Changed to umutex (from cmutex) |
17 | | ****************************************************************************** |
18 | | */ |
19 | | |
20 | | #ifndef UMUTEX_H |
21 | | #define UMUTEX_H |
22 | | |
23 | | #include <atomic> |
24 | | #include <condition_variable> |
25 | | #include <mutex> |
26 | | #include <type_traits> |
27 | | |
28 | | #include "unicode/utypes.h" |
29 | | #include "unicode/uclean.h" |
30 | | #include "unicode/uobject.h" |
31 | | |
32 | | #include "putilimp.h" |
33 | | |
34 | | #if defined(U_USER_ATOMICS_H) || defined(U_USER_MUTEX_H) |
35 | | // Support for including an alternate implementation of atomic & mutex operations has been withdrawn. |
36 | | // See issue ICU-20185. |
37 | | #error U_USER_ATOMICS_H and U_USER_MUTEX_H are not supported |
38 | | #endif |
39 | | |
40 | | U_NAMESPACE_BEGIN |
41 | | |
42 | | /**************************************************************************** |
43 | | * |
44 | | * ICU wrappers for low-level atomic operations. |
45 | | * |
46 | | ****************************************************************************/ |
47 | | |
48 | | typedef std::atomic<int32_t> u_atomic_int32_t; |
49 | | |
50 | 1.47G | inline int32_t umtx_loadAcquire(u_atomic_int32_t &var) { |
51 | 1.47G | return var.load(std::memory_order_acquire); |
52 | 1.47G | } |
53 | | |
54 | 549k | inline void umtx_storeRelease(u_atomic_int32_t &var, int32_t val) { |
55 | 549k | var.store(val, std::memory_order_release); |
56 | 549k | } |
57 | | |
58 | 25.4M | inline int32_t umtx_atomic_inc(u_atomic_int32_t *var) { |
59 | 25.4M | return var->fetch_add(1) + 1; |
60 | 25.4M | } |
61 | | |
62 | 51.2M | inline int32_t umtx_atomic_dec(u_atomic_int32_t *var) { |
63 | 51.2M | return var->fetch_sub(1) - 1; |
64 | 51.2M | } |
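
These wrappers fix the memory ordering that the rest of this header depends on: every write a thread makes before umtx_storeRelease() is visible to any thread whose umtx_loadAcquire() observes the stored value. A minimal standalone sketch of that publication pattern, using std::atomic<int32_t> directly rather than the u_atomic_int32_t typedef:

    #include <atomic>
    #include <cassert>
    #include <cstdint>
    #include <thread>

    static int32_t payload = 0;               // ordinary, non-atomic data
    static std::atomic<int32_t> ready{0};     // plays the role of u_atomic_int32_t

    void writer() {
        payload = 42;                                // 1. prepare the data
        ready.store(1, std::memory_order_release);   // 2. publish, like umtx_storeRelease(ready, 1)
    }

    void reader() {
        // Like umtx_loadAcquire(ready): once 1 is observed, the earlier
        // write to payload in writer() is guaranteed visible to this thread.
        while (ready.load(std::memory_order_acquire) != 1) {}
        assert(payload == 42);   // cannot fire
    }

    int main() {
        std::thread t1(writer), t2(reader);
        t1.join();
        t2.join();
        return 0;
    }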
65 | | |
66 | | |
67 | | /************************************************************************************************* |
68 | | * |
69 | | * UInitOnce Definitions. |
70 | | * |
71 | | *************************************************************************************************/ |
72 | | |
73 | | struct U_COMMON_API_CLASS UInitOnce { |
74 | | private: |
75 | | friend U_COMMON_API UBool U_EXPORT2 umtx_initImplPreInit(UInitOnce&); |
76 | | friend U_COMMON_API void U_EXPORT2 umtx_initImplPostInit(UInitOnce&); |
77 | | template <typename T> friend void umtx_initOnce(UInitOnce&, T*, void (T::*)()); |
78 | | friend void umtx_initOnce(UInitOnce&, void (*)()); |
79 | | friend void umtx_initOnce(UInitOnce&, void (*)(UErrorCode&), UErrorCode&); |
80 | | template <typename T> friend void umtx_initOnce(UInitOnce&, void (*)(T), T); |
81 | | template <typename T> friend void umtx_initOnce(UInitOnce&, void (*)(T, UErrorCode&), T, UErrorCode&); |
82 | | |
83 | | u_atomic_int32_t fState{0}; |
84 | | UErrorCode fErrCode{U_ZERO_ERROR}; |
85 | | |
86 | | public: |
87 | 1.61M | U_COMMON_API void reset() { fState = 0; } |
88 | 104k | U_COMMON_API UBool isReset() { return umtx_loadAcquire(fState) == 0; } |
89 | | // Note: isReset() is used by service registration code. |
90 | | // Thread safety of this usage needs review. |
91 | | }; |
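
Only two fState values appear in this header: 0 (reset, per isReset()) and 2 (fully initialized, the fast-path check in every umtx_initOnce() variant below). The helpers declared next are implemented in umutex.cpp; the sketch below is an assumption about the protocol they implement, including a hypothetical in-progress state 1. The real helpers block waiters rather than spinning.

    #include <atomic>
    #include <cstdint>
    #include <thread>

    // Hypothetical sketch of the one-time-init handshake; not ICU's code.
    // Assumed states: 0 = not started, 1 = in progress, 2 = done.
    bool sketchPreInit(std::atomic<int32_t> &state) {
        int32_t expected = 0;
        if (state.compare_exchange_strong(expected, 1)) {
            return true;                      // caller won; it runs the init function
        }
        while (state.load(std::memory_order_acquire) != 2) {
            std::this_thread::yield();        // the real code waits on a condition variable
        }
        return false;                         // init already completed elsewhere
    }

    void sketchPostInit(std::atomic<int32_t> &state) {
        state.store(2, std::memory_order_release);  // publish completion to fast-path readers
    }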
92 | | |
93 | | U_COMMON_API UBool U_EXPORT2 umtx_initImplPreInit(UInitOnce &); |
94 | | U_COMMON_API void U_EXPORT2 umtx_initImplPostInit(UInitOnce &); |
95 | | |
96 | | template<class T> void umtx_initOnce(UInitOnce &uio, T *obj, void (U_CALLCONV T::*fp)()) { |
97 | | if (umtx_loadAcquire(uio.fState) == 2) { |
98 | | return; |
99 | | } |
100 | | if (umtx_initImplPreInit(uio)) { |
101 | | (obj->*fp)(); |
102 | | umtx_initImplPostInit(uio); |
103 | | } |
104 | | } |
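
A call site for this member-function variant might look like the following; MyService, fInitOnce, and initData() are illustrative names, not ICU API:

    class MyService {
    public:
        int32_t getData() {
            // initData() runs at most once, even with concurrent callers.
            umtx_initOnce(fInitOnce, this, &MyService::initData);
            return fData;
        }
    private:
        void initData() { fData = 42; }   // stands in for expensive one-time setup
        UInitOnce fInitOnce{};
        int32_t fData{0};
    };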
105 | | |
106 | | |
107 | | // umtx_initOnce variant for plain functions, or static class functions. |
108 | | // No context parameter. |
109 | 763k | inline void umtx_initOnce(UInitOnce &uio, void (U_CALLCONV *fp)()) { |
110 | 763k | if (umtx_loadAcquire(uio.fState) == 2) { |
111 | 763k | return; |
112 | 763k | } |
113 | 73 | if (umtx_initImplPreInit(uio)) { |
114 | 73 | (*fp)(); |
115 | 73 | umtx_initImplPostInit(uio); |
116 | 73 | } |
117 | 73 | } |
118 | | |
119 | | // umtx_initOnce variant for plain functions, or static class functions. |
120 | | // With UErrorCode; no context parameter. |
121 | 75.2M | inline void umtx_initOnce(UInitOnce &uio, void (U_CALLCONV *fp)(UErrorCode &), UErrorCode &errCode) { |
122 | 75.2M | if (U_FAILURE(errCode)) { |
123 | 1.52k | return; |
124 | 1.52k | } |
125 | 75.2M | if (umtx_loadAcquire(uio.fState) != 2 && umtx_initImplPreInit(uio)) { |
126 | | // We run the initialization. |
127 | 207 | (*fp)(errCode); |
128 | 207 | uio.fErrCode = errCode; |
129 | 207 | umtx_initImplPostInit(uio); |
130 | 75.2M | } else { |
131 | | // Someone else already ran the initialization. |
132 | 75.2M | if (U_FAILURE(uio.fErrCode)) { |
133 | 0 | errCode = uio.fErrCode; |
134 | 0 | } |
135 | 75.2M | } |
136 | 75.2M | } |
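
Per the code above, an error from the single run of the initializer is cached in fErrCode and replayed to every later caller. A hypothetical call site (gTablesInitOnce and loadTables are illustrative names):

    static UInitOnce gTablesInitOnce{};

    static void U_CALLCONV loadTables(UErrorCode &errorCode) {
        // one-time setup; set errorCode on failure
    }

    UBool tablesAvailable(UErrorCode &errorCode) {
        umtx_initOnce(gTablesInitOnce, &loadTables, errorCode);
        // U_FAILURE here covers both a fresh failure and one cached
        // from the first initialization attempt.
        return U_SUCCESS(errorCode);
    }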
137 | | |
138 | | // umtx_initOnce variant for plain functions, or static class functions, |
139 | | // with a context parameter. |
140 | | template<class T> void umtx_initOnce(UInitOnce &uio, void (U_CALLCONV *fp)(T), T context) { |
141 | | if (umtx_loadAcquire(uio.fState) == 2) { |
142 | | return; |
143 | | } |
144 | | if (umtx_initImplPreInit(uio)) { |
145 | | (*fp)(context); |
146 | | umtx_initImplPostInit(uio); |
147 | | } |
148 | | } |
149 | | |
150 | | // umtx_initOnce variant for plain functions, or static class functions, |
151 | | // with a context parameter and an error code. |
152 | 6.40M | template<class T> void umtx_initOnce(UInitOnce &uio, void (U_CALLCONV *fp)(T, UErrorCode &), T context, UErrorCode &errCode) { |
153 | 6.40M | if (U_FAILURE(errCode)) { |
154 | 0 | return; |
155 | 0 | } |
156 | 6.40M | if (umtx_loadAcquire(uio.fState) != 2 && umtx_initImplPreInit(uio)) { |
157 | | // We run the initialization. |
158 | 163 | (*fp)(context, errCode); |
159 | 163 | uio.fErrCode = errCode; |
160 | 163 | umtx_initImplPostInit(uio); |
161 | 6.40M | } else { |
162 | | // Someone else already ran the initialization. |
163 | 6.40M | if (U_FAILURE(uio.fErrCode)) { |
164 | 0 | errCode = uio.fErrCode; |
165 | 0 | } |
166 | 6.40M | } |
167 | 6.40M | } |
    |       | [Per-instantiation coverage for this template, collapsed: |
    |       |   umtx_initOnce<UPropertySource>            145k calls, initializer ran 70 times |
    |       |   umtx_initOnce<UProperty>                  233k calls, initializer ran 77 times |
    |       |   umtx_initOnce<char const*>                74.0k calls, initializer ran 11 times |
    |       |   umtx_initOnce<icu_79::Normalizer2Impl*>   5.94M calls, initializer ran 2 times |
    |       |   umtx_initOnce<USystemTimeZoneType>        4.98k calls, initializer ran 3 times |
    |       |   Unexecuted: umtx_initOnce<icu_79::OlsonTimeZone*>, umtx_initOnce<icu_79::CollationTailoring const*>] |
168 | | |
169 | | // UMutex should be constexpr-constructible, so that no initialization code |
170 | | // is run during startup. |
171 | | // This works on all C++ libraries except MS VS before VS2019. |
172 | | #if (defined(_CPPLIB_VER) && !defined(_MSVC_STL_VERSION)) || \ |
173 | | (defined(_MSVC_STL_VERSION) && _MSVC_STL_VERSION < 142) |
174 | | // (VS std lib older than VS2017) || (VS std lib version < VS2019) |
175 | | # define UMUTEX_CONSTEXPR |
176 | | #else |
177 | | # define UMUTEX_CONSTEXPR constexpr |
178 | | #endif |
179 | | |
180 | | /** |
181 | | * UMutex - ICU Mutex class. |
182 | | * |
183 | | * This is the preferred Mutex class for use within ICU implementation code. |
184 | | * It is a thin wrapper over C++ std::mutex, with these additions: |
185 | | * - Static instances are safe: they trigger no static construction or destruction, |
186 | | * avoiding the associated order-of-construction and order-of-destruction issues. |
187 | | * - Plumbed into u_cleanup(), which destructs the underlying std::mutex and |
188 | | * frees any OS-level resources it may be holding. |
189 | | * |
190 | | * Limitations: |
191 | | * - Static or global instances only. Cannot be heap allocated. Cannot appear as a |
192 | | * member of another class. |
193 | | * - No condition variables or other advanced features. If those are needed, use |
194 | | * std::mutex and std::condition_variable directly. For an example, see unifiedcache.cpp. |
195 | | * |
196 | | * Typical Usage: |
197 | | * static UMutex myMutex; |
198 | | * |
199 | | * { |
200 | | * Mutex lock(myMutex); |
201 | | * ... // Do stuff that is protected by myMutex; |
202 | | * } // myMutex is released when lock goes out of scope. |
203 | | */ |
204 | | |
205 | | class U_COMMON_API_CLASS UMutex { |
206 | | public: |
207 | 0 | U_COMMON_API UMUTEX_CONSTEXPR UMutex() {} |
208 | | U_COMMON_API ~UMutex() = default; |
209 | | |
210 | | UMutex(const UMutex& other) = delete; |
211 | | UMutex& operator=(const UMutex& other) = delete; |
212 | | void* operator new(size_t) = delete; |
213 | | |
214 | | // requirements for C++ BasicLockable, allows UMutex to work with std::lock_guard |
215 | 63.4M | U_COMMON_API void lock() { |
216 | 63.4M | std::mutex *m = fMutex.load(std::memory_order_acquire); |
217 | 63.4M | if (m == nullptr) { m = getMutex(); } |
218 | 63.4M | m->lock(); |
219 | 63.4M | } |
220 | 63.4M | U_COMMON_API void unlock() { fMutex.load(std::memory_order_relaxed)->unlock(); } |
221 | | |
222 | | U_COMMON_API static void cleanup(); |
223 | | |
224 | | private: |
225 | | alignas(std::mutex) char fStorage[sizeof(std::mutex)] {}; |
226 | | std::atomic<std::mutex *> fMutex { nullptr }; |
227 | | |
228 | | /** All initialized UMutexes are kept in a linked list, so that they can be found, |
229 | | * and the underlying std::mutex destructed, by u_cleanup(). |
230 | | */ |
231 | | UMutex *fListLink { nullptr }; |
232 | | static UMutex *gListHead; |
233 | | |
234 | | /** Out-of-line function to lazily initialize a UMutex on first use. |
235 | | * Initial fast check is inline, in lock(). The returned value may never |
236 | | * be nullptr. |
237 | | */ |
238 | | std::mutex *getMutex(); |
239 | | }; |
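
Since lock() and unlock() meet the BasicLockable requirements, a UMutex composes with the standard scoped guards as well as with ICU's own Mutex wrapper; a minimal sketch:

    static UMutex gCounterMutex;   // static storage only, per the limitations above
    static int32_t gCounter = 0;

    void incrementCounter() {
        std::lock_guard<UMutex> lock(gCounterMutex);  // locks now, unlocks at scope exit
        ++gCounter;
    }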
240 | | |
241 | | |
242 | | /* Lock a mutex. |
243 | | * @param mutex The given mutex to be locked. Pass NULL to specify |
244 | | * the global ICU mutex. Recursive locks are an error |
245 | | * and may cause a deadlock on some platforms. |
246 | | */ |
247 | | U_CAPI void U_EXPORT2 umtx_lock(UMutex* mutex); |
248 | | |
249 | | /* Unlock a mutex. |
250 | | * @param mutex The given mutex to be unlocked. Pass NULL to specify |
251 | | * the global ICU mutex. |
252 | | */ |
253 | | U_CAPI void U_EXPORT2 umtx_unlock(UMutex* mutex); |
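
As the comments above note, nullptr selects the global ICU mutex and the lock is not recursive; callers must pair the unlock on every path. A sketch:

    void touchGlobalState() {
        umtx_lock(nullptr);        // take the global ICU mutex
        // ... modify shared state; do not call umtx_lock(nullptr) again here ...
        umtx_unlock(nullptr);      // must be reached on every path
    }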
254 | | |
255 | | |
256 | | U_NAMESPACE_END |
257 | | |
258 | | #endif /* UMUTEX_H */ |
259 | | /*eof*/ |