/src/FreeRDP/winpr/libwinpr/synch/critical.c
Line | Count | Source |
1 | | /** |
2 | | * WinPR: Windows Portable Runtime |
3 | | * Synchronization Functions |
4 | | * |
5 | | * Copyright 2012 Marc-Andre Moreau <marcandre.moreau@gmail.com> |
6 | | * Copyright 2013 Norbert Federa <norbert.federa@thincast.com> |
7 | | * |
8 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
9 | | * you may not use this file except in compliance with the License. |
10 | | * You may obtain a copy of the License at |
11 | | * |
12 | | * http://www.apache.org/licenses/LICENSE-2.0 |
13 | | * |
14 | | * Unless required by applicable law or agreed to in writing, software |
15 | | * distributed under the License is distributed on an "AS IS" BASIS, |
16 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
17 | | * See the License for the specific language governing permissions and |
18 | | * limitations under the License. |
19 | | */ |
20 | | |
21 | | #include <winpr/config.h> |
22 | | |
23 | | #include <winpr/assert.h> |
24 | | #include <winpr/tchar.h> |
25 | | #include <winpr/synch.h> |
26 | | #include <winpr/sysinfo.h> |
27 | | #include <winpr/interlocked.h> |
28 | | #include <winpr/thread.h> |
29 | | |
30 | | #include "synch.h" |
31 | | |
32 | | #ifdef WINPR_HAVE_UNISTD_H |
33 | | #include <unistd.h> |
34 | | #endif |
35 | | |
36 | | #if defined(__APPLE__) |
37 | | #include <mach/task.h> |
38 | | #include <mach/mach.h> |
39 | | #include <mach/semaphore.h> |
40 | | #endif |
41 | | |
42 | | #ifndef _WIN32 |
43 | | |
44 | | #include "../log.h" |
45 | | #define TAG WINPR_TAG("synch.critical") |
46 | | |
47 | | VOID InitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection) |
48 | 0 | { |
49 | 0 | if (!InitializeCriticalSectionEx(lpCriticalSection, 0, 0)) |
50 | 0 | WLog_ERR(TAG, "InitializeCriticalSectionEx failed"); |
51 | 0 | } |
52 | | |
53 | | BOOL InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount, |
54 | | DWORD Flags) |
55 | 35.3k | { |
56 | 35.3k | WINPR_ASSERT(lpCriticalSection); |
57 | | /** |
58 | | * See http://msdn.microsoft.com/en-us/library/ff541979(v=vs.85).aspx |
59 | | * - The LockCount field indicates the number of times that any thread has |
60 | | * called the EnterCriticalSection routine for this critical section, |
61 | | * minus one. This field starts at -1 for an unlocked critical section. |
62 | | * Each call of EnterCriticalSection increments this value; each call of |
63 | | * LeaveCriticalSection decrements it. |
64 | | * - The RecursionCount field indicates the number of times that the owning |
65 | | * thread has called EnterCriticalSection for this critical section. |
66 | | */ |
67 | 35.3k | if (Flags != 0) |
68 | 0 | { |
69 | 0 | WLog_WARN(TAG, "Flags unimplemented"); |
70 | 0 | } |
71 | | |
72 | 35.3k | lpCriticalSection->DebugInfo = nullptr; |
73 | 35.3k | lpCriticalSection->LockCount = -1; |
74 | 35.3k | lpCriticalSection->SpinCount = 0; |
75 | 35.3k | lpCriticalSection->RecursionCount = 0; |
76 | 35.3k | lpCriticalSection->OwningThread = nullptr; |
77 | 35.3k | lpCriticalSection->LockSemaphore = (winpr_sem_t*)malloc(sizeof(winpr_sem_t)); |
78 | | |
79 | 35.3k | if (!lpCriticalSection->LockSemaphore) |
80 | 0 | return FALSE; |
81 | | |
82 | | #if defined(__APPLE__) |
83 | | |
84 | | if (semaphore_create(mach_task_self(), lpCriticalSection->LockSemaphore, SYNC_POLICY_FIFO, 0) != |
85 | | KERN_SUCCESS) |
86 | | goto out_fail; |
87 | | |
88 | | #else |
89 | | |
90 | 35.3k | if (sem_init(lpCriticalSection->LockSemaphore, 0, 0) != 0) |
91 | 0 | goto out_fail; |
92 | | |
93 | 35.3k | #endif |
94 | 35.3k | SetCriticalSectionSpinCount(lpCriticalSection, dwSpinCount); |
95 | 35.3k | return TRUE; |
96 | 0 | out_fail: |
97 | 0 | free(lpCriticalSection->LockSemaphore); |
98 | 0 | return FALSE; |
99 | 35.3k | } |
100 | | |
101 | | BOOL InitializeCriticalSectionAndSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount) |
102 | 35.3k | { |
103 | 35.3k | return InitializeCriticalSectionEx(lpCriticalSection, dwSpinCount, 0); |
104 | 35.3k | } |
105 | | |
106 | | DWORD SetCriticalSectionSpinCount(WINPR_ATTR_UNUSED LPCRITICAL_SECTION lpCriticalSection, |
107 | | WINPR_ATTR_UNUSED DWORD dwSpinCount) |
108 | 35.3k | { |
109 | 35.3k | WINPR_ASSERT(lpCriticalSection); |
110 | | #if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT) |
111 | | SYSTEM_INFO sysinfo; |
112 | | DWORD dwPreviousSpinCount = lpCriticalSection->SpinCount; |
113 | | |
114 | | if (dwSpinCount) |
115 | | { |
116 | | /* Don't spin on uniprocessor systems! */ |
117 | | GetNativeSystemInfo(&sysinfo); |
118 | | |
119 | | if (sysinfo.dwNumberOfProcessors < 2) |
120 | | dwSpinCount = 0; |
121 | | } |
122 | | |
123 | | lpCriticalSection->SpinCount = dwSpinCount; |
124 | | return dwPreviousSpinCount; |
125 | | #else |
126 | | // WLog_ERR("TODO", "TODO: implement"); |
127 | 35.3k | return 0; |
128 | 35.3k | #endif |
129 | 35.3k | } |
130 | | |
131 | | static VOID WaitForCriticalSection(LPCRITICAL_SECTION lpCriticalSection) |
132 | 0 | { |
133 | 0 | WINPR_ASSERT(lpCriticalSection); |
134 | 0 | WINPR_ASSERT(lpCriticalSection->LockSemaphore); |
135 | |
|
136 | | #if defined(__APPLE__) |
137 | | semaphore_wait(*((winpr_sem_t*)lpCriticalSection->LockSemaphore)); |
138 | | #else |
139 | 0 | sem_wait((winpr_sem_t*)lpCriticalSection->LockSemaphore); |
140 | 0 | #endif |
141 | 0 | } |
142 | | |
143 | | static VOID UnWaitCriticalSection(LPCRITICAL_SECTION lpCriticalSection) |
144 | 0 | { |
145 | 0 | WINPR_ASSERT(lpCriticalSection); |
146 | 0 | WINPR_ASSERT(lpCriticalSection->LockSemaphore); |
147 | | #if defined __APPLE__ |
148 | | semaphore_signal(*((winpr_sem_t*)lpCriticalSection->LockSemaphore)); |
149 | | #else |
150 | 0 | sem_post((winpr_sem_t*)lpCriticalSection->LockSemaphore); |
151 | 0 | #endif |
152 | 0 | } |
153 | | |
/**
 * Acquire the critical section, blocking (after an optional spin phase)
 * until it is available. Re-entrant: the owning thread may enter again
 * and must balance every entry with a LeaveCriticalSection call.
 */
VOID EnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	ULONG SpinCount = lpCriticalSection->SpinCount;

	/* If we're lucky or if the current thread is already owner we can return early */
	if (SpinCount && TryEnterCriticalSection(lpCriticalSection))
		return;

	/* Spin requested times but don't compete with another waiting thread
	 * (LockCount >= 1 means at least one other thread is already queued). */
	while (SpinCount-- && lpCriticalSection->LockCount < 1)
	{
		/* Atomically try to acquire and check if the section is free
		 * (-1 == unlocked; swapping in 0 claims it as the sole holder). */
		if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
		{
			lpCriticalSection->RecursionCount = 1;
			lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
			return;
		}

		/* Failed to get the lock. Let the scheduler know that we're spinning. */
		if (sched_yield() != 0)
		{
			/**
			 * On some operating systems sched_yield is a stub.
			 * usleep should at least trigger a context switch if any thread is waiting.
			 * A ThreadYield() would be nice in winpr ...
			 */
			usleep(1);
		}
	}

#endif

	/* First try the fastest possible path to get the lock.
	 * InterlockedIncrement moves LockCount from -1 to 0 when we are the
	 * only contender; any non-zero result means someone holds the lock. */
	if (InterlockedIncrement(&lpCriticalSection->LockCount))
	{
		/* Section is already locked. Check if it is owned by the current thread. */
		if (lpCriticalSection->OwningThread == (HANDLE)(ULONG_PTR)GetCurrentThreadId())
		{
			/* Recursion. No need to wait. */
			lpCriticalSection->RecursionCount++;
			return;
		}

		/* Section is locked by another thread. We have to wait.
		 * (Our increment above already queued us; the owner's
		 * LeaveCriticalSection will signal the semaphore.) */
		WaitForCriticalSection(lpCriticalSection);
	}

	/* We got the lock. Own it ... */
	lpCriticalSection->RecursionCount = 1;
	lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
}
208 | | |
209 | | BOOL TryEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection) |
210 | 0 | { |
211 | 0 | HANDLE current_thread = (HANDLE)(ULONG_PTR)GetCurrentThreadId(); |
212 | |
|
213 | 0 | WINPR_ASSERT(lpCriticalSection); |
214 | | |
215 | | /* Atomically acquire the the lock if the section is free. */ |
216 | 0 | if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1) |
217 | 0 | { |
218 | 0 | lpCriticalSection->RecursionCount = 1; |
219 | 0 | lpCriticalSection->OwningThread = current_thread; |
220 | 0 | return TRUE; |
221 | 0 | } |
222 | | |
223 | | /* Section is already locked. Check if it is owned by the current thread. */ |
224 | 0 | if (lpCriticalSection->OwningThread == current_thread) |
225 | 0 | { |
226 | | /* Recursion, return success */ |
227 | 0 | lpCriticalSection->RecursionCount++; |
228 | 0 | InterlockedIncrement(&lpCriticalSection->LockCount); |
229 | 0 | return TRUE; |
230 | 0 | } |
231 | | |
232 | 0 | return FALSE; |
233 | 0 | } |
234 | | |
/**
 * Release one level of ownership of the critical section. Only when the
 * outermost (non-recursive) entry is released is the section unlocked and,
 * if other threads are queued, the semaphore signalled to wake one of them.
 */
VOID LeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);

	/* Decrement RecursionCount and check if this is the last LeaveCriticalSection call ...*/
	if (--lpCriticalSection->RecursionCount < 1)
	{
		/* Last recursion, clear owner, unlock and if there are other waiting threads ... */
		lpCriticalSection->OwningThread = nullptr;

		/* A result >= 0 means at least one other thread incremented
		 * LockCount in EnterCriticalSection and is blocked waiting. */
		if (InterlockedDecrement(&lpCriticalSection->LockCount) >= 0)
		{
			/* ...signal the semaphore to unblock the next waiting thread */
			UnWaitCriticalSection(lpCriticalSection);
		}
	}
	else
	{
		/* Still held recursively by this thread: only balance the
		 * LockCount increment done by the matching Enter call. */
		(void)InterlockedDecrement(&lpCriticalSection->LockCount);
	}
}
256 | | |
257 | | VOID DeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection) |
258 | 35.3k | { |
259 | 35.3k | WINPR_ASSERT(lpCriticalSection); |
260 | | |
261 | 35.3k | lpCriticalSection->LockCount = -1; |
262 | 35.3k | lpCriticalSection->SpinCount = 0; |
263 | 35.3k | lpCriticalSection->RecursionCount = 0; |
264 | 35.3k | lpCriticalSection->OwningThread = nullptr; |
265 | | |
266 | 35.3k | if (lpCriticalSection->LockSemaphore != nullptr) |
267 | 35.3k | { |
268 | | #if defined __APPLE__ |
269 | | semaphore_destroy(mach_task_self(), *((winpr_sem_t*)lpCriticalSection->LockSemaphore)); |
270 | | #else |
271 | 35.3k | sem_destroy((winpr_sem_t*)lpCriticalSection->LockSemaphore); |
272 | 35.3k | #endif |
273 | 35.3k | free(lpCriticalSection->LockSemaphore); |
274 | 35.3k | lpCriticalSection->LockSemaphore = nullptr; |
275 | 35.3k | } |
276 | 35.3k | } |
277 | | |
278 | | #endif |