/src/FreeRDP/winpr/libwinpr/synch/critical.c
/**
 * WinPR: Windows Portable Runtime
 * Synchronization Functions
 *
 * Copyright 2012 Marc-Andre Moreau <marcandre.moreau@gmail.com>
 * Copyright 2013 Norbert Federa <norbert.federa@thincast.com>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <winpr/config.h>

#include <winpr/assert.h>
#include <winpr/tchar.h>
#include <winpr/synch.h>
#include <winpr/sysinfo.h>
#include <winpr/interlocked.h>
#include <winpr/thread.h>

#include "synch.h"

#ifdef WINPR_HAVE_UNISTD_H
#include <unistd.h>
#endif

#if defined(__APPLE__)
#include <mach/task.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#endif

#ifndef _WIN32

#include "../log.h"
#define TAG WINPR_TAG("synch.critical")
VOID InitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	InitializeCriticalSectionEx(lpCriticalSection, 0, 0);
}

BOOL InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount,
                                 DWORD Flags)
{
	WINPR_ASSERT(lpCriticalSection);
	/**
	 * See http://msdn.microsoft.com/en-us/library/ff541979(v=vs.85).aspx
	 * - The LockCount field indicates the number of times that any thread has
	 *   called the EnterCriticalSection routine for this critical section,
	 *   minus one. This field starts at -1 for an unlocked critical section.
	 *   Each call of EnterCriticalSection increments this value; each call of
	 *   LeaveCriticalSection decrements it.
	 * - The RecursionCount field indicates the number of times that the owning
	 *   thread has called EnterCriticalSection for this critical section.
	 */
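	/*
	 * Illustrative trace of those two fields for one thread acquiring the
	 * section recursively (an added example, assuming no contention):
	 *
	 *   state                                   LockCount   RecursionCount
	 *   after initialization                           -1                0
	 *   after EnterCriticalSection                      0                1
	 *   after recursive EnterCriticalSection            1                2
	 *   after first LeaveCriticalSection                0                1
	 *   after final LeaveCriticalSection               -1                0
	 */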
	if (Flags != 0)
	{
		WLog_WARN(TAG, "Flags unimplemented");
	}

	lpCriticalSection->DebugInfo = NULL;
	lpCriticalSection->LockCount = -1;
	lpCriticalSection->SpinCount = 0;
	lpCriticalSection->RecursionCount = 0;
	lpCriticalSection->OwningThread = NULL;
	lpCriticalSection->LockSemaphore = (winpr_sem_t*)malloc(sizeof(winpr_sem_t));

	if (!lpCriticalSection->LockSemaphore)
		return FALSE;

#if defined(__APPLE__)

	if (semaphore_create(mach_task_self(), lpCriticalSection->LockSemaphore, SYNC_POLICY_FIFO, 0) !=
	    KERN_SUCCESS)
		goto out_fail;

#else

	if (sem_init(lpCriticalSection->LockSemaphore, 0, 0) != 0)
		goto out_fail;

#endif
	SetCriticalSectionSpinCount(lpCriticalSection, dwSpinCount);
	return TRUE;
out_fail:
	free(lpCriticalSection->LockSemaphore);
	return FALSE;
}

BOOL InitializeCriticalSectionAndSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
{
	return InitializeCriticalSectionEx(lpCriticalSection, dwSpinCount, 0);
}
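
/*
 * Minimal usage sketch for the API implemented in this file. This block is
 * an added illustration, kept out of the build with #if 0; the names
 * g_lock, g_counter and example_lifecycle are hypothetical.
 */
#if 0
static CRITICAL_SECTION g_lock;
static int g_counter = 0;

static void example_lifecycle(void)
{
	/* MSDN cites roughly 4000 as the spin count the heap manager uses. */
	if (!InitializeCriticalSectionAndSpinCount(&g_lock, 4000))
		return;

	EnterCriticalSection(&g_lock);
	g_counter++; /* access shared state only while holding the lock */
	LeaveCriticalSection(&g_lock);

	DeleteCriticalSection(&g_lock);
}
#endif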

DWORD SetCriticalSectionSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
{
	WINPR_ASSERT(lpCriticalSection);
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	SYSTEM_INFO sysinfo;
	DWORD dwPreviousSpinCount = lpCriticalSection->SpinCount;

	if (dwSpinCount)
	{
		/* Don't spin on uniprocessor systems! */
		GetNativeSystemInfo(&sysinfo);

		if (sysinfo.dwNumberOfProcessors < 2)
			dwSpinCount = 0;
	}

	lpCriticalSection->SpinCount = dwSpinCount;
	return dwPreviousSpinCount;
#else
	return 0;
#endif
}

static VOID _WaitForCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);
#if defined(__APPLE__)
	semaphore_wait(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
#else
	sem_wait((winpr_sem_t*)lpCriticalSection->LockSemaphore);
#endif
}

static VOID _UnWaitCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);
#if defined(__APPLE__)
	semaphore_signal(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
#else
	sem_post((winpr_sem_t*)lpCriticalSection->LockSemaphore);
#endif
}

VOID EnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	ULONG SpinCount = lpCriticalSection->SpinCount;

	/* If we're lucky, or if the current thread is already the owner, we can return early */
	if (SpinCount && TryEnterCriticalSection(lpCriticalSection))
		return;

	/* Spin the requested number of times, but don't compete with another waiting thread */
	while (SpinCount-- && lpCriticalSection->LockCount < 1)
	{
		/* Atomically try to acquire the lock if the section is free. */
		if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
		{
			lpCriticalSection->RecursionCount = 1;
			lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
			return;
		}

		/* Failed to get the lock. Let the scheduler know that we're spinning. */
		if (sched_yield() != 0)
		{
			/**
			 * On some operating systems sched_yield is a stub.
			 * usleep should at least trigger a context switch if any thread is waiting.
			 * A ThreadYield() would be nice in winpr ...
			 */
			usleep(1);
		}
	}

#endif

	/* First try the fastest possible path to get the lock. */
	if (InterlockedIncrement(&lpCriticalSection->LockCount))
	{
		/* Section is already locked. Check if it is owned by the current thread. */
		if (lpCriticalSection->OwningThread == (HANDLE)(ULONG_PTR)GetCurrentThreadId())
		{
			/* Recursion. No need to wait. */
			lpCriticalSection->RecursionCount++;
			return;
		}

		/* Section is locked by another thread. We have to wait. */
		_WaitForCriticalSection(lpCriticalSection);
	}

	/* We got the lock. Own it ... */
	lpCriticalSection->RecursionCount = 1;
	lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
}

BOOL TryEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	HANDLE current_thread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();

	WINPR_ASSERT(lpCriticalSection);

	/* Atomically acquire the lock if the section is free. */
	if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
	{
		lpCriticalSection->RecursionCount = 1;
		lpCriticalSection->OwningThread = current_thread;
		return TRUE;
	}

	/* Section is already locked. Check if it is owned by the current thread. */
	if (lpCriticalSection->OwningThread == current_thread)
	{
		/* Recursion, return success */
		lpCriticalSection->RecursionCount++;
		InterlockedIncrement(&lpCriticalSection->LockCount);
		return TRUE;
	}

	return FALSE;
}
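
/*
 * Illustrative non-blocking pattern built on TryEnterCriticalSection. An
 * added sketch kept out of the build with #if 0; try_update and its
 * parameters are hypothetical names.
 */
#if 0
static BOOL try_update(CRITICAL_SECTION* lock, int* value)
{
	/* Skip the update instead of blocking when the section is contended. */
	if (!TryEnterCriticalSection(lock))
		return FALSE;

	(*value)++;
	LeaveCriticalSection(lock);
	return TRUE;
}
#endif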

VOID LeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);

	/* Decrement RecursionCount and check if this is the last LeaveCriticalSection call ... */
	if (--lpCriticalSection->RecursionCount < 1)
	{
		/* Last recursion, clear owner, unlock and if there are other waiting threads ... */
		lpCriticalSection->OwningThread = NULL;

		if (InterlockedDecrement(&lpCriticalSection->LockCount) >= 0)
		{
			/* ... signal the semaphore to unblock the next waiting thread */
			_UnWaitCriticalSection(lpCriticalSection);
		}
	}
	else
	{
		InterlockedDecrement(&lpCriticalSection->LockCount);
	}
}

VOID DeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);

	lpCriticalSection->LockCount = -1;
	lpCriticalSection->SpinCount = 0;
	lpCriticalSection->RecursionCount = 0;
	lpCriticalSection->OwningThread = NULL;

	if (lpCriticalSection->LockSemaphore != NULL)
	{
#if defined(__APPLE__)
		semaphore_destroy(mach_task_self(), *((winpr_sem_t*)lpCriticalSection->LockSemaphore));
#else
		sem_destroy((winpr_sem_t*)lpCriticalSection->LockSemaphore);
#endif
		free(lpCriticalSection->LockSemaphore);
		lpCriticalSection->LockSemaphore = NULL;
	}
}

#endif