/src/FreeRDP/winpr/libwinpr/synch/critical.c
/**
 * WinPR: Windows Portable Runtime
 * Synchronization Functions
 *
 * Copyright 2012 Marc-Andre Moreau <marcandre.moreau@gmail.com>
 * Copyright 2013 Norbert Federa <norbert.federa@thincast.com>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <winpr/config.h>

#include <winpr/assert.h>
#include <winpr/tchar.h>
#include <winpr/synch.h>
#include <winpr/sysinfo.h>
#include <winpr/interlocked.h>
#include <winpr/thread.h>

#include "synch.h"

#ifdef WINPR_HAVE_UNISTD_H
#include <unistd.h>
#endif

#if defined(__APPLE__)
#include <mach/task.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#endif

#ifndef _WIN32

#include "../log.h"
#define TAG WINPR_TAG("synch.critical")

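/**
 * Initialize a critical section with no flags and a spin count of zero.
 * Thin wrapper around InitializeCriticalSectionEx.
 */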
VOID InitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	InitializeCriticalSectionEx(lpCriticalSection, 0, 0);
}

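/**
 * Initialize a critical section with the given spin count.
 * The Flags argument is currently unimplemented and is only logged.
 * Returns FALSE if the backing semaphore cannot be allocated or created.
 */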
52 | | BOOL InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount, |
53 | | DWORD Flags) |
54 | 0 | { |
55 | 0 | WINPR_ASSERT(lpCriticalSection); |
56 | | /** |
57 | | * See http://msdn.microsoft.com/en-us/library/ff541979(v=vs.85).aspx |
58 | | * - The LockCount field indicates the number of times that any thread has |
59 | | * called the EnterCriticalSection routine for this critical section, |
60 | | * minus one. This field starts at -1 for an unlocked critical section. |
61 | | * Each call of EnterCriticalSection increments this value; each call of |
62 | | * LeaveCriticalSection decrements it. |
63 | | * - The RecursionCount field indicates the number of times that the owning |
64 | | * thread has called EnterCriticalSection for this critical section. |
65 | | */ |
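	/*
	 * Worked example of the state transitions described above, for a single
	 * thread: after initialization LockCount == -1; the first
	 * EnterCriticalSection gives LockCount == 0, RecursionCount == 1; a
	 * recursive EnterCriticalSection gives LockCount == 1, RecursionCount == 2;
	 * each LeaveCriticalSection steps both back until LockCount is -1 again.
	 */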
	if (Flags != 0)
	{
		WLog_WARN(TAG, "Flags unimplemented");
	}

	lpCriticalSection->DebugInfo = NULL;
	lpCriticalSection->LockCount = -1;
	lpCriticalSection->SpinCount = 0;
	lpCriticalSection->RecursionCount = 0;
	lpCriticalSection->OwningThread = NULL;
	lpCriticalSection->LockSemaphore = (winpr_sem_t*)malloc(sizeof(winpr_sem_t));

	if (!lpCriticalSection->LockSemaphore)
		return FALSE;

#if defined(__APPLE__)

	if (semaphore_create(mach_task_self(), lpCriticalSection->LockSemaphore, SYNC_POLICY_FIFO, 0) !=
	    KERN_SUCCESS)
		goto out_fail;

#else

	if (sem_init(lpCriticalSection->LockSemaphore, 0, 0) != 0)
		goto out_fail;

#endif
	SetCriticalSectionSpinCount(lpCriticalSection, dwSpinCount);
	return TRUE;
out_fail:
	free(lpCriticalSection->LockSemaphore);
	return FALSE;
}

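/**
 * Initialize a critical section with the given spin count and no flags.
 */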
BOOL InitializeCriticalSectionAndSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
{
	return InitializeCriticalSectionEx(lpCriticalSection, dwSpinCount, 0);
}

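/**
 * Set the spin count used by EnterCriticalSection before blocking and
 * return the previous spin count. On uniprocessor systems the value is
 * forced to zero, since spinning cannot help the owner make progress.
 * Compiles to a no-op returning 0 when
 * WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT is defined.
 */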
DWORD SetCriticalSectionSpinCount(WINPR_ATTR_UNUSED LPCRITICAL_SECTION lpCriticalSection,
                                  WINPR_ATTR_UNUSED DWORD dwSpinCount)
{
	WINPR_ASSERT(lpCriticalSection);
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	SYSTEM_INFO sysinfo;
	DWORD dwPreviousSpinCount = lpCriticalSection->SpinCount;

	if (dwSpinCount)
	{
		/* Don't spin on uniprocessor systems! */
		GetNativeSystemInfo(&sysinfo);

		if (sysinfo.dwNumberOfProcessors < 2)
			dwSpinCount = 0;
	}

	lpCriticalSection->SpinCount = dwSpinCount;
	return dwPreviousSpinCount;
#else
	// WLog_ERR("TODO", "TODO: implement");
	return 0;
#endif
}

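/**
 * Block the calling thread on the section's semaphore until the current
 * owner signals it from LeaveCriticalSection.
 */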
static VOID WaitForCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);
#if defined(__APPLE__)
	semaphore_wait(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
#else
	sem_wait((winpr_sem_t*)lpCriticalSection->LockSemaphore);
#endif
}

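/**
 * Signal the section's semaphore, waking one thread blocked in
 * WaitForCriticalSection.
 */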
static VOID UnWaitCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);
#if defined(__APPLE__)
	semaphore_signal(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
#else
	sem_post((winpr_sem_t*)lpCriticalSection->LockSemaphore);
#endif
}

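/**
 * Acquire the critical section. If a spin count is configured, first try
 * to take the lock while spinning; otherwise (or if spinning fails) use a
 * single atomic increment as the fast path and fall back to blocking on
 * the semaphore while another thread owns the section. A recursive
 * acquisition by the owning thread only bumps RecursionCount.
 */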
VOID EnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	ULONG SpinCount = lpCriticalSection->SpinCount;

	/* If we're lucky, or if the current thread already owns the section, we can return early */
	if (SpinCount && TryEnterCriticalSection(lpCriticalSection))
		return;

	/* Spin the requested number of times, but don't compete with another waiting thread */
	while (SpinCount-- && lpCriticalSection->LockCount < 1)
	{
		/* Atomically check whether the section is free and acquire it if so. */
		if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
		{
			lpCriticalSection->RecursionCount = 1;
			lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
			return;
		}

		/* Failed to get the lock. Let the scheduler know that we're spinning. */
		if (sched_yield() != 0)
		{
			/**
			 * On some operating systems sched_yield is a stub.
			 * usleep should at least trigger a context switch if any thread is waiting.
			 * A ThreadYield() would be nice in winpr ...
			 */
			usleep(1);
		}
	}

#endif

	/* First try the fastest possible path to get the lock. */
	if (InterlockedIncrement(&lpCriticalSection->LockCount))
	{
		/* Section is already locked. Check if it is owned by the current thread. */
		if (lpCriticalSection->OwningThread == (HANDLE)(ULONG_PTR)GetCurrentThreadId())
		{
			/* Recursion. No need to wait. */
			lpCriticalSection->RecursionCount++;
			return;
		}

		/* Section is locked by another thread. We have to wait. */
		WaitForCriticalSection(lpCriticalSection);
	}

	/* We got the lock. Own it ... */
	lpCriticalSection->RecursionCount = 1;
	lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
}

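/**
 * Try to acquire the critical section without blocking. Returns TRUE if
 * the section was free or is already owned by the calling thread, FALSE
 * if another thread currently owns it.
 */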
BOOL TryEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	HANDLE current_thread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();

	WINPR_ASSERT(lpCriticalSection);

	/* Atomically acquire the lock if the section is free. */
	if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
	{
		lpCriticalSection->RecursionCount = 1;
		lpCriticalSection->OwningThread = current_thread;
		return TRUE;
	}

	/* Section is already locked. Check if it is owned by the current thread. */
	if (lpCriticalSection->OwningThread == current_thread)
	{
		/* Recursion, return success */
		lpCriticalSection->RecursionCount++;
		InterlockedIncrement(&lpCriticalSection->LockCount);
		return TRUE;
	}

	return FALSE;
}

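/**
 * Release the critical section once. Only the last release of a recursive
 * acquisition clears the owner and, if other threads are waiting, signals
 * the semaphore to hand the lock over.
 */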
VOID LeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);

	/* Decrement RecursionCount and check if this is the last LeaveCriticalSection call ... */
	if (--lpCriticalSection->RecursionCount < 1)
	{
		/* Last recursion: clear the owner, unlock, and if other threads are waiting ... */
		lpCriticalSection->OwningThread = NULL;

		if (InterlockedDecrement(&lpCriticalSection->LockCount) >= 0)
		{
			/* ... signal the semaphore to unblock the next waiting thread */
			UnWaitCriticalSection(lpCriticalSection);
		}
	}
	else
	{
		(void)InterlockedDecrement(&lpCriticalSection->LockCount);
	}
}

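/**
 * Reset the section's fields, then destroy and free the backing semaphore.
 * As with the Windows API, the section must not be owned or waited on when
 * this is called.
 */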
VOID DeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);

	lpCriticalSection->LockCount = -1;
	lpCriticalSection->SpinCount = 0;
	lpCriticalSection->RecursionCount = 0;
	lpCriticalSection->OwningThread = NULL;

	if (lpCriticalSection->LockSemaphore != NULL)
	{
#if defined(__APPLE__)
		semaphore_destroy(mach_task_self(), *((winpr_sem_t*)lpCriticalSection->LockSemaphore));
#else
		sem_destroy((winpr_sem_t*)lpCriticalSection->LockSemaphore);
#endif
		free(lpCriticalSection->LockSemaphore);
		lpCriticalSection->LockSemaphore = NULL;
	}
}
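
/*
 * Illustrative caller-side sketch of the API above (not part of the
 * original file; the spin count of 4000 is an arbitrary example value):
 *
 *   CRITICAL_SECTION cs;
 *   if (!InitializeCriticalSectionAndSpinCount(&cs, 4000))
 *       return FALSE;
 *   EnterCriticalSection(&cs);
 *   // ... access the shared state protected by cs ...
 *   LeaveCriticalSection(&cs);
 *   DeleteCriticalSection(&cs);
 */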

#endif