/src/FreeRDP/winpr/libwinpr/synch/critical.c
/**
 * WinPR: Windows Portable Runtime
 * Synchronization Functions
 *
 * Copyright 2012 Marc-Andre Moreau <marcandre.moreau@gmail.com>
 * Copyright 2013 Norbert Federa <norbert.federa@thincast.com>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <winpr/config.h>

#include <winpr/assert.h>
#include <winpr/tchar.h>
#include <winpr/synch.h>
#include <winpr/sysinfo.h>
#include <winpr/interlocked.h>
#include <winpr/thread.h>

#include "synch.h"

#ifdef WINPR_HAVE_UNISTD_H
#include <unistd.h>
#endif

#if defined(__APPLE__)
#include <mach/task.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#endif

#ifndef _WIN32

#include "../log.h"
#define TAG WINPR_TAG("synch.critical")

VOID InitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	InitializeCriticalSectionEx(lpCriticalSection, 0, 0);
}

BOOL InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount,
                                 DWORD Flags)
{
	WINPR_ASSERT(lpCriticalSection);
	/**
	 * See http://msdn.microsoft.com/en-us/library/ff541979(v=vs.85).aspx
	 * - The LockCount field indicates the number of times that any thread has
	 *   called the EnterCriticalSection routine for this critical section,
	 *   minus one. This field starts at -1 for an unlocked critical section.
	 *   Each call of EnterCriticalSection increments this value; each call of
	 *   LeaveCriticalSection decrements it.
	 * - The RecursionCount field indicates the number of times that the owning
	 *   thread has called EnterCriticalSection for this critical section.
	 */
	if (Flags != 0)
	{
		WLog_WARN(TAG, "Flags unimplemented");
	}

	lpCriticalSection->DebugInfo = NULL;
	lpCriticalSection->LockCount = -1;
	lpCriticalSection->SpinCount = 0;
	lpCriticalSection->RecursionCount = 0;
	lpCriticalSection->OwningThread = NULL;
	lpCriticalSection->LockSemaphore = (winpr_sem_t*)malloc(sizeof(winpr_sem_t));

	if (!lpCriticalSection->LockSemaphore)
		return FALSE;

#if defined(__APPLE__)

	if (semaphore_create(mach_task_self(), lpCriticalSection->LockSemaphore, SYNC_POLICY_FIFO, 0) !=
	    KERN_SUCCESS)
		goto out_fail;

#else

	if (sem_init(lpCriticalSection->LockSemaphore, 0, 0) != 0)
		goto out_fail;

#endif
	SetCriticalSectionSpinCount(lpCriticalSection, dwSpinCount);
	return TRUE;
out_fail:
	free(lpCriticalSection->LockSemaphore);
	return FALSE;
}
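
/*
 * A minimal usage sketch of the lifecycle documented above (not part of the
 * library; guarded by WINPR_CS_USAGE_EXAMPLES, a hypothetical define that no
 * build sets). It illustrates the LockCount/RecursionCount invariants from
 * the MSDN reference: LockCount starts at -1 when unlocked, and the owning
 * thread's recursion depth is tracked in RecursionCount.
 */
#if defined(WINPR_CS_USAGE_EXAMPLES)
static void critical_section_lifecycle_example(void)
{
	CRITICAL_SECTION cs;

	/* LockCount is -1 and OwningThread is NULL after initialization. */
	InitializeCriticalSection(&cs);

	/* First acquisition: LockCount becomes 0, RecursionCount becomes 1. */
	EnterCriticalSection(&cs);

	/* ... access the shared state here ... */

	/* Matching release: LockCount drops back to -1, the section is free again. */
	LeaveCriticalSection(&cs);

	/* Release the backing semaphore once no thread can use the section. */
	DeleteCriticalSection(&cs);
}
#endif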

BOOL InitializeCriticalSectionAndSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
{
	return InitializeCriticalSectionEx(lpCriticalSection, dwSpinCount, 0);
}

DWORD SetCriticalSectionSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
{
	WINPR_ASSERT(lpCriticalSection);
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	SYSTEM_INFO sysinfo;
	DWORD dwPreviousSpinCount = lpCriticalSection->SpinCount;

	if (dwSpinCount)
	{
		/* Don't spin on uniprocessor systems! */
		GetNativeSystemInfo(&sysinfo);

		if (sysinfo.dwNumberOfProcessors < 2)
			dwSpinCount = 0;
	}

	lpCriticalSection->SpinCount = dwSpinCount;
	return dwPreviousSpinCount;
#else
	return 0;
#endif
}

static VOID WaitForCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);
#if defined(__APPLE__)
	semaphore_wait(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
#else
	sem_wait((winpr_sem_t*)lpCriticalSection->LockSemaphore);
#endif
}

static VOID UnWaitCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);
#if defined(__APPLE__)
	semaphore_signal(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
#else
	sem_post((winpr_sem_t*)lpCriticalSection->LockSemaphore);
#endif
}

VOID EnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	ULONG SpinCount = lpCriticalSection->SpinCount;

	/* If we're lucky, or if the current thread is already the owner, we can return early */
	if (SpinCount && TryEnterCriticalSection(lpCriticalSection))
		return;

	/* Spin the requested number of times, but don't compete with another waiting thread */
	while (SpinCount-- && lpCriticalSection->LockCount < 1)
	{
		/* Atomically acquire the lock if the section is free. */
		if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
		{
			lpCriticalSection->RecursionCount = 1;
			lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
			return;
		}

		/* Failed to get the lock. Let the scheduler know that we're spinning. */
		if (sched_yield() != 0)
		{
			/**
			 * On some operating systems sched_yield is a stub.
			 * usleep should at least trigger a context switch if any thread is waiting.
			 * A ThreadYield() would be nice in winpr ...
			 */
			usleep(1);
		}
	}

#endif

	/* First try the fastest possible path to get the lock. */
	if (InterlockedIncrement(&lpCriticalSection->LockCount))
	{
		/* Section is already locked. Check if it is owned by the current thread. */
		if (lpCriticalSection->OwningThread == (HANDLE)(ULONG_PTR)GetCurrentThreadId())
		{
			/* Recursion. No need to wait. */
			lpCriticalSection->RecursionCount++;
			return;
		}

		/* Section is locked by another thread. We have to wait. */
		WaitForCriticalSection(lpCriticalSection);
	}

	/* We got the lock. Own it ... */
	lpCriticalSection->RecursionCount = 1;
	lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
}
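
/*
 * A sketch of how the spin path above is typically exercised (illustrative
 * only, guarded by the hypothetical WINPR_CS_USAGE_EXAMPLES define). With a
 * non-zero spin count, a contended EnterCriticalSection busy-waits for up to
 * dwSpinCount iterations before falling back to the semaphore; on a
 * uniprocessor system SetCriticalSectionSpinCount forces the count to 0.
 */
#if defined(WINPR_CS_USAGE_EXAMPLES)
static CRITICAL_SECTION g_short_lock;

static void spin_count_example(void)
{
	/* 4000 is a value commonly cited for short critical sections; the best
	 * count for a given workload has to be measured, not assumed. */
	if (!InitializeCriticalSectionAndSpinCount(&g_short_lock, 4000))
		return;

	EnterCriticalSection(&g_short_lock); /* spins briefly under contention */
	/* ... short critical region ... */
	LeaveCriticalSection(&g_short_lock);

	DeleteCriticalSection(&g_short_lock);
}
#endif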

BOOL TryEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	HANDLE current_thread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();

	WINPR_ASSERT(lpCriticalSection);

	/* Atomically acquire the lock if the section is free. */
	if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
	{
		lpCriticalSection->RecursionCount = 1;
		lpCriticalSection->OwningThread = current_thread;
		return TRUE;
	}

	/* Section is already locked. Check if it is owned by the current thread. */
	if (lpCriticalSection->OwningThread == current_thread)
	{
		/* Recursion, return success */
		lpCriticalSection->RecursionCount++;
		InterlockedIncrement(&lpCriticalSection->LockCount);
		return TRUE;
	}

	return FALSE;
}
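
/*
 * A sketch of the non-blocking pattern TryEnterCriticalSection enables
 * (illustrative only, guarded by the hypothetical WINPR_CS_USAGE_EXAMPLES
 * define): instead of blocking on a busy section, report failure so the
 * caller can do other work and retry.
 */
#if defined(WINPR_CS_USAGE_EXAMPLES)
static BOOL try_update_shared_state(CRITICAL_SECTION* cs)
{
	if (!TryEnterCriticalSection(cs))
		return FALSE; /* held by another thread; caller can retry later */

	/* ... update the shared state ... */

	LeaveCriticalSection(cs);
	return TRUE;
}
#endif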

VOID LeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);

	/* Decrement RecursionCount and check if this is the last LeaveCriticalSection call ... */
	if (--lpCriticalSection->RecursionCount < 1)
	{
		/* Last recursion: clear the owner, unlock, and if there are other waiting threads ... */
		lpCriticalSection->OwningThread = NULL;

		if (InterlockedDecrement(&lpCriticalSection->LockCount) >= 0)
		{
			/* ... signal the semaphore to unblock the next waiting thread */
			UnWaitCriticalSection(lpCriticalSection);
		}
	}
	else
	{
		InterlockedDecrement(&lpCriticalSection->LockCount);
	}
}
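
/*
 * A two-thread sketch of the blocking hand-off above (illustrative only,
 * guarded by the hypothetical WINPR_CS_USAGE_EXAMPLES define): when the
 * holder leaves a contended section, the LockCount decrement stays >= 0 and
 * the semaphore is signaled, waking one thread blocked in
 * WaitForCriticalSection. Uses WinPR's Windows-style thread API, which the
 * headers included above provide.
 */
#if defined(WINPR_CS_USAGE_EXAMPLES)
static CRITICAL_SECTION g_handoff_lock;
static int g_counter = 0;

static DWORD WINAPI handoff_worker(LPVOID arg)
{
	(void)arg;
	EnterCriticalSection(&g_handoff_lock); /* may block on the semaphore */
	g_counter++;
	LeaveCriticalSection(&g_handoff_lock); /* signals a waiter, if any */
	return 0;
}

static void handoff_example(void)
{
	HANDLE thread = NULL;

	InitializeCriticalSection(&g_handoff_lock);

	thread = CreateThread(NULL, 0, handoff_worker, NULL, 0, NULL);
	if (thread)
	{
		EnterCriticalSection(&g_handoff_lock);
		g_counter++;
		LeaveCriticalSection(&g_handoff_lock);

		(void)WaitForSingleObject(thread, INFINITE);
		(void)CloseHandle(thread);
	}

	DeleteCriticalSection(&g_handoff_lock);
}
#endif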

VOID DeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);

	lpCriticalSection->LockCount = -1;
	lpCriticalSection->SpinCount = 0;
	lpCriticalSection->RecursionCount = 0;
	lpCriticalSection->OwningThread = NULL;

	if (lpCriticalSection->LockSemaphore != NULL)
	{
#if defined(__APPLE__)
		semaphore_destroy(mach_task_self(), *((winpr_sem_t*)lpCriticalSection->LockSemaphore));
#else
		sem_destroy((winpr_sem_t*)lpCriticalSection->LockSemaphore);
#endif
		free(lpCriticalSection->LockSemaphore);
		lpCriticalSection->LockSemaphore = NULL;
	}
}

#endif