Coverage Report

Created: 2025-07-01 06:46

/src/FreeRDP/winpr/libwinpr/synch/critical.c
Line  Count  Source
   1         /**
   2          * WinPR: Windows Portable Runtime
   3          * Synchronization Functions
   4          *
   5          * Copyright 2012 Marc-Andre Moreau <marcandre.moreau@gmail.com>
   6          * Copyright 2013 Norbert Federa <norbert.federa@thincast.com>
   7          *
   8          * Licensed under the Apache License, Version 2.0 (the "License");
   9          * you may not use this file except in compliance with the License.
  10          * You may obtain a copy of the License at
  11          *
  12          *     http://www.apache.org/licenses/LICENSE-2.0
  13          *
  14          * Unless required by applicable law or agreed to in writing, software
  15          * distributed under the License is distributed on an "AS IS" BASIS,
  16          * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  17          * See the License for the specific language governing permissions and
  18          * limitations under the License.
  19          */
  20
  21         #include <winpr/config.h>
  22
  23         #include <winpr/assert.h>
  24         #include <winpr/tchar.h>
  25         #include <winpr/synch.h>
  26         #include <winpr/sysinfo.h>
  27         #include <winpr/interlocked.h>
  28         #include <winpr/thread.h>
  29
  30         #include "synch.h"
  31
  32         #ifdef WINPR_HAVE_UNISTD_H
  33         #include <unistd.h>
  34         #endif
  35
  36         #if defined(__APPLE__)
  37         #include <mach/task.h>
  38         #include <mach/mach.h>
  39         #include <mach/semaphore.h>
  40         #endif
  41
  42         #ifndef _WIN32
  43
  44         #include "../log.h"
  45         #define TAG WINPR_TAG("synch.critical")
  46
  47         VOID InitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
  48      0  {
  49      0    InitializeCriticalSectionEx(lpCriticalSection, 0, 0);
  50      0  }
  51
  52         BOOL InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount,
  53                                          DWORD Flags)
  54      4  {
  55      4    WINPR_ASSERT(lpCriticalSection);
  56           /**
  57            * See http://msdn.microsoft.com/en-us/library/ff541979(v=vs.85).aspx
  58            * - The LockCount field indicates the number of times that any thread has
  59            *   called the EnterCriticalSection routine for this critical section,
  60            *   minus one. This field starts at -1 for an unlocked critical section.
  61            *   Each call of EnterCriticalSection increments this value; each call of
  62            *   LeaveCriticalSection decrements it.
  63            * - The RecursionCount field indicates the number of times that the owning
  64            *   thread has called EnterCriticalSection for this critical section.
  65            */
  66      4    if (Flags != 0)
  67      0    {
  68      0      WLog_WARN(TAG, "Flags unimplemented");
  69      0    }
  70
  71      4    lpCriticalSection->DebugInfo = NULL;
  72      4    lpCriticalSection->LockCount = -1;
  73      4    lpCriticalSection->SpinCount = 0;
  74      4    lpCriticalSection->RecursionCount = 0;
  75      4    lpCriticalSection->OwningThread = NULL;
  76      4    lpCriticalSection->LockSemaphore = (winpr_sem_t*)malloc(sizeof(winpr_sem_t));
  77
  78      4    if (!lpCriticalSection->LockSemaphore)
  79      0      return FALSE;
  80
  81         #if defined(__APPLE__)
  82
  83           if (semaphore_create(mach_task_self(), lpCriticalSection->LockSemaphore, SYNC_POLICY_FIFO, 0) !=
  84               KERN_SUCCESS)
  85             goto out_fail;
  86
  87         #else
  88
  89      4    if (sem_init(lpCriticalSection->LockSemaphore, 0, 0) != 0)
  90      0      goto out_fail;
  91
  92      4  #endif
  93      4    SetCriticalSectionSpinCount(lpCriticalSection, dwSpinCount);
  94      4    return TRUE;
  95      0  out_fail:
  96      0    free(lpCriticalSection->LockSemaphore);
  97      0    return FALSE;
  98      4  }
  99
 100         BOOL InitializeCriticalSectionAndSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
 101      4  {
 102      4    return InitializeCriticalSectionEx(lpCriticalSection, dwSpinCount, 0);
 103      4  }
 104
 105         DWORD SetCriticalSectionSpinCount(WINPR_ATTR_UNUSED LPCRITICAL_SECTION lpCriticalSection,
 106                                           WINPR_ATTR_UNUSED DWORD dwSpinCount)
 107      4  {
 108      4    WINPR_ASSERT(lpCriticalSection);
 109         #if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
 110           SYSTEM_INFO sysinfo;
 111           DWORD dwPreviousSpinCount = lpCriticalSection->SpinCount;
 112
 113           if (dwSpinCount)
 114           {
 115             /* Don't spin on uniprocessor systems! */
 116             GetNativeSystemInfo(&sysinfo);
 117
 118             if (sysinfo.dwNumberOfProcessors < 2)
 119               dwSpinCount = 0;
 120           }
 121
 122           lpCriticalSection->SpinCount = dwSpinCount;
 123           return dwPreviousSpinCount;
 124         #else
 125           // WLog_ERR("TODO", "TODO: implement");
 126      4    return 0;
 127      4  #endif
 128      4  }
 129
 130         static VOID WaitForCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
 131      0  {
 132      0    WINPR_ASSERT(lpCriticalSection);
 133         #if defined(__APPLE__)
 134           semaphore_wait(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
 135         #else
 136      0    sem_wait((winpr_sem_t*)lpCriticalSection->LockSemaphore);
 137      0  #endif
 138      0  }
 139
 140         static VOID UnWaitCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
 141      0  {
 142      0    WINPR_ASSERT(lpCriticalSection);
 143         #if defined __APPLE__
 144           semaphore_signal(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
 145         #else
 146      0    sem_post((winpr_sem_t*)lpCriticalSection->LockSemaphore);
 147      0  #endif
 148      0  }
 149
 150         VOID EnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
 151  1.02k  {
 152  1.02k    WINPR_ASSERT(lpCriticalSection);
 153         #if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
 154           ULONG SpinCount = lpCriticalSection->SpinCount;
 155
 156           /* If we're lucky or if the current thread is already owner we can return early */
 157           if (SpinCount && TryEnterCriticalSection(lpCriticalSection))
 158             return;
 159
 160           /* Spin requested times but don't compete with another waiting thread */
 161           while (SpinCount-- && lpCriticalSection->LockCount < 1)
 162           {
 163             /* Atomically try to acquire and check the if the section is free. */
 164             if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
 165             {
 166               lpCriticalSection->RecursionCount = 1;
 167               lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
 168               return;
 169             }
 170
 171             /* Failed to get the lock. Let the scheduler know that we're spinning. */
 172             if (sched_yield() != 0)
 173             {
 174               /**
 175                * On some operating systems sched_yield is a stub.
 176                * usleep should at least trigger a context switch if any thread is waiting.
 177                * A ThreadYield() would be nice in winpr ...
 178                */
 179               usleep(1);
 180             }
 181           }
 182
 183         #endif
 184
 185           /* First try the fastest possible path to get the lock. */
 186  1.02k    if (InterlockedIncrement(&lpCriticalSection->LockCount))
 187      0    {
 188             /* Section is already locked. Check if it is owned by the current thread. */
 189      0      if (lpCriticalSection->OwningThread == (HANDLE)(ULONG_PTR)GetCurrentThreadId())
 190      0      {
 191               /* Recursion. No need to wait. */
 192      0        lpCriticalSection->RecursionCount++;
 193      0        return;
 194      0      }
 195
 196             /* Section is locked by another thread. We have to wait. */
 197      0      WaitForCriticalSection(lpCriticalSection);
 198      0    }
 199
 200           /* We got the lock. Own it ... */
 201  1.02k    lpCriticalSection->RecursionCount = 1;
 202  1.02k    lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
 203  1.02k  }
 204
 205         BOOL TryEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
 206      0  {
 207      0    HANDLE current_thread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
 208
 209      0    WINPR_ASSERT(lpCriticalSection);
 210
 211           /* Atomically acquire the the lock if the section is free. */
 212      0    if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
 213      0    {
 214      0      lpCriticalSection->RecursionCount = 1;
 215      0      lpCriticalSection->OwningThread = current_thread;
 216      0      return TRUE;
 217      0    }
 218
 219           /* Section is already locked. Check if it is owned by the current thread. */
 220      0    if (lpCriticalSection->OwningThread == current_thread)
 221      0    {
 222             /* Recursion, return success */
 223      0      lpCriticalSection->RecursionCount++;
 224      0      InterlockedIncrement(&lpCriticalSection->LockCount);
 225      0      return TRUE;
 226      0    }
 227
 228      0    return FALSE;
 229      0  }
 230
 231         VOID LeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
 232  1.02k  {
 233  1.02k    WINPR_ASSERT(lpCriticalSection);
 234
 235           /* Decrement RecursionCount and check if this is the last LeaveCriticalSection call ...*/
 236  1.02k    if (--lpCriticalSection->RecursionCount < 1)
 237  1.02k    {
 238             /* Last recursion, clear owner, unlock and if there are other waiting threads ... */
 239  1.02k      lpCriticalSection->OwningThread = NULL;
 240
 241  1.02k      if (InterlockedDecrement(&lpCriticalSection->LockCount) >= 0)
 242      0      {
 243               /* ...signal the semaphore to unblock the next waiting thread */
 244      0        UnWaitCriticalSection(lpCriticalSection);
 245      0      }
 246  1.02k    }
 247      0    else
 248      0    {
 249      0      (void)InterlockedDecrement(&lpCriticalSection->LockCount);
 250      0    }
 251  1.02k  }
 252
 253         VOID DeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
 254      4  {
 255      4    WINPR_ASSERT(lpCriticalSection);
 256
 257      4    lpCriticalSection->LockCount = -1;
 258      4    lpCriticalSection->SpinCount = 0;
 259      4    lpCriticalSection->RecursionCount = 0;
 260      4    lpCriticalSection->OwningThread = NULL;
 261
 262      4    if (lpCriticalSection->LockSemaphore != NULL)
 263      4    {
 264         #if defined __APPLE__
 265             semaphore_destroy(mach_task_self(), *((winpr_sem_t*)lpCriticalSection->LockSemaphore));
 266         #else
 267      4      sem_destroy((winpr_sem_t*)lpCriticalSection->LockSemaphore);
 268      4  #endif
 269      4      free(lpCriticalSection->LockSemaphore);
 270      4      lpCriticalSection->LockSemaphore = NULL;
 271      4    }
 272      4  }
 273
 274         #endif
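
Note on the uncovered lines: every zero-count line above belongs to a path the recorded run never took: the plain InitializeCriticalSection() wrapper (lines 47-50), the failure paths in InitializeCriticalSectionEx(), the blocking helpers WaitForCriticalSection()/UnWaitCriticalSection() (lines 130-148), the contention and recursion branches of EnterCriticalSection() (186-198) and LeaveCriticalSection() (241-250), and all of TryEnterCriticalSection() (205-229). The sketch below is a minimal, hypothetical driver, not taken from the FreeRDP sources, showing how those paths could be reached. The critical-section calls are the WinPR functions listed in this report; the thread helpers (CreateThread, WaitForSingleObject, CloseHandle), the worker/counter names, and the iteration counts are illustrative assumptions.

#include <winpr/synch.h>
#include <winpr/thread.h>
#include <winpr/handle.h>

static CRITICAL_SECTION cs;  /* hypothetical test fixture, not part of the report */
static LONG counter = 0;     /* shared state protected by cs */

static DWORD WINAPI worker(LPVOID arg)
{
	(void)arg;

	for (int i = 0; i < 100000; i++)
	{
		/* Contended acquisition: with two workers this will normally end up in
		 * the blocking branch (WaitForCriticalSection / UnWaitCriticalSection). */
		EnterCriticalSection(&cs);

		/* Nested acquisition exercises the RecursionCount branches of
		 * EnterCriticalSection (lines 187-194) and LeaveCriticalSection (247-250). */
		EnterCriticalSection(&cs);
		counter++;
		LeaveCriticalSection(&cs);

		LeaveCriticalSection(&cs);
	}

	return 0;
}

int main(void)
{
	/* Covers the plain wrapper at lines 47-50; it sets a spin count of 0,
	 * so contention goes straight to the semaphore path. */
	InitializeCriticalSection(&cs);

	HANDLE threads[2] = { NULL, NULL };

	for (int i = 0; i < 2; i++)
		threads[i] = CreateThread(NULL, 0, worker, NULL, 0, NULL);

	/* Covers TryEnterCriticalSection (lines 205-229) from the main thread. */
	if (TryEnterCriticalSection(&cs))
	{
		counter++;
		LeaveCriticalSection(&cs);
	}

	for (int i = 0; i < 2; i++)
	{
		if (threads[i])
		{
			(void)WaitForSingleObject(threads[i], INFINITE);
			(void)CloseHandle(threads[i]);
		}
	}

	DeleteCriticalSection(&cs);
	return (counter > 0) ? 0 : 1;
}

With a spin count of zero, any overlap between the two workers pushes one of them through InterlockedIncrement() into WaitForCriticalSection(), and the matching LeaveCriticalSection() then signals the semaphore via UnWaitCriticalSection(); the nested Enter/Leave pair drives the recursion branches, and the call from the main thread reaches TryEnterCriticalSection(). The sketch only illustrates which calls hit the uncovered lines; it is not presented as part of the existing WinPR test suite.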