Coverage Report

Created: 2026-04-12 07:03

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/FreeRDP/winpr/libwinpr/synch/critical.c
Line
Count
Source
1
/**
2
 * WinPR: Windows Portable Runtime
3
 * Synchronization Functions
4
 *
5
 * Copyright 2012 Marc-Andre Moreau <marcandre.moreau@gmail.com>
6
 * Copyright 2013 Norbert Federa <norbert.federa@thincast.com>
7
 *
8
 * Licensed under the Apache License, Version 2.0 (the "License");
9
 * you may not use this file except in compliance with the License.
10
 * You may obtain a copy of the License at
11
 *
12
 *     http://www.apache.org/licenses/LICENSE-2.0
13
 *
14
 * Unless required by applicable law or agreed to in writing, software
15
 * distributed under the License is distributed on an "AS IS" BASIS,
16
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
 * See the License for the specific language governing permissions and
18
 * limitations under the License.
19
 */
20
21
#include <winpr/config.h>
22
23
#include <winpr/assert.h>
24
#include <winpr/tchar.h>
25
#include <winpr/synch.h>
26
#include <winpr/sysinfo.h>
27
#include <winpr/interlocked.h>
28
#include <winpr/thread.h>
29
30
#include "synch.h"
31
32
#ifdef WINPR_HAVE_UNISTD_H
33
#include <unistd.h>
34
#endif
35
36
#if defined(__APPLE__)
37
#include <mach/task.h>
38
#include <mach/mach.h>
39
#include <mach/semaphore.h>
40
#endif
41
42
#ifndef _WIN32
43
44
#include "../log.h"
45
#define TAG WINPR_TAG("synch.critical")
46
47
VOID InitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
48
35.5k
{
49
35.5k
  if (!InitializeCriticalSectionEx(lpCriticalSection, 0, 0))
50
0
    WLog_ERR(TAG, "InitializeCriticalSectionEx failed");
51
35.5k
}
52
53
BOOL InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount,
54
                                 DWORD Flags)
55
228k
{
56
228k
  WINPR_ASSERT(lpCriticalSection);
57
  /**
58
   * See http://msdn.microsoft.com/en-us/library/ff541979(v=vs.85).aspx
59
   * - The LockCount field indicates the number of times that any thread has
60
   *   called the EnterCriticalSection routine for this critical section,
61
   *   minus one. This field starts at -1 for an unlocked critical section.
62
   *   Each call of EnterCriticalSection increments this value; each call of
63
   *   LeaveCriticalSection decrements it.
64
   * - The RecursionCount field indicates the number of times that the owning
65
   *   thread has called EnterCriticalSection for this critical section.
66
   */
67
228k
  if (Flags != 0)
68
0
  {
69
0
    WLog_WARN(TAG, "Flags unimplemented");
70
0
  }
71
72
228k
  lpCriticalSection->DebugInfo = nullptr;
73
228k
  lpCriticalSection->LockCount = -1;
74
228k
  lpCriticalSection->SpinCount = 0;
75
228k
  lpCriticalSection->RecursionCount = 0;
76
228k
  lpCriticalSection->OwningThread = nullptr;
77
228k
  lpCriticalSection->LockSemaphore = (winpr_sem_t*)malloc(sizeof(winpr_sem_t));
78
79
228k
  if (!lpCriticalSection->LockSemaphore)
80
0
    return FALSE;
81
82
#if defined(__APPLE__)
83
84
  if (semaphore_create(mach_task_self(), lpCriticalSection->LockSemaphore, SYNC_POLICY_FIFO, 0) !=
85
      KERN_SUCCESS)
86
    goto out_fail;
87
88
#else
89
90
228k
  if (sem_init(lpCriticalSection->LockSemaphore, 0, 0) != 0)
91
0
    goto out_fail;
92
93
228k
#endif
94
228k
  SetCriticalSectionSpinCount(lpCriticalSection, dwSpinCount);
95
228k
  return TRUE;
96
0
out_fail:
97
0
  free(lpCriticalSection->LockSemaphore);
98
0
  return FALSE;
99
228k
}
100
101
BOOL InitializeCriticalSectionAndSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
102
193k
{
103
193k
  return InitializeCriticalSectionEx(lpCriticalSection, dwSpinCount, 0);
104
193k
}
105
106
DWORD SetCriticalSectionSpinCount(WINPR_ATTR_UNUSED LPCRITICAL_SECTION lpCriticalSection,
107
                                  WINPR_ATTR_UNUSED DWORD dwSpinCount)
108
228k
{
109
228k
  WINPR_ASSERT(lpCriticalSection);
110
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
111
  SYSTEM_INFO sysinfo;
112
  DWORD dwPreviousSpinCount = lpCriticalSection->SpinCount;
113
114
  if (dwSpinCount)
115
  {
116
    /* Don't spin on uniprocessor systems! */
117
    GetNativeSystemInfo(&sysinfo);
118
119
    if (sysinfo.dwNumberOfProcessors < 2)
120
      dwSpinCount = 0;
121
  }
122
123
  lpCriticalSection->SpinCount = dwSpinCount;
124
  return dwPreviousSpinCount;
125
#else
126
  // WLog_ERR("TODO", "TODO: implement");
127
228k
  return 0;
128
228k
#endif
129
228k
}
130
131
static VOID WaitForCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
132
0
{
133
0
  WINPR_ASSERT(lpCriticalSection);
134
0
  WINPR_ASSERT(lpCriticalSection->LockSemaphore);
135
136
#if defined(__APPLE__)
137
  semaphore_wait(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
138
#else
139
0
  sem_wait((winpr_sem_t*)lpCriticalSection->LockSemaphore);
140
0
#endif
141
0
}
142
143
static VOID UnWaitCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
144
0
{
145
0
  WINPR_ASSERT(lpCriticalSection);
146
0
  WINPR_ASSERT(lpCriticalSection->LockSemaphore);
147
#if defined __APPLE__
148
  semaphore_signal(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
149
#else
150
0
  sem_post((winpr_sem_t*)lpCriticalSection->LockSemaphore);
151
0
#endif
152
0
}
153
154
VOID EnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
155
8.95M
{
156
8.95M
  WINPR_ASSERT(lpCriticalSection);
157
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
158
  ULONG SpinCount = lpCriticalSection->SpinCount;
159
160
  /* If we're lucky or if the current thread is already owner we can return early */
161
  if (SpinCount && TryEnterCriticalSection(lpCriticalSection))
162
    return;
163
164
  /* Spin requested times but don't compete with another waiting thread */
165
  while (SpinCount-- && lpCriticalSection->LockCount < 1)
166
  {
167
    /* Atomically try to acquire and check the if the section is free. */
168
    if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
169
    {
170
      lpCriticalSection->RecursionCount = 1;
171
      lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
172
      return;
173
    }
174
175
    /* Failed to get the lock. Let the scheduler know that we're spinning. */
176
    if (sched_yield() != 0)
177
    {
178
      /**
179
       * On some operating systems sched_yield is a stub.
180
       * usleep should at least trigger a context switch if any thread is waiting.
181
       * A ThreadYield() would be nice in winpr ...
182
       */
183
      usleep(1);
184
    }
185
  }
186
187
#endif
188
189
  /* First try the fastest possible path to get the lock. */
190
8.95M
  if (InterlockedIncrement(&lpCriticalSection->LockCount))
191
18.7k
  {
192
    /* Section is already locked. Check if it is owned by the current thread. */
193
18.7k
    if (lpCriticalSection->OwningThread == (HANDLE)(ULONG_PTR)GetCurrentThreadId())
194
18.7k
    {
195
      /* Recursion. No need to wait. */
196
18.7k
      lpCriticalSection->RecursionCount++;
197
18.7k
      return;
198
18.7k
    }
199
200
    /* Section is locked by another thread. We have to wait. */
201
0
    WaitForCriticalSection(lpCriticalSection);
202
0
  }
203
204
  /* We got the lock. Own it ... */
205
8.93M
  lpCriticalSection->RecursionCount = 1;
206
8.93M
  lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
207
8.93M
}
208
209
BOOL TryEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
210
0
{
211
0
  HANDLE current_thread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
212
213
0
  WINPR_ASSERT(lpCriticalSection);
214
215
  /* Atomically acquire the the lock if the section is free. */
216
0
  if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
217
0
  {
218
0
    lpCriticalSection->RecursionCount = 1;
219
0
    lpCriticalSection->OwningThread = current_thread;
220
0
    return TRUE;
221
0
  }
222
223
  /* Section is already locked. Check if it is owned by the current thread. */
224
0
  if (lpCriticalSection->OwningThread == current_thread)
225
0
  {
226
    /* Recursion, return success */
227
0
    lpCriticalSection->RecursionCount++;
228
0
    InterlockedIncrement(&lpCriticalSection->LockCount);
229
0
    return TRUE;
230
0
  }
231
232
0
  return FALSE;
233
0
}
234
235
VOID LeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
236
8.95M
{
237
8.95M
  WINPR_ASSERT(lpCriticalSection);
238
239
  /* Decrement RecursionCount and check if this is the last LeaveCriticalSection call ...*/
240
8.95M
  if (--lpCriticalSection->RecursionCount < 1)
241
8.93M
  {
242
    /* Last recursion, clear owner, unlock and if there are other waiting threads ... */
243
8.93M
    lpCriticalSection->OwningThread = nullptr;
244
245
8.93M
    if (InterlockedDecrement(&lpCriticalSection->LockCount) >= 0)
246
0
    {
247
      /* ...signal the semaphore to unblock the next waiting thread */
248
0
      UnWaitCriticalSection(lpCriticalSection);
249
0
    }
250
8.93M
  }
251
18.7k
  else
252
18.7k
  {
253
18.7k
    (void)InterlockedDecrement(&lpCriticalSection->LockCount);
254
18.7k
  }
255
8.95M
}
256
257
VOID DeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
258
228k
{
259
228k
  WINPR_ASSERT(lpCriticalSection);
260
261
228k
  lpCriticalSection->LockCount = -1;
262
228k
  lpCriticalSection->SpinCount = 0;
263
228k
  lpCriticalSection->RecursionCount = 0;
264
228k
  lpCriticalSection->OwningThread = nullptr;
265
266
228k
  if (lpCriticalSection->LockSemaphore != nullptr)
267
228k
  {
268
#if defined __APPLE__
269
    semaphore_destroy(mach_task_self(), *((winpr_sem_t*)lpCriticalSection->LockSemaphore));
270
#else
271
228k
    sem_destroy((winpr_sem_t*)lpCriticalSection->LockSemaphore);
272
228k
#endif
273
228k
    free(lpCriticalSection->LockSemaphore);
274
228k
    lpCriticalSection->LockSemaphore = nullptr;
275
228k
  }
276
228k
}
277
278
#endif