Coverage Report

Created: 2026-01-09 06:43

/src/FreeRDP/winpr/libwinpr/synch/critical.c

  Line|  Count|Source
     1|       |/**
     2|       | * WinPR: Windows Portable Runtime
     3|       | * Synchronization Functions
     4|       | *
     5|       | * Copyright 2012 Marc-Andre Moreau <marcandre.moreau@gmail.com>
     6|       | * Copyright 2013 Norbert Federa <norbert.federa@thincast.com>
     7|       | *
     8|       | * Licensed under the Apache License, Version 2.0 (the "License");
     9|       | * you may not use this file except in compliance with the License.
    10|       | * You may obtain a copy of the License at
    11|       | *
    12|       | *     http://www.apache.org/licenses/LICENSE-2.0
    13|       | *
    14|       | * Unless required by applicable law or agreed to in writing, software
    15|       | * distributed under the License is distributed on an "AS IS" BASIS,
    16|       | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    17|       | * See the License for the specific language governing permissions and
    18|       | * limitations under the License.
    19|       | */
    20|       |
    21|       |#include <winpr/config.h>
    22|       |
    23|       |#include <winpr/assert.h>
    24|       |#include <winpr/tchar.h>
    25|       |#include <winpr/synch.h>
    26|       |#include <winpr/sysinfo.h>
    27|       |#include <winpr/interlocked.h>
    28|       |#include <winpr/thread.h>
    29|       |
    30|       |#include "synch.h"
    31|       |
    32|       |#ifdef WINPR_HAVE_UNISTD_H
    33|       |#include <unistd.h>
    34|       |#endif
    35|       |
    36|       |#if defined(__APPLE__)
    37|       |#include <mach/task.h>
    38|       |#include <mach/mach.h>
    39|       |#include <mach/semaphore.h>
    40|       |#endif
    41|       |
    42|       |#ifndef _WIN32
    43|       |
    44|       |#include "../log.h"
    45|       |#define TAG WINPR_TAG("synch.critical")
    46|       |
    47|       |VOID InitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
    48|      0|{
    49|      0|  InitializeCriticalSectionEx(lpCriticalSection, 0, 0);
    50|      0|}
    51|       |
    52|       |BOOL InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount,
    53|       |                                 DWORD Flags)
    54|  5.94k|{
    55|  5.94k|  WINPR_ASSERT(lpCriticalSection);
    56|       |  /**
    57|       |   * See http://msdn.microsoft.com/en-us/library/ff541979(v=vs.85).aspx
    58|       |   * - The LockCount field indicates the number of times that any thread has
    59|       |   *   called the EnterCriticalSection routine for this critical section,
    60|       |   *   minus one. This field starts at -1 for an unlocked critical section.
    61|       |   *   Each call of EnterCriticalSection increments this value; each call of
    62|       |   *   LeaveCriticalSection decrements it.
    63|       |   * - The RecursionCount field indicates the number of times that the owning
    64|       |   *   thread has called EnterCriticalSection for this critical section.
    65|       |   */
    66|  5.94k|  if (Flags != 0)
    67|      0|  {
    68|      0|    WLog_WARN(TAG, "Flags unimplemented");
    69|      0|  }
    70|       |
    71|  5.94k|  lpCriticalSection->DebugInfo = NULL;
    72|  5.94k|  lpCriticalSection->LockCount = -1;
    73|  5.94k|  lpCriticalSection->SpinCount = 0;
    74|  5.94k|  lpCriticalSection->RecursionCount = 0;
    75|  5.94k|  lpCriticalSection->OwningThread = NULL;
    76|  5.94k|  lpCriticalSection->LockSemaphore = (winpr_sem_t*)malloc(sizeof(winpr_sem_t));
    77|       |
    78|  5.94k|  if (!lpCriticalSection->LockSemaphore)
    79|      0|    return FALSE;
    80|       |
    81|       |#if defined(__APPLE__)
    82|       |
    83|       |  if (semaphore_create(mach_task_self(), lpCriticalSection->LockSemaphore, SYNC_POLICY_FIFO, 0) !=
    84|       |      KERN_SUCCESS)
    85|       |    goto out_fail;
    86|       |
    87|       |#else
    88|       |
    89|  5.94k|  if (sem_init(lpCriticalSection->LockSemaphore, 0, 0) != 0)
    90|      0|    goto out_fail;
    91|       |
    92|  5.94k|#endif
    93|  5.94k|  SetCriticalSectionSpinCount(lpCriticalSection, dwSpinCount);
    94|  5.94k|  return TRUE;
    95|      0|out_fail:
    96|      0|  free(lpCriticalSection->LockSemaphore);
    97|      0|  return FALSE;
    98|  5.94k|}
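
Aside: the initializer above establishes exactly the invariants quoted from MSDN at lines 56-65: an unlocked section has LockCount == -1, RecursionCount == 0 and no OwningThread, plus a semaphore created at count 0 for contended waiters. A minimal sketch of those post-conditions, assuming a POSIX WinPR build where the RTL_CRITICAL_SECTION fields are directly visible (as this file itself assumes):

    #include <assert.h>
    #include <winpr/synch.h>

    int main(void)
    {
        CRITICAL_SECTION cs;

        /* Plain initialization: no spin count, no flags. */
        if (!InitializeCriticalSectionEx(&cs, 0, 0))
            return 1;

        /* Documented starting state of an unlocked section. */
        assert(cs.LockCount == -1);      /* -1 means unlocked */
        assert(cs.RecursionCount == 0);  /* nobody has entered yet */
        assert(cs.OwningThread == NULL); /* no owning thread recorded */

        DeleteCriticalSection(&cs);
        return 0;
    }
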
    99|       |
   100|       |BOOL InitializeCriticalSectionAndSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
   101|  5.94k|{
   102|  5.94k|  return InitializeCriticalSectionEx(lpCriticalSection, dwSpinCount, 0);
   103|  5.94k|}
   104|       |
   105|       |DWORD SetCriticalSectionSpinCount(WINPR_ATTR_UNUSED LPCRITICAL_SECTION lpCriticalSection,
   106|       |                                  WINPR_ATTR_UNUSED DWORD dwSpinCount)
   107|  5.94k|{
   108|  5.94k|  WINPR_ASSERT(lpCriticalSection);
   109|       |#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
   110|       |  SYSTEM_INFO sysinfo;
   111|       |  DWORD dwPreviousSpinCount = lpCriticalSection->SpinCount;
   112|       |
   113|       |  if (dwSpinCount)
   114|       |  {
   115|       |    /* Don't spin on uniprocessor systems! */
   116|       |    GetNativeSystemInfo(&sysinfo);
   117|       |
   118|       |    if (sysinfo.dwNumberOfProcessors < 2)
   119|       |      dwSpinCount = 0;
   120|       |  }
   121|       |
   122|       |  lpCriticalSection->SpinCount = dwSpinCount;
   123|       |  return dwPreviousSpinCount;
   124|       |#else
   125|       |  // WLog_ERR("TODO", "TODO: implement");
   126|  5.94k|  return 0;
   127|  5.94k|#endif
   128|  5.94k|}
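
Aside: SetCriticalSectionSpinCount stores the requested count but deliberately forces it to 0 on uniprocessor machines, where spinning can only burn the lock holder's time slice. Note the counts above: only the #else branch (lines 126-128) executed, so this build was configured with WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT and the setter always returns 0. A small usage sketch, assuming a multiprocessor POSIX build with spinning enabled:

    #include <stdio.h>
    #include <winpr/synch.h>

    int main(void)
    {
        CRITICAL_SECTION cs;

        /* Request up to 4000 spins before blocking; 4000 is the value
         * Windows traditionally uses for the process heap lock. */
        if (!InitializeCriticalSectionAndSpinCount(&cs, 4000))
            return 1;

        /* The setter returns the previous count; on a uniprocessor
         * system the stored value collapses to 0. */
        DWORD previous = SetCriticalSectionSpinCount(&cs, 100);
        printf("previous spin count: %lu\n", (unsigned long)previous);

        DeleteCriticalSection(&cs);
        return 0;
    }
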
   129|       |
   130|       |static VOID WaitForCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
   131|      0|{
   132|      0|  WINPR_ASSERT(lpCriticalSection);
   133|      0|  WINPR_ASSERT(lpCriticalSection->LockSemaphore);
   134|       |
   135|       |#if defined(__APPLE__)
   136|       |  semaphore_wait(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
   137|       |#else
   138|      0|  sem_wait((winpr_sem_t*)lpCriticalSection->LockSemaphore);
   139|      0|#endif
   140|      0|}
   141|       |
   142|       |static VOID UnWaitCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
   143|      0|{
   144|      0|  WINPR_ASSERT(lpCriticalSection);
   145|      0|  WINPR_ASSERT(lpCriticalSection->LockSemaphore);
   146|       |#if defined __APPLE__
   147|       |  semaphore_signal(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
   148|       |#else
   149|      0|  sem_post((winpr_sem_t*)lpCriticalSection->LockSemaphore);
   150|      0|#endif
   151|      0|}
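
Aside: WaitForCriticalSection and UnWaitCriticalSection are thin wrappers around the platform semaphore stored in LockSemaphore: sem_wait/sem_post on POSIX, semaphore_wait/semaphore_signal on macOS. The semaphore starts at 0, so a waiter blocks until a leaving thread posts. A self-contained sketch of that POSIX pairing (plain POSIX, independent of WinPR; sem_init is unavailable on macOS, which is why the file uses Mach semaphores there):

    #include <semaphore.h>
    #include <stdio.h>

    int main(void)
    {
        sem_t sem;

        /* Initial count 0, exactly like LockSemaphore above:
         * a waiter blocks until somebody posts. */
        if (sem_init(&sem, 0, 0) != 0)
            return 1;

        sem_post(&sem); /* what UnWaitCriticalSection does on release */
        sem_wait(&sem); /* what WaitForCriticalSection does on contention */

        sem_destroy(&sem);
        puts("wait/post pair completed");
        return 0;
    }
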
   152|       |
   153|       |VOID EnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
   154|  52.3k|{
   155|  52.3k|  WINPR_ASSERT(lpCriticalSection);
   156|       |#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
   157|       |  ULONG SpinCount = lpCriticalSection->SpinCount;
   158|       |
   159|       |  /* If we're lucky, or if the current thread is already the owner, we can return early */
   160|       |  if (SpinCount && TryEnterCriticalSection(lpCriticalSection))
   161|       |    return;
   162|       |
   163|       |  /* Spin the requested number of times, but don't compete with another waiting thread */
   164|       |  while (SpinCount-- && lpCriticalSection->LockCount < 1)
   165|       |  {
   166|       |    /* Atomically acquire the lock if the section is free. */
   167|       |    if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
   168|       |    {
   169|       |      lpCriticalSection->RecursionCount = 1;
   170|       |      lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
   171|       |      return;
   172|       |    }
   173|       |
   174|       |    /* Failed to get the lock. Let the scheduler know that we're spinning. */
   175|       |    if (sched_yield() != 0)
   176|       |    {
   177|       |      /**
   178|       |       * On some operating systems sched_yield is a stub.
   179|       |       * usleep should at least trigger a context switch if any thread is waiting.
   180|       |       * A ThreadYield() would be nice in winpr ...
   181|       |       */
   182|       |      usleep(1);
   183|       |    }
   184|       |  }
   185|       |
   186|       |#endif
   187|       |
   188|       |  /* First try the fastest possible path to get the lock. */
   189|  52.3k|  if (InterlockedIncrement(&lpCriticalSection->LockCount))
   190|      0|  {
   191|       |    /* Section is already locked. Check if it is owned by the current thread. */
   192|      0|    if (lpCriticalSection->OwningThread == (HANDLE)(ULONG_PTR)GetCurrentThreadId())
   193|      0|    {
   194|       |      /* Recursion. No need to wait. */
   195|      0|      lpCriticalSection->RecursionCount++;
   196|      0|      return;
   197|      0|    }
   198|       |
   199|       |    /* Section is locked by another thread. We have to wait. */
   200|      0|    WaitForCriticalSection(lpCriticalSection);
   201|      0|  }
   202|       |
   203|       |  /* We got the lock. Own it ... */
   204|  52.3k|  lpCriticalSection->RecursionCount = 1;
   205|  52.3k|  lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
   206|  52.3k|}
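
Aside: the heart of the acquire path is InterlockedCompareExchange(&LockCount, 0, -1): the section is free iff LockCount is -1, and exactly one thread atomically moves it to 0 and becomes the owner; everyone else falls through to InterlockedIncrement plus a semaphore wait. An isolated sketch of that compare-exchange idiom using GCC/Clang __atomic builtins rather than WinPR's Interlocked API (illustrative only, not the code above):

    #include <stdbool.h>
    #include <stdio.h>

    static int lockCount = -1; /* -1 = free, >= 0 = held/contended */

    static bool try_acquire(void)
    {
        int expected = -1;

        /* Same shape as
         * InterlockedCompareExchange(&LockCount, 0, -1) == -1 */
        return __atomic_compare_exchange_n(&lockCount, &expected, 0, false,
                                           __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
    }

    int main(void)
    {
        printf("first acquire:  %s\n", try_acquire() ? "won" : "lost");
        printf("second acquire: %s\n", try_acquire() ? "won" : "lost");
        return 0;
    }
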
   207|       |
   208|       |BOOL TryEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
   209|      0|{
   210|      0|  HANDLE current_thread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
   211|       |
   212|      0|  WINPR_ASSERT(lpCriticalSection);
   213|       |
   214|       |  /* Atomically acquire the lock if the section is free. */
   215|      0|  if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
   216|      0|  {
   217|      0|    lpCriticalSection->RecursionCount = 1;
   218|      0|    lpCriticalSection->OwningThread = current_thread;
   219|      0|    return TRUE;
   220|      0|  }
   221|       |
   222|       |  /* Section is already locked. Check if it is owned by the current thread. */
   223|      0|  if (lpCriticalSection->OwningThread == current_thread)
   224|      0|  {
   225|       |    /* Recursion, return success */
   226|      0|    lpCriticalSection->RecursionCount++;
   227|      0|    InterlockedIncrement(&lpCriticalSection->LockCount);
   228|      0|    return TRUE;
   229|      0|  }
   230|       |
   231|      0|  return FALSE;
   232|      0|}
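
Aside: TryEnterCriticalSection never blocks: it either takes a free section, counts a recursive entry by the current owner, or reports failure. The usual pattern is to attempt the lock and skip (or defer) the work when it is busy. A minimal sketch using the functions in this file:

    #include <stdio.h>
    #include <winpr/synch.h>

    static CRITICAL_SECTION cs;
    static int shared_counter = 0;

    static void poll_update(void)
    {
        /* Non-blocking attempt: on failure, skip instead of
         * sleeping on the LockSemaphore. */
        if (!TryEnterCriticalSection(&cs))
        {
            printf("busy, skipping this round\n");
            return;
        }

        shared_counter++;
        LeaveCriticalSection(&cs);
    }

    int main(void)
    {
        InitializeCriticalSection(&cs);
        poll_update();
        DeleteCriticalSection(&cs);
        return 0;
    }
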
   233|       |
   234|       |VOID LeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
   235|  52.3k|{
   236|  52.3k|  WINPR_ASSERT(lpCriticalSection);
   237|       |
   238|       |  /* Decrement RecursionCount and check if this is the last LeaveCriticalSection call ... */
   239|  52.3k|  if (--lpCriticalSection->RecursionCount < 1)
   240|  52.3k|  {
   241|       |    /* Last recursion: clear the owner, unlock, and if there are other waiting threads ... */
   242|  52.3k|    lpCriticalSection->OwningThread = NULL;
   243|       |
   244|  52.3k|    if (InterlockedDecrement(&lpCriticalSection->LockCount) >= 0)
   245|      0|    {
   246|       |      /* ... signal the semaphore to unblock the next waiting thread */
   247|      0|      UnWaitCriticalSection(lpCriticalSection);
   248|      0|    }
   249|  52.3k|  }
   250|      0|  else
   251|      0|  {
   252|      0|    (void)InterlockedDecrement(&lpCriticalSection->LockCount);
   253|      0|  }
   254|  52.3k|}
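
Aside: because Leave only releases the section once RecursionCount drops below 1, recursive Enter/Leave pairs by the owning thread nest safely; only the outermost Leave clears the owner and, if LockCount is still non-negative, posts the semaphore for a waiter. A short sketch of that nesting:

    #include <winpr/synch.h>

    static CRITICAL_SECTION cs;

    static void inner(void)
    {
        /* Same thread re-enters: RecursionCount goes 1 -> 2, no wait. */
        EnterCriticalSection(&cs);
        /* ... nested work on the protected state ... */
        LeaveCriticalSection(&cs); /* back to 1, still owned */
    }

    int main(void)
    {
        InitializeCriticalSection(&cs);

        EnterCriticalSection(&cs); /* RecursionCount = 1, owner = this thread */
        inner();
        LeaveCriticalSection(&cs); /* outermost leave: owner cleared, lock free */

        DeleteCriticalSection(&cs);
        return 0;
    }
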
   255|       |
   256|       |VOID DeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
   257|  5.94k|{
   258|  5.94k|  WINPR_ASSERT(lpCriticalSection);
   259|       |
   260|  5.94k|  lpCriticalSection->LockCount = -1;
   261|  5.94k|  lpCriticalSection->SpinCount = 0;
   262|  5.94k|  lpCriticalSection->RecursionCount = 0;
   263|  5.94k|  lpCriticalSection->OwningThread = NULL;
   264|       |
   265|  5.94k|  if (lpCriticalSection->LockSemaphore != NULL)
   266|  5.94k|  {
   267|       |#if defined __APPLE__
   268|       |    semaphore_destroy(mach_task_self(), *((winpr_sem_t*)lpCriticalSection->LockSemaphore));
   269|       |#else
   270|  5.94k|    sem_destroy((winpr_sem_t*)lpCriticalSection->LockSemaphore);
   271|  5.94k|#endif
   272|  5.94k|    free(lpCriticalSection->LockSemaphore);
   273|       |    lpCriticalSection->LockSemaphore = NULL;
   274|  5.94k|  }
   275|  5.94k|}
   276|       |
   277|       |#endif
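
Aside: the counts above tell a consistent story for this run: 5.94k init/delete pairs, 52.3k Enter/Leave pairs on the uncontended fast path, and zero executions of the contention machinery (WaitForCriticalSection, UnWaitCriticalSection, TryEnterCriticalSection and the recursion branches). An end-to-end sketch that drives Enter/Leave from two threads using WinPR's public thread and synch APIs; unlike the run measured here, this one would also exercise the wait path:

    #include <stdio.h>
    #include <winpr/handle.h>
    #include <winpr/synch.h>
    #include <winpr/thread.h>

    static CRITICAL_SECTION cs;
    static long counter = 0;

    static DWORD WINAPI worker(LPVOID arg)
    {
        (void)arg;

        for (int i = 0; i < 10000; i++)
        {
            EnterCriticalSection(&cs);
            counter++; /* protected increment */
            LeaveCriticalSection(&cs);
        }

        return 0;
    }

    int main(void)
    {
        HANDLE threads[2];

        if (!InitializeCriticalSectionAndSpinCount(&cs, 4000))
            return 1;

        for (int i = 0; i < 2; i++)
            threads[i] = CreateThread(NULL, 0, worker, NULL, 0, NULL);

        (void)WaitForMultipleObjects(2, threads, TRUE, INFINITE);

        for (int i = 0; i < 2; i++)
            (void)CloseHandle(threads[i]);

        DeleteCriticalSection(&cs);
        printf("counter = %ld (expected 20000)\n", counter);
        return 0;
    }
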