/**
 * WinPR: Windows Portable Runtime
 * Synchronization Functions — Critical Section (critical.c)
 *
 * Part of the FreeRDP project.
 */
21#include <winpr/config.h>
22
23#include <winpr/assert.h>
24#include <winpr/tchar.h>
25#include <winpr/synch.h>
26#include <winpr/sysinfo.h>
27#include <winpr/interlocked.h>
28#include <winpr/thread.h>
29
30#include "synch.h"
31
32#ifdef WINPR_HAVE_UNISTD_H
33#include <unistd.h>
34#endif
35
36#if defined(__APPLE__)
37#include <mach/task.h>
38#include <mach/mach.h>
39#include <mach/semaphore.h>
40#endif
41
42#ifndef _WIN32
43
44#include "../log.h"
45#define TAG WINPR_TAG("synch.critical")
46
47VOID InitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
48{
49 InitializeCriticalSectionEx(lpCriticalSection, 0, 0);
50}
51
52BOOL InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount,
53 DWORD Flags)
54{
55 WINPR_ASSERT(lpCriticalSection);
66 if (Flags != 0)
67 {
68 WLog_WARN(TAG, "Flags unimplemented");
69 }
70
71 lpCriticalSection->DebugInfo = NULL;
72 lpCriticalSection->LockCount = -1;
73 lpCriticalSection->SpinCount = 0;
74 lpCriticalSection->RecursionCount = 0;
75 lpCriticalSection->OwningThread = NULL;
76 lpCriticalSection->LockSemaphore = (winpr_sem_t*)malloc(sizeof(winpr_sem_t));
77
78 if (!lpCriticalSection->LockSemaphore)
79 return FALSE;
80
81#if defined(__APPLE__)
82
83 if (semaphore_create(mach_task_self(), lpCriticalSection->LockSemaphore, SYNC_POLICY_FIFO, 0) !=
84 KERN_SUCCESS)
85 goto out_fail;
86
87#else
88
89 if (sem_init(lpCriticalSection->LockSemaphore, 0, 0) != 0)
90 goto out_fail;
91
92#endif
93 SetCriticalSectionSpinCount(lpCriticalSection, dwSpinCount);
94 return TRUE;
95out_fail:
96 free(lpCriticalSection->LockSemaphore);
97 return FALSE;
98}
99
100BOOL InitializeCriticalSectionAndSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
101{
102 return InitializeCriticalSectionEx(lpCriticalSection, dwSpinCount, 0);
103}
104
105DWORD SetCriticalSectionSpinCount(WINPR_ATTR_UNUSED LPCRITICAL_SECTION lpCriticalSection,
106 WINPR_ATTR_UNUSED DWORD dwSpinCount)
107{
108 WINPR_ASSERT(lpCriticalSection);
109#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
110 SYSTEM_INFO sysinfo;
111 DWORD dwPreviousSpinCount = lpCriticalSection->SpinCount;
112
113 if (dwSpinCount)
114 {
115 /* Don't spin on uniprocessor systems! */
116 GetNativeSystemInfo(&sysinfo);
117
118 if (sysinfo.dwNumberOfProcessors < 2)
119 dwSpinCount = 0;
120 }
121
122 lpCriticalSection->SpinCount = dwSpinCount;
123 return dwPreviousSpinCount;
124#else
125 // WLog_ERR("TODO", "TODO: implement");
126 return 0;
127#endif
128}
129
130static VOID WaitForCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
131{
132 WINPR_ASSERT(lpCriticalSection);
133#if defined(__APPLE__)
134 semaphore_wait(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
135#else
136 sem_wait((winpr_sem_t*)lpCriticalSection->LockSemaphore);
137#endif
138}
139
140static VOID UnWaitCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
141{
142 WINPR_ASSERT(lpCriticalSection);
143#if defined __APPLE__
144 semaphore_signal(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
145#else
146 sem_post((winpr_sem_t*)lpCriticalSection->LockSemaphore);
147#endif
148}
149
/* Acquire the critical section, blocking if necessary. Re-entrant: a thread
 * that already owns the section only bumps RecursionCount.
 *
 * LockCount protocol: -1 == free; each contender increments it, so a value
 * >= 0 after increment means someone else holds (or is taking) the lock. */
VOID EnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	ULONG SpinCount = lpCriticalSection->SpinCount;

	/* If we're lucky or if the current thread is already owner we can return early */
	if (SpinCount && TryEnterCriticalSection(lpCriticalSection))
		return;

	/* Spin the requested number of times, but don't compete with a thread
	 * that is already waiting (LockCount >= 1 implies at least one waiter). */
	while (SpinCount-- && lpCriticalSection->LockCount < 1)
	{
		/* Atomically acquire if the section is free (LockCount -1 -> 0). */
		if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
		{
			lpCriticalSection->RecursionCount = 1;
			lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
			return;
		}

		/* Failed to get the lock. Let the scheduler know that we're spinning. */
		if (sched_yield() != 0)
		{
			/* sched_yield is not guaranteed to succeed; back off briefly. */
			usleep(1);
		}
	}

#endif

	/* First try the fastest possible path to get the lock: an increment that
	 * lands on 0 means the section was free and we now own it. */
	if (InterlockedIncrement(&lpCriticalSection->LockCount))
	{
		/* Section is already locked. Check if it is owned by the current thread. */
		if (lpCriticalSection->OwningThread == (HANDLE)(ULONG_PTR)GetCurrentThreadId())
		{
			/* Recursion. No need to wait. */
			lpCriticalSection->RecursionCount++;
			return;
		}

		/* Section is locked by another thread. We have to wait. */
		WaitForCriticalSection(lpCriticalSection);
	}

	/* We got the lock. Own it ... */
	lpCriticalSection->RecursionCount = 1;
	lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
}
204
/* Try to acquire the critical section without blocking.
 * Returns TRUE if the lock was acquired (or already owned by this thread,
 * in which case RecursionCount is incremented), FALSE otherwise. */
BOOL TryEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	HANDLE current_thread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();

	WINPR_ASSERT(lpCriticalSection);

	/* Atomically acquire the lock if the section is free (LockCount -1 -> 0). */
	if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
	{
		lpCriticalSection->RecursionCount = 1;
		lpCriticalSection->OwningThread = current_thread;
		return TRUE;
	}

	/* Section is already locked. Check if it is owned by the current thread. */
	if (lpCriticalSection->OwningThread == current_thread)
	{
		/* Recursion, return success. LockCount still tracks every enter so
		 * that LeaveCriticalSection's decrement bookkeeping stays balanced. */
		lpCriticalSection->RecursionCount++;
		InterlockedIncrement(&lpCriticalSection->LockCount);
		return TRUE;
	}

	return FALSE;
}
230
/* Release one level of ownership of the critical section. Only the final
 * (outermost) release clears the owner and, if other threads incremented
 * LockCount while waiting, wakes exactly one of them via the semaphore. */
VOID LeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);

	/* Decrement RecursionCount and check if this is the last LeaveCriticalSection call ...*/
	if (--lpCriticalSection->RecursionCount < 1)
	{
		/* Last recursion, clear owner, unlock and if there are other waiting threads ... */
		lpCriticalSection->OwningThread = NULL;

		/* Decrement landing on >= 0 means at least one waiter has already
		 * incremented LockCount and is blocked in WaitForCriticalSection. */
		if (InterlockedDecrement(&lpCriticalSection->LockCount) >= 0)
		{
			/* ...signal the semaphore to unblock the next waiting thread */
			UnWaitCriticalSection(lpCriticalSection);
		}
	}
	else
	{
		/* Still held recursively: just undo this level's LockCount increment. */
		(void)InterlockedDecrement(&lpCriticalSection->LockCount);
	}
}
252
253VOID DeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
254{
255 WINPR_ASSERT(lpCriticalSection);
256
257 lpCriticalSection->LockCount = -1;
258 lpCriticalSection->SpinCount = 0;
259 lpCriticalSection->RecursionCount = 0;
260 lpCriticalSection->OwningThread = NULL;
261
262 if (lpCriticalSection->LockSemaphore != NULL)
263 {
264#if defined __APPLE__
265 semaphore_destroy(mach_task_self(), *((winpr_sem_t*)lpCriticalSection->LockSemaphore));
266#else
267 sem_destroy((winpr_sem_t*)lpCriticalSection->LockSemaphore);
268#endif
269 free(lpCriticalSection->LockSemaphore);
270 lpCriticalSection->LockSemaphore = NULL;
271 }
272}
273
274#endif