/**
 * FreeRDP: WinPR — critical section synchronization (critical.c)
 */
21 #include <winpr/config.h>
22 
23 #include <winpr/assert.h>
24 #include <winpr/tchar.h>
25 #include <winpr/synch.h>
26 #include <winpr/sysinfo.h>
27 #include <winpr/interlocked.h>
28 #include <winpr/thread.h>
29 
30 #include "synch.h"
31 
32 #ifdef WINPR_HAVE_UNISTD_H
33 #include <unistd.h>
34 #endif
35 
36 #if defined(__APPLE__)
37 #include <mach/task.h>
38 #include <mach/mach.h>
39 #include <mach/semaphore.h>
40 #endif
41 
42 #ifndef _WIN32
43 
44 #include "../log.h"
45 #define TAG WINPR_TAG("synch.critical")
46 
47 VOID InitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
48 {
49  InitializeCriticalSectionEx(lpCriticalSection, 0, 0);
50 }
51 
52 BOOL InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount,
53  DWORD Flags)
54 {
55  WINPR_ASSERT(lpCriticalSection);
66  if (Flags != 0)
67  {
68  WLog_WARN(TAG, "Flags unimplemented");
69  }
70 
71  lpCriticalSection->DebugInfo = NULL;
72  lpCriticalSection->LockCount = -1;
73  lpCriticalSection->SpinCount = 0;
74  lpCriticalSection->RecursionCount = 0;
75  lpCriticalSection->OwningThread = NULL;
76  lpCriticalSection->LockSemaphore = (winpr_sem_t*)malloc(sizeof(winpr_sem_t));
77 
78  if (!lpCriticalSection->LockSemaphore)
79  return FALSE;
80 
81 #if defined(__APPLE__)
82 
83  if (semaphore_create(mach_task_self(), lpCriticalSection->LockSemaphore, SYNC_POLICY_FIFO, 0) !=
84  KERN_SUCCESS)
85  goto out_fail;
86 
87 #else
88 
89  if (sem_init(lpCriticalSection->LockSemaphore, 0, 0) != 0)
90  goto out_fail;
91 
92 #endif
93  SetCriticalSectionSpinCount(lpCriticalSection, dwSpinCount);
94  return TRUE;
95 out_fail:
96  free(lpCriticalSection->LockSemaphore);
97  return FALSE;
98 }
99 
100 BOOL InitializeCriticalSectionAndSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
101 {
102  return InitializeCriticalSectionEx(lpCriticalSection, dwSpinCount, 0);
103 }
104 
105 DWORD SetCriticalSectionSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
106 {
107  WINPR_ASSERT(lpCriticalSection);
108 #if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
109  SYSTEM_INFO sysinfo;
110  DWORD dwPreviousSpinCount = lpCriticalSection->SpinCount;
111 
112  if (dwSpinCount)
113  {
114  /* Don't spin on uniprocessor systems! */
115  GetNativeSystemInfo(&sysinfo);
116 
117  if (sysinfo.dwNumberOfProcessors < 2)
118  dwSpinCount = 0;
119  }
120 
121  lpCriticalSection->SpinCount = dwSpinCount;
122  return dwPreviousSpinCount;
123 #else
124  return 0;
125 #endif
126 }
127 
128 static VOID WaitForCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
129 {
130  WINPR_ASSERT(lpCriticalSection);
131 #if defined(__APPLE__)
132  semaphore_wait(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
133 #else
134  sem_wait((winpr_sem_t*)lpCriticalSection->LockSemaphore);
135 #endif
136 }
137 
138 static VOID UnWaitCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
139 {
140  WINPR_ASSERT(lpCriticalSection);
141 #if defined __APPLE__
142  semaphore_signal(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
143 #else
144  sem_post((winpr_sem_t*)lpCriticalSection->LockSemaphore);
145 #endif
146 }
147 
/**
 * Acquire the critical section, blocking until it becomes available.
 *
 * LockCount follows the Windows convention: -1 means unlocked; each pending
 * acquirer increments it. Acquisition order here matters: ownership fields
 * (RecursionCount, OwningThread) are written only AFTER the interlocked
 * operation has granted the lock to this thread.
 */
VOID EnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	ULONG SpinCount = lpCriticalSection->SpinCount;

	/* If we're lucky or if the current thread is already owner we can return early */
	if (SpinCount && TryEnterCriticalSection(lpCriticalSection))
		return;

	/* Spin requested times but don't compete with another waiting thread
	 * (LockCount >= 1 means at least one thread is already queued). */
	while (SpinCount-- && lpCriticalSection->LockCount < 1)
	{
		/* Atomically acquire the lock if the section is free (-1 -> 0). */
		if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
		{
			lpCriticalSection->RecursionCount = 1;
			lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
			return;
		}

		/* Failed to get the lock. Let the scheduler know that we're spinning. */
		if (sched_yield() != 0)
		{
			/* sched_yield failed on this platform; sleep briefly instead to
			 * give the lock owner a chance to run. */
			usleep(1);
		}
	}

#endif

	/* First try the fastest possible path to get the lock: the increment
	 * returns 0 (from -1) exactly when the section was free. */
	if (InterlockedIncrement(&lpCriticalSection->LockCount))
	{
		/* Section is already locked. Check if it is owned by the current thread. */
		if (lpCriticalSection->OwningThread == (HANDLE)(ULONG_PTR)GetCurrentThreadId())
		{
			/* Recursion. No need to wait. */
			lpCriticalSection->RecursionCount++;
			return;
		}

		/* Section is locked by another thread. We have to wait. */
		WaitForCriticalSection(lpCriticalSection);
	}

	/* We got the lock. Own it ... */
	lpCriticalSection->RecursionCount = 1;
	lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
}
202 
/**
 * Try to acquire the critical section without blocking.
 *
 * @return TRUE if the lock was acquired (first entry or recursive entry by
 *         the owning thread), FALSE if another thread holds it.
 */
BOOL TryEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	HANDLE current_thread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();

	WINPR_ASSERT(lpCriticalSection);

	/* Atomically acquire the lock if the section is free (-1 -> 0). */
	if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
	{
		lpCriticalSection->RecursionCount = 1;
		lpCriticalSection->OwningThread = current_thread;
		return TRUE;
	}

	/* Section is already locked. Check if it is owned by the current thread.
	 * Safe to read OwningThread here: it only equals current_thread if this
	 * thread itself stored it while holding the lock. */
	if (lpCriticalSection->OwningThread == current_thread)
	{
		/* Recursion, return success */
		lpCriticalSection->RecursionCount++;
		InterlockedIncrement(&lpCriticalSection->LockCount);
		return TRUE;
	}

	return FALSE;
}
228 
/**
 * Release the critical section once; the lock is only truly released when
 * the recursion count of the owning thread drops to zero.
 *
 * NOTE: like the Windows API, behavior is undefined if the calling thread
 * does not own the section. Ownership is cleared BEFORE the interlocked
 * decrement so a newly woken acquirer never observes a stale owner.
 */
VOID LeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);

	/* Decrement RecursionCount and check if this is the last LeaveCriticalSection call ...*/
	if (--lpCriticalSection->RecursionCount < 1)
	{
		/* Last recursion, clear owner, unlock and if there are other waiting threads ... */
		lpCriticalSection->OwningThread = NULL;

		/* Decrement result >= 0 means at least one thread was queued waiting. */
		if (InterlockedDecrement(&lpCriticalSection->LockCount) >= 0)
		{
			/* ...signal the semaphore to unblock the next waiting thread */
			UnWaitCriticalSection(lpCriticalSection);
		}
	}
	else
	{
		/* Recursive exit: just drop one pending count, keep ownership. */
		(void)InterlockedDecrement(&lpCriticalSection->LockCount);
	}
}
250 
251 VOID DeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
252 {
253  WINPR_ASSERT(lpCriticalSection);
254 
255  lpCriticalSection->LockCount = -1;
256  lpCriticalSection->SpinCount = 0;
257  lpCriticalSection->RecursionCount = 0;
258  lpCriticalSection->OwningThread = NULL;
259 
260  if (lpCriticalSection->LockSemaphore != NULL)
261  {
262 #if defined __APPLE__
263  semaphore_destroy(mach_task_self(), *((winpr_sem_t*)lpCriticalSection->LockSemaphore));
264 #else
265  sem_destroy((winpr_sem_t*)lpCriticalSection->LockSemaphore);
266 #endif
267  free(lpCriticalSection->LockSemaphore);
268  lpCriticalSection->LockSemaphore = NULL;
269  }
270 }
271 
272 #endif