/**
 * WinPR: Windows Portable Runtime
 * Synchronization Functions — critical section emulation (critical.c)
 */
#include <winpr/config.h>

#include <winpr/assert.h>
#include <winpr/tchar.h>
#include <winpr/synch.h>
#include <winpr/sysinfo.h>
#include <winpr/interlocked.h>
#include <winpr/thread.h>

#include "synch.h"

#include <errno.h>

#ifdef WINPR_HAVE_UNISTD_H
#include <unistd.h>
#endif

#if defined(__APPLE__)
#include <mach/task.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#endif

#ifndef _WIN32

#include "../log.h"
#define TAG WINPR_TAG("synch.critical")
47VOID InitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
48{
49 InitializeCriticalSectionEx(lpCriticalSection, 0, 0);
50}
51
52BOOL InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount,
53 DWORD Flags)
54{
55 WINPR_ASSERT(lpCriticalSection);
66 if (Flags != 0)
67 {
68 WLog_WARN(TAG, "Flags unimplemented");
69 }
70
71 lpCriticalSection->DebugInfo = NULL;
72 lpCriticalSection->LockCount = -1;
73 lpCriticalSection->SpinCount = 0;
74 lpCriticalSection->RecursionCount = 0;
75 lpCriticalSection->OwningThread = NULL;
76 lpCriticalSection->LockSemaphore = (winpr_sem_t*)malloc(sizeof(winpr_sem_t));
77
78 if (!lpCriticalSection->LockSemaphore)
79 return FALSE;
80
81#if defined(__APPLE__)
82
83 if (semaphore_create(mach_task_self(), lpCriticalSection->LockSemaphore, SYNC_POLICY_FIFO, 0) !=
84 KERN_SUCCESS)
85 goto out_fail;
86
87#else
88
89 if (sem_init(lpCriticalSection->LockSemaphore, 0, 0) != 0)
90 goto out_fail;
91
92#endif
93 SetCriticalSectionSpinCount(lpCriticalSection, dwSpinCount);
94 return TRUE;
95out_fail:
96 free(lpCriticalSection->LockSemaphore);
97 return FALSE;
98}
99
100BOOL InitializeCriticalSectionAndSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
101{
102 return InitializeCriticalSectionEx(lpCriticalSection, dwSpinCount, 0);
103}
104
105DWORD SetCriticalSectionSpinCount(WINPR_ATTR_UNUSED LPCRITICAL_SECTION lpCriticalSection,
106 WINPR_ATTR_UNUSED DWORD dwSpinCount)
107{
108 WINPR_ASSERT(lpCriticalSection);
109#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
110 SYSTEM_INFO sysinfo;
111 DWORD dwPreviousSpinCount = lpCriticalSection->SpinCount;
112
113 if (dwSpinCount)
114 {
115 /* Don't spin on uniprocessor systems! */
116 GetNativeSystemInfo(&sysinfo);
117
118 if (sysinfo.dwNumberOfProcessors < 2)
119 dwSpinCount = 0;
120 }
121
122 lpCriticalSection->SpinCount = dwSpinCount;
123 return dwPreviousSpinCount;
124#else
125 // WLog_ERR("TODO", "TODO: implement");
126 return 0;
127#endif
128}
129
130static VOID WaitForCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
131{
132 WINPR_ASSERT(lpCriticalSection);
133 WINPR_ASSERT(lpCriticalSection->LockSemaphore);
134
135#if defined(__APPLE__)
136 semaphore_wait(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
137#else
138 sem_wait((winpr_sem_t*)lpCriticalSection->LockSemaphore);
139#endif
140}
141
/* Signal the section's semaphore, releasing one thread blocked in
 * WaitForCriticalSection. Return values are intentionally ignored. */
static VOID UnWaitCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);
	WINPR_ASSERT(lpCriticalSection->LockSemaphore);
#if defined __APPLE__
	/* Mach semaphores are handles passed by value, hence the dereference. */
	semaphore_signal(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
#else
	sem_post((winpr_sem_t*)lpCriticalSection->LockSemaphore);
#endif
}
152
/**
 * Acquire the critical section, blocking until it becomes available.
 * Recursive acquisition by the owning thread is supported.
 *
 * LockCount encoding: -1 means unlocked. Every contender increments it,
 * so an InterlockedIncrement result of 0 means "acquired uncontended"
 * and anything greater means the lock is held (possibly by us) or
 * contended.
 */
VOID EnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	ULONG SpinCount = lpCriticalSection->SpinCount;

	/* If we're lucky or if the current thread is already owner we can return early */
	if (SpinCount && TryEnterCriticalSection(lpCriticalSection))
		return;

	/* Spin requested times but don't compete with another waiting thread */
	/* NOTE(review): LockCount is read here without an interlocked op; this is
	 * only an optimistic hint — the authoritative acquire is the CAS below. */
	while (SpinCount-- && lpCriticalSection->LockCount < 1)
	{
		/* Atomically acquire iff the section is free (LockCount == -1 -> 0). */
		if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
		{
			lpCriticalSection->RecursionCount = 1;
			lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
			return;
		}

		/* Failed to get the lock. Let the scheduler know that we're spinning. */
		if (sched_yield() != 0)
		{
			/* sched_yield may be unsupported; fall back to a minimal sleep. */
			usleep(1);
		}
	}

#endif

	/* First try the fastest possible path to get the lock. */
	if (InterlockedIncrement(&lpCriticalSection->LockCount))
	{
		/* Section is already locked. Check if it is owned by the current thread. */
		if (lpCriticalSection->OwningThread == (HANDLE)(ULONG_PTR)GetCurrentThreadId())
		{
			/* Recursion. No need to wait. */
			lpCriticalSection->RecursionCount++;
			return;
		}

		/* Section is locked by another thread. We have to wait. */
		WaitForCriticalSection(lpCriticalSection);
	}

	/* We got the lock. Own it ... */
	lpCriticalSection->RecursionCount = 1;
	lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
}
207
/**
 * Try to acquire the critical section without blocking.
 *
 * @return TRUE if the lock was acquired, or was already held by the
 *         calling thread (recursion count is bumped); FALSE if another
 *         thread owns the section.
 */
BOOL TryEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	HANDLE current_thread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();

	WINPR_ASSERT(lpCriticalSection);

	/* Atomically acquire the lock if the section is free (LockCount == -1). */
	if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
	{
		lpCriticalSection->RecursionCount = 1;
		lpCriticalSection->OwningThread = current_thread;
		return TRUE;
	}

	/* Section is already locked. Check if it is owned by the current thread. */
	if (lpCriticalSection->OwningThread == current_thread)
	{
		/* Recursion: bump both the recursion count and our LockCount share. */
		lpCriticalSection->RecursionCount++;
		InterlockedIncrement(&lpCriticalSection->LockCount);
		return TRUE;
	}

	return FALSE;
}
233
/**
 * Release the critical section held by the calling thread.
 * For recursive acquisitions, only the outermost Leave releases the lock.
 *
 * NOTE(review): ownership is not verified here; calling this from a
 * thread that does not own the section corrupts its state — this matches
 * the documented Win32 contract.
 */
VOID LeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);

	/* Decrement RecursionCount and check if this is the last LeaveCriticalSection call ...*/
	if (--lpCriticalSection->RecursionCount < 1)
	{
		/* Last recursion, clear owner, unlock and if there are other waiting threads ... */
		lpCriticalSection->OwningThread = NULL;

		/* A post-decrement LockCount >= 0 means at least one thread is waiting. */
		if (InterlockedDecrement(&lpCriticalSection->LockCount) >= 0)
		{
			/* ...signal the semaphore to unblock the next waiting thread */
			UnWaitCriticalSection(lpCriticalSection);
		}
	}
	else
	{
		/* Still held recursively: just drop our extra LockCount reference. */
		(void)InterlockedDecrement(&lpCriticalSection->LockCount);
	}
}
255
256VOID DeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
257{
258 WINPR_ASSERT(lpCriticalSection);
259
260 lpCriticalSection->LockCount = -1;
261 lpCriticalSection->SpinCount = 0;
262 lpCriticalSection->RecursionCount = 0;
263 lpCriticalSection->OwningThread = NULL;
264
265 if (lpCriticalSection->LockSemaphore != NULL)
266 {
267#if defined __APPLE__
268 semaphore_destroy(mach_task_self(), *((winpr_sem_t*)lpCriticalSection->LockSemaphore));
269#else
270 sem_destroy((winpr_sem_t*)lpCriticalSection->LockSemaphore);
271#endif
272 free(lpCriticalSection->LockSemaphore);
273 lpCriticalSection->LockSemaphore = NULL;
274 }
275}
276
277#endif