/**
 * WinPR: Windows Portable Runtime
 * Synchronization Functions — critical sections (critical.c)
 */
#include <winpr/config.h>

#include <winpr/assert.h>
#include <winpr/tchar.h>
#include <winpr/synch.h>
#include <winpr/sysinfo.h>
#include <winpr/interlocked.h>
#include <winpr/thread.h>

#include "synch.h"

#ifdef WINPR_HAVE_UNISTD_H
#include <unistd.h>
#endif

#if defined(__APPLE__)
#include <mach/task.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#endif

#ifndef _WIN32

#include "../log.h"
#define TAG WINPR_TAG("synch.critical")
47VOID InitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
48{
49 if (!InitializeCriticalSectionEx(lpCriticalSection, 0, 0))
50 WLog_ERR(TAG, "InitializeCriticalSectionEx failed");
51}
52
53BOOL InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount,
54 DWORD Flags)
55{
56 WINPR_ASSERT(lpCriticalSection);
67 if (Flags != 0)
68 {
69 WLog_WARN(TAG, "Flags unimplemented");
70 }
71
72 lpCriticalSection->DebugInfo = nullptr;
73 lpCriticalSection->LockCount = -1;
74 lpCriticalSection->SpinCount = 0;
75 lpCriticalSection->RecursionCount = 0;
76 lpCriticalSection->OwningThread = nullptr;
77 lpCriticalSection->LockSemaphore = (winpr_sem_t*)malloc(sizeof(winpr_sem_t));
78
79 if (!lpCriticalSection->LockSemaphore)
80 return FALSE;
81
82#if defined(__APPLE__)
83
84 if (semaphore_create(mach_task_self(), lpCriticalSection->LockSemaphore, SYNC_POLICY_FIFO, 0) !=
85 KERN_SUCCESS)
86 goto out_fail;
87
88#else
89
90 if (sem_init(lpCriticalSection->LockSemaphore, 0, 0) != 0)
91 goto out_fail;
92
93#endif
94 SetCriticalSectionSpinCount(lpCriticalSection, dwSpinCount);
95 return TRUE;
96out_fail:
97 free(lpCriticalSection->LockSemaphore);
98 return FALSE;
99}
100
101BOOL InitializeCriticalSectionAndSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
102{
103 return InitializeCriticalSectionEx(lpCriticalSection, dwSpinCount, 0);
104}
105
106DWORD SetCriticalSectionSpinCount(WINPR_ATTR_UNUSED LPCRITICAL_SECTION lpCriticalSection,
107 WINPR_ATTR_UNUSED DWORD dwSpinCount)
108{
109 WINPR_ASSERT(lpCriticalSection);
110#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
111 SYSTEM_INFO sysinfo;
112 DWORD dwPreviousSpinCount = lpCriticalSection->SpinCount;
113
114 if (dwSpinCount)
115 {
116 /* Don't spin on uniprocessor systems! */
117 GetNativeSystemInfo(&sysinfo);
118
119 if (sysinfo.dwNumberOfProcessors < 2)
120 dwSpinCount = 0;
121 }
122
123 lpCriticalSection->SpinCount = dwSpinCount;
124 return dwPreviousSpinCount;
125#else
126 // WLog_ERR("TODO", "TODO: implement");
127 return 0;
128#endif
129}
130
131static VOID WaitForCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
132{
133 WINPR_ASSERT(lpCriticalSection);
134 WINPR_ASSERT(lpCriticalSection->LockSemaphore);
135
136#if defined(__APPLE__)
137 semaphore_wait(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
138#else
139 sem_wait((winpr_sem_t*)lpCriticalSection->LockSemaphore);
140#endif
141}
142
143static VOID UnWaitCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
144{
145 WINPR_ASSERT(lpCriticalSection);
146 WINPR_ASSERT(lpCriticalSection->LockSemaphore);
147#if defined __APPLE__
148 semaphore_signal(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
149#else
150 sem_post((winpr_sem_t*)lpCriticalSection->LockSemaphore);
151#endif
152}
153
/**
 * Acquire the critical section, blocking until it is available.
 *
 * Win32-compatible semantics: the section is recursive — the owning thread
 * may enter again without blocking, and every Enter must be balanced by a
 * LeaveCriticalSection call. If a spin count is configured, the lock is
 * polled up to SpinCount times before falling back to a blocking wait on
 * the section's semaphore.
 *
 * Protocol: LockCount is -1 when unlocked; the first locker brings it to 0,
 * each additional contender/recursion increments it further.
 */
VOID EnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	ULONG SpinCount = lpCriticalSection->SpinCount;

	/* If we're lucky or if the current thread is already owner we can return early */
	if (SpinCount && TryEnterCriticalSection(lpCriticalSection))
		return;

	/* Spin requested times but don't compete with another waiting thread
	 * (LockCount >= 1 means at least one other thread is already queued). */
	while (SpinCount-- && lpCriticalSection->LockCount < 1)
	{
		/* Atomically try to acquire and check the if the section is free. */
		if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
		{
			lpCriticalSection->RecursionCount = 1;
			lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
			return;
		}

		/* Failed to get the lock. Let the scheduler know that we're spinning. */
		if (sched_yield() != 0)
		{
			/* sched_yield is not expected to fail; sleep briefly as fallback. */
			usleep(1);
		}
	}

#endif

	/* First try the fastest possible path to get the lock. */
	if (InterlockedIncrement(&lpCriticalSection->LockCount))
	{
		/* Section is already locked. Check if it is owned by the current thread. */
		if (lpCriticalSection->OwningThread == (HANDLE)(ULONG_PTR)GetCurrentThreadId())
		{
			/* Recursion. No need to wait. */
			lpCriticalSection->RecursionCount++;
			return;
		}

		/* Section is locked by another thread. We have to wait. */
		WaitForCriticalSection(lpCriticalSection);
	}

	/* We got the lock. Own it ... */
	lpCriticalSection->RecursionCount = 1;
	lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
}

/**
 * Try to acquire the critical section without blocking.
 *
 * @return TRUE if the section was acquired — either it was free, or the
 *         calling thread already owns it (recursion count is bumped);
 *         FALSE if another thread currently owns it.
 */
BOOL TryEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	HANDLE current_thread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();

	WINPR_ASSERT(lpCriticalSection);

	/* Atomically acquire the lock if the section is free (LockCount -1 -> 0). */
	if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
	{
		lpCriticalSection->RecursionCount = 1;
		lpCriticalSection->OwningThread = current_thread;
		return TRUE;
	}

	/* Section is already locked. Check if it is owned by the current thread. */
	if (lpCriticalSection->OwningThread == current_thread)
	{
		/* Recursion, return success. The increment keeps LockCount balanced
		 * with the matching decrements in LeaveCriticalSection. */
		lpCriticalSection->RecursionCount++;
		InterlockedIncrement(&lpCriticalSection->LockCount);
		return TRUE;
	}

	return FALSE;
}

/**
 * Release the critical section.
 *
 * Must be called by the owning thread once for every successful
 * Enter/TryEnter. Only the outermost Leave clears ownership; if other
 * threads are blocked on the section, the semaphore is signalled to wake
 * one of them.
 */
VOID LeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	WINPR_ASSERT(lpCriticalSection);

	/* Decrement RecursionCount and check if this is the last LeaveCriticalSection call ...*/
	if (--lpCriticalSection->RecursionCount < 1)
	{
		/* Last recursion, clear owner, unlock and if there are other waiting threads ... */
		lpCriticalSection->OwningThread = nullptr;

		/* A post-decrement value >= 0 means at least one thread is queued. */
		if (InterlockedDecrement(&lpCriticalSection->LockCount) >= 0)
		{
			/* ...signal the semaphore to unblock the next waiting thread */
			UnWaitCriticalSection(lpCriticalSection);
		}
	}
	else
	{
		/* Inner recursive Leave: just undo one LockCount increment. */
		(void)InterlockedDecrement(&lpCriticalSection->LockCount);
	}
}

257VOID DeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
258{
259 WINPR_ASSERT(lpCriticalSection);
260
261 lpCriticalSection->LockCount = -1;
262 lpCriticalSection->SpinCount = 0;
263 lpCriticalSection->RecursionCount = 0;
264 lpCriticalSection->OwningThread = nullptr;
265
266 if (lpCriticalSection->LockSemaphore != nullptr)
267 {
268#if defined __APPLE__
269 semaphore_destroy(mach_task_self(), *((winpr_sem_t*)lpCriticalSection->LockSemaphore));
270#else
271 sem_destroy((winpr_sem_t*)lpCriticalSection->LockSemaphore);
272#endif
273 free(lpCriticalSection->LockSemaphore);
274 lpCriticalSection->LockSemaphore = nullptr;
275 }
276}
277
#endif /* !_WIN32 */