VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@ 49998

Last change on this file since 49998 was 49998, checked in by vboxsync, 11 years ago

pdmR3R0CritSectEnterContended: Deal with VERR_INTERRUPTED while trying to wait in ring-0.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 26.3 KB
1/* $Id: PDMAllCritSect.cpp 49998 2013-12-24 18:48:22Z vboxsync $ */
2/** @file
3 * PDM - Write-Only Critical Section, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsect.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vm.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37# include <iprt/semaphore.h>
38#endif
39#if defined(IN_RING3) || defined(IN_RING0)
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
47/** The number of loops to spin for in ring-3. */
48#define PDMCRITSECT_SPIN_COUNT_R3 20
49/** The number of loops to spin for in ring-0. */
50#define PDMCRITSECT_SPIN_COUNT_R0 256
51/** The number of loops to spin for in the raw-mode context. */
52#define PDMCRITSECT_SPIN_COUNT_RC 256
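/* The enter code below picks among these with CTX_SUFF(PDMCRITSECT_SPIN_COUNT_),
   which appends the R3/R0/RC suffix of the current compilation context. */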
53
54
55/* Undefine the automatic VBOX_STRICT API mappings. */
56#undef PDMCritSectEnter
57#undef PDMCritSectTryEnter
58
59
60/**
61 * Gets the ring-3 native thread handle of the calling thread.
62 *
63 * @returns native thread handle (ring-3).
64 * @param pCritSect The critical section. This is used in R0 and RC.
65 */
66DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
67{
68#ifdef IN_RING3
69 NOREF(pCritSect);
70 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
71#else
72 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
73 NIL_RTNATIVETHREAD);
74 PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
75 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
76 RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread; Assert(hNativeSelf != NIL_RTNATIVETHREAD);
77#endif
78 return hNativeSelf;
79}
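
/*
 * Note: in ring-0 and raw-mode the caller must be an EMT (VMMGetCpu() has to
 * resolve, as asserted above), so the VCPU's cached ring-3 native thread handle
 * is used as the thread identity instead of querying the host thread directly.
 */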
80
81
82/**
83 * Tail code called when we've won the battle for the lock.
84 *
85 * @returns VINF_SUCCESS.
86 *
87 * @param pCritSect The critical section.
88 * @param hNativeSelf The native handle of this thread.
89 */
90DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
91{
92 AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
93 Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));
94
95 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
96 Assert(pCritSect->s.Core.cNestings == 1);
97 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);
98
99# ifdef PDMCRITSECT_STRICT
100 RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
101# else
102 NOREF(pSrcPos);
103# endif
104
105 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
106 return VINF_SUCCESS;
107}
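
/*
 * A note on the cLockers accounting used throughout this file: the counter is
 * -1 while the section is free, and the uncontended enter claims the section by
 * compare-exchanging it from -1 to 0.  Nested enters and contending waiters
 * each add one, so with a single owner and no waiters it equals cNestings - 1,
 * and the leave code signals the event semaphore whenever its decrement still
 * leaves a non-negative value (i.e. somebody is waiting).
 */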
108
109
110#if defined(IN_RING3) || defined(IN_RING0)
111/**
112 * Deals with the contended case in ring-3 and ring-0.
113 *
114 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
115 * @param pCritSect The critsect.
116 * @param hNativeSelf The native thread handle.
117 */
118static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
119{
120 /*
121 * Start waiting.
122 */
123 if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
124 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
125# ifdef IN_RING3
126 STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
127# else
128 STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
129# endif
130
131 /*
132 * The wait loop.
133 */
134 PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
135 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
136# ifdef IN_RING3
137# ifdef PDMCRITSECT_STRICT
138 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
139 int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
140 if (RT_FAILURE(rc2))
141 return rc2;
142# else
143 RTTHREAD hThreadSelf = RTThreadSelf();
144# endif
145# endif
146 for (unsigned i = 0;; i++)
147 {
148 /*
149 * Do the wait.
150 *
151 * In ring-3 this gets cluttered by lock validation and thread state
152 * maintenance.
153 *
154 * In ring-0 we have to deal with the possibility that the thread has
155 * been signalled and that the interruptible wait function returns
156 * immediately. We handle this by preferring the interruptible wait
157 * and alternating it with short periods of non-interruptible waiting.
158 */
159# ifdef IN_RING3
160# ifdef PDMCRITSECT_STRICT
161 int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
162 !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
163 RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
164 if (RT_FAILURE(rc9))
165 return rc9;
166# else
167 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
168# endif
169 int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
170 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
171# else /* IN_RING0 */
172 int rc;
173 if ((i & 1) == 0)
174 rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
175 else
176 rc = SUPSemEventWait(pSession, hEvent, 1000 /*ms*/);
177# endif /* IN_RING0 */
178
179 /*
180 * Deal with the return code and critsect destruction.
181 */
182 if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
183 return VERR_SEM_DESTROYED;
184 if (rc == VINF_SUCCESS)
185 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
186# ifdef IN_RING3
187 AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
188# else
189 AssertMsg(rc == (!(i & 1) ? VERR_INTERRUPTED : VERR_TIMEOUT), ("rc=%Rrc i=%u\n", rc, i));
190# endif
191 }
192 /* won't get here */
193}
194#endif /* IN_RING3 || IN_RING0 */
195
196
197/**
198 * Common worker for the debug and normal APIs.
199 *
200 * @returns VINF_SUCCESS if entered successfully.
201 * @returns rcBusy when encountering a busy critical section in GC/R0.
202 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
203 * during the operation.
204 *
205 * @param pCritSect The PDM critical section to enter.
206 * @param rcBusy The status code to return when we're in GC or R0
207 * and the section is busy.
208 */
209DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
210{
211 Assert(pCritSect->s.Core.cNestings < 8); /* useful to catch incorrect locking */
212 Assert(pCritSect->s.Core.cNestings >= 0);
213
214 /*
215 * If the critical section has already been destroyed, then inform the caller.
216 */
217 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
218 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
219 VERR_SEM_DESTROYED);
220
221 /*
222 * See if we're lucky.
223 */
224 /* NOP ... */
225 if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
226 return VINF_SUCCESS;
227
228 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
229 /* ... not owned ... */
230 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
231 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
232
233 /* ... or nested. */
234 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
235 {
236 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
237 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
238 Assert(pCritSect->s.Core.cNestings > 1);
239 return VINF_SUCCESS;
240 }
241
242 /*
243 * Spin for a bit without incrementing the counter.
244 */
245 /** @todo Move this to cfgm variables since it doesn't make sense to spin on
246 * uniprocessor systems. */
247 int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
248 while (cSpinsLeft-- > 0)
249 {
250 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
251 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
252 ASMNopPause();
253 /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
254 cli'ed pendingpreemption check up front using sti w/ instruction fusing
255 for avoiding races. Hmm ... This is assuming the other party is actually
256 executing code on another CPU ... which we could keep track of if we
257 wanted. */
258 }
259
260#ifdef IN_RING3
261 /*
262 * Take the slow path.
263 */
264 NOREF(rcBusy);
265 return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
266
267#else
268# ifdef IN_RING0
269 /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
270 * and would be better off switching out of that while waiting for
271 * the lock. Several of the locks jump back to ring-3 just to
272 * get the lock; the ring-3 code will then call the kernel to do
273 * the lock wait, and when the call returns it will call ring-0
274 * again and resume in setjmp style. Not very efficient. */
275# if 0
276 if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
277 * callers not prepared for longjmp/blocking to
278 * use PDMCritSectTryEnter. */
279 {
280 /*
281 * Leave HM context while waiting if necessary.
282 */
283 int rc;
284 if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
285 {
286 STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000);
287 rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
288 }
289 else
290 {
291 STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
292 PVM pVM = pCritSect->s.CTX_SUFF(pVM);
293 PVMCPU pVCpu = VMMGetCpu(pVM);
294 HMR0Leave(pVM, pVCpu);
295 RTThreadPreemptRestore(NIL_RTTHREAD, XXX);
296
297 rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
298
299 RTThreadPreemptDisable(NIL_RTTHREAD, XXX);
300 HMR0Enter(pVM, pVCpu);
301 }
302 return rc;
303 }
304# else
305 /*
306 * If preemption hasn't been disabled, we can block here in ring-0.
307 */
308 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
309 && ASMIntAreEnabled())
310 return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
311# endif
312#endif /* IN_RING0 */
313
314 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
315
316 /*
317 * Call ring-3 to acquire the critical section?
318 */
319 if (rcBusy == VINF_SUCCESS)
320 {
321 PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
322 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
323 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
324 }
325
326 /*
327 * Return busy.
328 */
329 LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
330 return rcBusy;
331#endif /* !IN_RING3 */
332}
333
334
335/**
336 * Enters a PDM critical section.
337 *
338 * @returns VINF_SUCCESS if entered successfully.
339 * @returns rcBusy when encountering a busy critical section in RC/R0.
340 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
341 * during the operation.
342 *
343 * @param pCritSect The PDM critical section to enter.
344 * @param rcBusy The status code to return when we're in RC or R0
345 * and the section is busy. Pass VINF_SUCCESS to
346 * acquire the critical section through a ring-3
347 * call if necessary.
348 */
349VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
350{
351#ifndef PDMCRITSECT_STRICT
352 return pdmCritSectEnter(pCritSect, rcBusy, NULL);
353#else
354 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
355 return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
356#endif
357}
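
/*
 * Minimal usage sketch (illustrative only; the helper is hypothetical and kept
 * in an #if 0 block so it cannot affect the build).  A typical RC/R0 caller
 * passes an rcBusy status such as VINF_IOM_R3_MMIO_WRITE so that contention
 * defers the work to ring-3 instead of blocking.
 */
#if 0 /* illustrative example */
static int exampleGuardedAccess(PPDMCRITSECT pCritSect)
{
    int rc = PDMCritSectEnter(pCritSect, VINF_IOM_R3_MMIO_WRITE);
    if (rc != VINF_SUCCESS)
        return rc; /* rcBusy (or VERR_SEM_DESTROYED) is propagated to the caller. */

    /* ... touch the state guarded by the critical section ... */

    PDMCritSectLeave(pCritSect);
    return VINF_SUCCESS;
}
#endif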
358
359
360/**
361 * Enters a PDM critical section, with location information for debugging.
362 *
363 * @returns VINF_SUCCESS if entered successfully.
364 * @returns rcBusy when encountering a busy critical section in RC/R0.
365 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
366 * during the operation.
367 *
368 * @param pCritSect The PDM critical section to enter.
369 * @param rcBusy The status code to return when we're in RC or R0
370 * and the section is busy. Pass VINF_SUCCESS to
371 * acquire the critical section through a ring-3
372 * call if necessary.
373 * @param uId Some kind of locking location ID. Typically a
374 * return address up the stack. Optional (0).
375 * @param pszFile The file where the lock is being acquired from.
376 * Optional.
377 * @param iLine The line number in that file. Optional (0).
378 * @param pszFunction The function where the lock is being acquired
379 * from. Optional.
380 */
381VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
382{
383#ifdef PDMCRITSECT_STRICT
384 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
385 return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
386#else
387 NOREF(uId); RT_SRC_POS_NOREF();
388 return pdmCritSectEnter(pCritSect, rcBusy, NULL);
389#endif
390}
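
/*
 * Illustrative direct call (a sketch, not taken from this file): the debug
 * variant is normally reached through the VBOX_STRICT mapping of
 * PDMCritSectEnter (see the #undef near the top), but it can also be invoked
 * explicitly with the caller's source position, e.g.:
 *
 *     rc = PDMCritSectEnterDebug(pCritSect, rcBusy,
 *                                (RTHCUINTPTR)ASMReturnAddress(), RT_SRC_POS);
 */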
391
392
393/**
394 * Common worker for the debug and normal APIs.
395 *
396 * @retval VINF_SUCCESS on success.
397 * @retval VERR_SEM_BUSY if the critsect was owned.
398 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
399 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
400 * during the operation.
401 *
402 * @param pCritSect The critical section.
403 */
404static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
405{
406 /*
407 * If the critical section has already been destroyed, then inform the caller.
408 */
409 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
410 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
411 VERR_SEM_DESTROYED);
412
413 /*
414 * See if we're lucky.
415 */
416 /* NOP ... */
417 if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
418 return VINF_SUCCESS;
419
420 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
421 /* ... not owned ... */
422 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
423 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
424
425 /* ... or nested. */
426 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
427 {
428 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
429 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
430 Assert(pCritSect->s.Core.cNestings > 1);
431 return VINF_SUCCESS;
432 }
433
434 /* no spinning */
435
436 /*
437 * Return busy.
438 */
439#ifdef IN_RING3
440 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
441#else
442 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
443#endif
444 LogFlow(("PDMCritSectTryEnter: locked\n"));
445 return VERR_SEM_BUSY;
446}
447
448
449/**
450 * Try enter a critical section.
451 *
452 * @retval VINF_SUCCESS on success.
453 * @retval VERR_SEM_BUSY if the critsect was owned.
454 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
455 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
456 * during the operation.
457 *
458 * @param pCritSect The critical section.
459 */
460VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
461{
462#ifndef PDMCRITSECT_STRICT
463 return pdmCritSectTryEnter(pCritSect, NULL);
464#else
465 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
466 return pdmCritSectTryEnter(pCritSect, &SrcPos);
467#endif
468}
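
/*
 * Minimal try-enter sketch (illustrative only; hypothetical helper kept in an
 * #if 0 block): unlike PDMCritSectEnter this never blocks and never drops to
 * ring-3, so the caller must be prepared to handle VERR_SEM_BUSY.
 */
#if 0 /* illustrative example */
static bool exampleTryUpdate(PPDMCRITSECT pCritSect)
{
    int rc = PDMCritSectTryEnter(pCritSect);
    if (RT_FAILURE(rc))
        return false; /* VERR_SEM_BUSY (owned by another thread) or VERR_SEM_DESTROYED. */

    /* ... opportunistic update of the guarded state ... */

    PDMCritSectLeave(pCritSect);
    return true;
}
#endif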
469
470
471/**
472 * Try enter a critical section, with location information for debugging.
473 *
474 * @retval VINF_SUCCESS on success.
475 * @retval VERR_SEM_BUSY if the critsect was owned.
476 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
477 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
478 * during the operation.
479 *
480 * @param pCritSect The critical section.
481 * @param uId Some kind of locking location ID. Typically a
482 * return address up the stack. Optional (0).
483 * @param pszFile The file where the lock is being acquired from.
484 * Optional.
485 * @param iLine The line number in that file. Optional (0).
486 * @param pszFunction The function where the lock is being acquired
487 * from. Optional.
488 */
489VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
490{
491#ifdef PDMCRITSECT_STRICT
492 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
493 return pdmCritSectTryEnter(pCritSect, &SrcPos);
494#else
495 NOREF(uId); RT_SRC_POS_NOREF();
496 return pdmCritSectTryEnter(pCritSect, NULL);
497#endif
498}
499
500
501#ifdef IN_RING3
502/**
503 * Enters a PDM critical section.
504 *
505 * @returns VINF_SUCCESS if entered successfully.
506 * @returns rcBusy when encountering a busy critical section in GC/R0.
507 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
508 * during the operation.
509 *
510 * @param pCritSect The PDM critical section to enter.
511 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
512 */
513VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
514{
515 int rc = PDMCritSectEnter(pCritSect, VERR_IGNORED);
516 if ( rc == VINF_SUCCESS
517 && fCallRing3
518 && pCritSect->s.Core.pValidatorRec
519 && pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
520 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
521 return rc;
522}
523#endif /* IN_RING3 */
524
525
526/**
527 * Leaves a critical section entered with PDMCritSectEnter().
528 *
529 * @returns Indication whether we really exited the critical section.
530 * @retval VINF_SUCCESS if we really exited.
531 * @retval VINF_SEM_NESTED if we only reduced the nesting count.
532 * @retval VERR_NOT_OWNER if you somehow ignore release assertions.
533 *
534 * @param pCritSect The PDM critical section to leave.
535 */
536VMMDECL(int) PDMCritSectLeave(PPDMCRITSECT pCritSect)
537{
538 AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
539 Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
540
541 /* Check for NOP sections before asserting ownership. */
542 if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
543 return VINF_SUCCESS;
544
545 /*
546 * Always check that the caller is the owner (screw performance).
547 */
548 RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
549 AssertReleaseMsgReturn(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
550 ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
551 pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
552 pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings),
553 VERR_NOT_OWNER);
554 Assert(pCritSect->s.Core.cNestings >= 1);
555
556 /*
557 * Nested leave.
558 */
559 if (pCritSect->s.Core.cNestings > 1)
560 {
561 ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
562 Assert(pCritSect->s.Core.cNestings >= 1);
563 ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
564 Assert(pCritSect->s.Core.cLockers >= 0);
565 return VINF_SEM_NESTED;
566 }
567
568#ifdef IN_RING0
569# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
570 if (1) /* SUPSemEventSignal is safe */
571# else
572 if (ASMIntAreEnabled())
573# endif
574#endif
575#if defined(IN_RING3) || defined(IN_RING0)
576 {
577 /*
578 * Leave for real.
579 */
580 /* update members. */
581# ifdef IN_RING3
582 RTSEMEVENT hEventToSignal = pCritSect->s.EventToSignal;
583 pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
584# if defined(PDMCRITSECT_STRICT)
585 if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
586 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
587# endif
588 Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
589# endif
590 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
591 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
592 ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
593 Assert(pCritSect->s.Core.cNestings == 0);
594
595 /* stop and decrement lockers. */
596 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
597 ASMCompilerBarrier();
598 if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
599 {
600 /* Someone is waiting, wake up one of them. */
601 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
602 PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
603 int rc = SUPSemEventSignal(pSession, hEvent);
604 AssertRC(rc);
605 }
606
607# ifdef IN_RING3
608 /* Signal exit event. */
609 if (hEventToSignal != NIL_RTSEMEVENT)
610 {
611 LogBird(("Signalling %#x\n", hEventToSignal));
612 int rc = RTSemEventSignal(hEventToSignal);
613 AssertRC(rc);
614 }
615# endif
616
617# if defined(DEBUG_bird) && defined(IN_RING0)
618 VMMTrashVolatileXMMRegs();
619# endif
620 }
621#endif /* IN_RING3 || IN_RING0 */
622#ifdef IN_RING0
623 else
624#endif
625#if defined(IN_RING0) || defined(IN_RC)
626 {
627 /*
628 * Try leave it.
629 */
630 if (pCritSect->s.Core.cLockers == 0)
631 {
632 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
633 RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
634 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
635 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
636
637 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
638 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
639 return VINF_SUCCESS;
640
641 /* darn, someone raced in on us. */
642 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
643 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
644 Assert(pCritSect->s.Core.cNestings == 0);
645 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
646 }
647 ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);
648
649 /*
650 * Queue the request.
651 */
652 PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
653 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
654 uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
655 LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
656 AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
657 pVCpu->pdm.s.apQueuedCritSectLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
658 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
659 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
660 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
661 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
662 }
663#endif /* IN_RING0 || IN_RC */
664
665 return VINF_SUCCESS;
666}
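
/*
 * Nesting behaviour in a nutshell (as implemented by the enter/leave code
 * above): recursive enters by the owner bump cNestings and cLockers, and each
 * matching leave unwinds one level, e.g.:
 *
 *     PDMCritSectEnter(pCritSect, rcBusy);   => VINF_SUCCESS,    cNestings = 1
 *     PDMCritSectEnter(pCritSect, rcBusy);   => VINF_SUCCESS,    cNestings = 2
 *     PDMCritSectLeave(pCritSect);           => VINF_SEM_NESTED, cNestings = 1
 *     PDMCritSectLeave(pCritSect);           => VINF_SUCCESS,    section released
 */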
667
668
669/**
670 * Checks whether the caller is the owner of the critical section.
671 *
672 * @returns true if owner.
673 * @returns false if not owner.
674 * @param pCritSect The critical section.
675 */
676VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
677{
678#ifdef IN_RING3
679 return RTCritSectIsOwner(&pCritSect->s.Core);
680#else
681 PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
682 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
683 if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
684 return false;
685 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
686 || pCritSect->s.Core.cNestings > 1;
687#endif
688}
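
/*
 * Typical use (illustrative; pThis stands for a hypothetical device instance):
 * code that requires the caller to already hold the section asserts ownership
 * on entry, e.g.:
 *
 *     Assert(PDMCritSectIsOwner(&pThis->CritSect));
 */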
689
690
691/**
692 * Checks whether the specified VCPU is the owner of the critical section.
693 *
694 * @returns true if owner.
695 * @returns false if not owner.
696 * @param pCritSect The critical section.
697 * @param pVCpu Pointer to the VMCPU.
698 */
699VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, PVMCPU pVCpu)
700{
701#ifdef IN_RING3
702 NOREF(pVCpu);
703 return RTCritSectIsOwner(&pCritSect->s.Core);
704#else
705 Assert(&pVCpu->CTX_SUFF(pVM)->aCpus[pVCpu->idCpu] == pVCpu);
706 if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
707 return false;
708 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
709 || pCritSect->s.Core.cNestings > 1;
710#endif
711}
712
713
714/**
715 * Checks if anyone is waiting on the critical section we own.
716 *
717 * @returns true if someone is waiting.
718 * @returns false if no one is waiting.
719 * @param pCritSect The critical section.
720 */
721VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
722{
723 AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
724 Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
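    /* cLockers equals cNestings - 1 plus the number of waiters while we own the
       section, so a value at or above cNestings means at least one waiter. */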
725 return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
726}
727
728
729/**
730 * Checks if a critical section is initialized or not.
731 *
732 * @returns true if initialized.
733 * @returns false if not initialized.
734 * @param pCritSect The critical section.
735 */
736VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
737{
738 return RTCritSectIsInitialized(&pCritSect->s.Core);
739}
740
741
742/**
743 * Gets the recursion depth.
744 *
745 * @returns The recursion depth.
746 * @param pCritSect The critical section.
747 */
748VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
749{
750 return RTCritSectGetRecursion(&pCritSect->s.Core);
751}
752