VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@28317

Last change on this file since 28317 was 28317, checked in by vboxsync, 15 years ago

RTMemPageFree + all users: Added size parameter to RTMemPageFree so we can avoid tracking structures when using mmap/munmap.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 158.7 KB
1/* $Id: lockvalidator.cpp 28317 2010-04-14 18:06:05Z vboxsync $ */
2/** @file
3 * IPRT - Lock Validator.
4 */
5
6/*
7 * Copyright (C) 2009-2010 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#include <iprt/lockvalidator.h>
35#include "internal/iprt.h"
36
37#include <iprt/asm.h>
38#include <iprt/assert.h>
39#include <iprt/env.h>
40#include <iprt/err.h>
41#include <iprt/mem.h>
42#include <iprt/once.h>
43#include <iprt/semaphore.h>
44#include <iprt/string.h>
45#include <iprt/thread.h>
46
47#include "internal/lockvalidator.h"
48#include "internal/magics.h"
49#include "internal/thread.h"
50
51
52/*******************************************************************************
53* Defined Constants And Macros *
54*******************************************************************************/
55/** Macro that asserts that a pointer is aligned correctly.
56 * Only used when fighting bugs. */
57#if 1
58# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
59 AssertMsg(!((uintptr_t)(p) & (sizeof(uintptr_t) - 1)), ("%p\n", (p)));
60#else
61# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) do { } while (0)
62#endif
63
64/** Hashes the class handle (pointer) into an apPriorLocksHash index. */
65#define RTLOCKVALCLASS_HASH(hClass) \
66 ( ((uintptr_t)(hClass) >> 6 ) \
67 % ( RT_SIZEOFMEMB(RTLOCKVALCLASSINT, apPriorLocksHash) \
68 / sizeof(PRTLOCKVALCLASSREF)) )
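The macro above simply strips the alignment bits off the class pointer and folds the rest into the 17-entry apPriorLocksHash table. A minimal sketch of what it computes, using a made-up class address:

    /* Sketch only: equivalent of RTLOCKVALCLASS_HASH for a hypothetical pointer. */
    uintptr_t const uClass = (uintptr_t)0x00007f80a2c41340; /* made-up class address */
    size_t    const cSlots = 17;                     /* RT_ELEMENTS(apPriorLocksHash) */
    size_t    const iSlot  = (uClass >> 6) % cSlots; /* low 6 bits are alignment noise */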
69
70/** The max value for RTLOCKVALCLASSINT::cRefs. */
71#define RTLOCKVALCLASS_MAX_REFS UINT32_C(0xffff0000)
72/** The max value for RTLOCKVALCLASSREF::cLookups. */
73#define RTLOCKVALCLASSREF_MAX_LOOKUPS UINT32_C(0xfffe0000)
74/** The absolute max value for RTLOCKVALCLASSREF::cLookups at which it will
75 * be set back to RTLOCKVALCLASSREF_MAX_LOOKUPS. */
76#define RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX UINT32_C(0xffff0000)
77
78
79/** @def RTLOCKVAL_WITH_RECURSION_RECORDS
80 * Enable recursion records. */
81#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
82# define RTLOCKVAL_WITH_RECURSION_RECORDS 1
83#endif
84
85/** @def RTLOCKVAL_WITH_VERBOSE_DUMPS
86 * Enables some extra verbosity in the lock dumping. */
87#if defined(DOXYGEN_RUNNING)
88# define RTLOCKVAL_WITH_VERBOSE_DUMPS
89#endif
90
91/** @def RTLOCKVAL_WITH_CLASS_HASH_STATS
92 * Enables collection of prior class hash lookup statistics, dumping them when
93 * complaining about the class. */
94#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
95# define RTLOCKVAL_WITH_CLASS_HASH_STATS
96#endif
97
98
99/*******************************************************************************
100* Structures and Typedefs *
101*******************************************************************************/
102/**
103 * Deadlock detection stack entry.
104 */
105typedef struct RTLOCKVALDDENTRY
106{
107 /** The current record. */
108 PRTLOCKVALRECUNION pRec;
109 /** The current entry number if pRec is a shared one. */
110 uint32_t iEntry;
111 /** The thread state of the thread we followed to get to pFirstSibling.
112 * This is only used for validating a deadlock stack. */
113 RTTHREADSTATE enmState;
114 /** The thread we followed to get to pFirstSibling.
115 * This is only used for validating a deadlock stack. */
116 PRTTHREADINT pThread;
117 /** What pThread is waiting on, i.e. where we entered the circular list of
118 * siblings. This is used for validating a deadlock stack as well as
119 * terminating the sibling walk. */
120 PRTLOCKVALRECUNION pFirstSibling;
121} RTLOCKVALDDENTRY;
122
123
124/**
125 * Deadlock detection stack.
126 */
127typedef struct RTLOCKVALDDSTACK
128{
129 /** The number of stack entries. */
130 uint32_t c;
131 /** The stack entries. */
132 RTLOCKVALDDENTRY a[32];
133} RTLOCKVALDDSTACK;
134/** Pointer to a deadlock detection stack. */
135typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;
136
137
138/**
139 * Reference to another class.
140 */
141typedef struct RTLOCKVALCLASSREF
142{
143 /** The class. */
144 RTLOCKVALCLASS hClass;
145 /** The number of lookups of this class. */
146 uint32_t volatile cLookups;
147 /** Indicates whether the entry was added automatically during order checking
148 * (true) or manually via the API (false). */
149 bool fAutodidacticism;
150 /** Reserved / explicit alignment padding. */
151 bool afReserved[3];
152} RTLOCKVALCLASSREF;
153/** Pointer to a class reference. */
154typedef RTLOCKVALCLASSREF *PRTLOCKVALCLASSREF;
155
156
157/** Pointer to a chunk of class references. */
158typedef struct RTLOCKVALCLASSREFCHUNK *PRTLOCKVALCLASSREFCHUNK;
159/**
160 * Chunk of class references.
161 */
162typedef struct RTLOCKVALCLASSREFCHUNK
163{
164 /** Array of refs. */
165#if 0 /** @todo for testing allocation of new chunks. */
166 RTLOCKVALCLASSREF aRefs[ARCH_BITS == 32 ? 10 : 8];
167#else
168 RTLOCKVALCLASSREF aRefs[2];
169#endif
170 /** Pointer to the next chunk. */
171 PRTLOCKVALCLASSREFCHUNK volatile pNext;
172} RTLOCKVALCLASSREFCHUNK;
173
174
175/**
176 * Lock class.
177 */
178typedef struct RTLOCKVALCLASSINT
179{
180 /** AVL node core. */
181 AVLLU32NODECORE Core;
182 /** Magic value (RTLOCKVALCLASS_MAGIC). */
183 uint32_t volatile u32Magic;
184 /** Reference counter. See RTLOCKVALCLASS_MAX_REFS. */
185 uint32_t volatile cRefs;
186 /** Whether the class is allowed to teach itself new locking order rules. */
187 bool fAutodidact;
188 /** Whether to allow recursion. */
189 bool fRecursionOk;
190 /** Strict release order. */
191 bool fStrictReleaseOrder;
192 /** Whether this class is in the tree. */
193 bool fInTree;
194 /** Donate a reference to the next retainer. This is a hack to make
195 * RTLockValidatorClassCreateUnique work. */
196 bool volatile fDonateRefToNextRetainer;
197 /** Reserved future use / explicit alignment. */
198 bool afReserved[3];
199 /** The minimum wait interval for which we do deadlock detection
200 * (milliseconds). */
201 RTMSINTERVAL cMsMinDeadlock;
202 /** The minimum wait interval for which we do order checks (milliseconds). */
203 RTMSINTERVAL cMsMinOrder;
204 /** More padding. */
205 uint32_t au32Reserved[ARCH_BITS == 32 ? 5 : 2];
206 /** Classes that may be taken prior to this one.
207 * This is a linked list where each node contains a chunk of locks so that we
208 * reduce the number of allocations as well as localize the data. */
209 RTLOCKVALCLASSREFCHUNK PriorLocks;
210 /** Hash table containing frequently encountered prior locks. */
211 PRTLOCKVALCLASSREF apPriorLocksHash[17];
212 /** Class name. (Allocated after the end of the block as usual.) */
213 char const *pszName;
214 /** Where this class was created.
215 * This is mainly used for finding automatically created lock classes.
216 * @remarks The strings are stored after this structure so we won't crash
217 * if the class lives longer than the module (dll/so/dylib) that
218 * spawned it. */
219 RTLOCKVALSRCPOS CreatePos;
220#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
221 /** Hash hits. */
222 uint32_t volatile cHashHits;
223 /** Hash misses. */
224 uint32_t volatile cHashMisses;
225#endif
226} RTLOCKVALCLASSINT;
227AssertCompileSize(AVLLU32NODECORE, ARCH_BITS == 32 ? 20 : 32);
228AssertCompileMemberOffset(RTLOCKVALCLASSINT, PriorLocks, 64);
229
230
231/*******************************************************************************
232* Global Variables *
233*******************************************************************************/
234/** Serializing object destruction and deadlock detection.
235 *
236 * This makes sure that none of the memory examined by the deadlock detection
237 * code will become invalid (reused for other purposes or made not present)
238 * while the detection is in progress.
239 *
240 * NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALRECSHRD::papOwners destruction.
241 * EW: Deadlock detection and some related activities.
242 */
243static RTSEMXROADS g_hLockValidatorXRoads = NIL_RTSEMXROADS;
244/** Serializing class tree insert and lookups. */
245static RTSEMRW g_hLockValClassTreeRWLock = NIL_RTSEMRW;
246/** Class tree. */
247static PAVLLU32NODECORE g_LockValClassTree = NULL;
248/** Critical section serializing the teaching new rules to the classes. */
249static RTCRITSECT g_LockValClassTeachCS;
250
251/** Whether the lock validator is enabled or disabled.
252 * Only applies to new locks. */
253static bool volatile g_fLockValidatorEnabled = true;
254/** Set if the lock validator is quiet. */
255#ifdef RT_STRICT
256static bool volatile g_fLockValidatorQuiet = false;
257#else
258static bool volatile g_fLockValidatorQuiet = true;
259#endif
260/** Set if the lock validator may panic. */
261#ifdef RT_STRICT
262static bool volatile g_fLockValidatorMayPanic = true;
263#else
264static bool volatile g_fLockValidatorMayPanic = false;
265#endif
266/** Whether to return an error status on wrong locking order. */
267static bool volatile g_fLockValSoftWrongOrder = false;
268
269
270/*******************************************************************************
271* Internal Functions *
272*******************************************************************************/
273static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass);
274static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread);
275
276
277/**
278 * Lazy initialization of the lock validator globals.
279 */
280static void rtLockValidatorLazyInit(void)
281{
282 static uint32_t volatile s_fInitializing = false;
283 if (ASMAtomicCmpXchgU32(&s_fInitializing, true, false))
284 {
285 /*
286 * The locks.
287 */
288 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
289 RTCritSectInitEx(&g_LockValClassTeachCS, RTCRITSECT_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS,
290 RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Teach");
291
292 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
293 {
294 RTSEMRW hSemRW;
295 int rc = RTSemRWCreateEx(&hSemRW, RTSEMRW_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Tree");
296 if (RT_SUCCESS(rc))
297 ASMAtomicWriteHandle(&g_hLockValClassTreeRWLock, hSemRW);
298 }
299
300 if (g_hLockValidatorXRoads == NIL_RTSEMXROADS)
301 {
302 RTSEMXROADS hXRoads;
303 int rc = RTSemXRoadsCreate(&hXRoads);
304 if (RT_SUCCESS(rc))
305 ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
306 }
307
308#ifdef IN_RING3
309 /*
310 * Check the environment for our config variables.
311 */
312 if (RTEnvExist("IPRT_LOCK_VALIDATOR_ENABLED"))
313 ASMAtomicWriteBool(&g_fLockValidatorEnabled, true);
314 if (RTEnvExist("IPRT_LOCK_VALIDATOR_DISABLED"))
315 ASMAtomicWriteBool(&g_fLockValidatorEnabled, false);
316
317 if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_PANIC"))
318 ASMAtomicWriteBool(&g_fLockValidatorMayPanic, true);
319 if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_NOT_PANIC"))
320 ASMAtomicWriteBool(&g_fLockValidatorMayPanic, false);
321
322 if (RTEnvExist("IPRT_LOCK_VALIDATOR_NOT_QUIET"))
323 ASMAtomicWriteBool(&g_fLockValidatorQuiet, false);
324 if (RTEnvExist("IPRT_LOCK_VALIDATOR_QUIET"))
325 ASMAtomicWriteBool(&g_fLockValidatorQuiet, true);
326
327 if (RTEnvExist("IPRT_LOCK_VALIDATOR_STRICT_ORDER"))
328 ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, false);
329 if (RTEnvExist("IPRT_LOCK_VALIDATOR_SOFT_ORDER"))
330 ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, true);
331#endif
332
333 /*
334 * Register cleanup
335 */
336 /** @todo register some cleanup callback if we care. */
337
338 ASMAtomicWriteU32(&s_fInitializing, false);
339 }
340}
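In ring-3 the environment variables above are the intended way to reconfigure the validator. A minimal sketch of setting them programmatically with iprt/env.h's RTEnvSet; this only has an effect if it runs before the first lock triggers the lazy init, since the variables are sampled just once:

    /* Sketch, assuming the process may still adjust its own environment.
       Only the existence of the variable matters; the value is ignored. */
    RTEnvSet("IPRT_LOCK_VALIDATOR_ENABLED",   "1");
    RTEnvSet("IPRT_LOCK_VALIDATOR_MAY_PANIC", "1");
    RTEnvSet("IPRT_LOCK_VALIDATOR_NOT_QUIET", "1");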
341
342
343
344/** Wrapper around ASMAtomicReadPtr. */
345DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
346{
347 PRTLOCKVALRECUNION p = (PRTLOCKVALRECUNION)ASMAtomicReadPtr((void * volatile *)ppRec);
348 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
349 return p;
350}
351
352
353/** Wrapper around ASMAtomicWritePtr. */
354DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
355{
356 RTLOCKVAL_ASSERT_PTR_ALIGN(pRecNew);
357 ASMAtomicWritePtr((void * volatile *)ppRec, pRecNew);
358}
359
360
361/** Wrapper around ASMAtomicReadPtr. */
362DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
363{
364 PRTTHREADINT p = (PRTTHREADINT)ASMAtomicReadPtr((void * volatile *)phThread);
365 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
366 return p;
367}
368
369
370/** Wrapper around ASMAtomicUoReadPtr. */
371DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
372{
373 PRTLOCKVALRECSHRDOWN p = (PRTLOCKVALRECSHRDOWN)ASMAtomicUoReadPtr((void * volatile *)ppOwner);
374 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
375 return p;
376}
377
378
379/**
380 * Reads a volatile thread handle field and returns the thread name.
381 *
382 * @returns Thread name (read only).
383 * @param phThread The thread handle field.
384 */
385static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
386{
387 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
388 if (!pThread)
389 return "<NIL>";
390 if (!VALID_PTR(pThread))
391 return "<INVALID>";
392 if (pThread->u32Magic != RTTHREADINT_MAGIC)
393 return "<BAD-THREAD-MAGIC>";
394 return pThread->szName;
395}
396
397
398/**
399 * Launch a simple assertion-like complaint w/ panic.
400 *
401 * @param pszFile Where from - file.
402 * @param iLine Where from - line.
403 * @param pszFunction Where from - function.
404 * @param pszWhat What we're complaining about.
405 * @param ... Format arguments.
406 */
407static void rtLockValComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
408{
409 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
410 {
411 RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
412 va_list va;
413 va_start(va, pszWhat);
414 RTAssertMsg2WeakV(pszWhat, va);
415 va_end(va);
416 }
417 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
418 RTAssertPanic();
419}
420
421
422/**
423 * Describes the class.
424 *
425 * @param pszPrefix Message prefix.
426 * @param pClass The class to complain about.
427 * @param uSubClass My sub-class.
428 * @param fVerbose Verbose description including relations to other
429 * classes.
430 */
431static void rtLockValComplainAboutClass(const char *pszPrefix, RTLOCKVALCLASSINT *pClass, uint32_t uSubClass, bool fVerbose)
432{
433 if (ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
434 return;
435
436 /* Stringify the sub-class. */
437 const char *pszSubClass;
438 char szSubClass[32];
439 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
440 switch (uSubClass)
441 {
442 case RTLOCKVAL_SUB_CLASS_NONE: pszSubClass = "none"; break;
443 case RTLOCKVAL_SUB_CLASS_ANY: pszSubClass = "any"; break;
444 default:
445 RTStrPrintf(szSubClass, sizeof(szSubClass), "invl-%u", uSubClass);
446 pszSubClass = szSubClass;
447 break;
448 }
449 else
450 {
451 RTStrPrintf(szSubClass, sizeof(szSubClass), "%u", uSubClass);
452 pszSubClass = szSubClass;
453 }
454
455 /* Validate the class pointer. */
456 if (!VALID_PTR(pClass))
457 {
458 RTAssertMsg2AddWeak("%sbad class=%p sub-class=%s\n", pszPrefix, pClass, pszSubClass);
459 return;
460 }
461 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
462 {
463 RTAssertMsg2AddWeak("%sbad class=%p magic=%#x sub-class=%s\n", pszPrefix, pClass, pClass->u32Magic, pszSubClass);
464 return;
465 }
466
467 /* OK, dump the class info. */
468 RTAssertMsg2AddWeak("%sclass=%p %s created={%Rbn(%u) %Rfn %p} sub-class=%s\n", pszPrefix,
469 pClass,
470 pClass->pszName,
471 pClass->CreatePos.pszFile,
472 pClass->CreatePos.uLine,
473 pClass->CreatePos.pszFunction,
474 pClass->CreatePos.uId,
475 pszSubClass);
476 if (fVerbose)
477 {
478 uint32_t i = 0;
479 uint32_t cPrinted = 0;
480 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
481 for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++, i++)
482 {
483 RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
484 if (pCurClass != NIL_RTLOCKVALCLASS)
485 {
486 RTAssertMsg2AddWeak("%s%s #%02u: %s, %s, %u lookup%s\n", pszPrefix,
487 cPrinted == 0
488 ? "Prior:"
489 : " ",
490 i,
491 pCurClass->pszName,
492 pChunk->aRefs[j].fAutodidacticism
493 ? "autodidactic"
494 : "manually ",
495 pChunk->aRefs[j].cLookups,
496 pChunk->aRefs[j].cLookups != 1 ? "s" : "");
497 cPrinted++;
498 }
499 }
500 if (!cPrinted)
501 RTAssertMsg2AddWeak("%sPrior: none\n", pszPrefix);
502#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
503 RTAssertMsg2AddWeak("%sHash Stats: %u hits, %u misses\n", pszPrefix, pClass->cHashHits, pClass->cHashMisses);
504#endif
505 }
506 else
507 {
508 uint32_t cPrinted = 0;
509 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
510 for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++)
511 {
512 RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
513 if (pCurClass != NIL_RTLOCKVALCLASS)
514 {
515 if ((cPrinted % 10) == 0)
516 RTAssertMsg2AddWeak("%sPrior classes: %s%s", pszPrefix, pCurClass->pszName,
517 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
518 else if ((cPrinted % 10) != 9)
519 RTAssertMsg2AddWeak(", %s%s", pCurClass->pszName,
520 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
521 else
522 RTAssertMsg2AddWeak(", %s%s\n", pCurClass->pszName,
523 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
524 cPrinted++;
525 }
526 }
527 if (!cPrinted)
528 RTAssertMsg2AddWeak("%sPrior classes: none\n", pszPrefix);
529 else if ((cPrinted % 10) != 0)
530 RTAssertMsg2AddWeak("\n");
531 }
532}
533
534
535/**
536 * Helper for getting the class name.
537 * @returns Class name string.
538 * @param pClass The class.
539 */
540static const char *rtLockValComplainGetClassName(RTLOCKVALCLASSINT *pClass)
541{
542 if (!pClass)
543 return "<nil-class>";
544 if (!VALID_PTR(pClass))
545 return "<bad-class-ptr>";
546 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
547 return "<bad-class-magic>";
548 if (!pClass->pszName)
549 return "<no-class-name>";
550 return pClass->pszName;
551}
552
553/**
554 * Formats the sub-class.
555 *
556 * @returns Stringified sub-class.
557 * @param uSubClass The name.
558 * @param pszBuf Buffer of at least 32 bytes.
559 */
560static const char *rtLockValComplainGetSubClassName(uint32_t uSubClass, char *pszBuf)
561{
562 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
563 switch (uSubClass)
564 {
565 case RTLOCKVAL_SUB_CLASS_NONE: return "none";
566 case RTLOCKVAL_SUB_CLASS_ANY: return "any";
567 default:
568 RTStrPrintf(pszBuf, 32, "invl-%u", uSubClass);
569 break;
570 }
571 else
572 RTStrPrintf(pszBuf, 32, "%x", uSubClass);
573 return pszBuf;
574}
575
576
577/**
578 * Helper for rtLockValComplainAboutLock.
579 */
580DECL_FORCE_INLINE(void) rtLockValComplainAboutLockHlp(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix,
581 uint32_t u32Magic, PCRTLOCKVALSRCPOS pSrcPos, uint32_t cRecursion,
582 const char *pszFrameType)
583{
584 char szBuf[32];
585 switch (u32Magic)
586 {
587 case RTLOCKVALRECEXCL_MAGIC:
588#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
589 RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
590 pRec->Excl.hLock, pRec->Excl.szName, pRec,
591 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
592 rtLockValComplainGetClassName(pRec->Excl.hClass),
593 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
594 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
595 pszFrameType, pszSuffix);
596#else
597 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
598 pRec->Excl.hLock, pRec->Excl.szName,
599 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
600 rtLockValComplainGetClassName(pRec->Excl.hClass),
601 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
602 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
603 pszFrameType, pszSuffix);
604#endif
605 break;
606
607 case RTLOCKVALRECSHRD_MAGIC:
608 RTAssertMsg2AddWeak("%ss %p %s srec=%p cls=%s/%s [s%s]%s", pszPrefix,
609 pRec->Shared.hLock, pRec->Shared.szName, pRec,
610 rtLockValComplainGetClassName(pRec->Shared.hClass),
611 rtLockValComplainGetSubClassName(pRec->Shared.uSubClass, szBuf),
612 pszFrameType, pszSuffix);
613 break;
614
615 case RTLOCKVALRECSHRDOWN_MAGIC:
616 {
617 PRTLOCKVALRECSHRD pShared = pRec->ShrdOwner.pSharedRec;
618 if ( VALID_PTR(pShared)
619 && pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
620#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
621 RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
622 pShared->hLock, pShared->szName, pShared,
623 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
624 rtLockValComplainGetClassName(pShared->hClass),
625 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
626 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
627 pszFrameType, pszSuffix);
628#else
629 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
630 pShared->hLock, pShared->szName,
631 rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
632 rtLockValComplainGetClassName(pShared->hClass),
633 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
634 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
635 pszFrameType, pszSuffix);
636#endif
637 else
638 RTAssertMsg2AddWeak("%sbad srec=%p trec=%p own=%s r=%u pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
639 pShared,
640 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
641 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
642 pszFrameType, pszSuffix);
643 break;
644 }
645
646 default:
647 AssertMsgFailed(("%#x\n", u32Magic));
648 }
649}
650
651
652/**
653 * Describes the lock.
654 *
655 * @param pszPrefix Message prefix.
656 * @param pRec The lock record we're working on.
657 * @param pszSuffix Message suffix.
658 */
659static void rtLockValComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
660{
661#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
662# define FIX_REC(r) 1
663#else
664# define FIX_REC(r) (r)
665#endif
666 if ( VALID_PTR(pRec)
667 && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
668 {
669 switch (pRec->Core.u32Magic)
670 {
671 case RTLOCKVALRECEXCL_MAGIC:
672 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECEXCL_MAGIC,
673 &pRec->Excl.SrcPos, FIX_REC(pRec->Excl.cRecursion), "");
674 break;
675
676 case RTLOCKVALRECSHRD_MAGIC:
677 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRD_MAGIC, NULL, 0, "");
678 break;
679
680 case RTLOCKVALRECSHRDOWN_MAGIC:
681 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRDOWN_MAGIC,
682 &pRec->ShrdOwner.SrcPos, FIX_REC(pRec->ShrdOwner.cRecursion), "");
683 break;
684
685 case RTLOCKVALRECNEST_MAGIC:
686 {
687 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
688 uint32_t u32Magic;
689 if ( VALID_PTR(pRealRec)
690 && ( (u32Magic = pRealRec->Core.u32Magic) == RTLOCKVALRECEXCL_MAGIC
691 || u32Magic == RTLOCKVALRECSHRD_MAGIC
692 || u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
693 )
694 rtLockValComplainAboutLockHlp(pszPrefix, pRealRec, pszSuffix, u32Magic,
695 &pRec->Nest.SrcPos, pRec->Nest.cRecursion, "/r");
696 else
697 RTAssertMsg2AddWeak("%sbad rrec=%p nrec=%p r=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
698 pRealRec, pRec, pRec->Nest.cRecursion,
699 pRec->Nest.SrcPos.pszFile, pRec->Nest.SrcPos.uLine, pRec->Nest.SrcPos.pszFunction, pRec->Nest.SrcPos.uId,
700 pszSuffix);
701 break;
702 }
703
704 default:
705 RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
706 break;
707 }
708 }
709#undef FIX_REC
710}
711
712
713/**
714 * Dump the lock stack.
715 *
716 * @param pThread The thread whose lock stack we're going to dump.
717 * @param cchIndent The indentation in chars.
718 * @param cMinFrames The minimum number of frames to consider
719 * dumping.
720 * @param pHighlightRec Record that should be marked specially in the
721 * dump.
722 */
723static void rtLockValComplainAboutLockStack(PRTTHREADINT pThread, unsigned cchIndent, uint32_t cMinFrames,
724 PRTLOCKVALRECUNION pHighlightRec)
725{
726 if ( VALID_PTR(pThread)
727 && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet)
728 && pThread->u32Magic == RTTHREADINT_MAGIC
729 )
730 {
731 uint32_t cEntries = rtLockValidatorStackDepth(pThread);
732 if (cEntries >= cMinFrames)
733 {
734 RTAssertMsg2AddWeak("%*s---- start of lock stack for %p %s - %u entr%s ----\n", cchIndent, "",
735 pThread, pThread->szName, cEntries, cEntries == 1 ? "y" : "ies");
736 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
737 for (uint32_t i = 0; VALID_PTR(pCur); i++)
738 {
739 char szPrefix[80];
740 RTStrPrintf(szPrefix, sizeof(szPrefix), "%*s#%02u: ", cchIndent, "", i);
741 rtLockValComplainAboutLock(szPrefix, pCur, pHighlightRec != pCur ? "\n" : " (*)\n");
742 switch (pCur->Core.u32Magic)
743 {
744 case RTLOCKVALRECEXCL_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown); break;
745 case RTLOCKVALRECSHRDOWN_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown); break;
746 case RTLOCKVALRECNEST_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown); break;
747 default:
748 RTAssertMsg2AddWeak("%*s<bad stack frame>\n", cchIndent, "");
749 pCur = NULL;
750 break;
751 }
752 }
753 RTAssertMsg2AddWeak("%*s---- end of lock stack ----\n", cchIndent, "");
754 }
755 }
756}
757
758
759/**
760 * Launch the initial complaint.
761 *
762 * @param pszWhat What we're complaining about.
763 * @param pSrcPos Where we are complaining from, as it were.
764 * @param pThreadSelf The calling thread.
765 * @param pRec The main lock involved. Can be NULL.
766 * @param fDumpStack Whether to dump the lock stack (true) or not
767 * (false).
768 */
769static void rtLockValComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
770 PRTLOCKVALRECUNION pRec, bool fDumpStack)
771{
772 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
773 {
774 ASMCompilerBarrier(); /* paranoia */
775 RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
776 if (pSrcPos && pSrcPos->uId)
777 RTAssertMsg2Weak("%s [uId=%p thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
778 else
779 RTAssertMsg2Weak("%s [thrd=%s]\n", pszWhat, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
780 rtLockValComplainAboutLock("Lock: ", pRec, "\n");
781 if (fDumpStack)
782 rtLockValComplainAboutLockStack(pThreadSelf, 0, 1, pRec);
783 }
784}
785
786
787/**
788 * Continue bitching.
789 *
790 * @param pszFormat Format string.
791 * @param ... Format arguments.
792 */
793static void rtLockValComplainMore(const char *pszFormat, ...)
794{
795 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
796 {
797 va_list va;
798 va_start(va, pszFormat);
799 RTAssertMsg2AddWeakV(pszFormat, va);
800 va_end(va);
801 }
802}
803
804
805/**
806 * Raise a panic if enabled.
807 */
808static void rtLockValComplainPanic(void)
809{
810 if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
811 RTAssertPanic();
812}
813
814
815/**
816 * Copy a source position record.
817 *
818 * @param pDst The destination.
819 * @param pSrc The source. Can be NULL.
820 */
821DECL_FORCE_INLINE(void) rtLockValidatorSrcPosCopy(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
822{
823 if (pSrc)
824 {
825 ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
826 ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFile, pSrc->pszFile);
827 ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFunction, pSrc->pszFunction);
828 ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, (void *)pSrc->uId);
829 }
830 else
831 {
832 ASMAtomicUoWriteU32(&pDst->uLine, 0);
833 ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFile, NULL);
834 ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFunction, NULL);
835 ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, 0);
836 }
837}
838
839
840/**
841 * Init a source position record.
842 *
843 * @param pSrcPos The source position record.
844 */
845DECL_FORCE_INLINE(void) rtLockValidatorSrcPosInit(PRTLOCKVALSRCPOS pSrcPos)
846{
847 pSrcPos->pszFile = NULL;
848 pSrcPos->pszFunction = NULL;
849 pSrcPos->uId = 0;
850 pSrcPos->uLine = 0;
851#if HC_ARCH_BITS == 64
852 pSrcPos->u32Padding = 0;
853#endif
854}
855
856
857/* sdbm:
858 This algorithm was created for sdbm (a public-domain reimplementation of
859 ndbm) database library. it was found to do well in scrambling bits,
860 causing better distribution of the keys and fewer splits. it also happens
861 to be a good general hashing function with good distribution. the actual
862 function is hash(i) = hash(i - 1) * 65599 + str[i]; what is included below
863 is the faster version used in gawk. [there is even a faster, duff-device
864 version] the magic constant 65599 was picked out of thin air while
865 experimenting with different constants, and turns out to be a prime.
866 this is one of the algorithms used in berkeley db (see sleepycat) and
867 elsewhere. */
868DECL_FORCE_INLINE(uint32_t) sdbm(const char *str, uint32_t hash)
869{
870 uint8_t *pu8 = (uint8_t *)str;
871 int c;
872
873 while ((c = *pu8++))
874 hash = c + (hash << 6) + (hash << 16) - hash;
875
876 return hash;
877}
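The shifts in the loop are the strength-reduced form of the multiply mentioned in the comment above; a quick sanity sketch of the identity (not part of the file):

    /* Sketch: hash*65599 == (hash << 6) + (hash << 16) - hash,
       because 65599 == 65536 + 64 - 1. One loop step for byte c: */
    uint32_t uOld = UINT32_C(0x12345678);
    uint8_t  c    = 'a';
    Assert(c + (uOld << 6) + (uOld << 16) - uOld == c + uOld * UINT32_C(65599));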
878
879
880/**
881 * Hashes the specified source position.
882 *
883 * @returns Hash.
884 * @param pSrcPos The source position record.
885 */
886static uint32_t rtLockValidatorSrcPosHash(PCRTLOCKVALSRCPOS pSrcPos)
887{
888 uint32_t uHash;
889 if ( ( pSrcPos->pszFile
890 || pSrcPos->pszFunction)
891 && pSrcPos->uLine != 0)
892 {
893 uHash = 0;
894 if (pSrcPos->pszFile)
895 uHash = sdbm(pSrcPos->pszFile, uHash);
896 if (pSrcPos->pszFunction)
897 uHash = sdbm(pSrcPos->pszFunction, uHash);
898 uHash += pSrcPos->uLine;
899 }
900 else
901 {
902 Assert(pSrcPos->uId);
903 uHash = (uint32_t)pSrcPos->uId;
904 }
905
906 return uHash;
907}
908
909
910/**
911 * Compares two source positions.
912 *
913 * @returns 0 if equal, < 0 if pSrcPos1 is smaller than pSrcPos2, > 0
914 * otherwise.
915 * @param pSrcPos1 The first source position.
916 * @param pSrcPos2 The second source position.
917 */
918static int rtLockValidatorSrcPosCompare(PCRTLOCKVALSRCPOS pSrcPos1, PCRTLOCKVALSRCPOS pSrcPos2)
919{
920 if (pSrcPos1->uLine != pSrcPos2->uLine)
921 return pSrcPos1->uLine < pSrcPos2->uLine ? -1 : 1;
922
923 int iDiff = RTStrCmp(pSrcPos1->pszFile, pSrcPos2->pszFile);
924 if (iDiff != 0)
925 return iDiff;
926
927 iDiff = RTStrCmp(pSrcPos1->pszFunction, pSrcPos2->pszFunction);
928 if (iDiff != 0)
929 return iDiff;
930
931 if (pSrcPos1->uId != pSrcPos2->uId)
932 return pSrcPos1->uId < pSrcPos2->uId ? -1 : 1;
933 return 0;
934}
935
936
937
938/**
939 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
940 */
941DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
942{
943 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
944 if (hXRoads != NIL_RTSEMXROADS)
945 RTSemXRoadsNSEnter(hXRoads);
946}
947
948
949/**
950 * Call after rtLockValidatorSerializeDestructEnter.
951 */
952DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
953{
954 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
955 if (hXRoads != NIL_RTSEMXROADS)
956 RTSemXRoadsNSLeave(hXRoads);
957}
958
959
960/**
961 * Serializes deadlock detection against destruction of the objects being
962 * inspected.
963 */
964DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
965{
966 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
967 if (hXRoads != NIL_RTSEMXROADS)
968 RTSemXRoadsEWEnter(hXRoads);
969}
970
971
972/**
973 * Call after rtLockValidatorSerializeDetectionEnter.
974 */
975DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
976{
977 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
978 if (hXRoads != NIL_RTSEMXROADS)
979 RTSemXRoadsEWLeave(hXRoads);
980}
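Taken together, the four helpers above implement the NS/EW split described at g_hLockValidatorXRoads. A sketch of how destruction code is expected to use the NS side:

    /* Sketch: bracket structure teardown so concurrent deadlock detection
       (the EW side) never examines memory that is being freed. */
    rtLockValidatorSerializeDestructEnter();
    /* ... unlink the RTLOCKVALREC* / RTTHREADINT structures and free them ... */
    rtLockValidatorSerializeDestructLeave();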
981
982
983/**
984 * Initializes the per thread lock validator data.
985 *
986 * @param pPerThread The data.
987 */
988DECLHIDDEN(void) rtLockValidatorInitPerThread(RTLOCKVALPERTHREAD *pPerThread)
989{
990 pPerThread->bmFreeShrdOwners = UINT32_MAX;
991
992 /* ASSUMES the rest has already been zeroed. */
993 Assert(pPerThread->pRec == NULL);
994 Assert(pPerThread->cWriteLocks == 0);
995 Assert(pPerThread->cReadLocks == 0);
996 Assert(pPerThread->fInValidator == false);
997 Assert(pPerThread->pStackTop == NULL);
998}
999
1000
1001/**
1002 * Delete the per thread lock validator data.
1003 *
1004 * @param pPerThread The data.
1005 */
1006DECLHIDDEN(void) rtLockValidatorDeletePerThread(RTLOCKVALPERTHREAD *pPerThread)
1007{
1008 /*
1009 * Check that the thread doesn't own any locks at this time.
1010 */
1011 if (pPerThread->pStackTop)
1012 {
1013 rtLockValComplainFirst("Thread terminating owning locks!", NULL,
1014 RT_FROM_MEMBER(pPerThread, RTTHREADINT, LockValidator),
1015 pPerThread->pStackTop, true);
1016 rtLockValComplainPanic();
1017 }
1018
1019 /*
1020 * Free the recursion records.
1021 */
1022 PRTLOCKVALRECNEST pCur = pPerThread->pFreeNestRecs;
1023 pPerThread->pFreeNestRecs = NULL;
1024 while (pCur)
1025 {
1026 PRTLOCKVALRECNEST pNext = pCur->pNextFree;
1027 RTMemFree(pCur);
1028 pCur = pNext;
1029 }
1030}
1031
1032RTDECL(int) RTLockValidatorClassCreateEx(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1033 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1034 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1035 const char *pszNameFmt, ...)
1036{
1037 va_list va;
1038 va_start(va, pszNameFmt);
1039 int rc = RTLockValidatorClassCreateExV(phClass, pSrcPos, fAutodidact, fRecursionOk, fStrictReleaseOrder,
1040 cMsMinDeadlock, cMsMinOrder, pszNameFmt, va);
1041 va_end(va);
1042 return rc;
1043}
1044
1045
1046RTDECL(int) RTLockValidatorClassCreateExV(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1047 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1048 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1049 const char *pszNameFmt, va_list va)
1050{
1051 Assert(cMsMinDeadlock >= 1);
1052 Assert(cMsMinOrder >= 1);
1053 AssertPtr(pSrcPos);
1054
1055 /*
1056 * Format the name and calc its length.
1057 */
1058 size_t cbName;
1059 char szName[32];
1060 if (pszNameFmt && *pszNameFmt)
1061 cbName = RTStrPrintfV(szName, sizeof(szName), pszNameFmt, va) + 1;
1062 else
1063 {
1064 static uint32_t volatile s_cAnonymous = 0;
1065 uint32_t i = ASMAtomicIncU32(&s_cAnonymous);
1066 cbName = RTStrPrintf(szName, sizeof(szName), "anon-%u", i - 1) + 1;
1067 }
1068
1069 /*
1070 * Figure out the file and function name lengths and allocate memory for
1071 * it all.
1072 */
1073 size_t const cbFile = pSrcPos->pszFile ? strlen(pSrcPos->pszFile) + 1 : 0;
1074 size_t const cbFunction = pSrcPos->pszFunction ? strlen(pSrcPos->pszFunction) + 1 : 0;
1075 RTLOCKVALCLASSINT *pThis = (RTLOCKVALCLASSINT *)RTMemAllocVar(sizeof(*pThis) + cbFile + cbFunction + cbName);
1076 if (!pThis)
1077 return VERR_NO_MEMORY;
1078
1079 /*
1080 * Initialize the class data.
1081 */
1082 pThis->Core.Key = rtLockValidatorSrcPosHash(pSrcPos);
1083 pThis->Core.uchHeight = 0;
1084 pThis->Core.pLeft = NULL;
1085 pThis->Core.pRight = NULL;
1086 pThis->Core.pList = NULL;
1087 pThis->u32Magic = RTLOCKVALCLASS_MAGIC;
1088 pThis->cRefs = 1;
1089 pThis->fAutodidact = fAutodidact;
1090 pThis->fRecursionOk = fRecursionOk;
1091 pThis->fStrictReleaseOrder = fStrictReleaseOrder;
1092 pThis->fInTree = false;
1093 pThis->fDonateRefToNextRetainer = false;
1094 pThis->afReserved[0] = false;
1095 pThis->afReserved[1] = false;
1096 pThis->afReserved[2] = false;
1097 pThis->cMsMinDeadlock = cMsMinDeadlock;
1098 pThis->cMsMinOrder = cMsMinOrder;
1099 for (unsigned i = 0; i < RT_ELEMENTS(pThis->au32Reserved); i++)
1100 pThis->au32Reserved[i] = 0;
1101 for (unsigned i = 0; i < RT_ELEMENTS(pThis->PriorLocks.aRefs); i++)
1102 {
1103 pThis->PriorLocks.aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1104 pThis->PriorLocks.aRefs[i].cLookups = 0;
1105 pThis->PriorLocks.aRefs[i].fAutodidacticism = false;
1106 pThis->PriorLocks.aRefs[i].afReserved[0] = false;
1107 pThis->PriorLocks.aRefs[i].afReserved[1] = false;
1108 pThis->PriorLocks.aRefs[i].afReserved[2] = false;
1109 }
1110 pThis->PriorLocks.pNext = NULL;
1111 for (unsigned i = 0; i < RT_ELEMENTS(pThis->apPriorLocksHash); i++)
1112 pThis->apPriorLocksHash[i] = NULL;
1113 char *pszDst = (char *)(pThis + 1);
1114 pThis->pszName = (char *)memcpy(pszDst, szName, cbName);
1115 pszDst += cbName;
1116 rtLockValidatorSrcPosCopy(&pThis->CreatePos, pSrcPos);
1117 pThis->CreatePos.pszFile = pSrcPos->pszFile ? (char *)memcpy(pszDst, pSrcPos->pszFile, cbFile) : NULL;
1118 pszDst += cbFile;
1119 pThis->CreatePos.pszFunction = pSrcPos->pszFunction ? (char *)memcpy(pszDst, pSrcPos->pszFunction, cbFunction) : NULL;
1120 Assert(rtLockValidatorSrcPosHash(&pThis->CreatePos) == pThis->Core.Key);
1121#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1122 pThis->cHashHits = 0;
1123 pThis->cHashMisses = 0;
1124#endif
1125
1126 *phClass = pThis;
1127 return VINF_SUCCESS;
1128}
1129
1130
1131RTDECL(int) RTLockValidatorClassCreate(PRTLOCKVALCLASS phClass, bool fAutodidact, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1132{
1133 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1134 va_list va;
1135 va_start(va, pszNameFmt);
1136 int rc = RTLockValidatorClassCreateExV(phClass, &SrcPos,
1137 fAutodidact, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1138 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1139 pszNameFmt, va);
1140 va_end(va);
1141 return rc;
1142}
1143
1144
1145/**
1146 * Creates a new lock validator class with a reference that is consumed by the
1147 * first call to RTLockValidatorClassRetain.
1148 *
1149 * This is tailored for use in the parameter list of a semaphore constructor.
1150 *
1151 * @returns Class handle with a reference that is automatically consumed by the
1152 * first retainer. NIL_RTLOCKVALCLASS if we run into trouble.
1153 *
1154 * @param pszFile The source position of the call, file.
1155 * @param iLine The source position of the call, line.
1156 * @param pszFunction The source position of the call, function.
1157 * @param pszNameFmt Class name format string, optional (NULL). Max
1158 * length is 32 bytes.
1159 * @param ... Format string arguments.
1160 */
1161RTDECL(RTLOCKVALCLASS) RTLockValidatorClassCreateUnique(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1162{
1163 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1164 RTLOCKVALCLASSINT *pClass;
1165 va_list va;
1166 va_start(va, pszNameFmt);
1167 int rc = RTLockValidatorClassCreateExV(&pClass, &SrcPos,
1168 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1169 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1170 pszNameFmt, va);
1171 va_end(va);
1172 if (RT_FAILURE(rc))
1173 return NIL_RTLOCKVALCLASS;
1174 ASMAtomicWriteBool(&pClass->fDonateRefToNextRetainer, true); /* see rtLockValidatorClassRetain */
1175 return pClass;
1176}
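Since the first retain consumes the donated reference, the function can be used inline in a constructor call. A sketch of the intended pattern; the RTSemMutexCreateEx parameter list here is assumed from iprt/semaphore.h, not taken from this file:

    /* Sketch: the unique class is created inline; the semaphore's retain
       consumes the donated reference, so no explicit release is needed. */
    RTSEMMUTEX hMtx;
    int rc = RTSemMutexCreateEx(&hMtx, 0 /*fFlags*/,
                                RTLockValidatorClassCreateUnique(RT_SRC_POS, "MyMutex"),
                                RTLOCKVAL_SUB_CLASS_NONE, "MyMutex");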
1177
1178
1179/**
1180 * Internal class retainer.
1181 * @returns The new reference count.
1182 * @param pClass The class.
1183 */
1184DECL_FORCE_INLINE(uint32_t) rtLockValidatorClassRetain(RTLOCKVALCLASSINT *pClass)
1185{
1186 uint32_t cRefs = ASMAtomicIncU32(&pClass->cRefs);
1187 if (cRefs > RTLOCKVALCLASS_MAX_REFS)
1188 ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
1189 else if ( cRefs == 2
1190 && ASMAtomicXchgBool(&pClass->fDonateRefToNextRetainer, false))
1191 cRefs = ASMAtomicDecU32(&pClass->cRefs);
1192 return cRefs;
1193}
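The cRefs == 2 test above is what makes the RTLockValidatorClassCreateUnique hand-off work; a worked trace of the reference count, as a sketch:

    /* Sketch of the donate-ref hand-off:
     *   RTLockValidatorClassCreateExV:    cRefs = 1, fDonateRefToNextRetainer = true
     *   first rtLockValidatorClassRetain: inc -> cRefs = 2; flag set and cRefs == 2,
     *                                     so xchg clears the flag and dec -> cRefs = 1
     * The retainer thus ends up owning the reference the creator donated, and
     * the matching release later drops cRefs to 0 and destroys the class. */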
1194
1195
1196/**
1197 * Validates and retains a lock validator class.
1198 *
1199 * @returns @a hClass on success, NIL_RTLOCKVALCLASS on failure.
1200 * @param hClass The class handle. NIL_RTLOCKVALCLASS is ok.
1201 */
1202DECL_FORCE_INLINE(RTLOCKVALCLASS) rtLockValidatorClassValidateAndRetain(RTLOCKVALCLASS hClass)
1203{
1204 if (hClass == NIL_RTLOCKVALCLASS)
1205 return hClass;
1206 AssertPtrReturn(hClass, NIL_RTLOCKVALCLASS);
1207 AssertReturn(hClass->u32Magic == RTLOCKVALCLASS_MAGIC, NIL_RTLOCKVALCLASS);
1208 rtLockValidatorClassRetain(hClass);
1209 return hClass;
1210}
1211
1212
1213/**
1214 * Internal class releaser.
1215 * @returns The new reference count.
1216 * @param pClass The class.
1217 */
1218DECLINLINE(uint32_t) rtLockValidatorClassRelease(RTLOCKVALCLASSINT *pClass)
1219{
1220 uint32_t cRefs = ASMAtomicDecU32(&pClass->cRefs);
1221 if (cRefs + 1 == RTLOCKVALCLASS_MAX_REFS)
1222 ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
1223 else if (!cRefs)
1224 rtLockValidatorClassDestroy(pClass);
1225 return cRefs;
1226}
1227
1228
1229/**
1230 * Destroys a class once there are no more references to it.
1231 *
1232 * @param pClass The class.
1233 */
1234static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass)
1235{
1236 AssertReturnVoid(!pClass->fInTree);
1237 ASMAtomicWriteU32(&pClass->u32Magic, RTLOCKVALCLASS_MAGIC_DEAD);
1238
1239 PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks;
1240 while (pChunk)
1241 {
1242 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1243 {
1244 RTLOCKVALCLASSINT *pClass2 = pChunk->aRefs[i].hClass;
1245 if (pClass2 != NIL_RTLOCKVALCLASS)
1246 {
1247 pChunk->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1248 rtLockValidatorClassRelease(pClass2);
1249 }
1250 }
1251
1252 PRTLOCKVALCLASSREFCHUNK pNext = pChunk->pNext;
1253 pChunk->pNext = NULL;
1254 if (pChunk != &pClass->PriorLocks)
1255 RTMemFree(pChunk);
1256 pChunk = pNext;
1257 }
1258
1259 RTMemFree(pClass);
1260}
1261
1262
1263RTDECL(RTLOCKVALCLASS) RTLockValidatorClassFindForSrcPos(PRTLOCKVALSRCPOS pSrcPos)
1264{
1265 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1266 rtLockValidatorLazyInit();
1267 int rcLock = RTSemRWRequestRead(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1268
1269 uint32_t uSrcPosHash = rtLockValidatorSrcPosHash(pSrcPos);
1270 RTLOCKVALCLASSINT *pClass = (RTLOCKVALCLASSINT *)RTAvllU32Get(&g_LockValClassTree, uSrcPosHash);
1271 while (pClass)
1272 {
1273 if (rtLockValidatorSrcPosCompare(&pClass->CreatePos, pSrcPos) == 0)
1274 break;
1275 pClass = (RTLOCKVALCLASSINT *)pClass->Core.pList;
1276 }
1277
1278 if (RT_SUCCESS(rcLock))
1279 RTSemRWReleaseRead(g_hLockValClassTreeRWLock);
1280 return pClass;
1281}
1282
1283
1284RTDECL(RTLOCKVALCLASS) RTLockValidatorClassForSrcPos(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1285{
1286 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1287 RTLOCKVALCLASS hClass = RTLockValidatorClassFindForSrcPos(&SrcPos);
1288 if (hClass == NIL_RTLOCKVALCLASS)
1289 {
1290 /*
1291 * Create a new class and insert it into the tree.
1292 */
1293 va_list va;
1294 va_start(va, pszNameFmt);
1295 int rc = RTLockValidatorClassCreateExV(&hClass, &SrcPos,
1296 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1297 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1298 pszNameFmt, va);
1299 va_end(va);
1300 if (RT_SUCCESS(rc))
1301 {
1302 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1303 rtLockValidatorLazyInit();
1304 int rcLock = RTSemRWRequestWrite(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1305
1306 Assert(!hClass->fInTree);
1307 hClass->fInTree = RTAvllU32Insert(&g_LockValClassTree, &hClass->Core);
1308 Assert(hClass->fInTree);
1309
1310 if (RT_SUCCESS(rcLock))
1311 RTSemRWReleaseWrite(g_hLockValClassTreeRWLock);
1312 return hClass;
1313 }
1314 }
1315 return hClass;
1316}
1317
1318
1319RTDECL(uint32_t) RTLockValidatorClassRetain(RTLOCKVALCLASS hClass)
1320{
1321 RTLOCKVALCLASSINT *pClass = hClass;
1322 AssertPtrReturn(pClass, UINT32_MAX);
1323 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1324 return rtLockValidatorClassRetain(pClass);
1325}
1326
1327
1328RTDECL(uint32_t) RTLockValidatorClassRelease(RTLOCKVALCLASS hClass)
1329{
1330 RTLOCKVALCLASSINT *pClass = hClass;
1331 if (pClass == NIL_RTLOCKVALCLASS)
1332 return 0;
1333 AssertPtrReturn(pClass, UINT32_MAX);
1334 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1335 return rtLockValidatorClassRelease(pClass);
1336}
1337
1338
1339/**
1340 * Worker for rtLockValidatorClassIsPriorClass that does a linear search thru
1341 * all the chunks for @a pPriorClass.
1342 *
1343 * @returns true / false.
1344 * @param pClass The class to search.
1345 * @param pPriorClass The class to search for.
1346 */
1347static bool rtLockValidatorClassIsPriorClassByLinearSearch(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
1348{
1349 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
1350 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1351 {
1352 if (pChunk->aRefs[i].hClass == pPriorClass)
1353 {
1354 uint32_t cLookups = ASMAtomicIncU32(&pChunk->aRefs[i].cLookups);
1355 if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
1356 {
1357 ASMAtomicWriteU32(&pChunk->aRefs[i].cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
1358 cLookups = RTLOCKVALCLASSREF_MAX_LOOKUPS;
1359 }
1360
1361 /* update the hash table entry. */
1362 PRTLOCKVALCLASSREF *ppHashEntry = &pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
1363 if ( !(*ppHashEntry)
1364 || (*ppHashEntry)->cLookups + 128 < cLookups)
1365 ASMAtomicWritePtr((void * volatile *)ppHashEntry, &pChunk->aRefs[i]);
1366
1367#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1368 ASMAtomicIncU32(&pClass->cHashMisses);
1369#endif
1370 return true;
1371 }
1372 }
1373
1374 return false;
1375}
1376
1377
1378/**
1379 * Checks if @a pPriorClass is a known prior class.
1380 *
1381 * @returns true / false.
1382 * @param pClass The class to search.
1383 * @param pPriorClass The class to search for.
1384 */
1385DECL_FORCE_INLINE(bool) rtLockValidatorClassIsPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
1386{
1387 /*
1388 * Hash lookup here.
1389 */
1390 PRTLOCKVALCLASSREF pRef = pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
1391 if ( pRef
1392 && pRef->hClass == pPriorClass)
1393 {
1394 uint32_t cLookups = ASMAtomicIncU32(&pRef->cLookups);
1395 if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
1396 ASMAtomicWriteU32(&pRef->cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
1397#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1398 ASMAtomicIncU32(&pClass->cHashHits);
1399#endif
1400 return true;
1401 }
1402
1403 return rtLockValidatorClassIsPriorClassByLinearSearch(pClass, pPriorClass);
1404}
1405
1406
1407/**
1408 * Adds a class to the prior list.
1409 *
1410 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_SEM_LV_WRONG_ORDER.
1411 * @param pClass The class to work on.
1412 * @param pPriorClass The class to add.
1413 * @param fAutodidacticism Whether we're teaching ourselves (true) or
1414 * somebody is teaching us via the API (false).
1415 * @param pSrcPos Where this rule was added (optional).
1416 */
1417static int rtLockValidatorClassAddPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass,
1418 bool fAutodidacticism, PCRTLOCKVALSRCPOS pSrcPos)
1419{
1420 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
1421 rtLockValidatorLazyInit();
1422 int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);
1423
1424 /*
1425 * Check that there are no conflicts (no assert since we might race each other).
1426 */
1427 int rc = VERR_SEM_LV_INTERNAL_ERROR;
1428 if (!rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
1429 {
1430 if (!rtLockValidatorClassIsPriorClass(pClass, pPriorClass))
1431 {
1432 /*
1433 * Scan the table for a free entry, allocating a new chunk if necessary.
1434 */
1435 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; ; pChunk = pChunk->pNext)
1436 {
1437 bool fDone = false;
1438 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1439 {
1440 ASMAtomicCmpXchgHandle(&pChunk->aRefs[i].hClass, pPriorClass, NIL_RTLOCKVALCLASS, fDone);
1441 if (fDone)
1442 {
1443 pChunk->aRefs[i].fAutodidacticism = fAutodidacticism;
1444 rtLockValidatorClassRetain(pPriorClass);
1445 rc = VINF_SUCCESS;
1446 break;
1447 }
1448 }
1449 if (fDone)
1450 break;
1451
1452 /* If no more chunks, allocate a new one and insert the class before linking it. */
1453 if (!pChunk->pNext)
1454 {
1455 PRTLOCKVALCLASSREFCHUNK pNew = (PRTLOCKVALCLASSREFCHUNK)RTMemAlloc(sizeof(*pNew));
1456 if (!pNew)
1457 {
1458 rc = VERR_NO_MEMORY;
1459 break;
1460 }
1461 pNew->pNext = NULL;
1462 for (uint32_t i = 0; i < RT_ELEMENTS(pNew->aRefs); i++)
1463 {
1464 pNew->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1465 pNew->aRefs[i].cLookups = 0;
1466 pNew->aRefs[i].fAutodidacticism = false;
1467 pNew->aRefs[i].afReserved[0] = false;
1468 pNew->aRefs[i].afReserved[1] = false;
1469 pNew->aRefs[i].afReserved[2] = false;
1470 }
1471
1472 pNew->aRefs[0].hClass = pPriorClass;
1473 pNew->aRefs[0].fAutodidacticism = fAutodidacticism;
1474
1475 ASMAtomicWritePtr((void * volatile *)&pChunk->pNext, pNew);
1476 rtLockValidatorClassRetain(pPriorClass);
1477 rc = VINF_SUCCESS;
1478 break;
1479 }
1480 } /* chunk loop */
1481 }
1482 else
1483 rc = VINF_SUCCESS;
1484 }
1485 else
1486 rc = !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
1487
1488 if (RT_SUCCESS(rcLock))
1489 RTCritSectLeave(&g_LockValClassTeachCS);
1490 return rc;
1491}
1492
1493
1494RTDECL(int) RTLockValidatorClassAddPriorClass(RTLOCKVALCLASS hClass, RTLOCKVALCLASS hPriorClass)
1495{
1496 RTLOCKVALCLASSINT *pClass = hClass;
1497 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1498 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1499
1500 RTLOCKVALCLASSINT *pPriorClass = hPriorClass;
1501 AssertPtrReturn(pPriorClass, VERR_INVALID_HANDLE);
1502 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1503
1504 return rtLockValidatorClassAddPriorClass(pClass, pPriorClass, false /*fAutodidacticism*/, NULL);
1505}
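A sketch of teaching lock order manually through this API, using hypothetical class names: declaring hClassRoot prior to hClassLeaf means a thread may take a leaf lock while holding a root lock, but not the reverse.

    /* Sketch: manual lock-order rule, hypothetical classes. */
    RTLOCKVALCLASS hClassRoot, hClassLeaf;
    RTLockValidatorClassCreate(&hClassRoot, false /*fAutodidact*/, RT_SRC_POS, "root");
    RTLockValidatorClassCreate(&hClassLeaf, false /*fAutodidact*/, RT_SRC_POS, "leaf");
    /* root may be held when acquiring leaf; the opposite order trips
       VERR_SEM_LV_WRONG_ORDER (or is waved through in soft-order mode). */
    RTLockValidatorClassAddPriorClass(hClassLeaf, hClassRoot);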
1506
1507
1508RTDECL(int) RTLockValidatorClassEnforceStrictReleaseOrder(RTLOCKVALCLASS hClass, bool fEnabled)
1509{
1510 RTLOCKVALCLASSINT *pClass = hClass;
1511 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1512 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1513
1514 ASMAtomicWriteBool(&pClass->fStrictReleaseOrder, fEnabled);
1515 return VINF_SUCCESS;
1516}
1517
1518
1519/**
1520 * Unlinks all siblings.
1521 *
1522 * This is used during record deletion and assumes no races.
1523 *
1524 * @param pCore One of the siblings.
1525 */
1526static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
1527{
1528 /* ASSUMES sibling destruction doesn't involve any races and that all
1529 related records are to be disposed of now. */
1530 PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
1531 while (pSibling)
1532 {
1533 PRTLOCKVALRECUNION volatile *ppCoreNext;
1534 switch (pSibling->Core.u32Magic)
1535 {
1536 case RTLOCKVALRECEXCL_MAGIC:
1537 case RTLOCKVALRECEXCL_MAGIC_DEAD:
1538 ppCoreNext = &pSibling->Excl.pSibling;
1539 break;
1540
1541 case RTLOCKVALRECSHRD_MAGIC:
1542 case RTLOCKVALRECSHRD_MAGIC_DEAD:
1543 ppCoreNext = &pSibling->Shared.pSibling;
1544 break;
1545
1546 default:
1547 AssertFailed();
1548 ppCoreNext = NULL;
1549 break;
1550 }
1551 if (RT_UNLIKELY(!ppCoreNext))
1552 break;
1553 pSibling = (PRTLOCKVALRECUNION)ASMAtomicXchgPtr((void * volatile *)ppCoreNext, NULL);
1554 }
1555}
1556
1557
1558RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
1559{
1560 /*
1561 * Validate input.
1562 */
1563 PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
1564 PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;
1565
1566 AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
1567 AssertReturn( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1568 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1569 , VERR_SEM_LV_INVALID_PARAMETER);
1570
1571 AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
1572 AssertReturn( p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1573 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1574 , VERR_SEM_LV_INVALID_PARAMETER);
1575
1576 /*
1577 * Link them (circular list).
1578 */
1579 if ( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1580 && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
1581 {
1582 p1->Excl.pSibling = p2;
1583 p2->Shared.pSibling = p1;
1584 }
1585 else if ( p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1586 && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
1587 {
1588 p1->Shared.pSibling = p2;
1589 p2->Excl.pSibling = p1;
1590 }
1591 else
1592 AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */
1593
1594 return VINF_SUCCESS;
1595}
1596
1597
1598/**
1599 * Gets the lock name for the given record.
1600 *
1601 * @returns Read-only lock name.
1602 * @param pRec The lock record.
1603 */
1604DECL_FORCE_INLINE(const char *) rtLockValidatorRecName(PRTLOCKVALRECUNION pRec)
1605{
1606 switch (pRec->Core.u32Magic)
1607 {
1608 case RTLOCKVALRECEXCL_MAGIC:
1609 return pRec->Excl.szName;
1610 case RTLOCKVALRECSHRD_MAGIC:
1611 return pRec->Shared.szName;
1612 case RTLOCKVALRECSHRDOWN_MAGIC:
1613 return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
1614 case RTLOCKVALRECNEST_MAGIC:
1615 pRec = rtLockValidatorReadRecUnionPtr(&pRec->Nest.pRec);
1616 if (VALID_PTR(pRec))
1617 {
1618 switch (pRec->Core.u32Magic)
1619 {
1620 case RTLOCKVALRECEXCL_MAGIC:
1621 return pRec->Excl.szName;
1622 case RTLOCKVALRECSHRD_MAGIC:
1623 return pRec->Shared.szName;
1624 case RTLOCKVALRECSHRDOWN_MAGIC:
1625 return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
1626 default:
1627 return "unknown-nested";
1628 }
1629 }
1630 return "orphaned-nested";
1631 default:
1632 return "unknown";
1633 }
1634}
1635
1636
1637/**
1638 * Gets the class for this locking record.
1639 *
1640 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
1641 * @param pRec The lock validator record.
1642 */
1643DECLINLINE(RTLOCKVALCLASSINT *) rtLockValidatorRecGetClass(PRTLOCKVALRECUNION pRec)
1644{
1645 switch (pRec->Core.u32Magic)
1646 {
1647 case RTLOCKVALRECEXCL_MAGIC:
1648 return pRec->Excl.hClass;
1649
1650 case RTLOCKVALRECSHRD_MAGIC:
1651 return pRec->Shared.hClass;
1652
1653 case RTLOCKVALRECSHRDOWN_MAGIC:
1654 {
1655 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1656 if (RT_LIKELY( VALID_PTR(pSharedRec)
1657 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1658 return pSharedRec->hClass;
1659 return NIL_RTLOCKVALCLASS;
1660 }
1661
1662 case RTLOCKVALRECNEST_MAGIC:
1663 {
1664 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1665 if (VALID_PTR(pRealRec))
1666 {
1667 switch (pRealRec->Core.u32Magic)
1668 {
1669 case RTLOCKVALRECEXCL_MAGIC:
1670 return pRealRec->Excl.hClass;
1671
1672 case RTLOCKVALRECSHRDOWN_MAGIC:
1673 {
1674 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1675 if (RT_LIKELY( VALID_PTR(pSharedRec)
1676 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1677 return pSharedRec->hClass;
1678 break;
1679 }
1680
1681 default:
1682 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1683 break;
1684 }
1685 }
1686 return NIL_RTLOCKVALCLASS;
1687 }
1688
1689 default:
1690 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1691 return NIL_RTLOCKVALCLASS;
1692 }
1693}
1694
1695
1696/**
1697 * Gets the class for this locking record and the pointer to the one below it in
1698 * the stack.
1699 *
1700 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
1701 * @param pRec The lock validator record.
1702 * @param puSubClass Where to return the sub-class.
1703 * @param ppDown Where to return the pointer to the record below.
1704 */
1705DECL_FORCE_INLINE(RTLOCKVALCLASSINT *)
1706rtLockValidatorRecGetClassesAndDown(PRTLOCKVALRECUNION pRec, uint32_t *puSubClass, PRTLOCKVALRECUNION *ppDown)
1707{
1708 switch (pRec->Core.u32Magic)
1709 {
1710 case RTLOCKVALRECEXCL_MAGIC:
1711 *ppDown = pRec->Excl.pDown;
1712 *puSubClass = pRec->Excl.uSubClass;
1713 return pRec->Excl.hClass;
1714
1715 case RTLOCKVALRECSHRD_MAGIC:
1716 *ppDown = NULL;
1717 *puSubClass = pRec->Shared.uSubClass;
1718 return pRec->Shared.hClass;
1719
1720 case RTLOCKVALRECSHRDOWN_MAGIC:
1721 {
1722 *ppDown = pRec->ShrdOwner.pDown;
1723
1724 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1725 if (RT_LIKELY( VALID_PTR(pSharedRec)
1726 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1727 {
1728 *puSubClass = pSharedRec->uSubClass;
1729 return pSharedRec->hClass;
1730 }
1731 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1732 return NIL_RTLOCKVALCLASS;
1733 }
1734
1735 case RTLOCKVALRECNEST_MAGIC:
1736 {
1737 *ppDown = pRec->Nest.pDown;
1738
1739 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1740 if (VALID_PTR(pRealRec))
1741 {
1742 switch (pRealRec->Core.u32Magic)
1743 {
1744 case RTLOCKVALRECEXCL_MAGIC:
1745 *puSubClass = pRealRec->Excl.uSubClass;
1746 return pRealRec->Excl.hClass;
1747
1748 case RTLOCKVALRECSHRDOWN_MAGIC:
1749 {
1750 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1751 if (RT_LIKELY( VALID_PTR(pSharedRec)
1752 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1753 {
1754 *puSubClass = pSharedRec->uSubClass;
1755 return pSharedRec->hClass;
1756 }
1757 break;
1758 }
1759
1760 default:
1761 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1762 break;
1763 }
1764 }
1765 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1766 return NIL_RTLOCKVALCLASS;
1767 }
1768
1769 default:
1770 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1771 *ppDown = NULL;
1772 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1773 return NIL_RTLOCKVALCLASS;
1774 }
1775}
1776
1777
1778/**
1779 * Gets the sub-class for a lock record.
1780 *
1781 * @returns the sub-class.
1782 * @param pRec The lock validator record.
1783 */
1784DECLINLINE(uint32_t) rtLockValidatorRecGetSubClass(PRTLOCKVALRECUNION pRec)
1785{
1786 switch (pRec->Core.u32Magic)
1787 {
1788 case RTLOCKVALRECEXCL_MAGIC:
1789 return pRec->Excl.uSubClass;
1790
1791 case RTLOCKVALRECSHRD_MAGIC:
1792 return pRec->Shared.uSubClass;
1793
1794 case RTLOCKVALRECSHRDOWN_MAGIC:
1795 {
1796 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1797 if (RT_LIKELY( VALID_PTR(pSharedRec)
1798 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1799 return pSharedRec->uSubClass;
1800 return RTLOCKVAL_SUB_CLASS_NONE;
1801 }
1802
1803 case RTLOCKVALRECNEST_MAGIC:
1804 {
1805 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1806 if (VALID_PTR(pRealRec))
1807 {
1808 switch (pRealRec->Core.u32Magic)
1809 {
1810 case RTLOCKVALRECEXCL_MAGIC:
1811 return pRealRec->Excl.uSubClass;
1812
1813 case RTLOCKVALRECSHRDOWN_MAGIC:
1814 {
1815 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1816 if (RT_LIKELY( VALID_PTR(pSharedRec)
1817 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1818 return pSharedRec->uSubClass;
1819 break;
1820 }
1821
1822 default:
1823 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1824 break;
1825 }
1826 }
1827 return RTLOCKVAL_SUB_CLASS_NONE;
1828 }
1829
1830 default:
1831 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1832 return RTLOCKVAL_SUB_CLASS_NONE;
1833 }
1834}
1835
1836
1839/**
1840 * Calculates the depth of a lock stack.
1841 *
1842 * @returns Number of stack frames.
1843 * @param pThread The thread.
1844 */
1845static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread)
1846{
1847 uint32_t cEntries = 0;
1848 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
1849 while (VALID_PTR(pCur))
1850 {
1851 switch (pCur->Core.u32Magic)
1852 {
1853 case RTLOCKVALRECEXCL_MAGIC:
1854 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
1855 break;
1856
1857 case RTLOCKVALRECSHRDOWN_MAGIC:
1858 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
1859 break;
1860
1861 case RTLOCKVALRECNEST_MAGIC:
1862 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
1863 break;
1864
1865 default:
1866 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), cEntries);
1867 }
1868 cEntries++;
1869 }
1870 return cEntries;
1871}
1872
1873
1874/**
1875 * Checks if the stack contains @a pRec.
1876 *
1877 * @returns true / false.
1878 * @param pThreadSelf The current thread.
1879 * @param pRec The lock record.
1880 */
1881static bool rtLockValidatorStackContainsRec(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1882{
1883 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
1884 while (pCur)
1885 {
1886 AssertPtrReturn(pCur, false);
1887 if (pCur == pRec)
1888 return true;
1889 switch (pCur->Core.u32Magic)
1890 {
1891 case RTLOCKVALRECEXCL_MAGIC:
1892 Assert(pCur->Excl.cRecursion >= 1);
1893 pCur = pCur->Excl.pDown;
1894 break;
1895
1896 case RTLOCKVALRECSHRDOWN_MAGIC:
1897 Assert(pCur->ShrdOwner.cRecursion >= 1);
1898 pCur = pCur->ShrdOwner.pDown;
1899 break;
1900
1901 case RTLOCKVALRECNEST_MAGIC:
1902 Assert(pCur->Nest.cRecursion > 1);
1903 pCur = pCur->Nest.pDown;
1904 break;
1905
1906 default:
1907 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), false);
1908 }
1909 }
1910 return false;
1911}
1912
1913
1914/**
1915 * Pushes a lock record onto the stack.
1916 *
1917 * @param pThreadSelf The current thread.
1918 * @param pRec The lock record.
1919 */
1920static void rtLockValidatorStackPush(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1921{
1922 Assert(pThreadSelf == RTThreadSelf());
1923 Assert(!rtLockValidatorStackContainsRec(pThreadSelf, pRec));
1924
1925 switch (pRec->Core.u32Magic)
1926 {
1927 case RTLOCKVALRECEXCL_MAGIC:
1928 Assert(pRec->Excl.cRecursion == 1);
1929 Assert(pRec->Excl.pDown == NULL);
1930 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, pThreadSelf->LockValidator.pStackTop);
1931 break;
1932
1933 case RTLOCKVALRECSHRDOWN_MAGIC:
1934 Assert(pRec->ShrdOwner.cRecursion == 1);
1935 Assert(pRec->ShrdOwner.pDown == NULL);
1936 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, pThreadSelf->LockValidator.pStackTop);
1937 break;
1938
1939 default:
1940 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1941 }
1942 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pRec);
1943}
1944
1945
1946/**
1947 * Pops a lock record off the stack.
1948 *
1949 * @param pThreadSelf The current thread.
1950 * @param pRec The lock.
1951 */
1952static void rtLockValidatorStackPop(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1953{
1954 Assert(pThreadSelf == RTThreadSelf());
1955
1956 PRTLOCKVALRECUNION pDown;
1957 switch (pRec->Core.u32Magic)
1958 {
1959 case RTLOCKVALRECEXCL_MAGIC:
1960 Assert(pRec->Excl.cRecursion == 0);
1961 pDown = pRec->Excl.pDown;
1962 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, NULL); /* lazy bird */
1963 break;
1964
1965 case RTLOCKVALRECSHRDOWN_MAGIC:
1966 Assert(pRec->ShrdOwner.cRecursion == 0);
1967 pDown = pRec->ShrdOwner.pDown;
1968 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, NULL);
1969 break;
1970
1971 default:
1972 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1973 }
1974 if (pThreadSelf->LockValidator.pStackTop == pRec)
1975 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pDown);
1976 else
1977 {
1978 /* Find the pointer to our record and unlink ourselves. */
1979 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
1980 while (pCur)
1981 {
1982 PRTLOCKVALRECUNION volatile *ppDown;
1983 switch (pCur->Core.u32Magic)
1984 {
1985 case RTLOCKVALRECEXCL_MAGIC:
1986 Assert(pCur->Excl.cRecursion >= 1);
1987 ppDown = &pCur->Excl.pDown;
1988 break;
1989
1990 case RTLOCKVALRECSHRDOWN_MAGIC:
1991 Assert(pCur->ShrdOwner.cRecursion >= 1);
1992 ppDown = &pCur->ShrdOwner.pDown;
1993 break;
1994
1995 case RTLOCKVALRECNEST_MAGIC:
1996 Assert(pCur->Nest.cRecursion >= 1);
1997 ppDown = &pCur->Nest.pDown;
1998 break;
1999
2000 default:
2001 AssertMsgFailedReturnVoid(("%#x\n", pCur->Core.u32Magic));
2002 }
2003 pCur = *ppDown;
2004 if (pCur == pRec)
2005 {
2006 rtLockValidatorWriteRecUnionPtr(ppDown, pDown);
2007 return;
2008 }
2009 }
2010 AssertMsgFailed(("%p %p\n", pRec, pThreadSelf));
2011 }
2012}
2013
2014
2015/**
2016 * Creates and pushes lock recursion record onto the stack.
2017 *
2018 * @param pThreadSelf The current thread.
2019 * @param pRec The lock record.
2020 * @param pSrcPos Where the recursion occurred.
2021 */
2022static void rtLockValidatorStackPushRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec, PCRTLOCKVALSRCPOS pSrcPos)
2023{
2024 Assert(pThreadSelf == RTThreadSelf());
2025 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2026
2027#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2028 /*
2029 * Allocate a new recursion record
2030 */
2031 PRTLOCKVALRECNEST pRecursionRec = pThreadSelf->LockValidator.pFreeNestRecs;
2032 if (pRecursionRec)
2033 pThreadSelf->LockValidator.pFreeNestRecs = pRecursionRec->pNextFree;
2034 else
2035 {
2036 pRecursionRec = (PRTLOCKVALRECNEST)RTMemAlloc(sizeof(*pRecursionRec));
2037 if (!pRecursionRec)
2038 return;
2039 }
2040
2041 /*
2042 * Initialize it.
2043 */
2044 switch (pRec->Core.u32Magic)
2045 {
2046 case RTLOCKVALRECEXCL_MAGIC:
2047 pRecursionRec->cRecursion = pRec->Excl.cRecursion;
2048 break;
2049
2050 case RTLOCKVALRECSHRDOWN_MAGIC:
2051 pRecursionRec->cRecursion = pRec->ShrdOwner.cRecursion;
2052 break;
2053
2054 default:
2055 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
2056 rtLockValidatorSerializeDestructEnter();
2057 rtLockValidatorSerializeDestructLeave();
2058 RTMemFree(pRecursionRec);
2059 return;
2060 }
2061 Assert(pRecursionRec->cRecursion > 1);
2062 pRecursionRec->pRec = pRec;
2063 pRecursionRec->pDown = NULL;
2064 pRecursionRec->pNextFree = NULL;
2065 rtLockValidatorSrcPosCopy(&pRecursionRec->SrcPos, pSrcPos);
2066 pRecursionRec->Core.u32Magic = RTLOCKVALRECNEST_MAGIC;
2067
2068 /*
2069 * Link it.
2070 */
2071 pRecursionRec->pDown = pThreadSelf->LockValidator.pStackTop;
2072 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, (PRTLOCKVALRECUNION)pRecursionRec);
2073#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2074}
2075
2076
2077/**
2078 * Pops a lock recursion record off the stack.
2079 *
2080 * @param pThreadSelf The current thread.
2081 * @param pRec The lock record.
2082 */
2083static void rtLockValidatorStackPopRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2084{
2085 Assert(pThreadSelf == RTThreadSelf());
2086 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2087
2088 uint32_t cRecursion;
2089 switch (pRec->Core.u32Magic)
2090 {
2091 case RTLOCKVALRECEXCL_MAGIC: cRecursion = pRec->Excl.cRecursion; break;
2092 case RTLOCKVALRECSHRDOWN_MAGIC: cRecursion = pRec->ShrdOwner.cRecursion; break;
2093 default: AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
2094 }
2095 Assert(cRecursion >= 1);
2096
2097#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2098 /*
2099 * Pop the recursion record.
2100 */
2101 PRTLOCKVALRECUNION pNest = pThreadSelf->LockValidator.pStackTop;
2102 if ( pNest != NULL
2103 && pNest->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2104 && pNest->Nest.pRec == pRec
2105 )
2106 {
2107 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2108 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pNest->Nest.pDown);
2109 }
2110 else
2111 {
2112 /* Find the record above ours. */
2113 PRTLOCKVALRECUNION volatile *ppDown = NULL;
2114 for (;;)
2115 {
2116 AssertMsgReturnVoid(pNest, ("%p %p\n", pRec, pThreadSelf));
2117 switch (pNest->Core.u32Magic)
2118 {
2119 case RTLOCKVALRECEXCL_MAGIC:
2120 ppDown = &pNest->Excl.pDown;
2121 pNest = *ppDown;
2122 continue;
2123 case RTLOCKVALRECSHRDOWN_MAGIC:
2124 ppDown = &pNest->ShrdOwner.pDown;
2125 pNest = *ppDown;
2126 continue;
2127 case RTLOCKVALRECNEST_MAGIC:
2128 if (pNest->Nest.pRec == pRec)
2129 break;
2130 ppDown = &pNest->Nest.pDown;
2131 pNest = *ppDown;
2132 continue;
2133 default:
2134 AssertMsgFailedReturnVoid(("%#x\n", pNest->Core.u32Magic));
2135 }
2136 break; /* ugly */
2137 }
2138 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2139 rtLockValidatorWriteRecUnionPtr(ppDown, pNest->Nest.pDown);
2140 }
2141
2142 /*
2143 * Invalidate and free the record.
2144 */
2145 ASMAtomicWriteU32(&pNest->Core.u32Magic, RTLOCKVALRECNEST_MAGIC_DEAD);
2146 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pDown, NULL);
2147 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pRec, NULL);
2148 pNest->Nest.cRecursion = 0;
2149 pNest->Nest.pNextFree = pThreadSelf->LockValidator.pFreeNestRecs;
2150 pThreadSelf->LockValidator.pFreeNestRecs = &pNest->Nest;
2151#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2152}
2153
2154
2155/**
2156 * Helper for rtLockValidatorStackCheckLockingOrder that does the bitching and
2157 * returns VERR_SEM_LV_WRONG_ORDER.
2158 */
2159static int rtLockValidatorStackWrongOrder(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
2160 PRTLOCKVALRECUNION pRec1, PRTLOCKVALRECUNION pRec2,
2161 RTLOCKVALCLASSINT *pClass1, RTLOCKVALCLASSINT *pClass2)
2164{
2165 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pRec1, false);
2166 rtLockValComplainAboutLock("Other lock: ", pRec2, "\n");
2167 rtLockValComplainAboutClass("My class: ", pClass1, rtLockValidatorRecGetSubClass(pRec1), true /*fVerbose*/);
2168 rtLockValComplainAboutClass("Other class: ", pClass2, rtLockValidatorRecGetSubClass(pRec2), true /*fVerbose*/);
2169 rtLockValComplainAboutLockStack(pThreadSelf, 0, 0, pRec2);
2170 rtLockValComplainPanic();
2171 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
2172}
2173
2174
2175/**
2176 * Checks if the sub-class order is ok or not.
2177 *
2178 * Used to deal with two locks from the same class.
2179 *
2180 * @returns true if ok, false if not.
2181 * @param uSubClass1 The sub-class of the lock that is being
2182 * considered.
2183 * @param uSubClass2 The sub-class of the lock that is already being
2184 * held.
2185 */
2186DECL_FORCE_INLINE(bool) rtLockValidatorIsSubClassOrderOk(uint32_t uSubClass1, uint32_t uSubClass2)
2187{
2188 if (uSubClass1 > uSubClass2)
2189 {
2190 /* NONE kills ANY. */
2191 if (uSubClass2 == RTLOCKVAL_SUB_CLASS_NONE)
2192 return false;
2193 return true;
2194 }
2195
2196 /* ANY is compatible with all USER values. (uSubClass1 == NONE only if they are equal) */
2197 AssertCompile(RTLOCKVAL_SUB_CLASS_ANY > RTLOCKVAL_SUB_CLASS_NONE);
2198 if (uSubClass1 == RTLOCKVAL_SUB_CLASS_ANY)
2199 return true;
2200 return false;
2201}
2202
2203
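/* A quick illustration of the rules above, assuming the declared ordering
   NONE < ANY < USER from iprt/lockvalidator.h (sketch, not original code):

        taking (uSubClass1)    holding (uSubClass2)    ok?
        USER + 1               USER                    yes (ascending)
        USER                   USER + 1                no  (descending)
        ANY                    USER                    yes (ANY tolerates USER)
        USER                   NONE                    no  (NONE kills ANY)
        NONE                   NONE                    no  (no order defined)
 */

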
2204/**
2205 * Checks if the class and sub-class lock order is ok.
2206 *
2207 * @returns true if ok, false if not.
2208 * @param pClass1 The class of the lock that is being considered.
2209 * @param uSubClass1 The sub-class that goes with @a pClass1.
2210 * @param pClass2 The class of the lock that is already being
2211 * held.
2212 * @param uSubClass2 The sub-class that goes with @a pClass2.
2213 */
2214DECL_FORCE_INLINE(bool) rtLockValidatorIsClassOrderOk(RTLOCKVALCLASSINT *pClass1, uint32_t uSubClass1,
2215 RTLOCKVALCLASSINT *pClass2, uint32_t uSubClass2)
2216{
2217 if (pClass1 == pClass2)
2218 return rtLockValidatorIsSubClassOrderOk(uSubClass1, uSubClass2);
2219 return rtLockValidatorClassIsPriorClass(pClass1, pClass2);
2220}
2221
2222
2223/**
2224 * Checks the locking order, part two.
2225 *
2226 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2227 * @param pClass The lock class.
2228 * @param uSubClass The lock sub-class.
2229 * @param pThreadSelf The current thread.
2230 * @param pRec The lock record.
2231 * @param pSrcPos The source position of the locking operation.
2232 */
2233static int rtLockValidatorStackCheckLockingOrder2(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2234 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2235 PCRTLOCKVALSRCPOS const pSrcPos,
2236 RTLOCKVALCLASSINT * const pFirstBadClass,
2237 PRTLOCKVALRECUNION const pFirstBadRec,
2238 PRTLOCKVALRECUNION const pFirstBadDown)
2239{
2240 /*
2241 * Something went wrong; pFirstBadRec is pointing to where.
2242 */
2243 if ( pClass == pFirstBadClass
2244 || rtLockValidatorClassIsPriorClass(pFirstBadClass, pClass))
2245 return rtLockValidatorStackWrongOrder("Wrong locking order!", pSrcPos, pThreadSelf,
2246 pRec, pFirstBadRec, pClass, pFirstBadClass);
2247 if (!pClass->fAutodidact)
2248 return rtLockValidatorStackWrongOrder("Wrong locking order! (unknown)", pSrcPos, pThreadSelf,
2249 pRec, pFirstBadRec, pClass, pFirstBadClass);
2250
2251 /*
2252 * This class is an autodidact, so we have to check out the rest of the stack
2253 * for direct violations.
2254 */
2255 uint32_t cNewRules = 1;
2256 PRTLOCKVALRECUNION pCur = pFirstBadDown;
2257 while (pCur)
2258 {
2259 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2260
2261 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2262 pCur = pCur->Nest.pDown;
2263 else
2264 {
2265 PRTLOCKVALRECUNION pDown;
2266 uint32_t uPriorSubClass;
2267 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2268 if (pPriorClass != NIL_RTLOCKVALCLASS)
2269 {
2270 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2271 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2272 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2273 {
2274 if ( pClass == pPriorClass
2275 || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
2276 return rtLockValidatorStackWrongOrder("Wrong locking order! (more than one)", pSrcPos, pThreadSelf,
2277 pRec, pCur, pClass, pPriorClass);
2278 cNewRules++;
2279 }
2280 }
2281 pCur = pDown;
2282 }
2283 }
2284
2285 if (cNewRules == 1)
2286 {
2287 /*
2288 * Special case the simple operation, hoping that it will be a
2289 * frequent case.
2290 */
2291 int rc = rtLockValidatorClassAddPriorClass(pClass, pFirstBadClass, true /*fAutodidacticism*/, pSrcPos);
2292 if (rc == VERR_SEM_LV_WRONG_ORDER)
2293 return rtLockValidatorStackWrongOrder("Wrong locking order! (race)", pSrcPos, pThreadSelf,
2294 pRec, pFirstBadRec, pClass, pFirstBadClass);
2295 Assert(RT_SUCCESS(rc) || rc == VERR_NO_MEMORY);
2296 }
2297 else
2298 {
2299 /*
2300 * We may be adding more than one rule, so we have to take the lock
2301 * before starting to add the rules. This means we have to check
2302 * the state after taking it since we might be racing someone adding
2303 * a conflicting rule.
2304 */
2305 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
2306 rtLockValidatorLazyInit();
2307 int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);
2308
2309 /* Check */
2310 pCur = pFirstBadRec;
2311 while (pCur)
2312 {
2313 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2314 pCur = pCur->Nest.pDown;
2315 else
2316 {
2317 uint32_t uPriorSubClass;
2318 PRTLOCKVALRECUNION pDown;
2319 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2320 if (pPriorClass != NIL_RTLOCKVALCLASS)
2321 {
2322 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2323 {
2324 if ( pClass == pPriorClass
2325 || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
2326 {
2327 if (RT_SUCCESS(rcLock))
2328 RTCritSectLeave(&g_LockValClassTeachCS);
2329 return rtLockValidatorStackWrongOrder("Wrong locking order! (2nd)", pSrcPos, pThreadSelf,
2330 pRec, pCur, pClass, pPriorClass);
2331 }
2332 }
2333 }
2334 pCur = pDown;
2335 }
2336 }
2337
2338 /* Iterate the stack yet again, adding new rules this time. */
2339 pCur = pFirstBadRec;
2340 while (pCur)
2341 {
2342 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2343 pCur = pCur->Nest.pDown;
2344 else
2345 {
2346 uint32_t uPriorSubClass;
2347 PRTLOCKVALRECUNION pDown;
2348 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2349 if (pPriorClass != NIL_RTLOCKVALCLASS)
2350 {
2351 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2352 {
2353 Assert( pClass != pPriorClass
2354 && !rtLockValidatorClassIsPriorClass(pPriorClass, pClass));
2355 int rc = rtLockValidatorClassAddPriorClass(pClass, pPriorClass, true /*fAutodidacticism*/, pSrcPos);
2356 if (RT_FAILURE(rc))
2357 {
2358 Assert(rc == VERR_NO_MEMORY);
2359 break;
2360 }
2361 Assert(rtLockValidatorClassIsPriorClass(pClass, pPriorClass));
2362 }
2363 }
2364 pCur = pDown;
2365 }
2366 }
2367
2368 if (RT_SUCCESS(rcLock))
2369 RTCritSectLeave(&g_LockValClassTeachCS);
2370 }
2371
2372 return VINF_SUCCESS;
2373}
2374
2375
2377/**
2378 * Checks the locking order.
2379 *
2380 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2381 * @param pClass The lock class.
2382 * @param uSubClass The lock sub-class.
2383 * @param pThreadSelf The current thread.
2384 * @param pRec The lock record.
2385 * @param pSrcPos The source position of the locking operation.
2386 */
2387static int rtLockValidatorStackCheckLockingOrder(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2388 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2389 PCRTLOCKVALSRCPOS pSrcPos)
2390{
2391 /*
2392 * Some internal paranoia first.
2393 */
2394 AssertPtr(pClass);
2395 Assert(pClass->u32Magic == RTLOCKVALCLASS_MAGIC);
2396 AssertPtr(pThreadSelf);
2397 Assert(pThreadSelf->u32Magic == RTTHREADINT_MAGIC);
2398 AssertPtr(pRec);
2399 AssertPtrNull(pSrcPos);
2400
2401 /*
2402 * Walk the stack, delegate problems to a worker routine.
2403 */
2404 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
2405 if (!pCur)
2406 return VINF_SUCCESS;
2407
2408 for (;;)
2409 {
2410 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2411
2412 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2413 pCur = pCur->Nest.pDown;
2414 else
2415 {
2416 uint32_t uPriorSubClass;
2417 PRTLOCKVALRECUNION pDown;
2418 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2419 if (pPriorClass != NIL_RTLOCKVALCLASS)
2420 {
2421 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2422 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2423 if (RT_UNLIKELY(!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass)))
2424 return rtLockValidatorStackCheckLockingOrder2(pClass, uSubClass, pThreadSelf, pRec, pSrcPos,
2425 pPriorClass, pCur, pDown);
2426 }
2427 pCur = pDown;
2428 }
2429 if (!pCur)
2430 return VINF_SUCCESS;
2431 }
2432}
2433
2434
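/* A minimal scenario for the walk above, with hypothetical classes hA and hB
   where hA has been registered as a prior class of hB (sketch, not original
   code):

        Thread 1: take A, then take B   - ok, respects the A-before-B rule.
        Thread 2: take B, then take A   - the stack walk finds the hB record
                  below the new hA request; hA is not ordered after hB, and
                  since hA is already a prior class of hB the conflict cannot
                  be learned away, so the part-two worker returns
                  VERR_SEM_LV_WRONG_ORDER (VINF_SUCCESS in soft mode).
 */

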
2435/**
2436 * Check that the lock record is the topmost one on the stack, complain and fail
2437 * if it isn't.
2438 *
2439 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_RELEASE_ORDER or
2440 * VERR_SEM_LV_INVALID_PARAMETER.
2441 * @param pThreadSelf The current thread.
2442 * @param pRec The record.
2443 */
2444static int rtLockValidatorStackCheckReleaseOrder(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2445{
2446 AssertReturn(pThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
2447 Assert(pThreadSelf == RTThreadSelf());
2448
2449 PRTLOCKVALRECUNION pTop = pThreadSelf->LockValidator.pStackTop;
2450 if (RT_LIKELY( pTop == pRec
2451 || ( pTop
2452 && pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2453 && pTop->Nest.pRec == pRec) ))
2454 return VINF_SUCCESS;
2455
2456#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2457 /* Look for a recursion record so the right frame is dumped and marked. */
2458 while (pTop)
2459 {
2460 if (pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2461 {
2462 if (pTop->Nest.pRec == pRec)
2463 {
2464 pRec = pTop;
2465 break;
2466 }
2467 pTop = pTop->Nest.pDown;
2468 }
2469 else if (pTop->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2470 pTop = pTop->Excl.pDown;
2471 else if (pTop->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2472 pTop = pTop->ShrdOwner.pDown;
2473 else
2474 break;
2475 }
2476#endif
2477
2478 rtLockValComplainFirst("Wrong release order!", NULL, pThreadSelf, pRec, true);
2479 rtLockValComplainPanic();
2480 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_RELEASE_ORDER : VINF_SUCCESS;
2481}
2482
2483
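/* What the check above means for a class created with fStrictReleaseOrder
   set, using two hypothetical locks A and B of that class (sketch, not
   original code):

        take A, take B, release B, release A   - fine, strictly LIFO.
        take A, take B, release A              - the A record is not at the
                  top of the lock stack, so this trips
                  VERR_SEM_LV_WRONG_RELEASE_ORDER (VINF_SUCCESS in soft mode).
 */

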
2484/**
2485 * Checks if all owners are blocked (shared record operating in signaller mode).
2486 *
2487 * @returns true / false accordingly.
2488 * @param pRec The record.
2489 * @param pThreadSelf The current thread.
2490 */
2491DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf)
2492{
2493 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
2494 uint32_t cAllocated = pRec->cAllocated;
2495 uint32_t cEntries = ASMAtomicUoReadU32(&pRec->cEntries);
2496 if (cEntries == 0)
2497 return false;
2498
2499 for (uint32_t i = 0; i < cAllocated; i++)
2500 {
2501 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[i]);
2502 if ( pEntry
2503 && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2504 {
2505 PRTTHREADINT pCurThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2506 if (!pCurThread)
2507 return false;
2508 if (pCurThread->u32Magic != RTTHREADINT_MAGIC)
2509 return false;
2510 if ( !RTTHREAD_IS_SLEEPING(rtThreadGetState(pCurThread))
2511 && pCurThread != pThreadSelf)
2512 return false;
2513 if (--cEntries == 0)
2514 break;
2515 }
2516 else
2517 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2518 }
2519
2520 return true;
2521}
2522
2523
2524/**
2525 * Verifies the deadlock stack before calling it a deadlock.
2526 *
2527 * @retval VERR_SEM_LV_DEADLOCK if it's a deadlock.
2528 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
2529 * @retval VERR_TRY_AGAIN if something changed.
2530 *
2531 * @param pStack The deadlock detection stack.
2532 * @param pThreadSelf The current thread.
2533 */
2534static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack, PRTTHREADINT pThreadSelf)
2535{
2536 uint32_t const c = pStack->c;
2537 for (uint32_t iPass = 0; iPass < 3; iPass++)
2538 {
2539 for (uint32_t i = 1; i < c; i++)
2540 {
2541 PRTTHREADINT pThread = pStack->a[i].pThread;
2542 if (pThread->u32Magic != RTTHREADINT_MAGIC)
2543 return VERR_TRY_AGAIN;
2544 if (rtThreadGetState(pThread) != pStack->a[i].enmState)
2545 return VERR_TRY_AGAIN;
2546 if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
2547 return VERR_TRY_AGAIN;
2548 /* ASSUMES the signaller records won't have siblings! */
2549 PRTLOCKVALRECUNION pRec = pStack->a[i].pRec;
2550 if ( pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
2551 && pRec->Shared.fSignaller
2552 && !rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf))
2553 return VERR_TRY_AGAIN;
2554 }
2555 RTThreadYield();
2556 }
2557
2558 if (c == 1)
2559 return VERR_SEM_LV_ILLEGAL_UPGRADE;
2560 return VERR_SEM_LV_DEADLOCK;
2561}
2562
2563
2564/**
2565 * Checks for stack cycles caused by another deadlock before returning.
2566 *
2567 * @retval VINF_SUCCESS if the stack is simply too small.
2568 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
2569 *
2570 * @param pStack The deadlock detection stack.
2571 */
2572static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALDDSTACK pStack)
2573{
2574 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
2575 {
2576 PRTTHREADINT pThread = pStack->a[i].pThread;
2577 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
2578 if (pStack->a[j].pThread == pThread)
2579 return VERR_SEM_LV_EXISTING_DEADLOCK;
2580 }
2581 static bool volatile s_fComplained = false;
2582 if (!s_fComplained)
2583 {
2584 s_fComplained = true;
2585 rtLockValComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
2586 }
2587 return VINF_SUCCESS;
2588}
2589
2590
2591/**
2592 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
2593 * detection.
2594 *
2595 * @retval VINF_SUCCESS
2596 * @retval VERR_SEM_LV_DEADLOCK
2597 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2598 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2599 * @retval VERR_TRY_AGAIN
2600 *
2601 * @param pStack The stack to use.
2602 * @param pOriginalRec The original record.
2603 * @param pThreadSelf The calling thread.
2604 */
2605static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
2606 PRTTHREADINT const pThreadSelf)
2607{
2608 pStack->c = 0;
2609
2610 /* We could use a single RTLOCKVALDDENTRY variable here, but the
2611 compiler may do a better job of it when using individual variables. */
2612 PRTLOCKVALRECUNION pRec = pOriginalRec;
2613 PRTLOCKVALRECUNION pFirstSibling = pOriginalRec;
2614 uint32_t iEntry = UINT32_MAX;
2615 PRTTHREADINT pThread = NIL_RTTHREAD;
2616 RTTHREADSTATE enmState = RTTHREADSTATE_RUNNING;
2617 for (uint32_t iLoop = 0; ; iLoop++)
2618 {
2619 /*
2620 * Process the current record.
2621 */
2622 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2623
2624 /* Find the next relevant owner thread and record. */
2625 PRTLOCKVALRECUNION pNextRec = NULL;
2626 RTTHREADSTATE enmNextState = RTTHREADSTATE_RUNNING;
2627 PRTTHREADINT pNextThread = NIL_RTTHREAD;
2628 switch (pRec->Core.u32Magic)
2629 {
2630 case RTLOCKVALRECEXCL_MAGIC:
2631 Assert(iEntry == UINT32_MAX);
2632 for (;;)
2633 {
2634 pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
2635 if ( !pNextThread
2636 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2637 break;
2638 enmNextState = rtThreadGetState(pNextThread);
2639 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2640 && pNextThread != pThreadSelf)
2641 break;
2642 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2643 if (RT_LIKELY( !pNextRec
2644 || enmNextState == rtThreadGetState(pNextThread)))
2645 break;
2646 pNextRec = NULL;
2647 }
2648 if (!pNextRec)
2649 {
2650 pRec = pRec->Excl.pSibling;
2651 if ( pRec
2652 && pRec != pFirstSibling)
2653 continue;
2654 pNextThread = NIL_RTTHREAD;
2655 }
2656 break;
2657
2658 case RTLOCKVALRECSHRD_MAGIC:
2659 if (!pRec->Shared.fSignaller)
2660 {
2661 /* Skip to the next sibling if same side. ASSUMES reader priority. */
2662 /** @todo The read side of a read-write lock is problematic if
2663 * the implementation prioritizes writers over readers because
2664 * that means we could deadlock against current readers
2665 * if a writer showed up. If the RW sem implementation is
2666 * wrapping some native API, it's not so easy to detect when we
2667 * should do this and when we shouldn't. Checking when we
2668 * shouldn't is subject to wakeup scheduling and cannot easily
2669 * be made reliable.
2670 *
2671 * At the moment we circumvent all this mess by declaring that
2672 * readers have priority. This is TRUE on Linux, but probably
2673 * isn't on Solaris and FreeBSD. */
2674 if ( pRec == pFirstSibling
2675 && pRec->Shared.pSibling != NULL
2676 && pRec->Shared.pSibling != pFirstSibling)
2677 {
2678 pRec = pRec->Shared.pSibling;
2679 Assert(iEntry == UINT32_MAX);
2680 continue;
2681 }
2682 }
2683
2684 /* Scan the owner table for blocked owners. */
2685 if ( ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0
2686 && ( !pRec->Shared.fSignaller
2687 || iEntry != UINT32_MAX
2688 || rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf)
2689 )
2690 )
2691 {
2692 uint32_t cAllocated = pRec->Shared.cAllocated;
2693 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->Shared.papOwners;
2694 while (++iEntry < cAllocated)
2695 {
2696 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
2697 if (pEntry)
2698 {
2699 for (;;)
2700 {
2701 if (pEntry->Core.u32Magic != RTLOCKVALRECSHRDOWN_MAGIC)
2702 break;
2703 pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2704 if ( !pNextThread
2705 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2706 break;
2707 enmNextState = rtThreadGetState(pNextThread);
2708 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2709 && pNextThread != pThreadSelf)
2710 break;
2711 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2712 if (RT_LIKELY( !pNextRec
2713 || enmNextState == rtThreadGetState(pNextThread)))
2714 break;
2715 pNextRec = NULL;
2716 }
2717 if (pNextRec)
2718 break;
2719 }
2720 else
2721 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2722 }
2723 if (pNextRec)
2724 break;
2725 pNextThread = NIL_RTTHREAD;
2726 }
2727
2728 /* Advance to the next sibling, if any. */
2729 pRec = pRec->Shared.pSibling;
2730 if ( pRec != NULL
2731 && pRec != pFirstSibling)
2732 {
2733 iEntry = UINT32_MAX;
2734 continue;
2735 }
2736 break;
2737
2738 case RTLOCKVALRECEXCL_MAGIC_DEAD:
2739 case RTLOCKVALRECSHRD_MAGIC_DEAD:
2740 break;
2741
2742 case RTLOCKVALRECSHRDOWN_MAGIC:
2743 case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
2744 default:
2745 AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core));
2746 break;
2747 }
2748
2749 if (pNextRec)
2750 {
2751 /*
2752 * Recurse and check for deadlock.
2753 */
2754 uint32_t i = pStack->c;
2755 if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
2756 return rtLockValidatorDdHandleStackOverflow(pStack);
2757
2758 pStack->c++;
2759 pStack->a[i].pRec = pRec;
2760 pStack->a[i].iEntry = iEntry;
2761 pStack->a[i].enmState = enmState;
2762 pStack->a[i].pThread = pThread;
2763 pStack->a[i].pFirstSibling = pFirstSibling;
2764
2765 if (RT_UNLIKELY( pNextThread == pThreadSelf
2766 && ( i != 0
2767 || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC
2768 || !pRec->Shared.fSignaller) /* ASSUMES signaller records have no siblings. */
2769 )
2770 )
2771 return rtLockValidatorDdVerifyDeadlock(pStack, pThreadSelf);
2772
2773 pRec = pNextRec;
2774 pFirstSibling = pNextRec;
2775 iEntry = UINT32_MAX;
2776 enmState = enmNextState;
2777 pThread = pNextThread;
2778 }
2779 else
2780 {
2781 /*
2782 * No deadlock here, unwind the stack and deal with any unfinished
2783 * business there.
2784 */
2785 uint32_t i = pStack->c;
2786 for (;;)
2787 {
2788 /* pop */
2789 if (i == 0)
2790 return VINF_SUCCESS;
2791 i--;
2792 pRec = pStack->a[i].pRec;
2793 iEntry = pStack->a[i].iEntry;
2794
2795 /* Examine it. */
2796 uint32_t u32Magic = pRec->Core.u32Magic;
2797 if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
2798 pRec = pRec->Excl.pSibling;
2799 else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
2800 {
2801 if (iEntry + 1 < pRec->Shared.cAllocated)
2802 break; /* continue processing this record. */
2803 pRec = pRec->Shared.pSibling;
2804 }
2805 else
2806 {
2807 Assert( u32Magic == RTLOCKVALRECEXCL_MAGIC_DEAD
2808 || u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
2809 continue;
2810 }
2811
2812 /* Any next record to advance to? */
2813 if ( !pRec
2814 || pRec == pStack->a[i].pFirstSibling)
2815 continue;
2816 iEntry = UINT32_MAX;
2817 break;
2818 }
2819
2820 /* Restore the rest of the state and update the stack. */
2821 pFirstSibling = pStack->a[i].pFirstSibling;
2822 enmState = pStack->a[i].enmState;
2823 pThread = pStack->a[i].pThread;
2824 pStack->c = i;
2825 }
2826
2827 Assert(iLoop != 1000000);
2828 }
2829}
2830
2831
2832/**
2833 * Check for the simple no-deadlock case.
2834 *
2835 * @returns true if no deadlock, false if further investigation is required.
2836 *
2837 * @param pOriginalRec The original record.
2838 */
2839 DECLINLINE(bool) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
2840{
2841 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
2842 && !pOriginalRec->Excl.pSibling)
2843 {
2844 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
2845 if ( !pThread
2846 || pThread->u32Magic != RTTHREADINT_MAGIC)
2847 return true;
2848 RTTHREADSTATE enmState = rtThreadGetState(pThread);
2849 if (!RTTHREAD_IS_SLEEPING(enmState))
2850 return true;
2851 }
2852 return false;
2853}
2854
2855
2856/**
2857 * Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
2858 *
2859 * @param pStack The chain of locks causing the deadlock.
2860 * @param pRec The record relating to the current thread's lock
2861 * operation.
2862 * @param pThreadSelf This thread.
2863 * @param pSrcPos Where we are going to deadlock.
2864 * @param rc The return code.
2865 */
2866static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION pRec,
2867 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
2868{
2869 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
2870 {
2871 const char *pszWhat;
2872 switch (rc)
2873 {
2874 case VERR_SEM_LV_DEADLOCK: pszWhat = "Detected deadlock!"; break;
2875 case VERR_SEM_LV_EXISTING_DEADLOCK: pszWhat = "Found existing deadlock!"; break;
2876 case VERR_SEM_LV_ILLEGAL_UPGRADE: pszWhat = "Illegal lock upgrade!"; break;
2877 default: AssertFailed(); pszWhat = "!unexpected rc!"; break;
2878 }
2879 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL, true);
2880 rtLockValComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
2881 for (uint32_t i = 0; i < pStack->c; i++)
2882 {
2883 char szPrefix[24];
2884 RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
2885 PRTLOCKVALRECUNION pShrdOwner = NULL;
2886 if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
2887 pShrdOwner = (PRTLOCKVALRECUNION)pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
2888 if (VALID_PTR(pShrdOwner) && pShrdOwner->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2889 {
2890 rtLockValComplainAboutLock(szPrefix, pShrdOwner, "\n");
2891 rtLockValComplainAboutLockStack(pShrdOwner->ShrdOwner.hThread, 5, 2, pShrdOwner);
2892 }
2893 else
2894 {
2895 rtLockValComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
2896 if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2897 rtLockValComplainAboutLockStack(pStack->a[i].pRec->Excl.hThread, 5, 2, pStack->a[i].pRec);
2898 }
2899 }
2900 rtLockValComplainMore("---- end of deadlock chain ----\n");
2901 }
2902
2903 rtLockValComplainPanic();
2904}
2905
2906
2907/**
2908 * Perform deadlock detection.
2909 *
2910 * @retval VINF_SUCCESS
2911 * @retval VERR_SEM_LV_DEADLOCK
2912 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2913 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2914 *
2915 * @param pRec The record relating to the current thread's lock
2916 * operation.
2917 * @param pThreadSelf The current thread.
2918 * @param pSrcPos The position of the current lock operation.
2919 */
2920static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
2921{
2922 RTLOCKVALDDSTACK Stack;
2923 int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2924 if (RT_SUCCESS(rc))
2925 return VINF_SUCCESS;
2926
2927 if (rc == VERR_TRY_AGAIN)
2928 {
2929 for (uint32_t iLoop = 0; ; iLoop++)
2930 {
2931 rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2932 if (RT_SUCCESS_NP(rc))
2933 return VINF_SUCCESS;
2934 if (rc != VERR_TRY_AGAIN)
2935 break;
2936 RTThreadYield();
2937 if (iLoop >= 3)
2938 return VINF_SUCCESS;
2939 }
2940 }
2941
2942 rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
2943 return rc;
2944}
2945
2946
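/* The classic cycle the code above is built to catch, with hypothetical
   exclusive locks A and B (sketch, not original code):

        Thread 1: owns A, is blocked on B.
        Thread 2: owns B, now tries to take A.

   When thread 2 enters the validator before sleeping on A, the worker walks
   from A to its owner (thread 1), from thread 1 to the record it is waiting
   on (B), and from B's owner back to thread 2 itself.  The chain is then
   re-checked by rtLockValidatorDdVerifyDeadlock before VERR_SEM_LV_DEADLOCK
   is returned and complained about.
 */

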
2947RTDECL(void) RTLockValidatorRecExclInitV(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2948 void *hLock, bool fEnabled, const char *pszNameFmt, va_list va)
2949{
2950 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2951 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
2952 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
2953 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
2954 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
2955
2956 pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
2957 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
2958 pRec->afReserved[0] = 0;
2959 pRec->afReserved[1] = 0;
2960 pRec->afReserved[2] = 0;
2961 rtLockValidatorSrcPosInit(&pRec->SrcPos);
2962 pRec->hThread = NIL_RTTHREAD;
2963 pRec->pDown = NULL;
2964 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
2965 pRec->uSubClass = uSubClass;
2966 pRec->cRecursion = 0;
2967 pRec->hLock = hLock;
2968 pRec->pSibling = NULL;
2969 if (pszNameFmt)
2970 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
2971 else
2972 {
2973 static uint32_t volatile s_cAnonymous = 0;
2974 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
2975 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-excl-%u", i);
2976 }
2977
2978 /* Lazy initialization. */
2979 if (RT_UNLIKELY(g_hLockValidatorXRoads == NIL_RTSEMXROADS))
2980 rtLockValidatorLazyInit();
2981}
2982
2983
2984RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2985 void *hLock, bool fEnabled, const char *pszNameFmt, ...)
2986{
2987 va_list va;
2988 va_start(va, pszNameFmt);
2989 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, hLock, fEnabled, pszNameFmt, va);
2990 va_end(va);
2991}
2992
2993
2994RTDECL(int) RTLockValidatorRecExclCreateV(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2995 uint32_t uSubClass, void *pvLock, bool fEnabled,
2996 const char *pszNameFmt, va_list va)
2997{
2998 PRTLOCKVALRECEXCL pRec;
2999 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
3000 if (!pRec)
3001 return VERR_NO_MEMORY;
3002 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
3003 return VINF_SUCCESS;
3004}
3005
3006
3007RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
3008 uint32_t uSubClass, void *pvLock, bool fEnabled,
3009 const char *pszNameFmt, ...)
3010{
3011 va_list va;
3012 va_start(va, pszNameFmt);
3013 int rc = RTLockValidatorRecExclCreateV(ppRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
3014 va_end(va);
3015 return rc;
3016}
3017
3018
3019RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
3020{
3021 Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3022
3023 rtLockValidatorSerializeDestructEnter();
3024
3025 ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
3026 ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
3027 RTLOCKVALCLASS hClass;
3028 ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
3029 if (pRec->pSibling)
3030 rtLockValidatorUnlinkAllSiblings(&pRec->Core);
3031 rtLockValidatorSerializeDestructLeave();
3032 if (hClass != NIL_RTLOCKVALCLASS)
3033 RTLockValidatorClassRelease(hClass);
3034}
3035
3036
3037RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
3038{
3039 PRTLOCKVALRECEXCL pRec = *ppRec;
3040 *ppRec = NULL;
3041 if (pRec)
3042 {
3043 RTLockValidatorRecExclDelete(pRec);
3044 RTMemFree(pRec);
3045 }
3046}
3047
3048
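/* A minimal sketch of the typical embedded-record lifecycle; the MYMUTEX
   wrapper and pThis are hypothetical, the RTLockValidatorRecExcl* APIs are
   the ones declared in iprt/lockvalidator.h:

        typedef struct MYMUTEX
        {
            // ... native lock data ...
            RTLOCKVALRECEXCL ValidatorRec;
        } MYMUTEX;

        // Creation; a NIL class and sub-class NONE means no order validation.
        RTLockValidatorRecExclInit(&pThis->ValidatorRec, NIL_RTLOCKVALCLASS,
                                   RTLOCKVAL_SUB_CLASS_NONE, pThis,
                                   true, "MyMutex-%p", pThis);
        // After a successful acquisition (NIL_RTTHREAD auto-adopts the caller).
        RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, NIL_RTTHREAD, NULL, false);
        // At release.
        RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorRec, false);
        // Destruction.
        RTLockValidatorRecExclDelete(&pThis->ValidatorRec);
 */

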
3049RTDECL(uint32_t) RTLockValidatorRecExclSetSubClass(PRTLOCKVALRECEXCL pRec, uint32_t uSubClass)
3050{
3051 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3052 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3053 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3054 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3055 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3056 RTLOCKVAL_SUB_CLASS_INVALID);
3057 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3058}
3059
3060
3061RTDECL(void) RTLockValidatorRecExclSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3062 PCRTLOCKVALSRCPOS pSrcPos, bool fFirstRecursion)
3063{
3064 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3065 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3066 if (!pRecU->Excl.fEnabled)
3067 return;
3068 if (hThreadSelf == NIL_RTTHREAD)
3069 {
3070 hThreadSelf = RTThreadSelfAutoAdopt();
3071 AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
3072 }
3073 AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC);
3074 Assert(hThreadSelf == RTThreadSelf());
3075
3076 ASMAtomicIncS32(&hThreadSelf->LockValidator.cWriteLocks);
3077
3078 if (pRecU->Excl.hThread == hThreadSelf)
3079 {
3080 Assert(!fFirstRecursion);
3081 pRecU->Excl.cRecursion++;
3082 rtLockValidatorStackPushRecursion(hThreadSelf, pRecU, pSrcPos);
3083 }
3084 else
3085 {
3086 Assert(pRecU->Excl.hThread == NIL_RTTHREAD);
3087
3088 rtLockValidatorSrcPosCopy(&pRecU->Excl.SrcPos, pSrcPos);
3089 ASMAtomicUoWriteU32(&pRecU->Excl.cRecursion, 1);
3090 ASMAtomicWriteHandle(&pRecU->Excl.hThread, hThreadSelf);
3091
3092 rtLockValidatorStackPush(hThreadSelf, pRecU);
3093 }
3094}
3095
3096
3097/**
3098 * Internal worker for RTLockValidatorRecExclReleaseOwner and
3099 * RTLockValidatorRecExclReleaseOwnerUnchecked.
3100 */
3101static void rtLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECUNION pRec, bool fFinalRecursion)
3102{
3103 RTTHREADINT *pThread = pRec->Excl.hThread;
3104 AssertReturnVoid(pThread != NIL_RTTHREAD);
3105 Assert(pThread == RTThreadSelf());
3106
3107 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
3108 uint32_t c = ASMAtomicDecU32(&pRec->Excl.cRecursion);
3109 if (c == 0)
3110 {
3111 rtLockValidatorStackPop(pThread, pRec);
3112 ASMAtomicWriteHandle(&pRec->Excl.hThread, NIL_RTTHREAD);
3113 }
3114 else
3115 {
3116 Assert(c < UINT32_C(0xffff0000));
3117 Assert(!fFinalRecursion);
3118 rtLockValidatorStackPopRecursion(pThread, pRec);
3119 }
3120}
3121

3122RTDECL(int) RTLockValidatorRecExclReleaseOwner(PRTLOCKVALRECEXCL pRec, bool fFinalRecursion)
3123{
3124 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3125 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3126 if (!pRecU->Excl.fEnabled)
3127 return VINF_SUCCESS;
3128
3129 /*
3130 * Check the release order.
3131 */
3132 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3133 && pRecU->Excl.hClass->fStrictReleaseOrder
3134 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3135 )
3136 {
3137 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3138 if (RT_FAILURE(rc))
3139 return rc;
3140 }
3141
3142 /*
3143 * Join paths with RTLockValidatorRecExclReleaseOwnerUnchecked.
3144 */
3145 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, fFinalRecursion);
3146 return VINF_SUCCESS;
3147}
3148
3149
3150RTDECL(void) RTLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECEXCL pRec)
3151{
3152 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3153 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3154 if (pRecU->Excl.fEnabled)
3155 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, false);
3156}
3157
3158
3159RTDECL(int) RTLockValidatorRecExclRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
3160{
3161 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3162 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3163 if (!pRecU->Excl.fEnabled)
3164 return VINF_SUCCESS;
3165 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3166 AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
3167
3168 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3169 && !pRecU->Excl.hClass->fRecursionOk)
3170 {
3171 rtLockValComplainFirst("Recursion not allowed by the class!",
3172 pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
3173 rtLockValComplainPanic();
3174 return VERR_SEM_LV_NESTED;
3175 }
3176
3177 Assert(pRecU->Excl.cRecursion < _1M);
3178 pRecU->Excl.cRecursion++;
3179 rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
3180 return VINF_SUCCESS;
3181}
3182
3183
3184RTDECL(int) RTLockValidatorRecExclUnwind(PRTLOCKVALRECEXCL pRec)
3185{
3186 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3187 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3188 if (!pRecU->Excl.fEnabled)
3189 return VINF_SUCCESS;
3190 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3191 Assert(pRecU->Excl.hThread == RTThreadSelf());
3192 AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);
3193
3194 /*
3195 * Check the release order.
3196 */
3197 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3198 && pRecU->Excl.hClass->fStrictReleaseOrder
3199 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3200 )
3201 {
3202 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3203 if (RT_FAILURE(rc))
3204 return rc;
3205 }
3206
3207 /*
3208 * Perform the unwind.
3209 */
3210 pRecU->Excl.cRecursion--;
3211 rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
3212 return VINF_SUCCESS;
3213}
3214
3215
3216RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
3217{
3218 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3219 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3220 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
3221 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3222 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
3223 , VERR_SEM_LV_INVALID_PARAMETER);
3224 if (!pRecU->Excl.fEnabled)
3225 return VINF_SUCCESS;
3226 Assert(pRecU->Excl.hThread == RTThreadSelf());
3227 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3228 AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
3229
3230 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3231 && !pRecU->Excl.hClass->fRecursionOk)
3232 {
3233 rtLockValComplainFirst("Mixed recursion not allowed by the class!",
3234 pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
3235 rtLockValComplainPanic();
3236 return VERR_SEM_LV_NESTED;
3237 }
3238
3239 Assert(pRecU->Excl.cRecursion < _1M);
3240 pRecU->Excl.cRecursion++;
3241 rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
3242
3243 return VINF_SUCCESS;
3244}
3245
3246
3247RTDECL(int) RTLockValidatorRecExclUnwindMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed)
3248{
3249 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3250 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3251 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
3252 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3253 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
3254 , VERR_SEM_LV_INVALID_PARAMETER);
3255 if (!pRecU->Excl.fEnabled)
3256 return VINF_SUCCESS;
3257 Assert(pRecU->Excl.hThread == RTThreadSelf());
3258 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3259 AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);
3260
3261 /*
3262 * Check the release order.
3263 */
3264 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3265 && pRecU->Excl.hClass->fStrictReleaseOrder
3266 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3267 )
3268 {
3269 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3270 if (RT_FAILURE(rc))
3271 return rc;
3272 }
3273
3274 /*
3275 * Perform the unwind.
3276 */
3277 pRecU->Excl.cRecursion--;
3278 rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
3279 return VINF_SUCCESS;
3280}
3281
3282
3283RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3284 PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
3285{
3286 /*
3287 * Validate and adjust input. Quit early if order validation is disabled.
3288 */
3289 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3290 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3291 if ( !pRecU->Excl.fEnabled
3292 || pRecU->Excl.hClass == NIL_RTLOCKVALCLASS
3293 || pRecU->Excl.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
3294 || pRecU->Excl.hClass->cMsMinOrder > cMillies)
3295 return VINF_SUCCESS;
3296
3297 if (hThreadSelf == NIL_RTTHREAD)
3298 {
3299 hThreadSelf = RTThreadSelfAutoAdopt();
3300 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
3301 }
3302 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3303 Assert(hThreadSelf == RTThreadSelf());
3304
3305 /*
3306 * Detect recursion as it isn't subject to order restrictions.
3307 */
3308 if (pRec->hThread == hThreadSelf)
3309 return VINF_SUCCESS;
3310
3311 return rtLockValidatorStackCheckLockingOrder(pRecU->Excl.hClass, pRecU->Excl.uSubClass, hThreadSelf, pRecU, pSrcPos);
3312}
3313
3314
3315RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3316 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3317 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3318{
3319 /*
3320 * Fend off wild life.
3321 */
3322 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3323 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3324 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3325 if (!pRec->fEnabled)
3326 return VINF_SUCCESS;
3327
3328 PRTTHREADINT pThreadSelf = hThreadSelf;
3329 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3330 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3331 Assert(pThreadSelf == RTThreadSelf());
3332
3333 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3334
3335 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3336 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3337 {
3338 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3339 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3340 , VERR_SEM_LV_INVALID_PARAMETER);
3341 enmSleepState = enmThreadState;
3342 }
3343
3344 /*
3345 * Record the location.
3346 */
3347 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3348 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3349 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3350 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3351 rtThreadSetState(pThreadSelf, enmSleepState);
3352
3353 /*
3354 * Don't do deadlock detection if we're recursing.
3355 *
3356 * On some hosts we don't do recursion accounting ourselves and there
3357 * isn't any other place to check for this.
3358 */
3359 int rc = VINF_SUCCESS;
3360 if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
3361 {
3362 if ( !fRecursiveOk
3363 || ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3364 && !pRecU->Excl.hClass->fRecursionOk))
3365 {
3366 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3367 rtLockValComplainPanic();
3368 rc = VERR_SEM_LV_NESTED;
3369 }
3370 }
3371 /*
3372 * Perform deadlock detection.
3373 */
3374 else if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3375 && ( pRecU->Excl.hClass->cMsMinDeadlock > cMillies
3376 || pRecU->Excl.hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT))
3377 rc = VINF_SUCCESS;
3378 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3379 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3380
3381 if (RT_SUCCESS(rc))
3382 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3383 else
3384 {
3385 rtThreadSetState(pThreadSelf, enmThreadState);
3386 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3387 }
3388 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3389 return rc;
3390}
3391RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckBlocking);
3392
3393
3394RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3395 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3396 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3397{
3398 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3399 if (RT_SUCCESS(rc))
3400 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3401 enmSleepState, fReallySleeping);
3402 return rc;
3403}
3404RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckOrderAndBlocking);
3405
3406
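/* A sketch of how a semaphore wait path strings the above together;
   rtMyMutexWait, MYMUTEX and the native wait are hypothetical, the IPRT
   calls are real:

        static int rtMyMutexWait(MYMUTEX *pThis, RTMSINTERVAL cMillies)
        {
            RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
            int rc = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec, hThreadSelf,
                                                                 NULL, true, cMillies,
                                                                 RTTHREADSTATE_MUTEX, true);
            if (RT_FAILURE(rc))
                return rc;      // wrong order, deadlock, illegal recursion, ...
            // ... block on the native primitive, then on success:
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
            RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, NULL, false);
            return VINF_SUCCESS;
        }
 */

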
3407RTDECL(void) RTLockValidatorRecSharedInitV(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3408 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, va_list va)
3409{
3410 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
3411 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
3412 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3413 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3414 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
3415
3416 pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
3417 pRec->uSubClass = uSubClass;
3418 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
3419 pRec->hLock = hLock;
3420 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
3421 pRec->fSignaller = fSignaller;
3422 pRec->pSibling = NULL;
3423
3424 /* the table */
3425 pRec->cEntries = 0;
3426 pRec->iLastEntry = 0;
3427 pRec->cAllocated = 0;
3428 pRec->fReallocating = false;
3429 pRec->fPadding = false;
3430 pRec->papOwners = NULL;
3431
3432 /* the name */
3433 if (pszNameFmt)
3434 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
3435 else
3436 {
3437 static uint32_t volatile s_cAnonymous = 0;
3438 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
3439 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-shrd-%u", i);
3440 }
3441}


RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                                          void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, hLock, fSignaller, fEnabled, pszNameFmt, va);
    va_end(va);
}


RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);

    /*
     * Flip it into table realloc mode and take the destruction lock.
     */
    rtLockValidatorSerializeDestructEnter();
    while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
    {
        rtLockValidatorSerializeDestructLeave();

        rtLockValidatorSerializeDetectionEnter();
        rtLockValidatorSerializeDetectionLeave();

        rtLockValidatorSerializeDestructEnter();
    }

    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->papOwners)
    {
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
        ASMAtomicUoWritePtr((void * volatile *)&pRec->papOwners, NULL);
        ASMAtomicUoWriteU32(&pRec->cAllocated, 0);

        RTMemFree((void *)papOwners); /* free the saved copy, pRec->papOwners is NULL by now */
    }
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    ASMAtomicWriteBool(&pRec->fReallocating, false);

    rtLockValidatorSerializeDestructLeave();

    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}
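

/**
 * Lifecycle sketch: a hypothetical read/write lock embedding a shared
 * validation record.  Illustrative only; the MYRWLOCK type and the myRWLock*
 * names are invented for this example.
 *
 * @code
 *  typedef struct MYRWLOCK
 *  {
 *      // ... the actual lock state ...
 *      RTLOCKVALRECSHRD    ValidatorRead;   // one shared record covers all readers
 *  } MYRWLOCK;
 *
 *  int myRWLockInit(MYRWLOCK *pThis)
 *  {
 *      RTLockValidatorRecSharedInit(&pThis->ValidatorRead, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE,
 *                                   pThis, false, true, "myrwlock");   // fSignaller=false, fEnabled=true
 *      return VINF_SUCCESS;
 *  }
 *
 *  void myRWLockDestroy(MYRWLOCK *pThis)
 *  {
 *      RTLockValidatorRecSharedDelete(&pThis->ValidatorRead);  // waits out racing deadlock scans
 *  }
 * @endcode
 */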


RTDECL(uint32_t) RTLockValidatorRecSharedSetSubClass(PRTLOCKVALRECSHRD pRec, uint32_t uSubClass)
{
    AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(   uSubClass >= RTLOCKVAL_SUB_CLASS_USER
                 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
                 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
                 RTLOCKVAL_SUB_CLASS_INVALID);
    return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
}


/**
 * Locates an owner (thread) in a shared lock record.
 *
 * @returns Pointer to the owner entry on success, NULL on failure.
 * @param   pShared             The shared lock record.
 * @param   hThread             The thread (owner) to find.
 * @param   piEntry             Where to optionally return the table index.
 *                              Optional.
 */
DECLINLINE(PRTLOCKVALRECUNION)
rtLockValidatorRecSharedFindOwner(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
{
    rtLockValidatorSerializeDetectionEnter();

    PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
    if (papOwners)
    {
        uint32_t const cMax = pShared->cAllocated;
        for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
        {
            PRTLOCKVALRECUNION pEntry = (PRTLOCKVALRECUNION)rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
            if (pEntry && pEntry->ShrdOwner.hThread == hThread)
            {
                rtLockValidatorSerializeDetectionLeave();
                if (piEntry)
                    *piEntry = iEntry;
                return pEntry;
            }
        }
    }

    rtLockValidatorSerializeDetectionLeave();
    return NULL;
}


RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                               PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
{
    /*
     * Validate and adjust input.  Quit early if order validation is disabled.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (   !pRecU->Shared.fEnabled
        || pRecU->Shared.hClass == NIL_RTLOCKVALCLASS
        || pRecU->Shared.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
        || pRecU->Shared.hClass->cMsMinOrder > cMillies)
        return VINF_SUCCESS;

    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Detect recursion as it isn't subject to order restrictions.
     */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(&pRecU->Shared, hThreadSelf, NULL);
    if (pEntry)
        return VINF_SUCCESS;

    return rtLockValidatorStackCheckLockingOrder(pRecU->Shared.hClass, pRecU->Shared.uSubClass, hThreadSelf, pRecU, pSrcPos);
}


RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                                  PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
                                                  RTTHREADSTATE enmSleepState, bool fReallySleeping)
{
    /*
     * Fend off wild life.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Shared.fEnabled)
        return VINF_SUCCESS;

    PRTTHREADINT pThreadSelf = hThreadSelf;
    AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);

    RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
    if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
    {
        AssertReturn(   enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                     || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                     , VERR_SEM_LV_INVALID_PARAMETER);
        enmSleepState = enmThreadState;
    }

    /*
     * Record the location.
     */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
    rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
    pThreadSelf->LockValidator.enmRecState = enmSleepState;
    rtThreadSetState(pThreadSelf, enmSleepState);

    /*
     * Don't do deadlock detection if we're recursing.
     */
    int rc = VINF_SUCCESS;
    PRTLOCKVALRECUNION pEntry = !pRecU->Shared.fSignaller
                              ? rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL)
                              : NULL;
    if (pEntry)
    {
        if (   !fRecursiveOk
            || (   pRec->hClass
                && !pRec->hClass->fRecursionOk))
        {
            rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
            rtLockValComplainPanic();
            rc = VERR_SEM_LV_NESTED;
        }
    }
    /*
     * Perform deadlock detection.
     */
    else if (   pRec->hClass
             && (   pRec->hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT
                 || pRec->hClass->cMsMinDeadlock > cMillies))
        rc = VINF_SUCCESS;
    else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
        rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);

    if (RT_SUCCESS(rc))
        ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
    else
    {
        rtThreadSetState(pThreadSelf, enmThreadState);
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
    }
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
    return rc;
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckBlocking);


RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                                          PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
                                                          RTTHREADSTATE enmSleepState, bool fReallySleeping)
{
    int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
    if (RT_SUCCESS(rc))
        rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
                                                   enmSleepState, fReallySleeping);
    return rc;
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckOrderAndBlocking);
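

/**
 * Wait-path sketch: how the hypothetical MYRWLOCK from the earlier example
 * might validate a contended shared (read) acquisition.  mySemWaitRead() is
 * an invented low-level wait primitive.
 *
 * @code
 *  int rc = RTLockValidatorRecSharedCheckOrderAndBlocking(&pThis->ValidatorRead, RTThreadSelf(),
 *                                                         NULL, true, cMillies,
 *                                                         RTTHREADSTATE_RW_READ, true);
 *  if (RT_SUCCESS(rc))
 *  {
 *      rc = mySemWaitRead(pThis, cMillies);                 // hypothetical
 *      RTThreadUnblocked(RTThreadSelf(), RTTHREADSTATE_RW_READ);
 *      if (RT_SUCCESS(rc))
 *          RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, NIL_RTTHREAD, NULL);
 *  }
 * @endcode
 */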


/**
 * Allocates and initializes an owner entry for the shared lock record.
 *
 * @returns The new owner entry.
 * @param   pRec                The shared lock record.
 * @param   pThreadSelf         The calling thread and owner.  Used for record
 *                              initialization and allocation.
 * @param   pSrcPos             The source position.
 */
DECLINLINE(PRTLOCKVALRECUNION)
rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pEntry;

    /*
     * Check if the thread has any statically allocated records we can easily
     * make use of.
     */
    unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners));
    if (   iEntry > 0
        && ASMAtomicBitTestAndClear(&pThreadSelf->LockValidator.bmFreeShrdOwners, iEntry - 1))
    {
        pEntry = (PRTLOCKVALRECUNION)&pThreadSelf->LockValidator.aShrdOwners[iEntry - 1];
        Assert(!pEntry->ShrdOwner.fReserved);
        pEntry->ShrdOwner.fStaticAlloc = true;
        rtThreadGet(pThreadSelf);
    }
    else
    {
        pEntry = (PRTLOCKVALRECUNION)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
        if (RT_UNLIKELY(!pEntry))
            return NULL;
        pEntry->ShrdOwner.fStaticAlloc = false;
    }

    pEntry->Core.u32Magic        = RTLOCKVALRECSHRDOWN_MAGIC;
    pEntry->ShrdOwner.cRecursion = 1;
    pEntry->ShrdOwner.fReserved  = true;
    pEntry->ShrdOwner.hThread    = pThreadSelf;
    pEntry->ShrdOwner.pDown      = NULL;
    pEntry->ShrdOwner.pSharedRec = pRec;
#if HC_ARCH_BITS == 32
    pEntry->ShrdOwner.pvReserved = NULL;
#endif
    if (pSrcPos)
        pEntry->ShrdOwner.SrcPos = *pSrcPos;
    else
        rtLockValidatorSrcPosInit(&pEntry->ShrdOwner.SrcPos);
    return pEntry;
}


/**
 * Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
 *
 * @param   pEntry              The owner entry.
 */
DECLINLINE(void) rtLockValidatorRecSharedFreeOwner(PRTLOCKVALRECSHRDOWN pEntry)
{
    if (pEntry)
    {
        Assert(pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC);
        ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);

        PRTTHREADINT pThread;
        ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThread);

        Assert(pEntry->fReserved);
        pEntry->fReserved = false;

        if (pEntry->fStaticAlloc)
        {
            AssertPtrReturnVoid(pThread);
            AssertReturnVoid(pThread->u32Magic == RTTHREADINT_MAGIC);

            uintptr_t iEntry = pEntry - &pThread->LockValidator.aShrdOwners[0];
            AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThread->LockValidator.aShrdOwners));

            Assert(!ASMBitTest(&pThread->LockValidator.bmFreeShrdOwners, iEntry));
            ASMAtomicBitSet(&pThread->LockValidator.bmFreeShrdOwners, iEntry);

            rtThreadRelease(pThread);
        }
        else
        {
            rtLockValidatorSerializeDestructEnter();
            rtLockValidatorSerializeDestructLeave();

            RTMemFree(pEntry);
        }
    }
}
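

/**
 * The static-owner cache used by the two functions above is a small lock-free
 * allocator: a per-thread bitmap marks which embedded slots are free,
 * ASMBitFirstSetU32 picks a candidate and ASMAtomicBitTestAndClear claims it
 * atomically, falling back to the heap on contention.  A standalone sketch of
 * the same pattern, with invented names:
 *
 * @code
 *  static uint32_t volatile g_bmFree = UINT32_C(0xffffffff);   // 32 free slots
 *
 *  int mySlotAlloc(void)
 *  {
 *      unsigned iBit = ASMBitFirstSetU32(ASMAtomicUoReadU32(&g_bmFree)); // 1-based, 0 if none
 *      if (   iBit > 0
 *          && ASMAtomicBitTestAndClear(&g_bmFree, iBit - 1))             // claim it atomically
 *          return (int)(iBit - 1);
 *      return -1;                                                        // caller falls back to heap
 *  }
 *
 *  void mySlotFree(int iSlot)
 *  {
 *      Assert(!ASMBitTest(&g_bmFree, iSlot));
 *      ASMAtomicBitSet(&g_bmFree, iSlot);                                // mark free again
 *  }
 * @endcode
 */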


/**
 * Make more room in the table.
 *
 * @retval  true on success.
 * @retval  false if we're out of memory or running into a bad race condition
 *          (probably a bug somewhere).  No longer holding the lock.
 *
 * @param   pShared             The shared lock record.
 */
static bool rtLockValidatorRecSharedMakeRoom(PRTLOCKVALRECSHRD pShared)
{
    for (unsigned i = 0; i < 1000; i++)
    {
        /*
         * Switch to the other data access direction.
         */
        rtLockValidatorSerializeDetectionLeave();
        if (i >= 10)
        {
            Assert(i != 10 && i != 100); /* flags suspiciously slow progress in debug builds */
            RTThreadSleep(i >= 100);     /* 0 = yield for rounds 10..99, 1 ms from round 100 on */
        }
        rtLockValidatorSerializeDestructEnter();

        /*
         * Try to grab the privilege of reallocating the table.
         */
        if (   pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
            && ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
        {
            uint32_t cAllocated = pShared->cAllocated;
            if (cAllocated < pShared->cEntries)
            {
                /*
                 * Ok, still not enough space.  Reallocate the table.
                 */
#if 0  /** @todo enable this after making sure growing works flawlessly. */
                uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
#else
                uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 1);
#endif
                PRTLOCKVALRECSHRDOWN *papOwners;
                papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
                                                                 (cAllocated + cInc) * sizeof(void *));
                if (!papOwners)
                {
                    ASMAtomicWriteBool(&pShared->fReallocating, false);
                    rtLockValidatorSerializeDestructLeave();
                    /* RTMemRealloc will assert */
                    return false;
                }

                while (cInc-- > 0)
                {
                    papOwners[cAllocated] = NULL;
                    cAllocated++;
                }

                ASMAtomicWritePtr((void * volatile *)&pShared->papOwners, papOwners);
                ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
            }
            ASMAtomicWriteBool(&pShared->fReallocating, false);
        }
        rtLockValidatorSerializeDestructLeave();

        rtLockValidatorSerializeDetectionEnter();
        if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
            break;

        if (pShared->cAllocated >= pShared->cEntries)
            return true;
    }

    rtLockValidatorSerializeDetectionLeave();
    AssertFailed(); /* too many iterations or destroyed while racing. */
    return false;
}


/**
 * Adds an owner entry to a shared lock record.
 *
 * @returns true on success, false on serious race or if we're out of memory.
 * @param   pShared             The shared lock record.
 * @param   pEntry              The owner entry.
 */
DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
{
    rtLockValidatorSerializeDetectionEnter();
    if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
    {
        if (   ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
            && !rtLockValidatorRecSharedMakeRoom(pShared))
            return false; /* the worker leaves the lock */

        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const                 cMax      = pShared->cAllocated;
        for (unsigned i = 0; i < 100; i++)
        {
            for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
            {
                if (ASMAtomicCmpXchgPtr((void * volatile *)&papOwners[iEntry], pEntry, NULL))
                {
                    rtLockValidatorSerializeDetectionLeave();
                    return true;
                }
            }
            Assert(i != 25);
        }
        AssertFailed();
    }
    rtLockValidatorSerializeDetectionLeave();
    return false;
}


/**
 * Remove an owner entry from a shared lock record and free it.
 *
 * @param   pShared             The shared lock record.
 * @param   pEntry              The owner entry to remove.
 * @param   iEntry              The last known index.
 */
DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
                                                            uint32_t iEntry)
{
    /*
     * Remove it from the table.
     */
    rtLockValidatorSerializeDetectionEnter();
    AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
    if (RT_UNLIKELY(   iEntry >= pShared->cAllocated
                    || !ASMAtomicCmpXchgPtr((void * volatile *)&pShared->papOwners[iEntry], NULL, pEntry)))
    {
        /* this shouldn't happen yet... */
        AssertFailed();
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const                 cMax      = pShared->cAllocated;
        for (iEntry = 0; iEntry < cMax; iEntry++)
            if (ASMAtomicCmpXchgPtr((void * volatile *)&papOwners[iEntry], NULL, pEntry))
                break;
        AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
    }
    uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
    Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow);
    rtLockValidatorSerializeDetectionLeave();

    /*
     * Successfully removed, now free it.
     */
    rtLockValidatorRecSharedFreeOwner(pEntry);
}


RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    AssertReturnVoid(hThread == NIL_RTTHREAD || hThread->u32Magic == RTTHREADINT_MAGIC);
    AssertReturnVoid(pRec->fSignaller);

    /*
     * Free all current owners.
     */
    rtLockValidatorSerializeDetectionEnter();
    while (ASMAtomicUoReadU32(&pRec->cEntries) > 0)
    {
        AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
        uint32_t                       iEntry     = 0;
        uint32_t                       cEntries   = pRec->cAllocated;
        PRTLOCKVALRECSHRDOWN volatile *papEntries = pRec->papOwners;
        while (iEntry < cEntries)
        {
            PRTLOCKVALRECSHRDOWN pEntry = (PRTLOCKVALRECSHRDOWN)ASMAtomicXchgPtr((void * volatile *)&papEntries[iEntry], NULL);
            if (pEntry)
            {
                ASMAtomicDecU32(&pRec->cEntries);
                rtLockValidatorSerializeDetectionLeave();

                rtLockValidatorRecSharedFreeOwner(pEntry);

                rtLockValidatorSerializeDetectionEnter();
                if (ASMAtomicUoReadU32(&pRec->cEntries) == 0)
                    break;
                cEntries   = pRec->cAllocated;
                papEntries = pRec->papOwners;
            }
            iEntry++;
        }
    }
    rtLockValidatorSerializeDetectionLeave();

    if (hThread != NIL_RTTHREAD)
    {
        /*
         * Allocate a new owner entry and insert it into the table.
         */
        PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
        if (   pEntry
            && !rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedResetOwner);


RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    if (hThread == NIL_RTTHREAD)
    {
        hThread = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThread != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);

    /*
     * Recursive?
     *
     * Note! This code can be optimized to try to avoid scanning the table on
     *       insert.  However, that's annoying work that makes the code big,
     *       so it can wait till later sometime.
     */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
    if (pEntry)
    {
        Assert(!pRec->fSignaller);
        pEntry->ShrdOwner.cRecursion++;
        rtLockValidatorStackPushRecursion(hThread, pEntry, pSrcPos);
        return;
    }

    /*
     * Allocate a new owner entry and insert it into the table.
     */
    pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
    if (pEntry)
    {
        if (rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
        {
            if (!pRec->fSignaller)
                rtLockValidatorStackPush(hThread, pEntry);
        }
        else
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedAddOwner);


RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    if (hThread == NIL_RTTHREAD)
    {
        hThread = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThread != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);

    /*
     * Find the entry and hope it's a recursive one.
     */
    uint32_t iEntry = UINT32_MAX; /* shuts up gcc */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, &iEntry);
    AssertReturnVoid(pEntry);
    AssertReturnVoid(pEntry->ShrdOwner.cRecursion > 0);

    uint32_t c = --pEntry->ShrdOwner.cRecursion;
    if (c == 0)
    {
        if (!pRec->fSignaller)
            rtLockValidatorStackPop(hThread, pEntry);
        rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
    }
    else
    {
        Assert(!pRec->fSignaller);
        rtLockValidatorStackPopRecursion(hThread, pEntry);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedRemoveOwner);
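

/**
 * Pairing sketch: RTLockValidatorRecSharedAddOwner on a successful shared
 * acquisition must be matched by RTLockValidatorRecSharedRemoveOwner (or
 * RTLockValidatorRecSharedCheckAndRelease, see below) on release.  Passing
 * NIL_RTTHREAD auto-adopts the calling thread.  Illustrative fragment using
 * the invented MYRWLOCK from the earlier examples:
 *
 * @code
 *  // read-acquire (uncontended path)
 *  RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, NIL_RTTHREAD, NULL);
 *
 *  // read-release, without ownership/order checking
 *  RTLockValidatorRecSharedRemoveOwner(&pThis->ValidatorRead, NIL_RTTHREAD);
 * @endcode
 */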


RTDECL(bool) RTLockValidatorRecSharedIsOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
{
    /* Validate and resolve input. */
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, false);
    if (!pRec->fEnabled)
        return false;
    if (hThread == NIL_RTTHREAD)
    {
        hThread = RTThreadSelfAutoAdopt();
        AssertReturn(hThread != NIL_RTTHREAD, false);
    }
    AssertReturn(hThread->u32Magic == RTTHREADINT_MAGIC, false);

    /* Do the job. */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
    return pEntry != NULL;
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedIsOwner);


RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
{
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRec->fEnabled)
        return VINF_SUCCESS;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    Assert(hThreadSelf == RTThreadSelf());
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Locate the entry for this thread in the table.
     */
    uint32_t iEntry = 0;
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
    if (RT_UNLIKELY(!pEntry))
    {
        rtLockValComplainFirst("Not owner (shared)!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NOT_OWNER;
    }

    /*
     * Check the release order.
     */
    if (   pRec->hClass != NIL_RTLOCKVALCLASS
        && pRec->hClass->fStrictReleaseOrder
        && pRec->hClass->cMsMinOrder != RT_INDEFINITE_WAIT)
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(hThreadSelf, pEntry);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Release the ownership or unwind a level of recursion.
     */
    Assert(pEntry->ShrdOwner.cRecursion > 0);
    uint32_t c = --pEntry->ShrdOwner.cRecursion;
    if (c == 0)
    {
        rtLockValidatorStackPop(hThreadSelf, pEntry);
        rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
    }
    else
        rtLockValidatorStackPopRecursion(hThreadSelf, pEntry);

    return VINF_SUCCESS;
}


RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
{
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRec->fEnabled)
        return VINF_SUCCESS;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    Assert(hThreadSelf == RTThreadSelf());
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Locate the entry for this thread in the table.
     */
    uint32_t iEntry = 0;
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
    if (RT_UNLIKELY(!pEntry))
    {
        rtLockValComplainFirst("Invalid signaller!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NOT_SIGNALLER;
    }
    return VINF_SUCCESS;
}
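

/**
 * Signaller sketch: for event-semaphore style objects the shared record is
 * created with fSignaller=true, RTLockValidatorRecSharedResetOwner designates
 * the thread expected to signal, and RTLockValidatorRecSharedCheckSignaller
 * is called from the signal path.  The pThis->Signallers member and
 * mySemEventSignal() context are invented for this example:
 *
 * @code
 *  // creator: only hThreadOwner may signal the event
 *  RTLockValidatorRecSharedInit(&pThis->Signallers, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE,
 *                               pThis, true, true, "myevt");    // fSignaller=true, fEnabled=true
 *  RTLockValidatorRecSharedResetOwner(&pThis->Signallers, hThreadOwner, NULL);
 *
 *  // in mySemEventSignal():
 *  int rc = RTLockValidatorRecSharedCheckSignaller(&pThis->Signallers, NIL_RTTHREAD);
 *  if (RT_FAILURE(rc))
 *      return rc;          // VERR_SEM_LV_NOT_SIGNALLER
 * @endcode
 */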


RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
{
    if (Thread == NIL_RTTHREAD)
        return 0;

    PRTTHREADINT pThread = rtThreadGet(Thread);
    if (!pThread)
        return VERR_INVALID_HANDLE;
    int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
    rtThreadRelease(pThread);
    return cWriteLocks;
}
RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);


RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
{
    PRTTHREADINT pThread = rtThreadGet(Thread);
    AssertReturnVoid(pThread);
    ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
    rtThreadRelease(pThread);
}
RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);


RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
{
    PRTTHREADINT pThread = rtThreadGet(Thread);
    AssertReturnVoid(pThread);
    ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
    rtThreadRelease(pThread);
}
RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);


RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
{
    if (Thread == NIL_RTTHREAD)
        return 0;

    PRTTHREADINT pThread = rtThreadGet(Thread);
    if (!pThread)
        return VERR_INVALID_HANDLE;
    int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
    rtThreadRelease(pThread);
    return cReadLocks;
}
RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);


RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
{
    PRTTHREADINT pThread = rtThreadGet(Thread);
    AssertReturnVoid(pThread);
    ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
    rtThreadRelease(pThread);
}
RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);


RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
{
    PRTTHREADINT pThread = rtThreadGet(Thread);
    AssertReturnVoid(pThread);
    ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
    rtThreadRelease(pThread);
}
RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
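

/**
 * A sketch of what these per-thread counters are good for: asserting that the
 * calling thread holds no recorded locks at a point where it must not, for
 * instance right before blocking indefinitely.  Purely illustrative:
 *
 * @code
 *  Assert(RTLockValidatorWriteLockGetCount(RTThreadSelf()) == 0);
 *  Assert(RTLockValidatorReadLockGetCount(RTThreadSelf()) == 0);
 * @endcode
 */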


RTDECL(void *) RTLockValidatorQueryBlocking(RTTHREAD hThread)
{
    void        *pvLock  = NULL;
    PRTTHREADINT pThread = rtThreadGet(hThread);
    if (pThread)
    {
        RTTHREADSTATE enmState = rtThreadGetState(pThread);
        if (RTTHREAD_IS_SLEEPING(enmState))
        {
            rtLockValidatorSerializeDetectionEnter();

            enmState = rtThreadGetState(pThread);
            if (RTTHREAD_IS_SLEEPING(enmState))
            {
                PRTLOCKVALRECUNION pRec = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec);
                if (pRec)
                {
                    switch (pRec->Core.u32Magic)
                    {
                        case RTLOCKVALRECEXCL_MAGIC:
                            pvLock = pRec->Excl.hLock;
                            break;

                        case RTLOCKVALRECSHRDOWN_MAGIC:
                            pRec = (PRTLOCKVALRECUNION)pRec->ShrdOwner.pSharedRec;
                            if (!pRec || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC)
                                break;
                            /* fall thru */
                        case RTLOCKVALRECSHRD_MAGIC:
                            pvLock = pRec->Shared.hLock;
                            break;
                    }
                    if (RTThreadGetState(pThread) != enmState)
                        pvLock = NULL;
                }
            }

            rtLockValidatorSerializeDetectionLeave();
        }
        rtThreadRelease(pThread);
    }
    return pvLock;
}
RT_EXPORT_SYMBOL(RTLockValidatorQueryBlocking);
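

/**
 * Diagnostic sketch: dumping what a stuck thread is blocked on, e.g. from a
 * watchdog thread.  Illustrative only; hStuckThread is assumed to be a valid
 * thread handle obtained elsewhere.
 *
 * @code
 *  void *pvLock = RTLockValidatorQueryBlocking(hStuckThread);
 *  if (pvLock)
 *      RTAssertMsg2("thread %s is blocked on lock %p (in validator: %RTbool)\n",
 *                   RTThreadGetName(hStuckThread), pvLock,
 *                   RTLockValidatorIsBlockedThreadInValidator(hStuckThread));
 * @endcode
 */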


RTDECL(bool) RTLockValidatorIsBlockedThreadInValidator(RTTHREAD hThread)
{
    bool fRet = false;
    PRTTHREADINT pThread = rtThreadGet(hThread);
    if (pThread)
    {
        fRet = ASMAtomicReadBool(&pThread->LockValidator.fInValidator);
        rtThreadRelease(pThread);
    }
    return fRet;
}
RT_EXPORT_SYMBOL(RTLockValidatorIsBlockedThreadInValidator);


RTDECL(bool) RTLockValidatorHoldsLocksInClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass)
{
    bool fRet = false;
    if (hCurrentThread == NIL_RTTHREAD)
        hCurrentThread = RTThreadSelf();
    else
        Assert(hCurrentThread == RTThreadSelf());
    PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
    if (pThread)
    {
        if (hClass != NIL_RTLOCKVALCLASS)
        {
            PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
            while (VALID_PTR(pCur) && !fRet)
            {
                switch (pCur->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        fRet = pCur->Excl.hClass == hClass;
                        pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
                        break;
                    case RTLOCKVALRECSHRDOWN_MAGIC:
                        fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
                            && pCur->ShrdOwner.pSharedRec->hClass == hClass;
                        pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
                        break;
                    case RTLOCKVALRECNEST_MAGIC:
                        switch (pCur->Nest.pRec->Core.u32Magic)
                        {
                            case RTLOCKVALRECEXCL_MAGIC:
                                fRet = pCur->Nest.pRec->Excl.hClass == hClass;
                                break;
                            case RTLOCKVALRECSHRDOWN_MAGIC:
                                fRet = VALID_PTR(pCur->Nest.pRec->ShrdOwner.pSharedRec)
                                    && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass;
                                break;
                        }
                        pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
                        break;
                    default:
                        pCur = NULL;
                        break;
                }
            }
        }

        rtThreadRelease(pThread);
    }
    return fRet;
}
RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);


RTDECL(bool) RTLockValidatorHoldsLocksInSubClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass, uint32_t uSubClass)
{
    bool fRet = false;
    if (hCurrentThread == NIL_RTTHREAD)
        hCurrentThread = RTThreadSelf();
    else
        Assert(hCurrentThread == RTThreadSelf());
    PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
    if (pThread)
    {
        if (hClass != NIL_RTLOCKVALCLASS)
        {
            PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
            while (VALID_PTR(pCur) && !fRet)
            {
                switch (pCur->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        fRet = pCur->Excl.hClass == hClass
                            && pCur->Excl.uSubClass == uSubClass;
                        pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
                        break;
                    case RTLOCKVALRECSHRDOWN_MAGIC:
                        fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
                            && pCur->ShrdOwner.pSharedRec->hClass == hClass
                            && pCur->ShrdOwner.pSharedRec->uSubClass == uSubClass;
                        pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
                        break;
                    case RTLOCKVALRECNEST_MAGIC:
                        switch (pCur->Nest.pRec->Core.u32Magic)
                        {
                            case RTLOCKVALRECEXCL_MAGIC:
                                fRet = pCur->Nest.pRec->Excl.hClass == hClass
                                    && pCur->Nest.pRec->Excl.uSubClass == uSubClass;
                                break;
                            case RTLOCKVALRECSHRDOWN_MAGIC:
                                fRet = VALID_PTR(pCur->Nest.pRec->ShrdOwner.pSharedRec)
                                    && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass
                                    && pCur->Nest.pRec->ShrdOwner.pSharedRec->uSubClass == uSubClass;
                                break;
                        }
                        pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
                        break;
                    default:
                        pCur = NULL;
                        break;
                }
            }
        }

        rtThreadRelease(pThread);
    }
    return fRet;
}
RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInSubClass);


RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
{
    return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
}
RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);


RTDECL(bool) RTLockValidatorIsEnabled(void)
{
    return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
}
RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);


RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
{
    return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
}
RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);


RTDECL(bool) RTLockValidatorIsQuiet(void)
{
    return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
}
RT_EXPORT_SYMBOL(RTLockValidatorIsQuiet);


RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
{
    return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
}
RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);


RTDECL(bool) RTLockValidatorMayPanic(void)
{
    return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
}
RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
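

/**
 * Configuration sketch: a process might tune the validator at startup, for
 * example gated on an application-specific environment variable.  The
 * MY_APP_STRICT_LOCKING name is invented for this example; the validator also
 * has environment-based overrides of its own (hence the iprt/env.h include at
 * the top of this file).
 *
 * @code
 *  if (RTEnvExist("MY_APP_STRICT_LOCKING"))    // hypothetical variable
 *  {
 *      RTLockValidatorSetEnabled(true);
 *      RTLockValidatorSetQuiet(false);     // complain loudly
 *      RTLockValidatorSetMayPanic(true);   // turn violations into assertions
 *  }
 * @endcode
 */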