VirtualBox

source: vbox/trunk/src/VBox/Runtime/generic/semrw-lockless-generic.cpp @ 25707

Last change on this file since 25707 was 25707, checked in by vboxsync, 15 years ago

iprt: Added RTSemRWCreateEx and RTSemRWSetSubClass. Updated tstRTLockValidator with a test of the SemRW lock order validation.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 31.9 KB
/* $Id: semrw-lockless-generic.cpp 25707 2010-01-11 10:02:03Z vboxsync $ */
/** @file
 * IPRT - Read/Write Semaphore, Generic lockless implementation.
 */

/*
 * Copyright (C) 2009 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define RTASSERT_QUIET
#include <iprt/semaphore.h>
#include "internal/iprt.h"

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/lockvalidator.h>
#include <iprt/mem.h>
#include <iprt/thread.h>

#include "internal/magics.h"
#include "internal/strict.h"


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
typedef struct RTSEMRWINTERNAL
{
    /** Magic value (RTSEMRW_MAGIC). */
    uint32_t volatile       u32Magic;
    uint32_t                u32Padding; /**< alignment padding. */
    /** The state variable.
     * All accesses are atomic and its bits are defined like this:
     *      Bits 0..14  - cReads.
     *      Bit 15      - Unused.
     *      Bits 16..30 - cWrites.
     *      Bit 31      - fDirection; 0=Read, 1=Write.
     *      Bits 32..46 - cWaitingReads
     *      Bit 47      - Unused.
     *      Bits 48..62 - cWaitingWrites (not used, see the commented out RTSEMRW_WAIT_CNT_WR_* defines)
     *      Bit 63      - Unused.
     */
    uint64_t volatile       u64State;
    /** The write owner. */
    RTNATIVETHREAD volatile hNativeWriter;
    /** The number of reads made by the current writer. */
    uint32_t volatile       cWriterReads;
    /** The number of write recursions made by the current writer. */
    uint32_t volatile       cWriteRecursions;

    /** What the writer threads are blocking on. */
    RTSEMEVENT              hEvtWrite;
    /** What the read threads are blocking on when waiting for the writer to
     * finish. */
    RTSEMEVENTMULTI         hEvtRead;
    /** Indicates whether hEvtRead needs resetting. */
    bool volatile           fNeedReset;

#ifdef RTSEMRW_STRICT
    /** The validator record for the writer. */
    RTLOCKVALRECEXCL        ValidatorWrite;
    /** The validator record for the readers. */
    RTLOCKVALRECSHRD        ValidatorRead;
#endif
} RTSEMRWINTERNAL;



/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#define RTSEMRW_CNT_BITS            15
#define RTSEMRW_CNT_MASK            UINT64_C(0x00007fff)

#define RTSEMRW_CNT_RD_SHIFT        0
#define RTSEMRW_CNT_RD_MASK         (RTSEMRW_CNT_MASK << RTSEMRW_CNT_RD_SHIFT)
#define RTSEMRW_CNT_WR_SHIFT        16
#define RTSEMRW_CNT_WR_MASK         (RTSEMRW_CNT_MASK << RTSEMRW_CNT_WR_SHIFT)
#define RTSEMRW_DIR_SHIFT           31
#define RTSEMRW_DIR_MASK            RT_BIT_64(RTSEMRW_DIR_SHIFT)
#define RTSEMRW_DIR_READ            UINT64_C(0)
#define RTSEMRW_DIR_WRITE           UINT64_C(1)

#define RTSEMRW_WAIT_CNT_RD_SHIFT   32
#define RTSEMRW_WAIT_CNT_RD_MASK    (RTSEMRW_CNT_MASK << RTSEMRW_WAIT_CNT_RD_SHIFT)
//#define RTSEMRW_WAIT_CNT_WR_SHIFT   48
//#define RTSEMRW_WAIT_CNT_WR_MASK    (RTSEMRW_CNT_MASK << RTSEMRW_WAIT_CNT_WR_SHIFT)


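/*
 * Editor's note: the following block is an illustrative sketch, not part of
 * the original file.  It shows how the packed u64State fields defined above
 * are typically decoded; the rtSemRWExample* helper names are hypothetical.
 */
#if 0
/** Extracts the active reader count from a raw state word. */
DECLINLINE(uint64_t) rtSemRWExampleGetReadCount(uint64_t u64State)
{
    return (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
}

/** Checks whether the state word currently indicates write direction. */
DECLINLINE(bool) rtSemRWExampleIsWriteDirection(uint64_t u64State)
{
    return (u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT);
}
#endif

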
#undef RTSemRWCreate
RTDECL(int) RTSemRWCreate(PRTSEMRW phRWSem)
{
    return RTSemRWCreateEx(phRWSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "RTSemRW");
}
RT_EXPORT_SYMBOL(RTSemRWCreate);


RTDECL(int) RTSemRWCreateEx(PRTSEMRW phRWSem, uint32_t fFlags,
                            RTLOCKVALCLASS hClass, uint32_t uSubClass, const char *pszNameFmt, ...)
{
    AssertReturn(!(fFlags & ~RTSEMRW_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);

    RTSEMRWINTERNAL *pThis = (RTSEMRWINTERNAL *)RTMemAlloc(sizeof(*pThis));
    if (!pThis)
        return VERR_NO_MEMORY;

    int rc = RTSemEventMultiCreate(&pThis->hEvtRead);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventCreate(&pThis->hEvtWrite);
        if (RT_SUCCESS(rc))
        {
            pThis->u32Magic         = RTSEMRW_MAGIC;
            pThis->u32Padding       = 0;
            pThis->u64State         = 0;
            pThis->hNativeWriter    = NIL_RTNATIVETHREAD;
            pThis->cWriterReads     = 0;
            pThis->cWriteRecursions = 0;
            pThis->fNeedReset       = false;
#ifdef RTSEMRW_STRICT
            bool const fLVEnabled = !(fFlags & RTSEMRW_FLAGS_NO_LOCK_VAL);
            va_list va;
            va_start(va, pszNameFmt);
            RTLockValidatorRecExclInit(&pThis->ValidatorWrite, hClass, uSubClass, pThis, fLVEnabled, pszNameFmt);
            va_end(va);
            va_start(va, pszNameFmt);
            RTLockValidatorRecSharedInit(&pThis->ValidatorRead, hClass, uSubClass, pThis, false /*fSignaller*/,
                                         fLVEnabled, pszNameFmt);
            va_end(va);
            RTLockValidatorRecMakeSiblings(&pThis->ValidatorWrite.Core, &pThis->ValidatorRead.Core);
#endif

            *phRWSem = pThis;
            return VINF_SUCCESS;
        }
        RTSemEventMultiDestroy(pThis->hEvtRead);
    }
    RTMemFree(pThis); /* don't leak the instance data on failure */
    return rc;
}
RT_EXPORT_SYMBOL(RTSemRWCreateEx);


RTDECL(int) RTSemRWDestroy(RTSEMRW hRWSem)
{
    /*
     * Validate input.
     */
    RTSEMRWINTERNAL *pThis = hRWSem;
    if (pThis == NIL_RTSEMRW)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
    Assert(!(ASMAtomicReadU64(&pThis->u64State) & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)));

    /*
     * Invalidate the object and free up the resources.
     */
    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, ~RTSEMRW_MAGIC, RTSEMRW_MAGIC), VERR_INVALID_HANDLE);

    RTSEMEVENTMULTI hEvtRead;
    ASMAtomicXchgHandle(&pThis->hEvtRead, NIL_RTSEMEVENTMULTI, &hEvtRead);
    int rc = RTSemEventMultiDestroy(hEvtRead);
    AssertRC(rc);

    RTSEMEVENT hEvtWrite;
    ASMAtomicXchgHandle(&pThis->hEvtWrite, NIL_RTSEMEVENT, &hEvtWrite);
    rc = RTSemEventDestroy(hEvtWrite);
    AssertRC(rc);

#ifdef RTSEMRW_STRICT
    RTLockValidatorRecSharedDelete(&pThis->ValidatorRead);
    RTLockValidatorRecExclDelete(&pThis->ValidatorWrite);
#endif
    RTMemFree(pThis);
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTSemRWDestroy);

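
/*
 * Editor's note: illustrative usage sketch, not part of the original file.
 * It shows the basic create/destroy life cycle of the semaphore implemented
 * above; rtSemRWExampleLifeCycle is a hypothetical name.
 */
#if 0
static int rtSemRWExampleLifeCycle(void)
{
    RTSEMRW hRWSem;
    int rc = RTSemRWCreate(&hRWSem);    /* equivalent to RTSemRWCreateEx with default flags/class */
    if (RT_SUCCESS(rc))
    {
        /* ... request/release read and write locks here ... */
        rc = RTSemRWDestroy(hRWSem);    /* must not be called while anyone holds the lock */
        hRWSem = NIL_RTSEMRW;
    }
    return rc;
}
#endif
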

RTDECL(uint32_t) RTSemRWSetSubClass(RTSEMRW hRWSem, uint32_t uSubClass)
{
#ifdef RTSEMRW_STRICT
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = hRWSem;
    AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);

    RTLockValidatorRecSharedSetSubClass(&pThis->ValidatorRead, uSubClass);
    return RTLockValidatorRecExclSetSubClass(&pThis->ValidatorWrite, uSubClass);
#else
    return RTLOCKVAL_SUB_CLASS_INVALID;
#endif
}
RT_EXPORT_SYMBOL(RTSemRWSetSubClass);


static int rtSemRWRequestRead(RTSEMRW hRWSem, unsigned cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    RTSEMRWINTERNAL *pThis = hRWSem;
    if (pThis == NIL_RTSEMRW)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies > 0)
    {
        int rc9 = RTLockValidatorRecSharedCheckOrder(&pThis->ValidatorRead, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTSEMRW_CNT_MASK / 2);
            u64State &= ~RTSEMRW_CNT_RD_MASK;
            u64State |= c << RTSEMRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
#ifdef RTSEMRW_STRICT
                RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTSEMRW_CNT_RD_SHIFT) | (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
                Assert(!pThis->fNeedReset);
#ifdef RTSEMRW_STRICT
                RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else
        {
            /* Is the writer perhaps doing a read recursion? */
            RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
            RTNATIVETHREAD hNativeWriter;
            ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
            if (hNativeSelf == hNativeWriter)
            {
#ifdef RTSEMRW_STRICT
                int rc9 = RTLockValidatorRecExclRecursionMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core, pSrcPos);
                if (RT_FAILURE(rc9))
                    return rc9;
#endif
                Assert(pThis->cWriterReads < UINT32_MAX / 2);
                ASMAtomicIncU32(&pThis->cWriterReads);
                return VINF_SUCCESS; /* don't break! */
            }

            /* If the timeout is 0, return already. */
            if (!cMillies)
                return VERR_TIMEOUT;

            /* Add ourselves to the queue and wait for the direction to change. */
            uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTSEMRW_CNT_MASK / 2);

            uint64_t cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT;
            cWait++;
            Assert(cWait <= c);
            Assert(cWait < RTSEMRW_CNT_MASK / 2);

            u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_WAIT_CNT_RD_MASK);
            u64State |= (c << RTSEMRW_CNT_RD_SHIFT) | (cWait << RTSEMRW_WAIT_CNT_RD_SHIFT);

            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
                for (uint32_t iLoop = 0; ; iLoop++)
                {
                    int rc;
#ifdef RTSEMRW_STRICT
                    rc = RTLockValidatorRecSharedCheckBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos, true,
                                                               cMillies, RTTHREADSTATE_RW_READ, false);
                    if (RT_SUCCESS(rc))
#else
                    RTTHREAD hThreadSelf = RTThreadSelf();
                    RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
#endif
                    {
                        if (fInterruptible)
                            rc = RTSemEventMultiWaitNoResume(pThis->hEvtRead, cMillies);
                        else
                            rc = RTSemEventMultiWait(pThis->hEvtRead, cMillies);
                        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
                        if (pThis->u32Magic != RTSEMRW_MAGIC)
                            return VERR_SEM_DESTROYED;
                    }
                    if (RT_FAILURE(rc))
                    {
                        /* Decrement the counts and return the error. */
                        for (;;)
                        {
                            u64OldState = u64State = ASMAtomicReadU64(&pThis->u64State);
                            c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT; Assert(c > 0);
                            c--;
                            cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
                            cWait--;
                            u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_WAIT_CNT_RD_MASK);
                            u64State |= (c << RTSEMRW_CNT_RD_SHIFT) | (cWait << RTSEMRW_WAIT_CNT_RD_SHIFT);
                            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                                break;
                        }
                        return rc;
                    }

                    Assert(pThis->fNeedReset);
                    u64State = ASMAtomicReadU64(&pThis->u64State);
                    if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
                        break;
                    AssertMsg(iLoop < 1, ("%u\n", iLoop));
                }

                /* Decrement the wait count and maybe reset the semaphore (if we're last). */
                for (;;)
                {
                    u64OldState = u64State;

                    cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT;
                    Assert(cWait > 0);
                    cWait--;
                    u64State &= ~RTSEMRW_WAIT_CNT_RD_MASK;
                    u64State |= cWait << RTSEMRW_WAIT_CNT_RD_SHIFT;

                    if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                    {
                        if (cWait == 0)
                        {
                            if (ASMAtomicXchgBool(&pThis->fNeedReset, false))
                            {
                                int rc = RTSemEventMultiReset(pThis->hEvtRead);
                                AssertRCReturn(rc, rc);
                            }
                        }
                        break;
                    }
                    u64State = ASMAtomicReadU64(&pThis->u64State);
                }

#ifdef RTSEMRW_STRICT
                RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }

        if (pThis->u32Magic != RTSEMRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State = ASMAtomicReadU64(&pThis->u64State);
        u64OldState = u64State;
    }

    /* got it! */
    Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT));
    return VINF_SUCCESS;

}


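/*
 * Editor's note: illustrative sketch, not part of the original file.  The
 * request/release paths above all follow the same optimistic update pattern:
 * read u64State, compute the new value, and retry on ASMAtomicCmpXchgU64
 * failure.  The hypothetical helper below only demonstrates that pattern.
 */
#if 0
static void rtSemRWExampleStateUpdate(RTSEMRWINTERNAL *pThis)
{
    uint64_t u64State    = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;
    for (;;)
    {
        /* ... modify the local copy of u64State here ... */
        if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            break;                      /* nobody raced us, the update is in */
        ASMNopPause();                  /* lost the race, re-read and retry  */
        u64OldState = u64State = ASMAtomicReadU64(&pThis->u64State);
    }
}
#endif

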
#undef RTSemRWRequestRead
RTDECL(int) RTSemRWRequestRead(RTSEMRW RWSem, unsigned cMillies)
{
#ifndef RTSEMRW_STRICT
    return rtSemRWRequestRead(RWSem, cMillies, false, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtSemRWRequestRead(RWSem, cMillies, false, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTSemRWRequestRead);


RTDECL(int) RTSemRWRequestReadDebug(RTSEMRW RWSem, unsigned cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtSemRWRequestRead(RWSem, cMillies, false, &SrcPos);
}
RT_EXPORT_SYMBOL(RTSemRWRequestReadDebug);


#undef RTSemRWRequestReadNoResume
RTDECL(int) RTSemRWRequestReadNoResume(RTSEMRW RWSem, unsigned cMillies)
{
#ifndef RTSEMRW_STRICT
    return rtSemRWRequestRead(RWSem, cMillies, true, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtSemRWRequestRead(RWSem, cMillies, true, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTSemRWRequestReadNoResume);


RTDECL(int) RTSemRWRequestReadNoResumeDebug(RTSEMRW RWSem, unsigned cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtSemRWRequestRead(RWSem, cMillies, true, &SrcPos);
}
RT_EXPORT_SYMBOL(RTSemRWRequestReadNoResumeDebug);



RTDECL(int) RTSemRWReleaseRead(RTSEMRW RWSem)
{
    /*
     * Validate handle.
     */
    RTSEMRWINTERNAL *pThis = RWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check the direction and take action accordingly.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;
    if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
    {
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecSharedCheckAndRelease(&pThis->ValidatorRead, NIL_RTTHREAD);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        for (;;)
        {
            uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
            AssertReturn(c > 0, VERR_NOT_OWNER);
            c--;

            if (   c > 0
                || (u64State & RTSEMRW_CNT_WR_MASK) == 0)
            {
                /* Don't change the direction. */
                u64State &= ~RTSEMRW_CNT_RD_MASK;
                u64State |= c << RTSEMRW_CNT_RD_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                    break;
            }
            else
            {
                /* Reverse the direction and signal the waiting writers. */
                u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_DIR_MASK);
                u64State |= RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                {
                    int rc = RTSemEventSignal(pThis->hEvtWrite);
                    AssertRC(rc);
                    break;
                }
            }

            ASMNopPause();
            u64State = ASMAtomicReadU64(&pThis->u64State);
            u64OldState = u64State;
        }
    }
    else
    {
        RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
        AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
        AssertReturn(pThis->cWriterReads > 0, VERR_NOT_OWNER);
#ifdef RTSEMRW_STRICT
        int rc = RTLockValidatorRecExclUnwindMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core);
        if (RT_FAILURE(rc))
            return rc;
#endif
        ASMAtomicDecU32(&pThis->cWriterReads);
    }

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTSemRWReleaseRead);

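
/*
 * Editor's note: illustrative read-side usage sketch, not part of the
 * original file; rtSemRWExampleReader is a hypothetical name.
 */
#if 0
static int rtSemRWExampleReader(RTSEMRW hRWSem)
{
    int rc = RTSemRWRequestRead(hRWSem, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        /* ... read the data protected by the semaphore ... */
        rc = RTSemRWReleaseRead(hRWSem);
    }
    return rc;
}
#endif
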

DECL_FORCE_INLINE(int) rtSemRWRequestWrite(RTSEMRW hRWSem, unsigned cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    RTSEMRWINTERNAL *pThis = hRWSem;
    if (pThis == NIL_RTSEMRW)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (cMillies)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Check if we're already the owner and just recursing.
     */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
    if (hNativeSelf == hNativeWriter)
    {
        Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT));
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorWrite, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        Assert(pThis->cWriteRecursions < UINT32_MAX / 2);
        ASMAtomicIncU32(&pThis->cWriteRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Get cracking.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if (   (u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT)
            || (u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) != 0)
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTSEMRW_CNT_MASK / 2);
            u64State &= ~RTSEMRW_CNT_WR_MASK;
            u64State |= c << RTSEMRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                break;
        }
        else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTSEMRW_CNT_WR_SHIFT) | (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                break;
        }
        else if (!cMillies)
            /* Wrong direction and we're not supposed to wait, just return. */
            return VERR_TIMEOUT;
        else
        {
            /* Add ourselves to the write count and break out to do the wait. */
            uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTSEMRW_CNT_MASK / 2);
            u64State &= ~RTSEMRW_CNT_WR_MASK;
            u64State |= c << RTSEMRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                break;
        }

        if (pThis->u32Magic != RTSEMRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State = ASMAtomicReadU64(&pThis->u64State);
        u64OldState = u64State;
    }

    /*
     * If we're in write mode now try grab the ownership. Play fair if there
     * are threads already waiting.
     */
    bool fDone = (u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT)
              && (   ((u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT) == 1
                  || cMillies == 0);
    if (fDone)
        ASMAtomicCmpXchgHandle(&pThis->hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
    if (!fDone)
    {
        /*
         * Wait for our turn.
         */
        for (uint32_t iLoop = 0; ; iLoop++)
        {
            int rc;
#ifdef RTSEMRW_STRICT
            if (cMillies)
            {
                if (hThreadSelf == NIL_RTTHREAD)
                    hThreadSelf = RTThreadSelfAutoAdopt();
                rc = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true,
                                                         cMillies, RTTHREADSTATE_RW_WRITE, false);
            }
            else
                rc = VINF_SUCCESS;
            if (RT_SUCCESS(rc))
#else
            RTTHREAD hThreadSelf = RTThreadSelf();
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
#endif
            {
                if (fInterruptible)
                    rc = RTSemEventWaitNoResume(pThis->hEvtWrite, cMillies);
                else
                    rc = RTSemEventWait(pThis->hEvtWrite, cMillies);
                RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
                if (pThis->u32Magic != RTSEMRW_MAGIC)
                    return VERR_SEM_DESTROYED;
            }
            if (RT_FAILURE(rc))
            {
                /* Decrement the counts and return the error. */
                for (;;)
                {
                    u64OldState = u64State = ASMAtomicReadU64(&pThis->u64State);
                    uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT; Assert(c > 0);
                    c--;
                    u64State &= ~RTSEMRW_CNT_WR_MASK;
                    u64State |= c << RTSEMRW_CNT_WR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                        break;
                }
                return rc;
            }

            u64State = ASMAtomicReadU64(&pThis->u64State);
            if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT))
            {
                ASMAtomicCmpXchgHandle(&pThis->hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
                if (fDone)
                    break;
            }
            AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
        }
    }

    /*
     * Got it!
     */
    Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT));
    ASMAtomicWriteU32(&pThis->cWriteRecursions, 1);
    Assert(pThis->cWriterReads == 0);
#ifdef RTSEMRW_STRICT
    RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}


#undef RTSemRWRequestWrite
RTDECL(int) RTSemRWRequestWrite(RTSEMRW RWSem, unsigned cMillies)
{
#ifndef RTSEMRW_STRICT
    return rtSemRWRequestWrite(RWSem, cMillies, false, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtSemRWRequestWrite(RWSem, cMillies, false, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTSemRWRequestWrite);


RTDECL(int) RTSemRWRequestWriteDebug(RTSEMRW RWSem, unsigned cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtSemRWRequestWrite(RWSem, cMillies, false, &SrcPos);
}
RT_EXPORT_SYMBOL(RTSemRWRequestWriteDebug);


#undef RTSemRWRequestWriteNoResume
RTDECL(int) RTSemRWRequestWriteNoResume(RTSEMRW RWSem, unsigned cMillies)
{
#ifndef RTSEMRW_STRICT
    return rtSemRWRequestWrite(RWSem, cMillies, true, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtSemRWRequestWrite(RWSem, cMillies, true, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTSemRWRequestWriteNoResume);


RTDECL(int) RTSemRWRequestWriteNoResumeDebug(RTSEMRW RWSem, unsigned cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtSemRWRequestWrite(RWSem, cMillies, true, &SrcPos);
}
RT_EXPORT_SYMBOL(RTSemRWRequestWriteNoResumeDebug);


RTDECL(int) RTSemRWReleaseWrite(RTSEMRW RWSem)
{

    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = RWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
    AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);

    /*
     * Unwind a recursion.
     */
    if (pThis->cWriteRecursions == 1)
    {
        AssertReturn(pThis->cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorWrite, true);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        /*
         * Update the state.
         */
        ASMAtomicWriteU32(&pThis->cWriteRecursions, 0);
        /** @todo validate order. */
        ASMAtomicWriteHandle(&pThis->hNativeWriter, NIL_RTNATIVETHREAD);

        for (;;)
        {
            uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
            uint64_t u64OldState = u64State;

            uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
            Assert(c > 0);
            c--;

            if (   c > 0
                || (u64State & RTSEMRW_CNT_RD_MASK) == 0)
            {
                /* Don't change the direction, wake up the next writer if any. */
                u64State &= ~RTSEMRW_CNT_WR_MASK;
                u64State |= c << RTSEMRW_CNT_WR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                {
                    if (c > 0)
                    {
                        int rc = RTSemEventSignal(pThis->hEvtWrite);
                        AssertRC(rc);
                    }
                    break;
                }
            }
            else
            {
                /* Reverse the direction and signal the reader threads. */
                u64State &= ~(RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
                u64State |= RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                {
                    Assert(!pThis->fNeedReset);
                    ASMAtomicWriteBool(&pThis->fNeedReset, true);
                    int rc = RTSemEventMultiSignal(pThis->hEvtRead);
                    AssertRC(rc);
                    break;
                }
            }

            ASMNopPause();
            if (pThis->u32Magic != RTSEMRW_MAGIC)
                return VERR_SEM_DESTROYED;
        }
    }
    else
    {
        Assert(pThis->cWriteRecursions != 0);
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclUnwind(&pThis->ValidatorWrite);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicDecU32(&pThis->cWriteRecursions);
    }

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTSemRWReleaseWrite);

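
/*
 * Editor's note: illustrative write-side usage sketch, not part of the
 * original file; rtSemRWExampleWriter is a hypothetical name.  It shows
 * write recursion and the writer taking a nested read (cWriterReads).
 */
#if 0
static int rtSemRWExampleWriter(RTSEMRW hRWSem)
{
    int rc = RTSemRWRequestWrite(hRWSem, 30000 /* ms */);
    if (RT_SUCCESS(rc))
    {
        /* A writer may recurse on the write lock... */
        rc = RTSemRWRequestWrite(hRWSem, 0);
        if (RT_SUCCESS(rc))
            RTSemRWReleaseWrite(hRWSem);

        /* ...and may also take the lock for reading while owning it. */
        rc = RTSemRWRequestRead(hRWSem, 0);
        if (RT_SUCCESS(rc))
            RTSemRWReleaseRead(hRWSem);

        rc = RTSemRWReleaseWrite(hRWSem);
    }
    return rc;
}
#endif
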

RTDECL(bool) RTSemRWIsWriteOwner(RTSEMRW RWSem)
{
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = RWSem;
    AssertPtrReturn(pThis, false);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, false);

    /*
     * Check ownership.
     */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
    return hNativeWriter == hNativeSelf;
}
RT_EXPORT_SYMBOL(RTSemRWIsWriteOwner);


RTDECL(uint32_t) RTSemRWGetWriteRecursion(RTSEMRW RWSem)
{
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = RWSem;
    AssertPtrReturn(pThis, 0);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    return pThis->cWriteRecursions;
}
RT_EXPORT_SYMBOL(RTSemRWGetWriteRecursion);


RTDECL(uint32_t) RTSemRWGetWriterReadRecursion(RTSEMRW RWSem)
{
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = RWSem;
    AssertPtrReturn(pThis, 0);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    return pThis->cWriterReads;
}
RT_EXPORT_SYMBOL(RTSemRWGetWriterReadRecursion);


RTDECL(uint32_t) RTSemRWGetReadCount(RTSEMRW RWSem)
{
    /*
     * Validate input.
     */
    struct RTSEMRWINTERNAL *pThis = RWSem;
    AssertPtrReturn(pThis, 0);
    AssertMsgReturn(pThis->u32Magic == RTSEMRW_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    0);

    /*
     * Return the requested data.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
    if ((u64State & RTSEMRW_DIR_MASK) != (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
        return 0;
    return (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
}
RT_EXPORT_SYMBOL(RTSemRWGetReadCount);

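
/*
 * Editor's note: illustrative sketch of the query APIs above, not part of
 * the original file.  Typical use is in assertions; the hypothetical helper
 * below only demonstrates the calls.
 */
#if 0
static void rtSemRWExampleAssertOwnership(RTSEMRW hRWSem)
{
    Assert(RTSemRWIsWriteOwner(hRWSem));            /* current thread holds the write lock */
    Assert(RTSemRWGetWriteRecursion(hRWSem) >= 1);  /* at least the outermost write entry  */
    Assert(RTSemRWGetWriterReadRecursion(hRWSem) == 0);
    NOREF(hRWSem);
}
#endif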