VirtualBox

source: vbox/trunk/src/VBox/Runtime/generic/semrw-lockless-generic.cpp@ 25704

Last change on this file since 25704 was 25704, checked in by vboxsync, 15 years ago

iprt,pdmcritsect: More flexible lock naming, added RTCritSectSetSubClass and made some RTCritSectInitEx.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 30.7 KB
Line 
1/* $Id: semrw-lockless-generic.cpp 25704 2010-01-10 20:12:30Z vboxsync $ */
2/** @file
3 * IPRT Testcase - RTSemXRoads, generic implementation.
4 */
5
6/*
7 * Copyright (C) 2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#define RTASSERT_QUIET
36#include <iprt/semaphore.h>
37#include "internal/iprt.h"
38
39#include <iprt/asm.h>
40#include <iprt/assert.h>
41#include <iprt/err.h>
42#include <iprt/lockvalidator.h>
43#include <iprt/mem.h>
44#include <iprt/thread.h>
45
46#include "internal/magics.h"
47#include "internal/strict.h"
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
/** Internal, lockless read/write semaphore instance data. */
typedef struct RTSEMRWINTERNAL
{
    /** Magic value (RTSEMRW_MAGIC). */
    uint32_t volatile       u32Magic;
    uint32_t                u32Padding; /**< alignment padding.*/
    /** The state variable.
     * All accesses are atomic and its bits are defined like this:
     *      Bits 0..14  - cReads.
     *      Bit 15      - Unused.
     *      Bits 16..30 - cWrites.
     *      Bit 31      - fDirection; 0=Read, 1=Write.
     *      Bits 32..46 - cWaitingReads
     *      Bit 47      - Unused.
     *      Bits 48..62 - cWaitingWrites
     *      Bit 63      - Unused.
     */
    uint64_t volatile       u64State;
    /** The write owner. */
    RTNATIVETHREAD volatile hNativeWriter;
    /** The number of reads made by the current writer. */
    uint32_t volatile       cWriterReads;
    /** The number of recursive write entries made by the current writer. */
    uint32_t volatile       cWriteRecursions;

    /** What the writer threads are blocking on. */
    RTSEMEVENT              hEvtWrite;
    /** What the read threads are blocking on when waiting for the writer to
     * finish. */
    RTSEMEVENTMULTI         hEvtRead;
    /** Indicates whether hEvtRead needs resetting. */
    bool volatile           fNeedReset;

#ifdef RTSEMRW_STRICT
    /** The validator record for the writer. */
    RTLOCKVALRECEXCL        ValidatorWrite;
    /** The validator record for the readers. */
    RTLOCKVALRECSHRD        ValidatorRead;
#endif
} RTSEMRWINTERNAL;
92
93
94/*******************************************************************************
95* Defined Constants And Macros *
96*******************************************************************************/
/** Number of bits used by each count field in u64State. */
#define RTSEMRW_CNT_BITS            15
/** Mask for one (unshifted) count field in u64State. */
#define RTSEMRW_CNT_MASK            UINT64_C(0x00007fff)

/** Shift of the active reader count. */
#define RTSEMRW_CNT_RD_SHIFT        0
/** Mask of the active reader count. */
#define RTSEMRW_CNT_RD_MASK         (RTSEMRW_CNT_MASK << RTSEMRW_CNT_RD_SHIFT)
/** Shift of the writer count. */
#define RTSEMRW_CNT_WR_SHIFT        16
/** Mask of the writer count. */
#define RTSEMRW_CNT_WR_MASK         (RTSEMRW_CNT_MASK << RTSEMRW_CNT_WR_SHIFT)
/** Shift of the direction bit. */
#define RTSEMRW_DIR_SHIFT           31
/** Mask of the direction bit. */
#define RTSEMRW_DIR_MASK            RT_BIT_64(RTSEMRW_DIR_SHIFT)
/** Direction bit value: readers hold the semaphore. */
#define RTSEMRW_DIR_READ            UINT64_C(0)
/** Direction bit value: a writer holds the semaphore. */
#define RTSEMRW_DIR_WRITE           UINT64_C(1)

/** Shift of the waiting-reader count. */
#define RTSEMRW_WAIT_CNT_RD_SHIFT   32
/** Mask of the waiting-reader count. */
#define RTSEMRW_WAIT_CNT_RD_MASK    (RTSEMRW_CNT_MASK << RTSEMRW_WAIT_CNT_RD_SHIFT)
//#define RTSEMRW_WAIT_CNT_WR_SHIFT   48
//#define RTSEMRW_WAIT_CNT_WR_MASK    (RTSEMRW_CNT_MASK << RTSEMRW_WAIT_CNT_WR_SHIFT)
113
114
115
116RTDECL(int) RTSemRWCreate(PRTSEMRW phRWSem)
117{
118 RTSEMRWINTERNAL *pThis = (RTSEMRWINTERNAL *)RTMemAlloc(sizeof(*pThis));
119 if (!pThis)
120 return VERR_NO_MEMORY;
121
122 int rc = RTSemEventMultiCreate(&pThis->hEvtRead);
123 if (RT_SUCCESS(rc))
124 {
125 rc = RTSemEventCreate(&pThis->hEvtWrite);
126 if (RT_SUCCESS(rc))
127 {
128 pThis->u32Magic = RTSEMRW_MAGIC;
129 pThis->u32Padding = 0;
130 pThis->u64State = 0;
131 pThis->hNativeWriter = NIL_RTNATIVETHREAD;
132 pThis->cWriterReads = 0;
133 pThis->cWriteRecursions = 0;
134 pThis->fNeedReset = false;
135#ifdef RTSEMRW_STRICT
136 RTLockValidatorRecExclInit(&pThis->ValidatorWrite, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, pThis,
137 true /*fEnabled*/, "RTSemRW");
138 RTLockValidatorRecSharedInit(&pThis->ValidatorRead, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, pThis,
139 false /*fSignaller*/, true /*fEnabled*/, "RTSemEvent");
140 RTLockValidatorRecMakeSiblings(&pThis->ValidatorWrite.Core, &pThis->ValidatorRead.Core);
141#endif
142
143 *phRWSem = pThis;
144 return VINF_SUCCESS;
145 }
146 RTSemEventMultiDestroy(pThis->hEvtRead);
147 }
148 return rc;
149}
150
151
/**
 * Destroys a read/write semaphore created by RTSemRWCreate.
 *
 * NIL handles are quietly ignored.  The assertion only checks that no
 * read/write counts remain in u64State; the caller is responsible for
 * ensuring the semaphore is no longer in use.
 *
 * @returns VINF_SUCCESS or VERR_INVALID_HANDLE.
 * @param   hRWSem  The semaphore handle to destroy.
 */
RTDECL(int) RTSemRWDestroy(RTSEMRW hRWSem)
{
    /*
     * Validate input.
     */
    RTSEMRWINTERNAL *pThis = hRWSem;
    if (pThis == NIL_RTSEMRW)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
    Assert(!(ASMAtomicReadU64(&pThis->u64State) & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)));

    /*
     * Invalidate the object and free up the resources.
     */
    /* Flip the magic first (atomically) so wait loops elsewhere in this file,
       which re-check u32Magic after waking, can detect the destruction. */
    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, ~RTSEMRW_MAGIC, RTSEMRW_MAGIC), VERR_INVALID_HANDLE);

    /* Exchange the handles to NIL before destroying them. */
    RTSEMEVENTMULTI hEvtRead;
    ASMAtomicXchgHandle(&pThis->hEvtRead, NIL_RTSEMEVENTMULTI, &hEvtRead);
    int rc = RTSemEventMultiDestroy(hEvtRead);
    AssertRC(rc);

    RTSEMEVENT hEvtWrite;
    ASMAtomicXchgHandle(&pThis->hEvtWrite, NIL_RTSEMEVENT, &hEvtWrite);
    rc = RTSemEventDestroy(hEvtWrite);
    AssertRC(rc);

#ifdef RTSEMRW_STRICT
    RTLockValidatorRecSharedDelete(&pThis->ValidatorRead);
    RTLockValidatorRecExclDelete(&pThis->ValidatorWrite);
#endif
    RTMemFree(pThis);
    return VINF_SUCCESS;
}
186
187
/**
 * Worker for acquiring the semaphore for reading.
 *
 * @returns IPRT status code: VINF_SUCCESS, VERR_TIMEOUT, VERR_SEM_DESTROYED,
 *          an interruption status (NoResume variants), or a lock validator
 *          failure code.
 * @param   hRWSem          The semaphore handle; NIL is tolerated and yields
 *                          VINF_SUCCESS.
 * @param   cMillies        Timeout in milliseconds; 0 means fail immediately
 *                          with VERR_TIMEOUT instead of waiting.
 * @param   fInterruptible  Whether the blocking wait may be interrupted.
 * @param   pSrcPos         Lock validator source position (strict builds).
 */
static int rtSemRWRequestRead(RTSEMRW hRWSem, unsigned cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    RTSEMRWINTERNAL *pThis = hRWSem;
    if (pThis == NIL_RTSEMRW)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies > 0)
    {
        int rc9 = RTLockValidatorRecSharedCheckOrder(&pThis->ValidatorRead, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTSEMRW_CNT_MASK / 2);
            u64State &= ~RTSEMRW_CNT_RD_MASK;
            u64State |= c << RTSEMRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
#ifdef RTSEMRW_STRICT
                RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTSEMRW_CNT_RD_SHIFT) | (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
                Assert(!pThis->fNeedReset);
#ifdef RTSEMRW_STRICT
                RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else
        {
            /* Is the writer perhaps doing a read recursion? */
            RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
            RTNATIVETHREAD hNativeWriter;
            ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
            if (hNativeSelf == hNativeWriter)
            {
#ifdef RTSEMRW_STRICT
                int rc9 = RTLockValidatorRecExclRecursionMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core, pSrcPos);
                if (RT_FAILURE(rc9))
                    return rc9;
#endif
                /* Track the recursion in cWriterReads only; u64State is untouched. */
                Assert(pThis->cWriterReads < UINT32_MAX / 2);
                ASMAtomicIncU32(&pThis->cWriterReads);
                return VINF_SUCCESS; /* don't break! */
            }

            /* If the timeout is 0, return already. */
            if (!cMillies)
                return VERR_TIMEOUT;

            /* Add ourselves to the queue and wait for the direction to change. */
            uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTSEMRW_CNT_MASK / 2);

            uint64_t cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT;
            cWait++;
            Assert(cWait <= c);
            Assert(cWait < RTSEMRW_CNT_MASK / 2);

            u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_WAIT_CNT_RD_MASK);
            u64State |= (c << RTSEMRW_CNT_RD_SHIFT) | (cWait << RTSEMRW_WAIT_CNT_RD_SHIFT);

            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
                for (uint32_t iLoop = 0; ; iLoop++)
                {
                    int rc;
#ifdef RTSEMRW_STRICT
                    rc = RTLockValidatorRecSharedCheckBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos, true,
                                                               cMillies, RTTHREADSTATE_RW_READ, false);
                    if (RT_SUCCESS(rc))
#else
                    RTTHREAD hThreadSelf = RTThreadSelf();
                    RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
#endif
                    {
                        if (fInterruptible)
                            rc = RTSemEventMultiWaitNoResume(pThis->hEvtRead, cMillies);
                        else
                            rc = RTSemEventMultiWait(pThis->hEvtRead, cMillies);
                        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
                        /* Re-check the magic: the semaphore may have been destroyed
                           while we were blocked. */
                        if (pThis->u32Magic != RTSEMRW_MAGIC)
                            return VERR_SEM_DESTROYED;
                    }
                    if (RT_FAILURE(rc))
                    {
                        /* Decrement the counts and return the error. */
                        for (;;)
                        {
                            u64OldState = u64State = ASMAtomicReadU64(&pThis->u64State);
                            c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT; Assert(c > 0);
                            c--;
                            cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
                            cWait--;
                            u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_WAIT_CNT_RD_MASK);
                            u64State |= (c << RTSEMRW_CNT_RD_SHIFT) | (cWait << RTSEMRW_WAIT_CNT_RD_SHIFT);
                            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                                break;
                        }
                        return rc;
                    }

                    Assert(pThis->fNeedReset);
                    u64State = ASMAtomicReadU64(&pThis->u64State);
                    if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
                        break;
                    /* A woken reader should see read direction immediately. */
                    AssertMsg(iLoop < 1, ("%u\n", iLoop));
                }

                /* Decrement the wait count and maybe reset the semaphore (if we're last). */
                for (;;)
                {
                    u64OldState = u64State;

                    cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT;
                    Assert(cWait > 0);
                    cWait--;
                    u64State &= ~RTSEMRW_WAIT_CNT_RD_MASK;
                    u64State |= cWait << RTSEMRW_WAIT_CNT_RD_SHIFT;

                    if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                    {
                        if (cWait == 0)
                        {
                            if (ASMAtomicXchgBool(&pThis->fNeedReset, false))
                            {
                                int rc = RTSemEventMultiReset(pThis->hEvtRead);
                                AssertRCReturn(rc, rc);
                            }
                        }
                        break;
                    }
                    u64State = ASMAtomicReadU64(&pThis->u64State);
                }

#ifdef RTSEMRW_STRICT
                RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }

        if (pThis->u32Magic != RTSEMRW_MAGIC)
            return VERR_SEM_DESTROYED;

        /* CAS lost the race - back off briefly and retry with fresh state. */
        ASMNopPause();
        u64State = ASMAtomicReadU64(&pThis->u64State);
        u64OldState = u64State;
    }

    /* got it! */
    Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT));
    return VINF_SUCCESS;

}
375
376
377#undef RTSemRWRequestRead
378RTDECL(int) RTSemRWRequestRead(RTSEMRW RWSem, unsigned cMillies)
379{
380#ifndef RTSEMRW_STRICT
381 return rtSemRWRequestRead(RWSem, cMillies, false, NULL);
382#else
383 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
384 return rtSemRWRequestRead(RWSem, cMillies, false, &SrcPos);
385#endif
386}
387RT_EXPORT_SYMBOL(RTSemRWRequestRead);
388
389
/**
 * Debug version of RTSemRWRequestRead, passing the caller's source position
 * to the lock validator.
 *
 * @returns IPRT status code; see rtSemRWRequestRead.
 * @param   RWSem       The semaphore handle.
 * @param   cMillies    Timeout in milliseconds; 0 = don't wait.
 * @param   uId         Caller identification (typically the return address).
 */
RTDECL(int) RTSemRWRequestReadDebug(RTSEMRW RWSem, unsigned cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtSemRWRequestRead(RWSem, cMillies, false, &SrcPos);
}
RT_EXPORT_SYMBOL(RTSemRWRequestReadDebug);
396
397
398#undef RTSemRWRequestReadNoResume
399RTDECL(int) RTSemRWRequestReadNoResume(RTSEMRW RWSem, unsigned cMillies)
400{
401#ifndef RTSEMRW_STRICT
402 return rtSemRWRequestRead(RWSem, cMillies, true, NULL);
403#else
404 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
405 return rtSemRWRequestRead(RWSem, cMillies, true, &SrcPos);
406#endif
407}
408RT_EXPORT_SYMBOL(RTSemRWRequestReadNoResume);
409
410
/**
 * Debug version of RTSemRWRequestReadNoResume, passing the caller's source
 * position to the lock validator.
 *
 * @returns IPRT status code; see rtSemRWRequestRead.
 * @param   RWSem       The semaphore handle.
 * @param   cMillies    Timeout in milliseconds; 0 = don't wait.
 * @param   uId         Caller identification (typically the return address).
 */
RTDECL(int) RTSemRWRequestReadNoResumeDebug(RTSEMRW RWSem, unsigned cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtSemRWRequestRead(RWSem, cMillies, true, &SrcPos);
}
RT_EXPORT_SYMBOL(RTSemRWRequestReadNoResumeDebug);
417
418
419
420RTDECL(int) RTSemRWReleaseRead(RTSEMRW RWSem)
421{
422 /*
423 * Validate handle.
424 */
425 RTSEMRWINTERNAL *pThis = RWSem;
426 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
427 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
428
429 /*
430 * Check the direction and take action accordingly.
431 */
432 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
433 uint64_t u64OldState = u64State;
434 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
435 {
436#ifdef RTSEMRW_STRICT
437 int rc9 = RTLockValidatorRecSharedCheckAndRelease(&pThis->ValidatorRead, NIL_RTTHREAD);
438 if (RT_FAILURE(rc9))
439 return rc9;
440#endif
441 for (;;)
442 {
443 uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
444 AssertReturn(c > 0, VERR_NOT_OWNER);
445 c--;
446
447 if ( c > 0
448 || (u64State & RTSEMRW_CNT_RD_MASK) == 0)
449 {
450 /* Don't change the direction. */
451 u64State &= ~RTSEMRW_CNT_RD_MASK;
452 u64State |= c << RTSEMRW_CNT_RD_SHIFT;
453 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
454 break;
455 }
456 else
457 {
458 /* Reverse the direction and signal the reader threads. */
459 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_DIR_MASK);
460 u64State |= RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT;
461 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
462 {
463 int rc = RTSemEventSignal(pThis->hEvtWrite);
464 AssertRC(rc);
465 break;
466 }
467 }
468
469 ASMNopPause();
470 u64State = ASMAtomicReadU64(&pThis->u64State);
471 u64OldState = u64State;
472 }
473 }
474 else
475 {
476 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
477 RTNATIVETHREAD hNativeWriter;
478 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
479 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
480 AssertReturn(pThis->cWriterReads > 0, VERR_NOT_OWNER);
481#ifdef RTSEMRW_STRICT
482 int rc = RTLockValidatorRecExclUnwindMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core);
483 if (RT_FAILURE(rc))
484 return rc;
485#endif
486 ASMAtomicDecU32(&pThis->cWriterReads);
487 }
488
489 return VINF_SUCCESS;
490}
491RT_EXPORT_SYMBOL(RTSemRWReleaseRead);
492
493
494DECL_FORCE_INLINE(int) rtSemRWRequestWrite(RTSEMRW hRWSem, unsigned cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
495{
496 /*
497 * Validate input.
498 */
499 RTSEMRWINTERNAL *pThis = hRWSem;
500 if (pThis == NIL_RTSEMRW)
501 return VINF_SUCCESS;
502 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
503 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
504
505#ifdef RTSEMRW_STRICT
506 RTTHREAD hThreadSelf = NIL_RTTHREAD;
507 if (cMillies)
508 {
509 hThreadSelf = RTThreadSelfAutoAdopt();
510 int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
511 if (RT_FAILURE(rc9))
512 return rc9;
513 }
514#endif
515
516 /*
517 * Check if we're already the owner and just recursing.
518 */
519 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
520 RTNATIVETHREAD hNativeWriter;
521 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
522 if (hNativeSelf == hNativeWriter)
523 {
524 Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT));
525#ifdef RTSEMRW_STRICT
526 int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorWrite, pSrcPos);
527 if (RT_FAILURE(rc9))
528 return rc9;
529#endif
530 Assert(pThis->cWriteRecursions < UINT32_MAX / 2);
531 ASMAtomicIncU32(&pThis->cWriteRecursions);
532 return VINF_SUCCESS;
533 }
534
535 /*
536 * Get cracking.
537 */
538 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
539 uint64_t u64OldState = u64State;
540
541 for (;;)
542 {
543 if ( (u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT)
544 || (u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) != 0)
545 {
546 /* It flows in the right direction, try follow it before it changes. */
547 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
548 c++;
549 Assert(c < RTSEMRW_CNT_MASK / 2);
550 u64State &= ~RTSEMRW_CNT_WR_MASK;
551 u64State |= c << RTSEMRW_CNT_WR_SHIFT;
552 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
553 break;
554 }
555 else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0)
556 {
557 /* Wrong direction, but we're alone here and can simply try switch the direction. */
558 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
559 u64State |= (UINT64_C(1) << RTSEMRW_CNT_WR_SHIFT) | (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT);
560 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
561 break;
562 }
563 else if (!cMillies)
564 /* Wrong direction and we're not supposed to wait, just return. */
565 return VERR_TIMEOUT;
566 else
567 {
568 /* Add ourselves to the write count and break out to do the wait. */
569 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
570 c++;
571 Assert(c < RTSEMRW_CNT_MASK / 2);
572 u64State &= ~RTSEMRW_CNT_WR_MASK;
573 u64State |= c << RTSEMRW_CNT_WR_SHIFT;
574 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
575 break;
576 }
577
578 if (pThis->u32Magic != RTSEMRW_MAGIC)
579 return VERR_SEM_DESTROYED;
580
581 ASMNopPause();
582 u64State = ASMAtomicReadU64(&pThis->u64State);
583 u64OldState = u64State;
584 }
585
586 /*
587 * If we're in write mode now try grab the ownership. Play fair if there
588 * are threads already waiting.
589 */
590 bool fDone = (u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT)
591 && ( ((u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT) == 1
592 || cMillies == 0);
593 if (fDone)
594 ASMAtomicCmpXchgHandle(&pThis->hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
595 if (!fDone)
596 {
597 /*
598 * Wait for our turn.
599 */
600 for (uint32_t iLoop = 0; ; iLoop++)
601 {
602 int rc;
603#ifdef RTSEMRW_STRICT
604 if (cMillies)
605 {
606 if (hThreadSelf == NIL_RTTHREAD)
607 hThreadSelf = RTThreadSelfAutoAdopt();
608 rc = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true,
609 cMillies, RTTHREADSTATE_RW_WRITE, false);
610 }
611 else
612 rc = VINF_SUCCESS;
613 if (RT_SUCCESS(rc))
614#else
615 RTTHREAD hThreadSelf = RTThreadSelf();
616 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
617#endif
618 {
619 if (fInterruptible)
620 rc = RTSemEventWaitNoResume(pThis->hEvtWrite, cMillies);
621 else
622 rc = RTSemEventWait(pThis->hEvtWrite, cMillies);
623 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
624 if (pThis->u32Magic != RTSEMRW_MAGIC)
625 return VERR_SEM_DESTROYED;
626 }
627 if (RT_FAILURE(rc))
628 {
629 /* Decrement the counts and return the error. */
630 for (;;)
631 {
632 u64OldState = u64State = ASMAtomicReadU64(&pThis->u64State);
633 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT; Assert(c > 0);
634 c--;
635 u64State &= ~RTSEMRW_CNT_WR_MASK;
636 u64State |= c << RTSEMRW_CNT_WR_SHIFT;
637 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
638 break;
639 }
640 return rc;
641 }
642
643 u64State = ASMAtomicReadU64(&pThis->u64State);
644 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT))
645 {
646 ASMAtomicCmpXchgHandle(&pThis->hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
647 if (fDone)
648 break;
649 }
650 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
651 }
652 }
653
654 /*
655 * Got it!
656 */
657 Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT));
658 ASMAtomicWriteU32(&pThis->cWriteRecursions, 1);
659 Assert(pThis->cWriterReads == 0);
660#ifdef RTSEMRW_STRICT
661 RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true);
662#endif
663
664 return VINF_SUCCESS;
665}
666
667
668#undef RTSemRWRequestWrite
669RTDECL(int) RTSemRWRequestWrite(RTSEMRW RWSem, unsigned cMillies)
670{
671#ifndef RTSEMRW_STRICT
672 return rtSemRWRequestWrite(RWSem, cMillies, false, NULL);
673#else
674 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
675 return rtSemRWRequestWrite(RWSem, cMillies, false, &SrcPos);
676#endif
677}
678RT_EXPORT_SYMBOL(RTSemRWRequestWrite);
679
680
/**
 * Debug version of RTSemRWRequestWrite, passing the caller's source position
 * to the lock validator.
 *
 * @returns IPRT status code; see rtSemRWRequestWrite.
 * @param   RWSem       The semaphore handle.
 * @param   cMillies    Timeout in milliseconds; 0 = don't wait.
 * @param   uId         Caller identification (typically the return address).
 */
RTDECL(int) RTSemRWRequestWriteDebug(RTSEMRW RWSem, unsigned cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtSemRWRequestWrite(RWSem, cMillies, false, &SrcPos);
}
RT_EXPORT_SYMBOL(RTSemRWRequestWriteDebug);
687
688
689#undef RTSemRWRequestWriteNoResume
690RTDECL(int) RTSemRWRequestWriteNoResume(RTSEMRW RWSem, unsigned cMillies)
691{
692#ifndef RTSEMRW_STRICT
693 return rtSemRWRequestWrite(RWSem, cMillies, true, NULL);
694#else
695 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
696 return rtSemRWRequestWrite(RWSem, cMillies, true, &SrcPos);
697#endif
698}
699RT_EXPORT_SYMBOL(RTSemRWRequestWriteNoResume);
700
701
/**
 * Debug version of RTSemRWRequestWriteNoResume, passing the caller's source
 * position to the lock validator.
 *
 * @returns IPRT status code; see rtSemRWRequestWrite.
 * @param   RWSem       The semaphore handle.
 * @param   cMillies    Timeout in milliseconds; 0 = don't wait.
 * @param   uId         Caller identification (typically the return address).
 */
RTDECL(int) RTSemRWRequestWriteNoResumeDebug(RTSEMRW RWSem, unsigned cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtSemRWRequestWrite(RWSem, cMillies, true, &SrcPos);
}
RT_EXPORT_SYMBOL(RTSemRWRequestWriteNoResumeDebug);
708
709
/**
 * Releases the write (exclusive) hold, or unwinds one level of write
 * recursion.
 *
 * On the final release the direction is either kept (handing the semaphore
 * to the next waiting writer) or reversed to read mode (waking all waiting
 * readers), depending on who is queued.
 *
 * @returns VINF_SUCCESS, VERR_INVALID_HANDLE, VERR_NOT_OWNER,
 *          VERR_WRONG_ORDER, VERR_SEM_DESTROYED or a lock validator failure.
 * @param   RWSem   The semaphore handle.
 */
RTDECL(int) RTSemRWReleaseWrite(RTSEMRW RWSem)
{

    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = RWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    /* Only the current write owner may release. */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
    AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);

    /*
     * Unwind a recursion.
     */
    if (pThis->cWriteRecursions == 1)
    {
        AssertReturn(pThis->cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorWrite, true);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        /*
         * Update the state.
         */
        ASMAtomicWriteU32(&pThis->cWriteRecursions, 0);
        /** @todo validate order. */
        ASMAtomicWriteHandle(&pThis->hNativeWriter, NIL_RTNATIVETHREAD);

        for (;;)
        {
            uint64_t u64State    = ASMAtomicReadU64(&pThis->u64State);
            uint64_t u64OldState = u64State;

            uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
            Assert(c > 0);
            c--;

            if (   c > 0
                || (u64State & RTSEMRW_CNT_RD_MASK) == 0)
            {
                /* Don't change the direction, wake up the next writer if any. */
                u64State &= ~RTSEMRW_CNT_WR_MASK;
                u64State |= c << RTSEMRW_CNT_WR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                {
                    if (c > 0)
                    {
                        int rc = RTSemEventSignal(pThis->hEvtWrite);
                        AssertRC(rc);
                    }
                    break;
                }
            }
            else
            {
                /* Reverse the direction and signal the reader threads. */
                u64State &= ~(RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
                u64State |= RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                {
                    /* Mark hEvtRead for resetting by the last woken reader. */
                    Assert(!pThis->fNeedReset);
                    ASMAtomicWriteBool(&pThis->fNeedReset, true);
                    int rc = RTSemEventMultiSignal(pThis->hEvtRead);
                    AssertRC(rc);
                    break;
                }
            }

            /* CAS lost a race; back off and retry unless destroyed. */
            ASMNopPause();
            if (pThis->u32Magic != RTSEMRW_MAGIC)
                return VERR_SEM_DESTROYED;
        }
    }
    else
    {
        /* Just unwind one level of recursion. */
        Assert(pThis->cWriteRecursions != 0);
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclUnwind(&pThis->ValidatorWrite);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicDecU32(&pThis->cWriteRecursions);
    }

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTSemRWReleaseWrite);
802
803
804RTDECL(bool) RTSemRWIsWriteOwner(RTSEMRW RWSem)
805{
806 /*
807 * Validate handle.
808 */
809 struct RTSEMRWINTERNAL *pThis = RWSem;
810 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
811 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
812
813 /*
814 * Check ownership.
815 */
816 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
817 RTNATIVETHREAD hNativeWriter;
818 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
819 return hNativeWriter == hNativeSelf;
820}
821RT_EXPORT_SYMBOL(RTSemRWIsWriteOwner);
822
823
824RTDECL(uint32_t) RTSemRWGetWriteRecursion(RTSEMRW RWSem)
825{
826 /*
827 * Validate handle.
828 */
829 struct RTSEMRWINTERNAL *pThis = RWSem;
830 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
831 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
832
833 /*
834 * Return the requested data.
835 */
836 return pThis->cWriteRecursions;
837}
838RT_EXPORT_SYMBOL(RTSemRWGetWriteRecursion);
839
840
841RTDECL(uint32_t) RTSemRWGetWriterReadRecursion(RTSEMRW RWSem)
842{
843 /*
844 * Validate handle.
845 */
846 struct RTSEMRWINTERNAL *pThis = RWSem;
847 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
848 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
849
850 /*
851 * Return the requested data.
852 */
853 return pThis->cWriterReads;
854}
855RT_EXPORT_SYMBOL(RTSemRWGetWriterReadRecursion);
856
857
858RTDECL(uint32_t) RTSemRWGetReadCount(RTSEMRW RWSem)
859{
860 /*
861 * Validate input.
862 */
863 struct RTSEMRWINTERNAL *pThis = RWSem;
864 AssertPtrReturn(pThis, 0);
865 AssertMsgReturn(pThis->u32Magic == RTSEMRW_MAGIC,
866 ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
867 0);
868
869 /*
870 * Return the requested data.
871 */
872 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
873 if ((u64State & RTSEMRW_DIR_MASK) != (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
874 return 0;
875 return (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
876}
877RT_EXPORT_SYMBOL(RTSemRWGetReadCount);
878
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette