VirtualBox

source: vbox/trunk/src/VBox/Runtime/generic/semrw-lockless-generic.cpp@ 25685

Last change on this file since 25685 was 25685, checked in by vboxsync, 15 years ago

iprt,pdmcritsect: Some more lock validator code, almost there now... :-)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 30.6 KB
/* $Id: semrw-lockless-generic.cpp 25685 2010-01-07 22:03:06Z vboxsync $ */
/** @file
 * IPRT - Read/Write Semaphore, Generic lockless implementation.
 */

/*
 * Copyright (C) 2009 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define RTASSERT_QUIET
#include <iprt/semaphore.h>
#include "internal/iprt.h"

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/lockvalidator.h>
#include <iprt/mem.h>
#include <iprt/thread.h>

#include "internal/magics.h"
#include "internal/strict.h"


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
typedef struct RTSEMRWINTERNAL
{
    /** Magic value (RTSEMRW_MAGIC). */
    uint32_t volatile u32Magic;
    uint32_t u32Padding; /**< alignment padding.*/
    /** The state variable.
     * All accesses are atomic and its bits are defined like this:
     *      Bits 0..14  - cReads.
     *      Bit 15      - Unused.
     *      Bits 16..30 - cWrites.
     *      Bit 31      - fDirection; 0=Read, 1=Write.
     *      Bits 32..46 - cWaitingReads
     *      Bit 47      - Unused.
     *      Bits 48..62 - cWaitingWrites
     *      Bit 63      - Unused.
     */
    uint64_t volatile u64State;
    /** The write owner. */
    RTNATIVETHREAD volatile hNativeWriter;
    /** The number of reads made by the current writer. */
    uint32_t volatile cWriterReads;
    /** The number of write recursions made by the current writer. */
    uint32_t volatile cWriteRecursions;

    /** What the writer threads are blocking on. */
    RTSEMEVENT hEvtWrite;
    /** What the read threads are blocking on when waiting for the writer to
     * finish. */
    RTSEMEVENTMULTI hEvtRead;
    /** Indicates whether hEvtRead needs resetting. */
    bool volatile fNeedReset;

#ifdef RTSEMRW_STRICT
    /** The validator record for the writer. */
    RTLOCKVALRECEXCL ValidatorWrite;
    /** The validator record for the readers. */
    RTLOCKVALRECSHRD ValidatorRead;
#endif
} RTSEMRWINTERNAL;


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#define RTSEMRW_CNT_BITS            15
#define RTSEMRW_CNT_MASK            UINT64_C(0x00007fff)

#define RTSEMRW_CNT_RD_SHIFT        0
#define RTSEMRW_CNT_RD_MASK         (RTSEMRW_CNT_MASK << RTSEMRW_CNT_RD_SHIFT)
#define RTSEMRW_CNT_WR_SHIFT        16
#define RTSEMRW_CNT_WR_MASK         (RTSEMRW_CNT_MASK << RTSEMRW_CNT_WR_SHIFT)
#define RTSEMRW_DIR_SHIFT           31
#define RTSEMRW_DIR_MASK            RT_BIT_64(RTSEMRW_DIR_SHIFT)
#define RTSEMRW_DIR_READ            UINT64_C(0)
#define RTSEMRW_DIR_WRITE           UINT64_C(1)

#define RTSEMRW_WAIT_CNT_RD_SHIFT   32
#define RTSEMRW_WAIT_CNT_RD_MASK    (RTSEMRW_CNT_MASK << RTSEMRW_WAIT_CNT_RD_SHIFT)
//#define RTSEMRW_WAIT_CNT_WR_SHIFT   48
//#define RTSEMRW_WAIT_CNT_WR_MASK    (RTSEMRW_CNT_MASK << RTSEMRW_WAIT_CNT_WR_SHIFT)
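
/* Example (illustration): with the layout above, UINT64_C(0x0000000000000002)
 * decodes as read direction with two active readers and nobody waiting, while
 * UINT64_C(0x0000000180010000) decodes as write direction (RTSEMRW_DIR_MASK set),
 * one writer accounted for in RTSEMRW_CNT_WR_MASK and one reader parked in
 * RTSEMRW_WAIT_CNT_RD_MASK. */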



RTDECL(int) RTSemRWCreate(PRTSEMRW phRWSem)
{
    RTSEMRWINTERNAL *pThis = (RTSEMRWINTERNAL *)RTMemAlloc(sizeof(*pThis));
    if (!pThis)
        return VERR_NO_MEMORY;

    int rc = RTSemEventMultiCreate(&pThis->hEvtRead);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventCreate(&pThis->hEvtWrite);
        if (RT_SUCCESS(rc))
        {
            pThis->u32Magic = RTSEMRW_MAGIC;
            pThis->u32Padding = 0;
            pThis->u64State = 0;
            pThis->hNativeWriter = NIL_RTNATIVETHREAD;
            pThis->cWriterReads = 0;
            pThis->cWriteRecursions = 0;
            pThis->fNeedReset = false;
#ifdef RTSEMRW_STRICT
            RTLockValidatorRecExclInit(&pThis->ValidatorWrite, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "RTSemRW", pThis, true);
            RTLockValidatorRecSharedInit(&pThis->ValidatorRead, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "RTSemRW", pThis, false /*fSignaller*/, true);
            RTLockValidatorRecMakeSiblings(&pThis->ValidatorWrite.Core, &pThis->ValidatorRead.Core);
#endif

            *phRWSem = pThis;
            return VINF_SUCCESS;
        }
        RTSemEventMultiDestroy(pThis->hEvtRead);
    }
    RTMemFree(pThis);
    return rc;
}
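
/* Illustrative usage sketch of the API implemented in this file:
 * @code
 *      RTSEMRW hSemRW;
 *      int rc = RTSemRWCreate(&hSemRW);
 *      if (RT_SUCCESS(rc))
 *      {
 *          rc = RTSemRWRequestRead(hSemRW, RT_INDEFINITE_WAIT);
 *          if (RT_SUCCESS(rc))
 *          {
 *              // ... access the data protected by the semaphore ...
 *              RTSemRWReleaseRead(hSemRW);
 *          }
 *          RTSemRWDestroy(hSemRW);
 *      }
 * @endcode
 */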


RTDECL(int) RTSemRWDestroy(RTSEMRW hRWSem)
{
    /*
     * Validate input.
     */
    RTSEMRWINTERNAL *pThis = hRWSem;
    if (pThis == NIL_RTSEMRW)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
    Assert(!(ASMAtomicReadU64(&pThis->u64State) & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)));

    /*
     * Invalidate the object and free up the resources.
     */
    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, ~RTSEMRW_MAGIC, RTSEMRW_MAGIC), VERR_INVALID_HANDLE);
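    /* Waiters that wake up from here on will see the changed magic in the
       wait loops and bail out with VERR_SEM_DESTROYED. */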

    RTSEMEVENTMULTI hEvtRead;
    ASMAtomicXchgHandle(&pThis->hEvtRead, NIL_RTSEMEVENTMULTI, &hEvtRead);
    int rc = RTSemEventMultiDestroy(hEvtRead);
    AssertRC(rc);

    RTSEMEVENT hEvtWrite;
    ASMAtomicXchgHandle(&pThis->hEvtWrite, NIL_RTSEMEVENT, &hEvtWrite);
    rc = RTSemEventDestroy(hEvtWrite);
    AssertRC(rc);

#ifdef RTSEMRW_STRICT
    RTLockValidatorRecSharedDelete(&pThis->ValidatorRead);
    RTLockValidatorRecExclDelete(&pThis->ValidatorWrite);
#endif
    RTMemFree(pThis);
    return VINF_SUCCESS;
}


static int rtSemRWRequestRead(RTSEMRW hRWSem, unsigned cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    RTSEMRWINTERNAL *pThis = hRWSem;
    if (pThis == NIL_RTSEMRW)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies > 0)
    {
        int rc9 = RTLockValidatorRecSharedCheckOrder(&pThis->ValidatorRead, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;

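    /* Optimistic update loop: snapshot the state, compute the new value and
       try to install it with a compare-and-exchange; on contention the
       snapshot is refreshed and the attempt repeated. */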
    for (;;)
    {
        if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try to follow it before it changes. */
            uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTSEMRW_CNT_MASK / 2);
            u64State &= ~RTSEMRW_CNT_RD_MASK;
            u64State |= c << RTSEMRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
#ifdef RTSEMRW_STRICT
                RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try to switch the direction. */
            u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTSEMRW_CNT_RD_SHIFT) | (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
                Assert(!pThis->fNeedReset);
#ifdef RTSEMRW_STRICT
                RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else
        {
            /* Is the writer perhaps doing a read recursion? */
            RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
            RTNATIVETHREAD hNativeWriter;
            ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
            if (hNativeSelf == hNativeWriter)
            {
#ifdef RTSEMRW_STRICT
                int rc9 = RTLockValidatorRecExclRecursionMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core, pSrcPos);
                if (RT_FAILURE(rc9))
                    return rc9;
#endif
                Assert(pThis->cWriterReads < UINT32_MAX / 2);
                ASMAtomicIncU32(&pThis->cWriterReads);
                return VINF_SUCCESS; /* don't break! */
            }

            /* If the timeout is 0, return already. */
            if (!cMillies)
                return VERR_TIMEOUT;

            /* Add ourselves to the queue and wait for the direction to change. */
            uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTSEMRW_CNT_MASK / 2);

            uint64_t cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT;
            cWait++;
            Assert(cWait <= c);
            Assert(cWait < RTSEMRW_CNT_MASK / 2);

            u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_WAIT_CNT_RD_MASK);
            u64State |= (c << RTSEMRW_CNT_RD_SHIFT) | (cWait << RTSEMRW_WAIT_CNT_RD_SHIFT);

            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
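                /* We are now accounted for in both cReads and cWaitingReads;
                   block on hEvtRead until a writer flips the direction back
                   to read. */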
                for (uint32_t iLoop = 0; ; iLoop++)
                {
                    int rc;
#ifdef RTSEMRW_STRICT
                    rc = RTLockValidatorRecSharedCheckBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos, true,
                                                               cMillies, RTTHREADSTATE_RW_READ, false);
                    if (RT_SUCCESS(rc))
#else
                    RTTHREAD hThreadSelf = RTThreadSelf();
                    RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
#endif
                    {
                        if (fInterruptible)
                            rc = RTSemEventMultiWaitNoResume(pThis->hEvtRead, cMillies);
                        else
                            rc = RTSemEventMultiWait(pThis->hEvtRead, cMillies);
                        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
                        if (pThis->u32Magic != RTSEMRW_MAGIC)
                            return VERR_SEM_DESTROYED;
                    }
                    if (RT_FAILURE(rc))
                    {
                        /* Decrement the counts and return the error. */
                        for (;;)
                        {
                            u64OldState = u64State = ASMAtomicReadU64(&pThis->u64State);
                            c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT; Assert(c > 0);
                            c--;
                            cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
                            cWait--;
                            u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_WAIT_CNT_RD_MASK);
                            u64State |= (c << RTSEMRW_CNT_RD_SHIFT) | (cWait << RTSEMRW_WAIT_CNT_RD_SHIFT);
                            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                                break;
                        }
                        return rc;
                    }

                    Assert(pThis->fNeedReset);
                    u64State = ASMAtomicReadU64(&pThis->u64State);
                    if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
                        break;
                    AssertMsg(iLoop < 1, ("%u\n", iLoop));
                }

                /* Decrement the wait count and maybe reset the semaphore (if we're last). */
                for (;;)
                {
                    u64OldState = u64State;

                    cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT;
                    Assert(cWait > 0);
                    cWait--;
                    u64State &= ~RTSEMRW_WAIT_CNT_RD_MASK;
                    u64State |= cWait << RTSEMRW_WAIT_CNT_RD_SHIFT;

                    if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                    {
                        if (cWait == 0)
                        {
                            if (ASMAtomicXchgBool(&pThis->fNeedReset, false))
                            {
                                int rc = RTSemEventMultiReset(pThis->hEvtRead);
                                AssertRCReturn(rc, rc);
                            }
                        }
                        break;
                    }
                    u64State = ASMAtomicReadU64(&pThis->u64State);
                }

#ifdef RTSEMRW_STRICT
                RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }

        if (pThis->u32Magic != RTSEMRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State = ASMAtomicReadU64(&pThis->u64State);
        u64OldState = u64State;
    }

    /* got it! */
    Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT));
    return VINF_SUCCESS;

}


#undef RTSemRWRequestRead
RTDECL(int) RTSemRWRequestRead(RTSEMRW RWSem, unsigned cMillies)
{
#ifndef RTSEMRW_STRICT
    return rtSemRWRequestRead(RWSem, cMillies, false, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtSemRWRequestRead(RWSem, cMillies, false, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTSemRWRequestRead);


RTDECL(int) RTSemRWRequestReadDebug(RTSEMRW RWSem, unsigned cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtSemRWRequestRead(RWSem, cMillies, false, &SrcPos);
}
RT_EXPORT_SYMBOL(RTSemRWRequestReadDebug);


#undef RTSemRWRequestReadNoResume
RTDECL(int) RTSemRWRequestReadNoResume(RTSEMRW RWSem, unsigned cMillies)
{
#ifndef RTSEMRW_STRICT
    return rtSemRWRequestRead(RWSem, cMillies, true, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtSemRWRequestRead(RWSem, cMillies, true, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTSemRWRequestReadNoResume);


RTDECL(int) RTSemRWRequestReadNoResumeDebug(RTSEMRW RWSem, unsigned cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtSemRWRequestRead(RWSem, cMillies, true, &SrcPos);
}
RT_EXPORT_SYMBOL(RTSemRWRequestReadNoResumeDebug);



RTDECL(int) RTSemRWReleaseRead(RTSEMRW RWSem)
{
    /*
     * Validate handle.
     */
    RTSEMRWINTERNAL *pThis = RWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check the direction and take action accordingly.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;
    if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
    {
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecSharedCheckAndRelease(&pThis->ValidatorRead, NIL_RTTHREAD);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        for (;;)
        {
            uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
            AssertReturn(c > 0, VERR_NOT_OWNER);
            c--;

            if (   c > 0
                || (u64State & RTSEMRW_CNT_WR_MASK) == 0)
            {
                /* Don't change the direction. */
                u64State &= ~RTSEMRW_CNT_RD_MASK;
                u64State |= c << RTSEMRW_CNT_RD_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                    break;
            }
            else
            {
                /* Reverse the direction and signal the waiting writers. */
                u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_DIR_MASK);
                u64State |= RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                {
                    int rc = RTSemEventSignal(pThis->hEvtWrite);
                    AssertRC(rc);
                    break;
                }
            }

            ASMNopPause();
            u64State = ASMAtomicReadU64(&pThis->u64State);
            u64OldState = u64State;
        }
    }
    else
    {
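        /* Write direction: the caller must be the writer releasing one of its
           nested read entries (cWriterReads). */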
        RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
        AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
        AssertReturn(pThis->cWriterReads > 0, VERR_NOT_OWNER);
#ifdef RTSEMRW_STRICT
        int rc = RTLockValidatorRecExclUnwindMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core);
        if (RT_FAILURE(rc))
            return rc;
#endif
        ASMAtomicDecU32(&pThis->cWriterReads);
    }

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTSemRWReleaseRead);


DECL_FORCE_INLINE(int) rtSemRWRequestWrite(RTSEMRW hRWSem, unsigned cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    RTSEMRWINTERNAL *pThis = hRWSem;
    if (pThis == NIL_RTSEMRW)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (cMillies)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Check if we're already the owner and just recursing.
     */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
    if (hNativeSelf == hNativeWriter)
    {
        Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT));
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorWrite, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        Assert(pThis->cWriteRecursions < UINT32_MAX / 2);
        ASMAtomicIncU32(&pThis->cWriteRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Get cracking.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if (   (u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT)
            || (u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) != 0)
        {
            /* It flows in the right direction, try to follow it before it changes. */
            uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTSEMRW_CNT_MASK / 2);
            u64State &= ~RTSEMRW_CNT_WR_MASK;
            u64State |= c << RTSEMRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                break;
        }
        else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try to switch the direction. */
            u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTSEMRW_CNT_WR_SHIFT) | (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                break;
        }
        else if (!cMillies)
            /* Wrong direction and we're not supposed to wait, just return. */
            return VERR_TIMEOUT;
        else
        {
            /* Add ourselves to the write count and break out to do the wait. */
            uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTSEMRW_CNT_MASK / 2);
            u64State &= ~RTSEMRW_CNT_WR_MASK;
            u64State |= c << RTSEMRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                break;
        }

        if (pThis->u32Magic != RTSEMRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State = ASMAtomicReadU64(&pThis->u64State);
        u64OldState = u64State;
    }

    /*
     * If we're in write mode now try to grab the ownership. Play fair if there
     * are threads already waiting.
     */
    bool fDone = (u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT)
              && (   ((u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT) == 1
                  || cMillies == 0);
    if (fDone)
        ASMAtomicCmpXchgHandle(&pThis->hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
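    /* fDone is updated by the ASMAtomicCmpXchgHandle above: it remains true
       only if hNativeWriter was still NIL_RTNATIVETHREAD and we claimed it. */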
    if (!fDone)
    {
        /*
         * Wait for our turn.
         */
        for (uint32_t iLoop = 0; ; iLoop++)
        {
            int rc;
#ifdef RTSEMRW_STRICT
            if (cMillies)
            {
                if (hThreadSelf == NIL_RTTHREAD)
                    hThreadSelf = RTThreadSelfAutoAdopt();
                rc = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true,
                                                         cMillies, RTTHREADSTATE_RW_WRITE, false);
            }
            else
                rc = VINF_SUCCESS;
            if (RT_SUCCESS(rc))
#else
            RTTHREAD hThreadSelf = RTThreadSelf();
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
#endif
            {
                if (fInterruptible)
                    rc = RTSemEventWaitNoResume(pThis->hEvtWrite, cMillies);
                else
                    rc = RTSemEventWait(pThis->hEvtWrite, cMillies);
                RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
                if (pThis->u32Magic != RTSEMRW_MAGIC)
                    return VERR_SEM_DESTROYED;
            }
            if (RT_FAILURE(rc))
            {
                /* Decrement the counts and return the error. */
                for (;;)
                {
                    u64OldState = u64State = ASMAtomicReadU64(&pThis->u64State);
                    uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT; Assert(c > 0);
                    c--;
                    u64State &= ~RTSEMRW_CNT_WR_MASK;
                    u64State |= c << RTSEMRW_CNT_WR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                        break;
                }
                return rc;
            }

            u64State = ASMAtomicReadU64(&pThis->u64State);
            if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT))
            {
                ASMAtomicCmpXchgHandle(&pThis->hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
                if (fDone)
                    break;
            }
            AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
        }
    }

    /*
     * Got it!
     */
    Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT));
    ASMAtomicWriteU32(&pThis->cWriteRecursions, 1);
    Assert(pThis->cWriterReads == 0);
#ifdef RTSEMRW_STRICT
    RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}


#undef RTSemRWRequestWrite
RTDECL(int) RTSemRWRequestWrite(RTSEMRW RWSem, unsigned cMillies)
{
#ifndef RTSEMRW_STRICT
    return rtSemRWRequestWrite(RWSem, cMillies, false, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtSemRWRequestWrite(RWSem, cMillies, false, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTSemRWRequestWrite);


RTDECL(int) RTSemRWRequestWriteDebug(RTSEMRW RWSem, unsigned cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtSemRWRequestWrite(RWSem, cMillies, false, &SrcPos);
}
RT_EXPORT_SYMBOL(RTSemRWRequestWriteDebug);


#undef RTSemRWRequestWriteNoResume
RTDECL(int) RTSemRWRequestWriteNoResume(RTSEMRW RWSem, unsigned cMillies)
{
#ifndef RTSEMRW_STRICT
    return rtSemRWRequestWrite(RWSem, cMillies, true, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtSemRWRequestWrite(RWSem, cMillies, true, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTSemRWRequestWriteNoResume);


RTDECL(int) RTSemRWRequestWriteNoResumeDebug(RTSEMRW RWSem, unsigned cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtSemRWRequestWrite(RWSem, cMillies, true, &SrcPos);
}
RT_EXPORT_SYMBOL(RTSemRWRequestWriteNoResumeDebug);


RTDECL(int) RTSemRWReleaseWrite(RTSEMRW RWSem)
{

    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = RWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
    AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);

    /*
     * Unwind a recursion.
     */
    if (pThis->cWriteRecursions == 1)
    {
        AssertReturn(pThis->cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorWrite, true);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        /*
         * Update the state.
         */
        ASMAtomicWriteU32(&pThis->cWriteRecursions, 0);
        /** @todo validate order. */
        ASMAtomicWriteHandle(&pThis->hNativeWriter, NIL_RTNATIVETHREAD);

        for (;;)
        {
            uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
            uint64_t u64OldState = u64State;

            uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
            Assert(c > 0);
            c--;

            if (   c > 0
                || (u64State & RTSEMRW_CNT_RD_MASK) == 0)
            {
                /* Don't change the direction, wake up the next writer if any. */
                u64State &= ~RTSEMRW_CNT_WR_MASK;
                u64State |= c << RTSEMRW_CNT_WR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                {
                    if (c > 0)
                    {
                        int rc = RTSemEventSignal(pThis->hEvtWrite);
                        AssertRC(rc);
                    }
                    break;
                }
            }
            else
            {
                /* Reverse the direction and signal the reader threads. */
                u64State &= ~(RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
                u64State |= RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                {
                    Assert(!pThis->fNeedReset);
                    ASMAtomicWriteBool(&pThis->fNeedReset, true);
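                    /* The last waiting reader to wake up will reset hEvtRead
                       again, see rtSemRWRequestRead. */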
                    int rc = RTSemEventMultiSignal(pThis->hEvtRead);
                    AssertRC(rc);
                    break;
                }
            }

            ASMNopPause();
            if (pThis->u32Magic != RTSEMRW_MAGIC)
                return VERR_SEM_DESTROYED;
        }
    }
    else
    {
        Assert(pThis->cWriteRecursions != 0);
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclUnwind(&pThis->ValidatorWrite);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicDecU32(&pThis->cWriteRecursions);
    }

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTSemRWReleaseWrite);


RTDECL(bool) RTSemRWIsWriteOwner(RTSEMRW RWSem)
{
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = RWSem;
    AssertPtrReturn(pThis, false);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, false);

    /*
     * Check ownership.
     */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
    return hNativeWriter == hNativeSelf;
}
RT_EXPORT_SYMBOL(RTSemRWIsWriteOwner);


RTDECL(uint32_t) RTSemRWGetWriteRecursion(RTSEMRW RWSem)
{
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = RWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Return the requested data.
     */
    return pThis->cWriteRecursions;
}
RT_EXPORT_SYMBOL(RTSemRWGetWriteRecursion);


RTDECL(uint32_t) RTSemRWGetWriterReadRecursion(RTSEMRW RWSem)
{
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = RWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Return the requested data.
     */
    return pThis->cWriterReads;
}
RT_EXPORT_SYMBOL(RTSemRWGetWriterReadRecursion);


RTDECL(uint32_t) RTSemRWGetReadCount(RTSEMRW RWSem)
{
    /*
     * Validate input.
     */
    struct RTSEMRWINTERNAL *pThis = RWSem;
    AssertPtrReturn(pThis, 0);
    AssertMsgReturn(pThis->u32Magic == RTSEMRW_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    0);

    /*
     * Return the requested data.
     */
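    /* Note: this is a snapshot of a concurrently updated state and is only
       meaningful while the semaphore stays in the read direction. */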
    uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
    if ((u64State & RTSEMRW_DIR_MASK) != (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
        return 0;
    return (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
}
RT_EXPORT_SYMBOL(RTSemRWGetReadCount);
