VirtualBox

source: vbox/trunk/src/VBox/Runtime/generic/semrw-lockless-generic.cpp@25723

Last change on this file since 25723 was 25723, checked in by vboxsync, 15 years ago

iprt/RTSemRW: A little cleanup.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 32.3 KB
1/* $Id: semrw-lockless-generic.cpp 25723 2010-01-11 14:28:57Z vboxsync $ */
2/** @file
3 * IPRT - Read/Write Semaphore, generic lockless implementation.
4 */
5
6/*
7 * Copyright (C) 2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#define RTASSERT_QUIET
36#include <iprt/semaphore.h>
37#include "internal/iprt.h"
38
39#include <iprt/asm.h>
40#include <iprt/assert.h>
41#include <iprt/err.h>
42#include <iprt/lockvalidator.h>
43#include <iprt/mem.h>
44#include <iprt/thread.h>
45
46#include "internal/magics.h"
47#include "internal/strict.h"
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
53typedef struct RTSEMRWINTERNAL
54{
55 /** Magic value (RTSEMRW_MAGIC). */
56 uint32_t volatile u32Magic;
57 uint32_t u32Padding; /**< alignment padding.*/
58 /** The state variable.
59 * All accesses are atomic and its bits are defined like this:
60 * Bits 0..14 - cReads.
61 * Bit 15 - Unused.
62 * Bits 16..30 - cWrites.
63 * Bit 31 - fDirection; 0=Read, 1=Write.
64 * Bits 32..46 - cWaitingReads.
65 * Bit 47 - Unused.
66 * Bits 48..62 - cWaitingWrites (currently unused, see the commented-out RTSEMRW_WAIT_CNT_WR_* macros below).
67 * Bit 63 - Unused.
68 */
69 uint64_t volatile u64State;
70 /** The write owner. */
71 RTNATIVETHREAD volatile hNativeWriter;
72 /** The number of reads made by the current writer. */
73 uint32_t volatile cWriterReads;
74 /** The number of write recursions made by the current writer. */
75 uint32_t volatile cWriteRecursions;
76
77 /** What the writer threads are blocking on. */
78 RTSEMEVENT hEvtWrite;
79 /** What the read threads are blocking on when waiting for the writer to
80 * finish. */
81 RTSEMEVENTMULTI hEvtRead;
82 /** Indicates whether hEvtRead needs resetting. */
83 bool volatile fNeedReset;
84
85#ifdef RTSEMRW_STRICT
86 /** The validator record for the writer. */
87 RTLOCKVALRECEXCL ValidatorWrite;
88 /** The validator record for the readers. */
89 RTLOCKVALRECSHRD ValidatorRead;
90#endif
91} RTSEMRWINTERNAL;
92
93
94/*******************************************************************************
95* Defined Constants And Macros *
96*******************************************************************************/
97#define RTSEMRW_CNT_BITS 15
98#define RTSEMRW_CNT_MASK UINT64_C(0x00007fff)
99
100#define RTSEMRW_CNT_RD_SHIFT 0
101#define RTSEMRW_CNT_RD_MASK (RTSEMRW_CNT_MASK << RTSEMRW_CNT_RD_SHIFT)
102#define RTSEMRW_CNT_WR_SHIFT 16
103#define RTSEMRW_CNT_WR_MASK (RTSEMRW_CNT_MASK << RTSEMRW_CNT_WR_SHIFT)
104#define RTSEMRW_DIR_SHIFT 31
105#define RTSEMRW_DIR_MASK RT_BIT_64(RTSEMRW_DIR_SHIFT)
106#define RTSEMRW_DIR_READ UINT64_C(0)
107#define RTSEMRW_DIR_WRITE UINT64_C(1)
108
109#define RTSEMRW_WAIT_CNT_RD_SHIFT 32
110#define RTSEMRW_WAIT_CNT_RD_MASK (RTSEMRW_CNT_MASK << RTSEMRW_WAIT_CNT_RD_SHIFT)
111//#define RTSEMRW_WAIT_CNT_WR_SHIFT 48
112//#define RTSEMRW_WAIT_CNT_WR_MASK (RTSEMRW_CNT_MASK << RTSEMRW_WAIT_CNT_WR_SHIFT)
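/* Illustrative sketch (not part of the build): how the packed u64State word
 * decodes with the masks above. The helper below is hypothetical and only
 * documents the layout; the real code open-codes these shifts at each site.
 *
 *   static uint32_t rtSemRWStateGetReadCount(uint64_t u64State)
 *   {
 *       return (uint32_t)((u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT);
 *   }
 *
 * A state of UINT64_C(0x0000000000000003) thus means: direction=read,
 * 3 active readers, no writers queued and no readers waiting.
 */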
113
114
115#undef RTSemRWCreate
116RTDECL(int) RTSemRWCreate(PRTSEMRW phRWSem)
117{
118 return RTSemRWCreateEx(phRWSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "RTSemRW");
119}
120RT_EXPORT_SYMBOL(RTSemRWCreate);
121
122
123RTDECL(int) RTSemRWCreateEx(PRTSEMRW phRWSem, uint32_t fFlags,
124 RTLOCKVALCLASS hClass, uint32_t uSubClass, const char *pszNameFmt, ...)
125{
126 AssertReturn(!(fFlags & ~RTSEMRW_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
127
128 RTSEMRWINTERNAL *pThis = (RTSEMRWINTERNAL *)RTMemAlloc(sizeof(*pThis));
129 if (!pThis)
130 return VERR_NO_MEMORY;
131
132 int rc = RTSemEventMultiCreate(&pThis->hEvtRead);
133 if (RT_SUCCESS(rc))
134 {
135 rc = RTSemEventCreate(&pThis->hEvtWrite);
136 if (RT_SUCCESS(rc))
137 {
138 pThis->u32Magic = RTSEMRW_MAGIC;
139 pThis->u32Padding = 0;
140 pThis->u64State = 0;
141 pThis->hNativeWriter = NIL_RTNATIVETHREAD;
142 pThis->cWriterReads = 0;
143 pThis->cWriteRecursions = 0;
144 pThis->fNeedReset = false;
145#ifdef RTSEMRW_STRICT
146 bool const fLVEnabled = !(fFlags & RTSEMRW_FLAGS_NO_LOCK_VAL);
147 va_list va;
148 va_start(va, pszNameFmt);
149 RTLockValidatorRecExclInitV(&pThis->ValidatorWrite, hClass, uSubClass, pThis, fLVEnabled, pszNameFmt, va);
150 va_end(va);
151 va_start(va, pszNameFmt);
152 RTLockValidatorRecSharedInitV(&pThis->ValidatorRead, hClass, uSubClass, pThis, false /*fSignaller*/,
153 fLVEnabled, pszNameFmt, va);
154 va_end(va);
155 RTLockValidatorRecMakeSiblings(&pThis->ValidatorWrite.Core, &pThis->ValidatorRead.Core);
156#endif
157
158 *phRWSem = pThis;
159 return VINF_SUCCESS;
160 }
161 RTSemEventMultiDestroy(pThis->hEvtRead);
162 }
163 return rc;
164}
165RT_EXPORT_SYMBOL(RTSemRWCreateEx);
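/* A minimal usage sketch (illustrative only, not part of this file) showing
 * the intended lifecycle of the API implemented below; error handling is
 * omitted for brevity:
 *
 *   RTSEMRW hRWSem;
 *   int rc = RTSemRWCreate(&hRWSem);
 *   if (RT_SUCCESS(rc))
 *   {
 *       rc = RTSemRWRequestRead(hRWSem, RT_INDEFINITE_WAIT);   // shared access
 *       // ... read the protected data ...
 *       RTSemRWReleaseRead(hRWSem);
 *
 *       rc = RTSemRWRequestWrite(hRWSem, RT_INDEFINITE_WAIT);  // exclusive access
 *       // ... modify the protected data ...
 *       RTSemRWReleaseWrite(hRWSem);
 *
 *       RTSemRWDestroy(hRWSem);
 *   }
 */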
166
167
168RTDECL(int) RTSemRWDestroy(RTSEMRW hRWSem)
169{
170 /*
171 * Validate input.
172 */
173 RTSEMRWINTERNAL *pThis = hRWSem;
174 if (pThis == NIL_RTSEMRW)
175 return VINF_SUCCESS;
176 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
177 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
178 Assert(!(ASMAtomicReadU64(&pThis->u64State) & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)));
179
180 /*
181 * Invalidate the object and free up the resources.
182 */
183 AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, ~RTSEMRW_MAGIC, RTSEMRW_MAGIC), VERR_INVALID_HANDLE);
184
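/* From this point on, any thread still blocked inside a request call will
 * notice the changed magic when it wakes up and return VERR_SEM_DESTROYED
 * from its wait loop. */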
185 RTSEMEVENTMULTI hEvtRead;
186 ASMAtomicXchgHandle(&pThis->hEvtRead, NIL_RTSEMEVENTMULTI, &hEvtRead);
187 int rc = RTSemEventMultiDestroy(hEvtRead);
188 AssertRC(rc);
189
190 RTSEMEVENT hEvtWrite;
191 ASMAtomicXchgHandle(&pThis->hEvtWrite, NIL_RTSEMEVENT, &hEvtWrite);
192 rc = RTSemEventDestroy(hEvtWrite);
193 AssertRC(rc);
194
195#ifdef RTSEMRW_STRICT
196 RTLockValidatorRecSharedDelete(&pThis->ValidatorRead);
197 RTLockValidatorRecExclDelete(&pThis->ValidatorWrite);
198#endif
199 RTMemFree(pThis);
200 return VINF_SUCCESS;
201}
202RT_EXPORT_SYMBOL(RTSemRWDestroy);
203
204
205RTDECL(uint32_t) RTSemRWSetSubClass(RTSEMRW hRWSem, uint32_t uSubClass)
206{
207#ifdef RTSEMRW_STRICT
208 /*
209 * Validate handle.
210 */
211 struct RTSEMRWINTERNAL *pThis = hRWSem;
212 AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
213 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
214
215 RTLockValidatorRecSharedSetSubClass(&pThis->ValidatorRead, uSubClass);
216 return RTLockValidatorRecExclSetSubClass(&pThis->ValidatorWrite, uSubClass);
217#else
218 return RTLOCKVAL_SUB_CLASS_INVALID;
219#endif
220}
221RT_EXPORT_SYMBOL(RTSemRWSetSubClass);
222
223
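/**
 * Worker for the RTSemRWRequestRead* variants below.
 *
 * The fast path is a single compare-and-swap on u64State: if the semaphore is
 * already in read mode, the reader count is simply incremented; if the
 * semaphore is idle, the direction is switched to read in the same CAS.
 * Otherwise the caller is either the current writer doing a read recursion,
 * or it is queued (cReads and cWaitingReads are both bumped) and blocks on
 * hEvtRead until the releasing writer flips the direction back to read.
 */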
224static int rtSemRWRequestRead(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
225{
226 /*
227 * Validate input.
228 */
229 RTSEMRWINTERNAL *pThis = hRWSem;
230 if (pThis == NIL_RTSEMRW)
231 return VINF_SUCCESS;
232 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
233 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
234
235#ifdef RTSEMRW_STRICT
236 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
237 if (cMillies > 0)
238 {
239 int rc9;
240 RTNATIVETHREAD hNativeWriter;
241 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
242 if (hNativeWriter != NIL_RTNATIVETHREAD && hNativeWriter == RTThreadNativeSelf())
243 rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
244 else
245 rc9 = RTLockValidatorRecSharedCheckOrder(&pThis->ValidatorRead, hThreadSelf, pSrcPos, cMillies);
246 if (RT_FAILURE(rc9))
247 return rc9;
248 }
249#endif
250
251 /*
252 * Get cracking...
253 */
254 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
255 uint64_t u64OldState = u64State;
256
257 for (;;)
258 {
259 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
260 {
261 /* It flows in the right direction, try to follow it before it changes. */
262 uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
263 c++;
264 Assert(c < RTSEMRW_CNT_MASK / 2);
265 u64State &= ~RTSEMRW_CNT_RD_MASK;
266 u64State |= c << RTSEMRW_CNT_RD_SHIFT;
267 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
268 {
269#ifdef RTSEMRW_STRICT
270 RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
271#endif
272 break;
273 }
274 }
275 else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0)
276 {
277 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
278 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
279 u64State |= (UINT64_C(1) << RTSEMRW_CNT_RD_SHIFT) | (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT);
280 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
281 {
282 Assert(!pThis->fNeedReset);
283#ifdef RTSEMRW_STRICT
284 RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
285#endif
286 break;
287 }
288 }
289 else
290 {
291 /* Is the writer perhaps doing a read recursion? */
292 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
293 RTNATIVETHREAD hNativeWriter;
294 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
295 if (hNativeSelf == hNativeWriter)
296 {
297#ifdef RTSEMRW_STRICT
298 int rc9 = RTLockValidatorRecExclRecursionMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core, pSrcPos);
299 if (RT_FAILURE(rc9))
300 return rc9;
301#endif
302 Assert(pThis->cWriterReads < UINT32_MAX / 2);
303 ASMAtomicIncU32(&pThis->cWriterReads);
304 return VINF_SUCCESS; /* don't break! */
305 }
306
307 /* If the timeout is 0, return straight away. */
308 if (!cMillies)
309 return VERR_TIMEOUT;
310
311 /* Add ourselves to the queue and wait for the direction to change. */
312 uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
313 c++;
314 Assert(c < RTSEMRW_CNT_MASK / 2);
315
316 uint64_t cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT;
317 cWait++;
318 Assert(cWait <= c);
319 Assert(cWait < RTSEMRW_CNT_MASK / 2);
320
321 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_WAIT_CNT_RD_MASK);
322 u64State |= (c << RTSEMRW_CNT_RD_SHIFT) | (cWait << RTSEMRW_WAIT_CNT_RD_SHIFT);
323
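/* Both the active reader count and the waiting reader count were bumped
 * above: the former makes RTSemRWReleaseWrite flip the direction back to
 * read, the latter lets the last woken reader know when it is safe to reset
 * hEvtRead again (see the loop further down). */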
324 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
325 {
326 for (uint32_t iLoop = 0; ; iLoop++)
327 {
328 int rc;
329#ifdef RTSEMRW_STRICT
330 rc = RTLockValidatorRecSharedCheckBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos, true,
331 cMillies, RTTHREADSTATE_RW_READ, false);
332 if (RT_SUCCESS(rc))
333#else
334 RTTHREAD hThreadSelf = RTThreadSelf();
335 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
336#endif
337 {
338 if (fInterruptible)
339 rc = RTSemEventMultiWaitNoResume(pThis->hEvtRead, cMillies);
340 else
341 rc = RTSemEventMultiWait(pThis->hEvtRead, cMillies);
342 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
343 if (pThis->u32Magic != RTSEMRW_MAGIC)
344 return VERR_SEM_DESTROYED;
345 }
346 if (RT_FAILURE(rc))
347 {
348 /* Decrement the counts and return the error. */
349 for (;;)
350 {
351 u64OldState = u64State = ASMAtomicReadU64(&pThis->u64State);
352 c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT; Assert(c > 0);
353 c--;
354 cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
355 cWait--;
356 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_WAIT_CNT_RD_MASK);
357 u64State |= (c << RTSEMRW_CNT_RD_SHIFT) | (cWait << RTSEMRW_WAIT_CNT_RD_SHIFT);
358 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
359 break;
360 }
361 return rc;
362 }
363
364 Assert(pThis->fNeedReset);
365 u64State = ASMAtomicReadU64(&pThis->u64State);
366 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
367 break;
368 AssertMsg(iLoop < 1, ("%u\n", iLoop));
369 }
370
371 /* Decrement the wait count and maybe reset the semaphore (if we're last). */
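/* (fNeedReset was set by RTSemRWReleaseWrite right before it signalled
 * hEvtRead; the cWait == 0 check together with the atomic xchg below makes
 * sure exactly one of the woken readers performs the reset, and only after
 * the last of them has gotten past the wait.) */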
372 for (;;)
373 {
374 u64OldState = u64State;
375
376 cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT;
377 Assert(cWait > 0);
378 cWait--;
379 u64State &= ~RTSEMRW_WAIT_CNT_RD_MASK;
380 u64State |= cWait << RTSEMRW_WAIT_CNT_RD_SHIFT;
381
382 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
383 {
384 if (cWait == 0)
385 {
386 if (ASMAtomicXchgBool(&pThis->fNeedReset, false))
387 {
388 int rc = RTSemEventMultiReset(pThis->hEvtRead);
389 AssertRCReturn(rc, rc);
390 }
391 }
392 break;
393 }
394 u64State = ASMAtomicReadU64(&pThis->u64State);
395 }
396
397#ifdef RTSEMRW_STRICT
398 RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
399#endif
400 break;
401 }
402 }
403
404 if (pThis->u32Magic != RTSEMRW_MAGIC)
405 return VERR_SEM_DESTROYED;
406
407 ASMNopPause();
408 u64State = ASMAtomicReadU64(&pThis->u64State);
409 u64OldState = u64State;
410 }
411
412 /* got it! */
413 Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT));
414 return VINF_SUCCESS;
415
416}
417
418
419#undef RTSemRWRequestRead
420RTDECL(int) RTSemRWRequestRead(RTSEMRW hRWSem, RTMSINTERVAL cMillies)
421{
422#ifndef RTSEMRW_STRICT
423 return rtSemRWRequestRead(hRWSem, cMillies, false, NULL);
424#else
425 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
426 return rtSemRWRequestRead(hRWSem, cMillies, false, &SrcPos);
427#endif
428}
429RT_EXPORT_SYMBOL(RTSemRWRequestRead);
430
431
432RTDECL(int) RTSemRWRequestReadDebug(RTSEMRW hRWSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
433{
434 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
435 return rtSemRWRequestRead(hRWSem, cMillies, false, &SrcPos);
436}
437RT_EXPORT_SYMBOL(RTSemRWRequestReadDebug);
438
439
440#undef RTSemRWRequestReadNoResume
441RTDECL(int) RTSemRWRequestReadNoResume(RTSEMRW hRWSem, RTMSINTERVAL cMillies)
442{
443#ifndef RTSEMRW_STRICT
444 return rtSemRWRequestRead(hRWSem, cMillies, true, NULL);
445#else
446 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
447 return rtSemRWRequestRead(hRWSem, cMillies, true, &SrcPos);
448#endif
449}
450RT_EXPORT_SYMBOL(RTSemRWRequestReadNoResume);
451
452
453RTDECL(int) RTSemRWRequestReadNoResumeDebug(RTSEMRW hRWSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
454{
455 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
456 return rtSemRWRequestRead(hRWSem, cMillies, true, &SrcPos);
457}
458RT_EXPORT_SYMBOL(RTSemRWRequestReadNoResumeDebug);
459
460
461
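/* Releasing a read lock: while other readers remain (or no writer is
 * queued), only the reader count is dropped.  The last reader to leave with
 * a writer waiting flips the direction to write and signals hEvtWrite.  A
 * read recursion taken by the current writer just decrements cWriterReads. */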
462RTDECL(int) RTSemRWReleaseRead(RTSEMRW hRWSem)
463{
464 /*
465 * Validate handle.
466 */
467 RTSEMRWINTERNAL *pThis = hRWSem;
468 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
469 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
470
471 /*
472 * Check the direction and take action accordingly.
473 */
474 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
475 uint64_t u64OldState = u64State;
476 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
477 {
478#ifdef RTSEMRW_STRICT
479 int rc9 = RTLockValidatorRecSharedCheckAndRelease(&pThis->ValidatorRead, NIL_RTTHREAD);
480 if (RT_FAILURE(rc9))
481 return rc9;
482#endif
483 for (;;)
484 {
485 uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
486 AssertReturn(c > 0, VERR_NOT_OWNER);
487 c--;
488
489 if ( c > 0
490 || (u64State & RTSEMRW_CNT_WR_MASK) == 0)
491 {
492 /* Don't change the direction. */
493 u64State &= ~RTSEMRW_CNT_RD_MASK;
494 u64State |= c << RTSEMRW_CNT_RD_SHIFT;
495 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
496 break;
497 }
498 else
499 {
500 /* Reverse the direction and signal the waiting writer. */
501 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_DIR_MASK);
502 u64State |= RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT;
503 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
504 {
505 int rc = RTSemEventSignal(pThis->hEvtWrite);
506 AssertRC(rc);
507 break;
508 }
509 }
510
511 ASMNopPause();
512 u64State = ASMAtomicReadU64(&pThis->u64State);
513 u64OldState = u64State;
514 }
515 }
516 else
517 {
518 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
519 RTNATIVETHREAD hNativeWriter;
520 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
521 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
522 AssertReturn(pThis->cWriterReads > 0, VERR_NOT_OWNER);
523#ifdef RTSEMRW_STRICT
524 int rc = RTLockValidatorRecExclUnwindMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core);
525 if (RT_FAILURE(rc))
526 return rc;
527#endif
528 ASMAtomicDecU32(&pThis->cWriterReads);
529 }
530
531 return VINF_SUCCESS;
532}
533RT_EXPORT_SYMBOL(RTSemRWReleaseRead);
534
535
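/**
 * Worker for the RTSemRWRequestWrite* variants below.
 *
 * Write requests are two-staged: first the writer count in u64State is
 * raised (or the direction switched if the semaphore is idle), then the
 * actual write ownership is claimed by CASing hNativeWriter from nil to the
 * caller's native thread handle.  Writers that lose that race block on
 * hEvtWrite and retry when the current owner releases.
 */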
536DECL_FORCE_INLINE(int) rtSemRWRequestWrite(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
537{
538 /*
539 * Validate input.
540 */
541 RTSEMRWINTERNAL *pThis = hRWSem;
542 if (pThis == NIL_RTSEMRW)
543 return VINF_SUCCESS;
544 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
545 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
546
547#ifdef RTSEMRW_STRICT
548 RTTHREAD hThreadSelf = NIL_RTTHREAD;
549 if (cMillies)
550 {
551 hThreadSelf = RTThreadSelfAutoAdopt();
552 int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
553 if (RT_FAILURE(rc9))
554 return rc9;
555 }
556#endif
557
558 /*
559 * Check if we're already the owner and just recursing.
560 */
561 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
562 RTNATIVETHREAD hNativeWriter;
563 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
564 if (hNativeSelf == hNativeWriter)
565 {
566 Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT));
567#ifdef RTSEMRW_STRICT
568 int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorWrite, pSrcPos);
569 if (RT_FAILURE(rc9))
570 return rc9;
571#endif
572 Assert(pThis->cWriteRecursions < UINT32_MAX / 2);
573 ASMAtomicIncU32(&pThis->cWriteRecursions);
574 return VINF_SUCCESS;
575 }
576
577 /*
578 * Get cracking.
579 */
580 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
581 uint64_t u64OldState = u64State;
582
583 for (;;)
584 {
585 if ( (u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT)
586 || (u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) != 0)
587 {
588 /* It flows in the right direction, try to follow it before it changes. */
589 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
590 c++;
591 Assert(c < RTSEMRW_CNT_MASK / 2);
592 u64State &= ~RTSEMRW_CNT_WR_MASK;
593 u64State |= c << RTSEMRW_CNT_WR_SHIFT;
594 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
595 break;
596 }
597 else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0)
598 {
599 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
600 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
601 u64State |= (UINT64_C(1) << RTSEMRW_CNT_WR_SHIFT) | (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT);
602 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
603 break;
604 }
605 else if (!cMillies)
606 /* Wrong direction and we're not supposed to wait, just return. */
607 return VERR_TIMEOUT;
608 else
609 {
610 /* Add ourselves to the write count and break out to do the wait. */
611 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
612 c++;
613 Assert(c < RTSEMRW_CNT_MASK / 2);
614 u64State &= ~RTSEMRW_CNT_WR_MASK;
615 u64State |= c << RTSEMRW_CNT_WR_SHIFT;
616 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
617 break;
618 }
619
620 if (pThis->u32Magic != RTSEMRW_MAGIC)
621 return VERR_SEM_DESTROYED;
622
623 ASMNopPause();
624 u64State = ASMAtomicReadU64(&pThis->u64State);
625 u64OldState = u64State;
626 }
627
628 /*
629 * If we're in write mode now try grab the ownership. Play fair if there
630 * are threads already waiting.
631 */
632 bool fDone = (u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT)
633 && ( ((u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT) == 1
634 || cMillies == 0);
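/* Ownership is claimed separately from the state update: hNativeWriter is
 * only CASed from nil to our thread when we are the sole queued writer (or
 * when we would not wait anyway), which keeps the hand-over fair towards
 * writers already blocked on hEvtWrite. */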
635 if (fDone)
636 ASMAtomicCmpXchgHandle(&pThis->hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
637 if (!fDone)
638 {
639 /*
640 * Wait for our turn.
641 */
642 for (uint32_t iLoop = 0; ; iLoop++)
643 {
644 int rc;
645#ifdef RTSEMRW_STRICT
646 if (cMillies)
647 {
648 if (hThreadSelf == NIL_RTTHREAD)
649 hThreadSelf = RTThreadSelfAutoAdopt();
650 rc = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true,
651 cMillies, RTTHREADSTATE_RW_WRITE, false);
652 }
653 else
654 rc = VINF_SUCCESS;
655 if (RT_SUCCESS(rc))
656#else
657 RTTHREAD hThreadSelf = RTThreadSelf();
658 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
659#endif
660 {
661 if (fInterruptible)
662 rc = RTSemEventWaitNoResume(pThis->hEvtWrite, cMillies);
663 else
664 rc = RTSemEventWait(pThis->hEvtWrite, cMillies);
665 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
666 if (pThis->u32Magic != RTSEMRW_MAGIC)
667 return VERR_SEM_DESTROYED;
668 }
669 if (RT_FAILURE(rc))
670 {
671 /* Decrement the counts and return the error. */
672 for (;;)
673 {
674 u64OldState = u64State = ASMAtomicReadU64(&pThis->u64State);
675 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT; Assert(c > 0);
676 c--;
677 u64State &= ~RTSEMRW_CNT_WR_MASK;
678 u64State |= c << RTSEMRW_CNT_WR_SHIFT;
679 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
680 break;
681 }
682 return rc;
683 }
684
685 u64State = ASMAtomicReadU64(&pThis->u64State);
686 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT))
687 {
688 ASMAtomicCmpXchgHandle(&pThis->hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
689 if (fDone)
690 break;
691 }
692 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
693 }
694 }
695
696 /*
697 * Got it!
698 */
699 Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT));
700 ASMAtomicWriteU32(&pThis->cWriteRecursions, 1);
701 Assert(pThis->cWriterReads == 0);
702#ifdef RTSEMRW_STRICT
703 RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true);
704#endif
705
706 return VINF_SUCCESS;
707}
708
709
710#undef RTSemRWRequestWrite
711RTDECL(int) RTSemRWRequestWrite(RTSEMRW hRWSem, RTMSINTERVAL cMillies)
712{
713#ifndef RTSEMRW_STRICT
714 return rtSemRWRequestWrite(hRWSem, cMillies, false, NULL);
715#else
716 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
717 return rtSemRWRequestWrite(hRWSem, cMillies, false, &SrcPos);
718#endif
719}
720RT_EXPORT_SYMBOL(RTSemRWRequestWrite);
721
722
723RTDECL(int) RTSemRWRequestWriteDebug(RTSEMRW hRWSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
724{
725 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
726 return rtSemRWRequestWrite(hRWSem, cMillies, false, &SrcPos);
727}
728RT_EXPORT_SYMBOL(RTSemRWRequestWriteDebug);
729
730
731#undef RTSemRWRequestWriteNoResume
732RTDECL(int) RTSemRWRequestWriteNoResume(RTSEMRW hRWSem, RTMSINTERVAL cMillies)
733{
734#ifndef RTSEMRW_STRICT
735 return rtSemRWRequestWrite(hRWSem, cMillies, true, NULL);
736#else
737 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
738 return rtSemRWRequestWrite(hRWSem, cMillies, true, &SrcPos);
739#endif
740}
741RT_EXPORT_SYMBOL(RTSemRWRequestWriteNoResume);
742
743
744RTDECL(int) RTSemRWRequestWriteNoResumeDebug(RTSEMRW hRWSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
745{
746 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
747 return rtSemRWRequestWrite(hRWSem, cMillies, true, &SrcPos);
748}
749RT_EXPORT_SYMBOL(RTSemRWRequestWriteNoResumeDebug);
750
751
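/* Releasing the write lock: recursions simply drop cWriteRecursions.  The
 * final release clears hNativeWriter and then either hands the semaphore to
 * the next queued writer (hEvtWrite) or, if only readers are waiting, flips
 * the direction to read, sets fNeedReset and wakes them all via hEvtRead. */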
752RTDECL(int) RTSemRWReleaseWrite(RTSEMRW hRWSem)
753{
754
755 /*
756 * Validate handle.
757 */
758 struct RTSEMRWINTERNAL *pThis = hRWSem;
759 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
760 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
761
762 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
763 RTNATIVETHREAD hNativeWriter;
764 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
765 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
766
767 /*
768 * Unwind a recursion.
769 */
770 if (pThis->cWriteRecursions == 1)
771 {
772 AssertReturn(pThis->cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
773#ifdef RTSEMRW_STRICT
774 int rc9 = RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorWrite, true);
775 if (RT_FAILURE(rc9))
776 return rc9;
777#endif
778 /*
779 * Update the state.
780 */
781 ASMAtomicWriteU32(&pThis->cWriteRecursions, 0);
782 /** @todo validate order. */
783 ASMAtomicWriteHandle(&pThis->hNativeWriter, NIL_RTNATIVETHREAD);
784
785 for (;;)
786 {
787 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
788 uint64_t u64OldState = u64State;
789
790 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
791 Assert(c > 0);
792 c--;
793
794 if ( c > 0
795 || (u64State & RTSEMRW_CNT_RD_MASK) == 0)
796 {
797 /* Don't change the direction, wake up the next waiting writer if any. */
798 u64State &= ~RTSEMRW_CNT_WR_MASK;
799 u64State |= c << RTSEMRW_CNT_WR_SHIFT;
800 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
801 {
802 if (c > 0)
803 {
804 int rc = RTSemEventSignal(pThis->hEvtWrite);
805 AssertRC(rc);
806 }
807 break;
808 }
809 }
810 else
811 {
812 /* Reverse the direction and signal the reader threads. */
813 u64State &= ~(RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
814 u64State |= RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT;
815 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
816 {
817 Assert(!pThis->fNeedReset);
818 ASMAtomicWriteBool(&pThis->fNeedReset, true);
819 int rc = RTSemEventMultiSignal(pThis->hEvtRead);
820 AssertRC(rc);
821 break;
822 }
823 }
824
825 ASMNopPause();
826 if (pThis->u32Magic != RTSEMRW_MAGIC)
827 return VERR_SEM_DESTROYED;
828 }
829 }
830 else
831 {
832 Assert(pThis->cWriteRecursions != 0);
833#ifdef RTSEMRW_STRICT
834 int rc9 = RTLockValidatorRecExclUnwind(&pThis->ValidatorWrite);
835 if (RT_FAILURE(rc9))
836 return rc9;
837#endif
838 ASMAtomicDecU32(&pThis->cWriteRecursions);
839 }
840
841 return VINF_SUCCESS;
842}
843RT_EXPORT_SYMBOL(RTSemRWReleaseWrite);
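/* Recursion sketch (illustrative only): a thread holding the write lock may
 * re-enter it and may also take read locks; releases must mirror the
 * acquisitions, with all read recursions released before the final write
 * release:
 *
 *   RTSemRWRequestWrite(hRWSem, RT_INDEFINITE_WAIT);
 *   RTSemRWRequestWrite(hRWSem, RT_INDEFINITE_WAIT);   // write recursion
 *   RTSemRWRequestRead(hRWSem, RT_INDEFINITE_WAIT);    // read recursion by the writer
 *   RTSemRWReleaseRead(hRWSem);
 *   RTSemRWReleaseWrite(hRWSem);
 *   RTSemRWReleaseWrite(hRWSem);                       // final release, lock handed over
 */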
844
845
846RTDECL(bool) RTSemRWIsWriteOwner(RTSEMRW hRWSem)
847{
848 /*
849 * Validate handle.
850 */
851 struct RTSEMRWINTERNAL *pThis = hRWSem;
852 AssertPtrReturn(pThis, false);
853 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, false);
854
855 /*
856 * Check ownership.
857 */
858 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
859 RTNATIVETHREAD hNativeWriter;
860 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
861 return hNativeWriter == hNativeSelf;
862}
863RT_EXPORT_SYMBOL(RTSemRWIsWriteOwner);
864
865
866RTDECL(uint32_t) RTSemRWGetWriteRecursion(RTSEMRW hRWSem)
867{
868 /*
869 * Validate handle.
870 */
871 struct RTSEMRWINTERNAL *pThis = hRWSem;
872 AssertPtrReturn(pThis, 0);
873 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, 0);
874
875 /*
876 * Return the requested data.
877 */
878 return pThis->cWriteRecursions;
879}
880RT_EXPORT_SYMBOL(RTSemRWGetWriteRecursion);
881
882
883RTDECL(uint32_t) RTSemRWGetWriterReadRecursion(RTSEMRW hRWSem)
884{
885 /*
886 * Validate handle.
887 */
888 struct RTSEMRWINTERNAL *pThis = hRWSem;
889 AssertPtrReturn(pThis, 0);
890 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, 0);
891
892 /*
893 * Return the requested data.
894 */
895 return pThis->cWriterReads;
896}
897RT_EXPORT_SYMBOL(RTSemRWGetWriterReadRecursion);
898
899
900RTDECL(uint32_t) RTSemRWGetReadCount(RTSEMRW hRWSem)
901{
902 /*
903 * Validate input.
904 */
905 struct RTSEMRWINTERNAL *pThis = hRWSem;
906 AssertPtrReturn(pThis, 0);
907 AssertMsgReturn(pThis->u32Magic == RTSEMRW_MAGIC,
908 ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
909 0);
910
911 /*
912 * Return the requested data.
913 */
914 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
915 if ((u64State & RTSEMRW_DIR_MASK) != (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
916 return 0;
917 return (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
918}
919RT_EXPORT_SYMBOL(RTSemRWGetReadCount);
920