VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/posix/fileaio-posix.cpp@ 19348

Last change on this file since 19348 was 19348, checked in by vboxsync, 16 years ago

Runtime/Aio: Properties

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 20.7 KB
Line 
1/* $Id: fileaio-posix.cpp 19348 2009-05-05 00:40:33Z vboxsync $ */
2/** @file
3 * IPRT - File async I/O, native implementation for POSIX compliant host platforms.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#define LOG_GROUP RTLOGGROUP_DIR
36#include <iprt/asm.h>
37#include <iprt/file.h>
38#include <iprt/mem.h>
39#include <iprt/assert.h>
40#include <iprt/string.h>
41#include <iprt/err.h>
42#include <iprt/log.h>
43#include <iprt/thread.h>
44#include <iprt/semaphore.h>
45#include "internal/fileaio.h"
46
47#include <aio.h>
48#include <errno.h>
49
50#define AIO_MAXIMUM_REQUESTS_PER_CONTEXT 64
51
52/*******************************************************************************
53* Structures and Typedefs *
54*******************************************************************************/
/**
 * Async I/O request state.
 */
typedef struct RTFILEAIOREQINTERNAL
{
    /** The aio control block. FIRST ELEMENT! The POSIX AIO APIs are handed
     *  a pointer to this member, so the struct must be castable to aiocb. */
    struct aiocb                  AioCB;
    /** Next element in the chain (used for the new-request lists). */
    struct RTFILEAIOREQINTERNAL  *pNext;
    /** Flag whether this is a flush request. */
    bool                          fFlush;
    /** Flag indicating if the request was canceled. */
    volatile bool                 fCanceled;
    /** Opaque user data. */
    void                         *pvUser;
    /** Number of bytes actually transferred (valid after completion). */
    size_t                        cbTransfered;
    /** Status code; VERR_FILE_AIO_IN_PROGRESS while the request is pending. */
    int                           Rc;
    /** Completion context we are assigned to, NULL when not submitted. */
    struct RTFILEAIOCTXINTERNAL  *pCtxInt;
    /** Entry in the waiting list (index into RTFILEAIOCTXINTERNAL::apReqs). */
    unsigned                      iWaitingList;
    /** Magic value (RTFILEAIOREQ_MAGIC). */
    uint32_t                      u32Magic;
} RTFILEAIOREQINTERNAL, *PRTFILEAIOREQINTERNAL;
81
/**
 * Async I/O completion context state.
 */
typedef struct RTFILEAIOCTXINTERNAL
{
    /** Current number of requests active on this context. */
    volatile int32_t               cRequests;
    /** Maximum number of requests this context can handle. */
    uint32_t                       cMaxRequests;
    /** The ID of the thread which is currently waiting for requests. */
    volatile RTTHREAD              hThreadWait;
    /** Flag whether the thread was woken up (external RTFileAioCtxWakeup). */
    volatile bool                  fWokenUp;
    /** Flag whether the thread is currently waiting in the syscall. */
    volatile bool                  fWaiting;
    /** Magic value (RTFILEAIOCTX_MAGIC). */
    uint32_t                       u32Magic;
    /** Flag whether the thread was woken up due to a internal event
     *  (new submissions pending in apReqsNewHead). */
    volatile bool                  fWokenUpInternal;
    /** List of new requests which needs to be inserted into apReqs by the
     * waiting thread. Several slots so concurrent submitters rarely have
     * to merge lists. */
    volatile PRTFILEAIOREQINTERNAL apReqsNewHead[5];
    /** Special entry for requests which are canceled. Because only one
     * request can be canceled at a time and the thread canceling the request
     * has to wait we need only one entry. */
    volatile PRTFILEAIOREQINTERNAL pReqToCancel;
    /** Event semaphore the canceling thread is waiting for completion of
     * the operation. */
    RTSEMEVENT                     SemEventCancel;
    /** Number of elements in the waiting list. */
    unsigned                       cReqsWait;
    /** First free slot in the waiting list. */
    unsigned                       iFirstFree;
    /** List of requests we are currently waiting on.
     * Size depends on cMaxRequests; MUST be the last member because the
     * array is over-allocated at context creation. */
    volatile PRTFILEAIOREQINTERNAL apReqs[1];
} RTFILEAIOCTXINTERNAL, *PRTFILEAIOCTXINTERNAL;
119
120/**
121 * Internal worker for waking up the waiting thread.
122 */
123static void rtFileAioCtxWakeup(PRTFILEAIOCTXINTERNAL pCtxInt)
124{
125 /*
126 * Read the thread handle before the status flag.
127 * If we read the handle after the flag we might
128 * end up with an invalid handle because the thread
129 * waiting in RTFileAioCtxWakeup() might get scheduled
130 * before we read the flag and returns.
131 * We can ensure that the handle is valid if fWaiting is true
132 * when reading the handle before the status flag.
133 */
134 RTTHREAD hThread;
135 ASMAtomicReadHandle(&pCtxInt->hThreadWait, &hThread);
136 bool fWaiting = ASMAtomicReadBool(&pCtxInt->fWaiting);
137 if (fWaiting)
138 {
139 /*
140 * If a thread waits the handle must be valid.
141 * It is possible that the thread returns from
142 * aio_suspend() before the signal is send.
143 * This is no problem because we already set fWokenUp
144 * to true which will let the thread return VERR_INTERRUPTED
145 * and the next call to RTFileAioCtxWait() will not
146 * return VERR_INTERRUPTED because signals are not saved
147 * and will simply vanish if the destination thread can't
148 * receive it.
149 */
150 Assert(hThread != NIL_RTTHREAD);
151 RTThreadPoke(hThread);
152 }
153}
154
155/**
156 * Internal worker processing events and inserting new requests into the waiting list.
157 */
158static int rtFileAioCtxProcessEvents(PRTFILEAIOCTXINTERNAL pCtxInt)
159{
160 int rc = VINF_SUCCESS;
161
162 /* Process new requests first. */
163 bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, false);
164 if (fWokenUp)
165 {
166 for (unsigned iSlot = 0; iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead); iSlot++)
167 {
168 PRTFILEAIOREQINTERNAL pReqHead = (PRTFILEAIOREQINTERNAL)ASMAtomicXchgPtr((void* volatile*)&pCtxInt->apReqsNewHead[iSlot],
169 NULL);
170
171 while (pReqHead)
172 {
173 pCtxInt->apReqs[pCtxInt->iFirstFree] = pReqHead;
174 pReqHead->iWaitingList = pCtxInt->iFirstFree;
175 pReqHead = pReqHead->pNext;
176
177 /* Clear pointer to next element just for safety. */
178 pCtxInt->apReqs[pCtxInt->iFirstFree]->pNext = NULL;
179 pCtxInt->iFirstFree++;
180 Assert(pCtxInt->iFirstFree <= pCtxInt->cMaxRequests);
181 }
182 }
183
184 /* Check if a request needs to be canceled. */
185 PRTFILEAIOREQINTERNAL pReqToCancel = (PRTFILEAIOREQINTERNAL)ASMAtomicReadPtr((void* volatile*)&pCtxInt->pReqToCancel);
186 if (pReqToCancel)
187 {
188 /* Put it out of the waiting list. */
189 pCtxInt->apReqs[pReqToCancel->iWaitingList] = pCtxInt->apReqs[--pCtxInt->iFirstFree];
190 pCtxInt->apReqs[pReqToCancel->iWaitingList]->iWaitingList = pReqToCancel->iWaitingList;
191 ASMAtomicDecS32(&pCtxInt->cRequests);
192 RTSemEventSignal(pCtxInt->SemEventCancel);
193 }
194 }
195 else
196 {
197 if (ASMAtomicXchgBool(&pCtxInt->fWokenUp, false))
198 rc = VERR_INTERRUPTED;
199 }
200
201 return rc;
202}
203
204RTR3DECL(int) RTFileAioReqCreate(PRTFILEAIOREQ phReq)
205{
206 AssertPtrReturn(phReq, VERR_INVALID_POINTER);
207
208 PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)RTMemAllocZ(sizeof(RTFILEAIOREQINTERNAL));
209 if (RT_UNLIKELY(!pReqInt))
210 return VERR_NO_MEMORY;
211
212 pReqInt->pCtxInt = NULL;
213 pReqInt->u32Magic = RTFILEAIOREQ_MAGIC;
214
215 *phReq = (RTFILEAIOREQ)pReqInt;
216
217 return VINF_SUCCESS;
218}
219
220
221RTDECL(void) RTFileAioReqDestroy(RTFILEAIOREQ hReq)
222{
223 /*
224 * Validate the handle and ignore nil.
225 */
226 if (hReq == NIL_RTFILEAIOREQ)
227 return;
228 PRTFILEAIOREQINTERNAL pReqInt = hReq;
229 RTFILEAIOREQ_VALID_RETURN_VOID(pReqInt);
230
231 /*
232 * Trash the magic and free it.
233 */
234 ASMAtomicUoWriteU32(&pReqInt->u32Magic, ~RTFILEAIOREQ_MAGIC);
235 RTMemFree(pReqInt);
236}
237
/**
 * Worker setting up the request.
 *
 * @returns IPRT status code.
 * @param   hReq                The request handle to prepare.
 * @param   hFile               The file to transfer from/to.
 * @param   uTransferDirection  LIO_READ or LIO_WRITE.
 * @param   off                 The file offset to start at (must be >= 0).
 * @param   pvBuf               The buffer to read into / write from.
 * @param   cbTransfer          Number of bytes to transfer (must be > 0).
 * @param   pvUser              Opaque user data attached to the request.
 */
DECLINLINE(int) rtFileAioReqPrepareTransfer(RTFILEAIOREQ hReq, RTFILE hFile,
                                            unsigned uTransferDirection,
                                            RTFOFF off, void *pvBuf, size_t cbTransfer,
                                            void *pvUser)
{
    /*
     * Validate the input.
     */
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    Assert(hFile != NIL_RTFILE);
    AssertPtr(pvBuf);
    Assert(off >= 0);
    Assert(cbTransfer > 0);

    /* Start from a clean control block so nothing lingers from a previous
     * use of the request, then fill in the transfer parameters. */
    memset(&pReqInt->AioCB, 0, sizeof(struct aiocb));
    pReqInt->AioCB.aio_lio_opcode = uTransferDirection;
    pReqInt->AioCB.aio_fildes = (int)hFile;
    pReqInt->AioCB.aio_offset = off;
    pReqInt->AioCB.aio_nbytes = cbTransfer;
    pReqInt->AioCB.aio_buf = pvBuf;
    pReqInt->pvUser = pvUser;
    pReqInt->pCtxInt = NULL;
    /* In-progress until harvested by RTFileAioCtxWait(). */
    pReqInt->Rc = VERR_FILE_AIO_IN_PROGRESS;

    return VINF_SUCCESS;
}
268
269
/** Prepares @a hReq as an asynchronous read of @a cbRead bytes from
 *  @a hFile at offset @a off into @a pvBuf. */
RTDECL(int) RTFileAioReqPrepareRead(RTFILEAIOREQ hReq, RTFILE hFile, RTFOFF off,
                                    void *pvBuf, size_t cbRead, void *pvUser)
{
    return rtFileAioReqPrepareTransfer(hReq, hFile, LIO_READ,
                                       off, pvBuf, cbRead, pvUser);
}
276
277
/** Prepares @a hReq as an asynchronous write of @a cbWrite bytes from
 *  @a pvBuf to @a hFile at offset @a off. */
RTDECL(int) RTFileAioReqPrepareWrite(RTFILEAIOREQ hReq, RTFILE hFile, RTFOFF off,
                                     void *pvBuf, size_t cbWrite, void *pvUser)
{
    return rtFileAioReqPrepareTransfer(hReq, hFile, LIO_WRITE,
                                       off, pvBuf, cbWrite, pvUser);
}
284
285
286RTDECL(int) RTFileAioReqPrepareFlush(RTFILEAIOREQ hReq, RTFILE hFile, void *pvUser)
287{
288 PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)hReq;
289
290 RTFILEAIOREQ_VALID_RETURN(pReqInt);
291 Assert(hFile != NIL_RTFILE);
292
293 pReqInt->fFlush = true;
294 pReqInt->AioCB.aio_fildes = (int)hFile;
295 pReqInt->pvUser = pvUser;
296
297 return VINF_SUCCESS;
298}
299
300
301RTDECL(void *) RTFileAioReqGetUser(RTFILEAIOREQ hReq)
302{
303 PRTFILEAIOREQINTERNAL pReqInt = hReq;
304 RTFILEAIOREQ_VALID_RETURN_RC(pReqInt, NULL);
305
306 return pReqInt->pvUser;
307}
308
309
/**
 * Cancels a pending request.
 *
 * @returns VINF_SUCCESS if canceled, VERR_FILE_AIO_COMPLETED if it already
 *          finished, VERR_FILE_AIO_IN_PROGRESS if it could not be canceled,
 *          or an errno-derived status on other failures.
 * @param   hReq        The request to cancel.
 */
RTDECL(int) RTFileAioReqCancel(RTFILEAIOREQ hReq)
{
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN(pReqInt);

    /* Mark the request canceled before attempting the cancellation so the
     * completion path can tell canceled requests apart. */
    ASMAtomicXchgBool(&pReqInt->fCanceled, true);

    int rcPosix = aio_cancel(pReqInt->AioCB.aio_fildes, &pReqInt->AioCB);

    if (rcPosix == AIO_CANCELED)
    {
        PRTFILEAIOCTXINTERNAL pCtxInt = pReqInt->pCtxInt;
        /*
         * Notify the waiting thread that the request was canceled so it can
         * remove it from its waiting list.
         */
        AssertMsg(VALID_PTR(pCtxInt),
                  ("Invalid state. Request was canceled but wasn't submitted\n"));

        /* Only one cancellation may be in flight at a time - the context
         * has a single pReqToCancel slot. */
        Assert(!pCtxInt->pReqToCancel);
        ASMAtomicWritePtr((void* volatile*)&pCtxInt->pReqToCancel, pReqInt);
        rtFileAioCtxWakeup(pCtxInt);

        /* Wait for acknowledge. */
        int rc = RTSemEventWait(pCtxInt->SemEventCancel, RT_INDEFINITE_WAIT);
        AssertRC(rc);

        ASMAtomicWritePtr((void* volatile*)&pCtxInt->pReqToCancel, NULL);
        return VINF_SUCCESS;
    }
    else if (rcPosix == AIO_ALLDONE)
        return VERR_FILE_AIO_COMPLETED;
    else if (rcPosix == AIO_NOTCANCELED)
        return VERR_FILE_AIO_IN_PROGRESS;
    else
        return RTErrConvertFromErrno(errno);
}
346
347
348RTDECL(int) RTFileAioReqGetRC(RTFILEAIOREQ hReq, size_t *pcbTransfered)
349{
350 PRTFILEAIOREQINTERNAL pReqInt = hReq;
351 RTFILEAIOREQ_VALID_RETURN(pReqInt);
352 AssertPtrNull(pcbTransfered);
353
354 if ( (pReqInt->Rc != VERR_FILE_AIO_IN_PROGRESS)
355 && (pcbTransfered))
356 *pcbTransfered = pReqInt->cbTransfered;
357
358 return pReqInt->Rc;
359}
360
361
362RTDECL(int) RTFileAioCtxCreate(PRTFILEAIOCTX phAioCtx, uint32_t cAioReqsMax)
363{
364 PRTFILEAIOCTXINTERNAL pCtxInt;
365 AssertPtrReturn(phAioCtx, VERR_INVALID_POINTER);
366
367 pCtxInt = (PRTFILEAIOCTXINTERNAL)RTMemAllocZ( sizeof(RTFILEAIOCTXINTERNAL)
368 + cAioReqsMax * sizeof(PRTFILEAIOREQINTERNAL));
369 if (RT_UNLIKELY(!pCtxInt))
370 return VERR_NO_MEMORY;
371
372 /* Create event semaphore. */
373 int rc = RTSemEventCreate(&pCtxInt->SemEventCancel);
374 if (RT_FAILURE(rc))
375 {
376 RTMemFree(pCtxInt);
377 return rc;
378 }
379
380 pCtxInt->u32Magic = RTFILEAIOCTX_MAGIC;
381 pCtxInt->cMaxRequests = cAioReqsMax;
382 *phAioCtx = (RTFILEAIOCTX)pCtxInt;
383
384 return VINF_SUCCESS;
385}
386
387
388RTDECL(int) RTFileAioCtxDestroy(RTFILEAIOCTX hAioCtx)
389{
390 PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
391
392 AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
393
394 if (RT_UNLIKELY(pCtxInt->cRequests))
395 return VERR_FILE_AIO_BUSY;
396
397 RTSemEventDestroy(pCtxInt->SemEventCancel);
398 RTMemFree(pCtxInt);
399
400 return VINF_SUCCESS;
401}
402
403
404RTDECL(uint32_t) RTFileAioCtxGetMaxReqCount(RTFILEAIOCTX hAioCtx)
405{
406 PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
407
408 if (hAioCtx == NIL_RTFILEAIOCTX)
409 return RTFILEAIO_UNLIMITED_REQS;
410 else
411 return pCtxInt->cMaxRequests;
412}
413
/** No-op on this backend: POSIX AIO does not require associating files
 *  with a completion context beforehand. */
RTDECL(int) RTFileAioCtxAssociateWithFile(RTFILEAIOCTX hAioCtx, RTFILE hFile)
{
    return VINF_SUCCESS;
}
418
419RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ phReqs, size_t cReqs, size_t *pcReqs)
420{
421 PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
422
423 /* Parameter checks */
424 AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
425 AssertReturn(cReqs != 0, VERR_INVALID_POINTER);
426 AssertPtrReturn(phReqs, VERR_INVALID_PARAMETER);
427
428 /* Check that we don't exceed the limit */
429 if (ASMAtomicUoReadS32(&pCtxInt->cRequests) + cReqs > pCtxInt->cMaxRequests)
430 return VERR_FILE_AIO_LIMIT_EXCEEDED;
431
432 PRTFILEAIOREQINTERNAL pHead = NULL;
433 for (size_t i = 0; i < cReqs; i++)
434 {
435 PRTFILEAIOREQINTERNAL pReqInt = phReqs[i];
436
437 pReqInt->pCtxInt = pCtxInt;
438 /* Link them together. */
439 pReqInt->pNext = pHead;
440 pHead = pReqInt;
441 }
442
443 int rcPosix = lio_listio(LIO_NOWAIT, (struct aiocb **)phReqs, cReqs, NULL);
444 if (RT_UNLIKELY(rcPosix < 0))
445 return RTErrConvertFromErrno(errno);
446
447 ASMAtomicAddS32(&pCtxInt->cRequests, cReqs);
448 *pcReqs = cReqs;
449
450 /*
451 * Forward them to the thread waiting for requests.
452 * We search for a free slot first and if we don't find one
453 * we will grab the first one and append our list to the existing entries.
454 */
455 unsigned iSlot = 0;
456 while ( (iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead))
457 && !ASMAtomicCmpXchgPtr((void * volatile *)&pCtxInt->apReqsNewHead[iSlot], pHead, NULL))
458 iSlot++;
459
460 if (iSlot == RT_ELEMENTS(pCtxInt->apReqsNewHead))
461 {
462 /* Nothing found. */
463 PRTFILEAIOREQINTERNAL pOldHead = (PRTFILEAIOREQINTERNAL)ASMAtomicXchgPtr((void * volatile *)&pCtxInt->apReqsNewHead[0],
464 NULL);
465
466 /* Find the end of the current head and link the old list to the current. */
467 PRTFILEAIOREQINTERNAL pTail = pHead;
468 while (pTail->pNext)
469 pTail = pTail->pNext;
470
471 pTail->pNext = pOldHead;
472
473 ASMAtomicXchgPtr((void * volatile *)&pCtxInt->apReqsNewHead[0], pHead);
474 }
475
476 /* Set the internal wakeup flag and wakeup the thread if possible. */
477 bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, true);
478 if (!fWokenUp)
479 rtFileAioCtxWakeup(pCtxInt);
480
481 return VINF_SUCCESS;
482}
483
484
485RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, unsigned cMillisTimeout,
486 PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
487{
488 int rc = VINF_SUCCESS;
489 int cRequestsCompleted = 0;
490 PRTFILEAIOCTXINTERNAL pCtxInt = (PRTFILEAIOCTXINTERNAL)hAioCtx;
491 struct timespec Timeout;
492 struct timespec *pTimeout = NULL;
493 uint64_t StartNanoTS = 0;
494
495 /* Check parameters. */
496 AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
497 AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
498 AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
499 AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
500 AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);
501
502 if (RT_UNLIKELY(ASMAtomicReadS32(&pCtxInt->cRequests) == 0))
503 return VERR_FILE_AIO_NO_REQUEST;
504
505 if (cMillisTimeout != RT_INDEFINITE_WAIT)
506 {
507 Timeout.tv_sec = cMillisTimeout / 1000;
508 Timeout.tv_nsec = (cMillisTimeout % 1000) * 1000000;
509 pTimeout = &Timeout;
510 StartNanoTS = RTTimeNanoTS();
511 }
512
513 /* Wait for at least one. */
514 if (!cMinReqs)
515 cMinReqs = 1;
516
517 /* For the wakeup call. */
518 Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
519 ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());
520
521 /* Update the waiting list once before we enter the loop. */
522 rc = rtFileAioCtxProcessEvents(pCtxInt);
523
524 while ( cMinReqs
525 && RT_SUCCESS_NP(rc))
526 {
527 ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
528 int rcPosix = aio_suspend((const struct aiocb * const *)pCtxInt->apReqs,
529 pCtxInt->iFirstFree, pTimeout);
530 ASMAtomicXchgBool(&pCtxInt->fWaiting, false);
531 if (rcPosix < 0)
532 {
533 /* Check that this is an external wakeup event. */
534 if (errno == EINTR)
535 rc = rtFileAioCtxProcessEvents(pCtxInt);
536 else
537 rc = RTErrConvertFromErrno(errno);
538 }
539 else
540 {
541 /* Requests finished. */
542 unsigned iReqCurr = 0;
543 int cDone = 0;
544
545 /* Remove completed requests from the waiting list. */
546 while (iReqCurr < pCtxInt->iFirstFree)
547 {
548 PRTFILEAIOREQINTERNAL pReq = pCtxInt->apReqs[iReqCurr];
549 int rcReq = aio_error(&pReq->AioCB);
550
551 if (rcReq != EINPROGRESS)
552 {
553 /* Completed store the return code. */
554 if (rcReq == 0)
555 {
556 pReq->Rc = VINF_SUCCESS;
557 /* Call aio_return() to free ressources. */
558 pReq->cbTransfered = aio_return(&pReq->AioCB);
559 }
560 else
561 pReq->Rc = RTErrConvertFromErrno(rcReq);
562
563 cDone++;
564
565 /*
566 * Move the last entry into the current position to avoid holes
567 * but only if it is not the last element already.
568 */
569 if (pReq->iWaitingList < pCtxInt->iFirstFree - 1)
570 {
571 pCtxInt->apReqs[pReq->iWaitingList] = pCtxInt->apReqs[--pCtxInt->iFirstFree];
572 pCtxInt->apReqs[pReq->iWaitingList]->iWaitingList = pReq->iWaitingList;
573 pCtxInt->apReqs[pCtxInt->iFirstFree] = NULL;
574 }
575 else
576 pCtxInt->iFirstFree--;
577
578 /* Put the request into the completed list. */
579 pahReqs[cRequestsCompleted++] = pReq;
580 }
581 else
582 iReqCurr++;
583 }
584
585 cReqs -= cDone;
586 cMinReqs -= cDone;
587 ASMAtomicSubS32(&pCtxInt->cRequests, cDone);
588
589 if ((cMillisTimeout != RT_INDEFINITE_WAIT) && (cMinReqs > 0))
590 {
591 uint64_t TimeDiff;
592
593 /* Recalculate the timeout. */
594 TimeDiff = RTTimeSystemNanoTS() - StartNanoTS;
595 Timeout.tv_sec = Timeout.tv_sec - (TimeDiff / 1000000);
596 Timeout.tv_nsec = Timeout.tv_nsec - (TimeDiff % 1000000);
597 }
598
599 /* Check for new elements. */
600 rc = rtFileAioCtxProcessEvents(pCtxInt);
601 }
602 }
603
604 *pcReqs = cRequestsCompleted;
605 Assert(pCtxInt->hThreadWait == RTThreadSelf());
606 ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);
607
608 return rc;
609}
610
611
612RTDECL(int) RTFileAioCtxWakeup(RTFILEAIOCTX hAioCtx)
613{
614 PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
615 RTFILEAIOCTX_VALID_RETURN(pCtxInt);
616
617 /** @todo r=bird: Define the protocol for how to resume work after calling
618 * this function. */
619
620 bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUp, true);
621 if (!fWokenUp)
622 rtFileAioCtxWakeup(pCtxInt);
623
624 return VINF_SUCCESS;
625}
626
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette