VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/posix/fileaio-posix.cpp@ 29129

Last change on this file since 29129 was 29129, checked in by vboxsync, 15 years ago

Runtime/Aio: Fix flush API. The fFlush flag needs to be reset

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 34.4 KB
Line 
1/* $Id: fileaio-posix.cpp 29129 2010-05-06 10:40:30Z vboxsync $ */
2/** @file
3 * IPRT - File async I/O, native implementation for POSIX compliant host platforms.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define LOG_GROUP RTLOGGROUP_DIR
32#include <iprt/asm.h>
33#include <iprt/file.h>
34#include <iprt/mem.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/err.h>
38#include <iprt/log.h>
39#include <iprt/thread.h>
40#include <iprt/semaphore.h>
41#include "internal/fileaio.h"
42
43#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
44# include <sys/types.h>
45# include <sys/sysctl.h> /* for sysctlbyname */
46#endif
47#if defined(RT_OS_FREEBSD)
48# include <fcntl.h> /* O_SYNC */
49#endif
50#include <aio.h>
51#include <errno.h>
52#include <time.h>
53
54/*
55 * Linux does not define this value.
56 * Just define it with really big
57 * value.
58 */
59#ifndef AIO_LISTIO_MAX
60# define AIO_LISTIO_MAX UINT32_MAX
61#endif
62
63#if 0 /* Only used for debugging */
64# undef AIO_LISTIO_MAX
65# define AIO_LISTIO_MAX 16
66#endif
67
68/** Invalid entry in the waiting array. */
69#define RTFILEAIOCTX_WAIT_ENTRY_INVALID (~0U)
70
71/*******************************************************************************
72* Structures and Typedefs *
73*******************************************************************************/
74/**
75 * Async I/O request state.
76 */
77typedef struct RTFILEAIOREQINTERNAL
78{
79 /** The aio control block. FIRST ELEMENT! */
80 struct aiocb AioCB;
81 /** Next element in the chain. */
82 struct RTFILEAIOREQINTERNAL *pNext;
83 /** Previous element in the chain. */
84 struct RTFILEAIOREQINTERNAL *pPrev;
85 /** Current state the request is in. */
86 RTFILEAIOREQSTATE enmState;
87 /** Flag whether this is a flush request. */
88 bool fFlush;
89 /** Flag indicating if the request was canceled. */
90 volatile bool fCanceled;
91 /** Opaque user data. */
92 void *pvUser;
93 /** Number of bytes actually transfered. */
94 size_t cbTransfered;
95 /** Status code. */
96 int Rc;
97 /** Completion context we are assigned to. */
98 struct RTFILEAIOCTXINTERNAL *pCtxInt;
99 /** Entry in the waiting list the request is in. */
100 unsigned iWaitingList;
101 /** Magic value (RTFILEAIOREQ_MAGIC). */
102 uint32_t u32Magic;
103} RTFILEAIOREQINTERNAL, *PRTFILEAIOREQINTERNAL;
104
105/**
106 * Async I/O completion context state.
107 */
108typedef struct RTFILEAIOCTXINTERNAL
109{
110 /** Current number of requests active on this context. */
111 volatile int32_t cRequests;
112 /** Maximum number of requests this context can handle. */
113 uint32_t cMaxRequests;
114 /** The ID of the thread which is currently waiting for requests. */
115 volatile RTTHREAD hThreadWait;
116 /** Flag whether the thread was woken up. */
117 volatile bool fWokenUp;
118 /** Flag whether the thread is currently waiting in the syscall. */
119 volatile bool fWaiting;
120 /** Magic value (RTFILEAIOCTX_MAGIC). */
121 uint32_t u32Magic;
122 /** Flag whether the thread was woken up due to a internal event. */
123 volatile bool fWokenUpInternal;
124 /** List of new requests which needs to be inserted into apReqs by the
125 * waiting thread. */
126 volatile PRTFILEAIOREQINTERNAL apReqsNewHead[5];
127 /** Special entry for requests which are canceled. Because only one
128 * request can be canceled at a time and the thread canceling the request
129 * has to wait we need only one entry. */
130 volatile PRTFILEAIOREQINTERNAL pReqToCancel;
131 /** Event semaphore the canceling thread is waiting for completion of
132 * the operation. */
133 RTSEMEVENT SemEventCancel;
134 /** Head of submitted elements waiting to get into the array. */
135 PRTFILEAIOREQINTERNAL pReqsWaitHead;
136 /** Tail of submitted elements waiting to get into the array. */
137 PRTFILEAIOREQINTERNAL pReqsWaitTail;
138 /** Maximum number of elements in the waiting array. */
139 unsigned cReqsWaitMax;
140 /** First free slot in the waiting list. */
141 unsigned iFirstFree;
142 /** List of requests we are currently waiting on.
143 * Size depends on cMaxRequests and AIO_LISTIO_MAX. */
144 volatile PRTFILEAIOREQINTERNAL apReqs[1];
145} RTFILEAIOCTXINTERNAL, *PRTFILEAIOCTXINTERNAL;
146
147/**
148 * Internal worker for waking up the waiting thread.
149 */
150static void rtFileAioCtxWakeup(PRTFILEAIOCTXINTERNAL pCtxInt)
151{
152 /*
153 * Read the thread handle before the status flag.
154 * If we read the handle after the flag we might
155 * end up with an invalid handle because the thread
156 * waiting in RTFileAioCtxWakeup() might get scheduled
157 * before we read the flag and returns.
158 * We can ensure that the handle is valid if fWaiting is true
159 * when reading the handle before the status flag.
160 */
161 RTTHREAD hThread;
162 ASMAtomicReadHandle(&pCtxInt->hThreadWait, &hThread);
163 bool fWaiting = ASMAtomicReadBool(&pCtxInt->fWaiting);
164 if (fWaiting)
165 {
166 /*
167 * If a thread waits the handle must be valid.
168 * It is possible that the thread returns from
169 * aio_suspend() before the signal is send.
170 * This is no problem because we already set fWokenUp
171 * to true which will let the thread return VERR_INTERRUPTED
172 * and the next call to RTFileAioCtxWait() will not
173 * return VERR_INTERRUPTED because signals are not saved
174 * and will simply vanish if the destination thread can't
175 * receive it.
176 */
177 Assert(hThread != NIL_RTTHREAD);
178 RTThreadPoke(hThread);
179 }
180}
181
/**
 * Internal worker processing events and inserting new requests into the waiting list.
 *
 * Called only by the thread owning the context (the one in RTFileAioCtxWait),
 * which is why the non-atomic list manipulation below is safe.
 *
 * @returns VINF_SUCCESS normally, VERR_INTERRUPTED if an external
 *          RTFileAioCtxWakeup() was the reason we were woken up.
 * @param   pCtxInt     The context to process events for.
 */
static int rtFileAioCtxProcessEvents(PRTFILEAIOCTXINTERNAL pCtxInt)
{
    int rc = VINF_SUCCESS;

    /* Process new requests first. */
    bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, false);
    if (fWokenUp)
    {
        /* Drain every submission slot; submitters grab whichever slot is free. */
        for (unsigned iSlot = 0; iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead); iSlot++)
        {
            PRTFILEAIOREQINTERNAL pReqHead = (PRTFILEAIOREQINTERNAL)ASMAtomicXchgPtr((void* volatile*)&pCtxInt->apReqsNewHead[iSlot],
                                                                                     NULL);

            /* Fill free slots of the wait array from the handed-over list. */
            while (   (pCtxInt->iFirstFree < pCtxInt->cReqsWaitMax)
                   && pReqHead)
            {
                pCtxInt->apReqs[pCtxInt->iFirstFree] = pReqHead;
                pReqHead->iWaitingList = pCtxInt->iFirstFree;
                pReqHead = pReqHead->pNext;

                /* Clear pointer to next and previous element just for safety. */
                pCtxInt->apReqs[pCtxInt->iFirstFree]->pNext = NULL;
                pCtxInt->apReqs[pCtxInt->iFirstFree]->pPrev = NULL;
                pCtxInt->iFirstFree++;

                Assert(   (pCtxInt->iFirstFree <= pCtxInt->cMaxRequests)
                       && (pCtxInt->iFirstFree <= pCtxInt->cReqsWaitMax));
            }

            /* Append the rest to the wait list (array is full). */
            if (pReqHead)
            {
                if (!pCtxInt->pReqsWaitHead)
                {
                    Assert(!pCtxInt->pReqsWaitTail);
                    pCtxInt->pReqsWaitHead = pReqHead;
                    pReqHead->pPrev = NULL;
                }
                else
                {
                    AssertPtr(pCtxInt->pReqsWaitTail);

                    pCtxInt->pReqsWaitTail->pNext = pReqHead;
                    pReqHead->pPrev = pCtxInt->pReqsWaitTail;
                }

                /* Update tail. */
                while (pReqHead->pNext)
                    pReqHead = pReqHead->pNext;

                pCtxInt->pReqsWaitTail = pReqHead;
                pCtxInt->pReqsWaitTail->pNext = NULL;
            }
        }

        /* Check if a request needs to be canceled. */
        PRTFILEAIOREQINTERNAL pReqToCancel = (PRTFILEAIOREQINTERNAL)ASMAtomicReadPtr((void* volatile*)&pCtxInt->pReqToCancel);
        if (pReqToCancel)
        {
            /* The request can be in the array waiting for completion or still in the list because it is full. */
            if (pReqToCancel->iWaitingList != RTFILEAIOCTX_WAIT_ENTRY_INVALID)
            {
                /* Put it out of the waiting list: move the last array entry into its slot. */
                pCtxInt->apReqs[pReqToCancel->iWaitingList] = pCtxInt->apReqs[--pCtxInt->iFirstFree];
                pCtxInt->apReqs[pReqToCancel->iWaitingList]->iWaitingList = pReqToCancel->iWaitingList;
            }
            else
            {
                /* Unlink from the waiting list. */
                PRTFILEAIOREQINTERNAL pPrev = pReqToCancel->pPrev;
                PRTFILEAIOREQINTERNAL pNext = pReqToCancel->pNext;

                if (pNext)
                    pNext->pPrev = pPrev;
                else
                {
                    /* We canceled the tail. */
                    pCtxInt->pReqsWaitTail = pPrev;
                }

                if (pPrev)
                    pPrev->pNext = pNext;
                else
                {
                    /* We canceled the head. */
                    pCtxInt->pReqsWaitHead = pNext;
                }
            }

            ASMAtomicDecS32(&pCtxInt->cRequests);
            AssertMsg(pCtxInt->cRequests >= 0, ("Canceled request not which is not in this context\n"));
            /* Tell RTFileAioReqCancel() the request has left the context. */
            RTSemEventSignal(pCtxInt->SemEventCancel);
        }
    }
    else
    {
        /* Not an internal event: check for an external wakeup request. */
        if (ASMAtomicXchgBool(&pCtxInt->fWokenUp, false))
            rc = VERR_INTERRUPTED;
    }

    return rc;
}
287
288RTR3DECL(int) RTFileAioGetLimits(PRTFILEAIOLIMITS pAioLimits)
289{
290 int rcBSD = 0;
291 AssertPtrReturn(pAioLimits, VERR_INVALID_POINTER);
292
293#if defined(RT_OS_DARWIN)
294 int cReqsOutstandingMax = 0;
295 size_t cbParameter = sizeof(int);
296
297 rcBSD = sysctlbyname("kern.aioprocmax", /* name */
298 &cReqsOutstandingMax, /* Where to store the old value. */
299 &cbParameter, /* Size of the memory pointed to. */
300 NULL, /* Where the new value is located. */
301 NULL); /* Where the size of the new value is stored. */
302 if (rcBSD == -1)
303 return RTErrConvertFromErrno(errno);
304
305 pAioLimits->cReqsOutstandingMax = cReqsOutstandingMax;
306 pAioLimits->cbBufferAlignment = 0;
307#elif defined(RT_OS_FREEBSD)
308 /*
309 * The AIO API is implemented in a kernel module which is not
310 * loaded by default.
311 * If it is loaded there are additional sysctl parameters.
312 */
313 int cReqsOutstandingMax = 0;
314 size_t cbParameter = sizeof(int);
315
316 rcBSD = sysctlbyname("vfs.aio.max_aio_per_proc", /* name */
317 &cReqsOutstandingMax, /* Where to store the old value. */
318 &cbParameter, /* Size of the memory pointed to. */
319 NULL, /* Where the new value is located. */
320 NULL); /* Where the size of the new value is stored. */
321 if (rcBSD == -1)
322 {
323 /* ENOENT means the value is unknown thus the module is not loaded. */
324 if (errno == ENOENT)
325 return VERR_NOT_SUPPORTED;
326 else
327 return RTErrConvertFromErrno(errno);
328 }
329
330 pAioLimits->cReqsOutstandingMax = cReqsOutstandingMax;
331 pAioLimits->cbBufferAlignment = 0;
332#else
333 pAioLimits->cReqsOutstandingMax = RTFILEAIO_UNLIMITED_REQS;
334 pAioLimits->cbBufferAlignment = 0;
335#endif
336
337 return VINF_SUCCESS;
338}
339
340RTR3DECL(int) RTFileAioReqCreate(PRTFILEAIOREQ phReq)
341{
342 AssertPtrReturn(phReq, VERR_INVALID_POINTER);
343
344 PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)RTMemAllocZ(sizeof(RTFILEAIOREQINTERNAL));
345 if (RT_UNLIKELY(!pReqInt))
346 return VERR_NO_MEMORY;
347
348 pReqInt->pCtxInt = NULL;
349 pReqInt->u32Magic = RTFILEAIOREQ_MAGIC;
350 pReqInt->iWaitingList = RTFILEAIOCTX_WAIT_ENTRY_INVALID;
351 RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
352
353 *phReq = (RTFILEAIOREQ)pReqInt;
354
355 return VINF_SUCCESS;
356}
357
358
359RTDECL(int) RTFileAioReqDestroy(RTFILEAIOREQ hReq)
360{
361 /*
362 * Validate the handle and ignore nil.
363 */
364 if (hReq == NIL_RTFILEAIOREQ)
365 return VINF_SUCCESS;
366 PRTFILEAIOREQINTERNAL pReqInt = hReq;
367 RTFILEAIOREQ_VALID_RETURN(pReqInt);
368 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
369
370 /*
371 * Trash the magic and free it.
372 */
373 ASMAtomicUoWriteU32(&pReqInt->u32Magic, ~RTFILEAIOREQ_MAGIC);
374 RTMemFree(pReqInt);
375 return VINF_SUCCESS;
376}
377
/**
 * Worker setting up the request.
 *
 * Shared by RTFileAioReqPrepareRead/Write; resets the whole aiocb so a
 * recycled request (including an earlier flush request) carries no stale
 * state into the new transfer.
 *
 * @returns IPRT status code.
 * @param   hReq                The request handle to prepare.
 * @param   hFile               The file to transfer from/to.
 * @param   uTransferDirection  LIO_READ or LIO_WRITE.
 * @param   off                 File offset to start at (must be >= 0).
 * @param   pvBuf               Data buffer.
 * @param   cbTransfer          Number of bytes to transfer (must be > 0).
 * @param   pvUser              Opaque user data returned on completion.
 */
DECLINLINE(int) rtFileAioReqPrepareTransfer(RTFILEAIOREQ hReq, RTFILE hFile,
                                            unsigned uTransferDirection,
                                            RTFOFF off, void *pvBuf, size_t cbTransfer,
                                            void *pvUser)
{
    /*
     * Validate the input.
     */
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
    Assert(hFile != NIL_RTFILE);
    AssertPtr(pvBuf);
    Assert(off >= 0);
    Assert(cbTransfer > 0);

    /* Start from a clean control block; also clears any leftover fFlush. */
    memset(&pReqInt->AioCB, 0, sizeof(struct aiocb));
    pReqInt->fFlush = false;
    pReqInt->AioCB.aio_lio_opcode = uTransferDirection;
    pReqInt->AioCB.aio_fildes = (int)hFile;
    pReqInt->AioCB.aio_offset = off;
    pReqInt->AioCB.aio_nbytes = cbTransfer;
    pReqInt->AioCB.aio_buf = pvBuf;
    pReqInt->pvUser = pvUser;
    pReqInt->pCtxInt = NULL;
    /* Rc stays "in progress" until the request really completes. */
    pReqInt->Rc = VERR_FILE_AIO_IN_PROGRESS;
    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);

    return VINF_SUCCESS;
}
411
412
413RTDECL(int) RTFileAioReqPrepareRead(RTFILEAIOREQ hReq, RTFILE hFile, RTFOFF off,
414 void *pvBuf, size_t cbRead, void *pvUser)
415{
416 return rtFileAioReqPrepareTransfer(hReq, hFile, LIO_READ,
417 off, pvBuf, cbRead, pvUser);
418}
419
420
421RTDECL(int) RTFileAioReqPrepareWrite(RTFILEAIOREQ hReq, RTFILE hFile, RTFOFF off,
422 void const *pvBuf, size_t cbWrite, void *pvUser)
423{
424 return rtFileAioReqPrepareTransfer(hReq, hFile, LIO_WRITE,
425 off, (void *)pvBuf, cbWrite, pvUser);
426}
427
428
429RTDECL(int) RTFileAioReqPrepareFlush(RTFILEAIOREQ hReq, RTFILE hFile, void *pvUser)
430{
431 PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)hReq;
432
433 RTFILEAIOREQ_VALID_RETURN(pReqInt);
434 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
435 Assert(hFile != NIL_RTFILE);
436
437 pReqInt->fFlush = true;
438 pReqInt->AioCB.aio_fildes = (int)hFile;
439 pReqInt->AioCB.aio_offset = 0;
440 pReqInt->AioCB.aio_nbytes = 0;
441 pReqInt->AioCB.aio_buf = NULL;
442 pReqInt->pvUser = pvUser;
443 pReqInt->Rc = VERR_FILE_AIO_IN_PROGRESS;
444 RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
445
446 return VINF_SUCCESS;
447}
448
449
450RTDECL(void *) RTFileAioReqGetUser(RTFILEAIOREQ hReq)
451{
452 PRTFILEAIOREQINTERNAL pReqInt = hReq;
453 RTFILEAIOREQ_VALID_RETURN_RC(pReqInt, NULL);
454
455 return pReqInt->pvUser;
456}
457
458
/**
 * Cancels a submitted request.
 *
 * If aio_cancel() succeeds, the waiting thread is asked to remove the
 * request from its bookkeeping (via pReqToCancel) and we block on the
 * cancel semaphore until it acknowledges.
 *
 * @returns IPRT status code.
 * @retval  VERR_FILE_AIO_COMPLETED if the request already finished.
 * @retval  VERR_FILE_AIO_IN_PROGRESS if it could not be canceled anymore.
 * @param   hReq    The submitted request to cancel.
 */
RTDECL(int) RTFileAioReqCancel(RTFILEAIOREQ hReq)
{
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    RTFILEAIOREQ_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_NOT_SUBMITTED);

    /* Mark canceled before the syscall so the waiter sees it either way. */
    ASMAtomicXchgBool(&pReqInt->fCanceled, true);

    int rcPosix = aio_cancel(pReqInt->AioCB.aio_fildes, &pReqInt->AioCB);

    if (rcPosix == AIO_CANCELED)
    {
        PRTFILEAIOCTXINTERNAL pCtxInt = pReqInt->pCtxInt;
        /*
         * Notify the waiting thread that the request was canceled.
         */
        AssertMsg(VALID_PTR(pCtxInt),
                  ("Invalid state. Request was canceled but wasn't submitted\n"));

        /* Only one cancellation may be in flight per context (single slot). */
        Assert(!pCtxInt->pReqToCancel);
        ASMAtomicWritePtr((void* volatile*)&pCtxInt->pReqToCancel, pReqInt);
        rtFileAioCtxWakeup(pCtxInt);

        /* Wait for acknowledge. */
        int rc = RTSemEventWait(pCtxInt->SemEventCancel, RT_INDEFINITE_WAIT);
        AssertRC(rc);

        /* Free the slot for the next cancellation. */
        ASMAtomicWritePtr((void* volatile*)&pCtxInt->pReqToCancel, NULL);
        pReqInt->Rc = VERR_FILE_AIO_CANCELED;
        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
        return VINF_SUCCESS;
    }
    else if (rcPosix == AIO_ALLDONE)
        return VERR_FILE_AIO_COMPLETED;
    else if (rcPosix == AIO_NOTCANCELED)
        return VERR_FILE_AIO_IN_PROGRESS;
    else
        return RTErrConvertFromErrno(errno);
}
498
499
500RTDECL(int) RTFileAioReqGetRC(RTFILEAIOREQ hReq, size_t *pcbTransfered)
501{
502 PRTFILEAIOREQINTERNAL pReqInt = hReq;
503 RTFILEAIOREQ_VALID_RETURN(pReqInt);
504 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
505 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, PREPARED, VERR_FILE_AIO_NOT_SUBMITTED);
506 AssertPtrNull(pcbTransfered);
507
508 if ( (RT_SUCCESS(pReqInt->Rc))
509 && (pcbTransfered))
510 *pcbTransfered = pReqInt->cbTransfered;
511
512 return pReqInt->Rc;
513}
514
515
516RTDECL(int) RTFileAioCtxCreate(PRTFILEAIOCTX phAioCtx, uint32_t cAioReqsMax)
517{
518 PRTFILEAIOCTXINTERNAL pCtxInt;
519 unsigned cReqsWaitMax;
520
521 AssertPtrReturn(phAioCtx, VERR_INVALID_POINTER);
522
523 if (cAioReqsMax == RTFILEAIO_UNLIMITED_REQS)
524 return VERR_OUT_OF_RANGE;
525
526 cReqsWaitMax = RT_MIN(cAioReqsMax, AIO_LISTIO_MAX);
527
528 pCtxInt = (PRTFILEAIOCTXINTERNAL)RTMemAllocZ( sizeof(RTFILEAIOCTXINTERNAL)
529 + cReqsWaitMax * sizeof(PRTFILEAIOREQINTERNAL));
530 if (RT_UNLIKELY(!pCtxInt))
531 return VERR_NO_MEMORY;
532
533 /* Create event semaphore. */
534 int rc = RTSemEventCreate(&pCtxInt->SemEventCancel);
535 if (RT_FAILURE(rc))
536 {
537 RTMemFree(pCtxInt);
538 return rc;
539 }
540
541 pCtxInt->u32Magic = RTFILEAIOCTX_MAGIC;
542 pCtxInt->cMaxRequests = cAioReqsMax;
543 pCtxInt->cReqsWaitMax = cReqsWaitMax;
544 *phAioCtx = (RTFILEAIOCTX)pCtxInt;
545
546 return VINF_SUCCESS;
547}
548
549
550RTDECL(int) RTFileAioCtxDestroy(RTFILEAIOCTX hAioCtx)
551{
552 PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
553
554 AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
555
556 if (RT_UNLIKELY(pCtxInt->cRequests))
557 return VERR_FILE_AIO_BUSY;
558
559 RTSemEventDestroy(pCtxInt->SemEventCancel);
560 RTMemFree(pCtxInt);
561
562 return VINF_SUCCESS;
563}
564
565
566RTDECL(uint32_t) RTFileAioCtxGetMaxReqCount(RTFILEAIOCTX hAioCtx)
567{
568 PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
569
570 if (hAioCtx == NIL_RTFILEAIOCTX)
571 return RTFILEAIO_UNLIMITED_REQS;
572 else
573 return pCtxInt->cMaxRequests;
574}
575
576RTDECL(int) RTFileAioCtxAssociateWithFile(RTFILEAIOCTX hAioCtx, RTFILE hFile)
577{
578 return VINF_SUCCESS;
579}
580
/**
 * Submits a set of prepared requests to a completion context.
 *
 * Transfer requests go through lio_listio() in batches of at most
 * AIO_LISTIO_MAX; flush requests must use aio_fsync() and therefore break
 * the batching.  Successfully submitted requests are handed over to the
 * waiting thread via the apReqsNewHead slots.
 *
 * @returns IPRT status code.  On failure some requests may already have
 *          been submitted (check their individual states).
 * @param   hAioCtx     The completion context.
 * @param   pahReqs     Array of prepared requests.
 * @param   cReqs       Number of requests in the array (> 0).
 */
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;

    /* Parameter checks */
    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
    AssertReturn(cReqs != 0, VERR_INVALID_POINTER);
    AssertPtrReturn(pahReqs, VERR_INVALID_PARAMETER);

    /* Check that we don't exceed the limit */
    if (ASMAtomicUoReadS32(&pCtxInt->cRequests) + cReqs > pCtxInt->cMaxRequests)
        return VERR_FILE_AIO_LIMIT_EXCEEDED;

    /* Head of the doubly linked list of everything submitted in this call. */
    PRTFILEAIOREQINTERNAL pHead = NULL;

    do
    {
        int rcPosix = 0;
        size_t cReqsSubmit = 0;
        size_t i = 0;
        PRTFILEAIOREQINTERNAL pReqInt;

        /* Gather the next batch: up to AIO_LISTIO_MAX transfers, stopping
         * early at the first flush request (needs aio_fsync, see below). */
        while (   (i < cReqs)
               && (i < AIO_LISTIO_MAX))
        {
            pReqInt = pahReqs[i];
            if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
            {
                /* Undo everything and stop submitting. */
                for (size_t iUndo = 0; iUndo < i; iUndo++)
                {
                    pReqInt = pahReqs[iUndo];
                    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    pReqInt->pCtxInt = NULL;

                    /* Unlink from the list again. */
                    PRTFILEAIOREQINTERNAL pNext, pPrev;
                    pNext = pReqInt->pNext;
                    pPrev = pReqInt->pPrev;
                    if (pNext)
                        pNext->pPrev = pPrev;
                    if (pPrev)
                        pPrev->pNext = pNext;
                    else
                        pHead = pNext;
                }
                rc = VERR_INVALID_HANDLE;
                break;
            }

            pReqInt->pCtxInt = pCtxInt;

            /* Link them together. */
            pReqInt->pNext = pHead;
            if (pHead)
                pHead->pPrev = pReqInt;
            pReqInt->pPrev = NULL;
            pHead = pReqInt;
            RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

            /* Flush requests terminate the batch; note the request itself
             * is already linked and marked submitted at this point. */
            if (pReqInt->fFlush)
                break;

            cReqsSubmit++;
            i++;
        }

        if (cReqsSubmit)
        {
            /* This cast works because AioCB is the first member of the
             * request structure. */
            rcPosix = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
            if (RT_UNLIKELY(rcPosix < 0))
            {
                size_t cReqsSubmitted = cReqsSubmit;

                if (errno == EAGAIN)
                    rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                else
                    rc = RTErrConvertFromErrno(errno);

                /* Check which ones were not submitted. */
                for (i = 0; i < cReqsSubmit; i++)
                {
                    pReqInt = pahReqs[i];

                    rcPosix = aio_error(&pReqInt->AioCB);

                    if ((rcPosix != EINPROGRESS) && (rcPosix != 0))
                    {
                        cReqsSubmitted--;

#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                        if (errno == EINVAL)
#else
                        if (rcPosix == EINVAL)
#endif
                        {
                            /* Was not submitted. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        }
                        else
                        {
                            /* An error occurred. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);

                            /*
                             * Looks like Apple and glibc interpret the standard in different ways.
                             * glibc returns the error code which would be in errno but Apple returns
                             * -1 and sets errno to the appropriate value
                             */
#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                            Assert(rcPosix == -1);
                            pReqInt->Rc = RTErrConvertFromErrno(errno);
#elif defined(RT_OS_LINUX)
                            pReqInt->Rc = RTErrConvertFromErrno(rcPosix);
#endif
                            pReqInt->cbTransfered = 0;
                        }
                        /* Unlink from the list. */
                        PRTFILEAIOREQINTERNAL pNext, pPrev;
                        pNext = pReqInt->pNext;
                        pPrev = pReqInt->pPrev;
                        if (pNext)
                            pNext->pPrev = pPrev;
                        if (pPrev)
                            pPrev->pNext = pNext;
                        else
                            pHead = pNext;

                        pReqInt->pNext = NULL;
                        pReqInt->pPrev = NULL;
                    }
                }
                /* Account only the ones which really went in. */
                ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmitted);
                AssertMsg(pCtxInt->cRequests > 0, ("Adding requests resulted in overflow\n"));
                break;
            }

            ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
            AssertMsg(pCtxInt->cRequests > 0, ("Adding requests resulted in overflow\n"));
            cReqs   -= cReqsSubmit;
            pahReqs += cReqsSubmit;
        }

        /*
         * Check if we have a flush request now.
         * If not we hit the AIO_LISTIO_MAX limit
         * and will continue submitting requests
         * above.
         */
        if (cReqs && RT_SUCCESS_NP(rc))
        {
            pReqInt = pahReqs[0];
            RTFILEAIOREQ_VALID_RETURN(pReqInt);

            if (pReqInt->fFlush)
            {
                /*
                 * lio_listio does not work with flush requests so
                 * we have to use aio_fsync directly.
                 */
                rcPosix = aio_fsync(O_SYNC, &pReqInt->AioCB);
                if (RT_UNLIKELY(rcPosix < 0))
                {
                    rc = RTErrConvertFromErrno(errno);
                    RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                    pReqInt->Rc = rc;
                    pReqInt->cbTransfered = 0;

                    /* Unlink from the list. */
                    PRTFILEAIOREQINTERNAL pNext, pPrev;
                    pNext = pReqInt->pNext;
                    pPrev = pReqInt->pPrev;
                    if (pNext)
                        pNext->pPrev = pPrev;
                    if (pPrev)
                        pPrev->pNext = pNext;
                    else
                        pHead = pNext;
                    break;
                }

                ASMAtomicIncS32(&pCtxInt->cRequests);
                AssertMsg(pCtxInt->cRequests > 0, ("Adding requests resulted in overflow\n"));
                cReqs--;
                pahReqs++;
            }
        }
    } while (   cReqs
             && RT_SUCCESS_NP(rc));

    if (pHead)
    {
        /*
         * Forward successfully submitted requests to the thread waiting for requests.
         * We search for a free slot first and if we don't find one
         * we will grab the first one and append our list to the existing entries.
         */
        unsigned iSlot = 0;
        while (   (iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead))
               && !ASMAtomicCmpXchgPtr((void * volatile *)&pCtxInt->apReqsNewHead[iSlot], pHead, NULL))
            iSlot++;

        if (iSlot == RT_ELEMENTS(pCtxInt->apReqsNewHead))
        {
            /* Nothing found. */
            PRTFILEAIOREQINTERNAL pOldHead = (PRTFILEAIOREQINTERNAL)ASMAtomicXchgPtr((void * volatile *)&pCtxInt->apReqsNewHead[0],
                                                                                     NULL);

            /* Find the end of the current head and link the old list to the current. */
            PRTFILEAIOREQINTERNAL pTail = pHead;
            while (pTail->pNext)
                pTail = pTail->pNext;

            pTail->pNext = pOldHead;

            ASMAtomicXchgPtr((void * volatile *)&pCtxInt->apReqsNewHead[0], pHead);
        }

        /* Set the internal wakeup flag and wakeup the thread if possible. */
        bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, true);
        if (!fWokenUp)
            rtFileAioCtxWakeup(pCtxInt);
    }

    return rc;
}
808
809
810RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
811 PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
812{
813 int rc = VINF_SUCCESS;
814 int cRequestsCompleted = 0;
815 PRTFILEAIOCTXINTERNAL pCtxInt = (PRTFILEAIOCTXINTERNAL)hAioCtx;
816 struct timespec Timeout;
817 struct timespec *pTimeout = NULL;
818 uint64_t StartNanoTS = 0;
819
820 /* Check parameters. */
821 AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
822 AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
823 AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
824 AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
825 AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);
826
827 int32_t cRequestsWaiting = ASMAtomicReadS32(&pCtxInt->cRequests);
828
829 if (RT_UNLIKELY(cRequestsWaiting <= 0))
830 return VERR_FILE_AIO_NO_REQUEST;
831
832 if (RT_UNLIKELY(cMinReqs > (uint32_t)cRequestsWaiting))
833 return VERR_INVALID_PARAMETER;
834
835 if (cMillies != RT_INDEFINITE_WAIT)
836 {
837 Timeout.tv_sec = cMillies / 1000;
838 Timeout.tv_nsec = (cMillies % 1000) * 1000000;
839 pTimeout = &Timeout;
840 StartNanoTS = RTTimeNanoTS();
841 }
842
843 /* Wait for at least one. */
844 if (!cMinReqs)
845 cMinReqs = 1;
846
847 /* For the wakeup call. */
848 Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
849 ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());
850
851 /* Update the waiting list once before we enter the loop. */
852 rc = rtFileAioCtxProcessEvents(pCtxInt);
853
854 while ( cMinReqs
855 && RT_SUCCESS_NP(rc))
856 {
857#ifdef RT_STRICT
858 if (RT_UNLIKELY(!pCtxInt->iFirstFree))
859 {
860 for (unsigned i = 0; i < pCtxInt->cReqsWaitMax; i++)
861 RTAssertMsg2Weak("wait[%d] = %#p\n", i, pCtxInt->apReqs[i]);
862
863 AssertMsgFailed(("No request to wait for. pReqsWaitHead=%#p pReqsWaitTail=%#p\n",
864 pCtxInt->pReqsWaitHead, pCtxInt->pReqsWaitTail));
865 }
866#endif
867
868 ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
869 int rcPosix = aio_suspend((const struct aiocb * const *)pCtxInt->apReqs,
870 pCtxInt->iFirstFree, pTimeout);
871 ASMAtomicXchgBool(&pCtxInt->fWaiting, false);
872 if (rcPosix < 0)
873 {
874 /* Check that this is an external wakeup event. */
875 if (errno == EINTR)
876 rc = rtFileAioCtxProcessEvents(pCtxInt);
877 else
878 rc = RTErrConvertFromErrno(errno);
879 }
880 else
881 {
882 /* Requests finished. */
883 unsigned iReqCurr = 0;
884 unsigned cDone = 0;
885
886 /* Remove completed requests from the waiting list. */
887 while ( (iReqCurr < pCtxInt->iFirstFree)
888 && (cDone < cReqs))
889 {
890 PRTFILEAIOREQINTERNAL pReq = pCtxInt->apReqs[iReqCurr];
891 int rcReq = aio_error(&pReq->AioCB);
892
893 if (rcReq != EINPROGRESS)
894 {
895 /* Completed store the return code. */
896 if (rcReq == 0)
897 {
898 pReq->Rc = VINF_SUCCESS;
899 /* Call aio_return() to free ressources. */
900 pReq->cbTransfered = aio_return(&pReq->AioCB);
901 }
902 else
903 {
904#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
905 pReq->Rc = RTErrConvertFromErrno(errno);
906#else
907 pReq->Rc = RTErrConvertFromErrno(rcReq);
908#endif
909 }
910
911 /* Mark the request as finished. */
912 RTFILEAIOREQ_SET_STATE(pReq, COMPLETED);
913 cDone++;
914
915 /* If there are other entries waiting put the head into the now free entry. */
916 if (pCtxInt->pReqsWaitHead)
917 {
918 PRTFILEAIOREQINTERNAL pReqInsert = pCtxInt->pReqsWaitHead;
919
920 pCtxInt->pReqsWaitHead = pReqInsert->pNext;
921 if (!pCtxInt->pReqsWaitHead)
922 {
923 /* List is empty now. Clear tail too. */
924 pCtxInt->pReqsWaitTail = NULL;
925 }
926
927 pReqInsert->iWaitingList = pReq->iWaitingList;
928 pCtxInt->apReqs[pReqInsert->iWaitingList] = pReqInsert;
929 iReqCurr++;
930 }
931 else
932 {
933 /*
934 * Move the last entry into the current position to avoid holes
935 * but only if it is not the last element already.
936 */
937 if (pReq->iWaitingList < pCtxInt->iFirstFree - 1)
938 {
939 pCtxInt->apReqs[pReq->iWaitingList] = pCtxInt->apReqs[--pCtxInt->iFirstFree];
940 pCtxInt->apReqs[pReq->iWaitingList]->iWaitingList = pReq->iWaitingList;
941 }
942 else
943 pCtxInt->iFirstFree--;
944
945 pCtxInt->apReqs[pCtxInt->iFirstFree] = NULL;
946 }
947
948 /* Put the request into the completed list. */
949 pahReqs[cRequestsCompleted++] = pReq;
950 pReq->iWaitingList = RTFILEAIOCTX_WAIT_ENTRY_INVALID;
951 }
952 else
953 iReqCurr++;
954 }
955
956 AssertMsg((cDone <= cReqs), ("Overflow cReqs=%u cMinReqs=%u cDone=%u\n",
957 cReqs, cDone));
958 cReqs -= cDone;
959 cMinReqs = RT_MAX(cMinReqs, cDone) - cDone;
960 ASMAtomicSubS32(&pCtxInt->cRequests, cDone);
961
962 AssertMsg(pCtxInt->cRequests >= 0, ("Finished more requests than currently active\n"));
963
964 if (!cMinReqs)
965 break;
966
967 if (cMillies != RT_INDEFINITE_WAIT)
968 {
969 uint64_t TimeDiff;
970
971 /* Recalculate the timeout. */
972 TimeDiff = RTTimeSystemNanoTS() - StartNanoTS;
973 Timeout.tv_sec = Timeout.tv_sec - (TimeDiff / 1000000);
974 Timeout.tv_nsec = Timeout.tv_nsec - (TimeDiff % 1000000);
975 }
976
977 /* Check for new elements. */
978 rc = rtFileAioCtxProcessEvents(pCtxInt);
979 }
980 }
981
982 *pcReqs = cRequestsCompleted;
983 Assert(pCtxInt->hThreadWait == RTThreadSelf());
984 ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);
985
986 return rc;
987}
988
989
990RTDECL(int) RTFileAioCtxWakeup(RTFILEAIOCTX hAioCtx)
991{
992 PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
993 RTFILEAIOCTX_VALID_RETURN(pCtxInt);
994
995 /** @todo r=bird: Define the protocol for how to resume work after calling
996 * this function. */
997
998 bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUp, true);
999 if (!fWokenUp)
1000 rtFileAioCtxWakeup(pCtxInt);
1001
1002 return VINF_SUCCESS;
1003}
1004
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette