VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/posix/fileaio-posix.cpp@ 28800

Last change on this file since 28800 was 28800, checked in by vboxsync, 15 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 34.2 KB
/* $Id: fileaio-posix.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
/** @file
 * IPRT - File async I/O, native implementation for POSIX compliant host platforms.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.215389.xyz. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP RTLOGGROUP_DIR
#include <iprt/asm.h>
#include <iprt/file.h>
#include <iprt/mem.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/thread.h>
#include <iprt/semaphore.h>
#include "internal/fileaio.h"

#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
# include <sys/types.h>
# include <sys/sysctl.h> /* for sysctlbyname */
#endif
#if defined(RT_OS_FREEBSD)
# include <fcntl.h> /* O_SYNC */
#endif
#include <aio.h>
#include <errno.h>
#include <time.h>

/*
 * Linux does not define this value.
 * Just define it with a really big value.
 */
#ifndef AIO_LISTIO_MAX
# define AIO_LISTIO_MAX UINT32_MAX
#endif

#if 0 /* Only used for debugging */
# undef AIO_LISTIO_MAX
# define AIO_LISTIO_MAX 16
#endif

/** Invalid entry in the waiting array. */
#define RTFILEAIOCTX_WAIT_ENTRY_INVALID (~0U)

/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * Async I/O request state.
 */
typedef struct RTFILEAIOREQINTERNAL
{
    /** The aio control block. FIRST ELEMENT! */
    struct aiocb                  AioCB;
    /** Next element in the chain. */
    struct RTFILEAIOREQINTERNAL  *pNext;
    /** Previous element in the chain. */
    struct RTFILEAIOREQINTERNAL  *pPrev;
    /** Current state the request is in. */
    RTFILEAIOREQSTATE             enmState;
    /** Flag whether this is a flush request. */
    bool                          fFlush;
    /** Flag indicating if the request was canceled. */
    volatile bool                 fCanceled;
    /** Opaque user data. */
    void                         *pvUser;
    /** Number of bytes actually transferred. */
    size_t                        cbTransfered;
    /** Status code. */
    int                           Rc;
    /** Completion context we are assigned to. */
    struct RTFILEAIOCTXINTERNAL  *pCtxInt;
    /** Entry in the waiting list the request is in. */
    unsigned                      iWaitingList;
    /** Magic value (RTFILEAIOREQ_MAGIC). */
    uint32_t                      u32Magic;
} RTFILEAIOREQINTERNAL, *PRTFILEAIOREQINTERNAL;

/**
 * Async I/O completion context state.
 */
typedef struct RTFILEAIOCTXINTERNAL
{
    /** Current number of requests active on this context. */
    volatile int32_t        cRequests;
    /** Maximum number of requests this context can handle. */
    uint32_t                cMaxRequests;
    /** The ID of the thread which is currently waiting for requests. */
    volatile RTTHREAD       hThreadWait;
    /** Flag whether the thread was woken up. */
    volatile bool           fWokenUp;
    /** Flag whether the thread is currently waiting in the syscall. */
    volatile bool           fWaiting;
    /** Magic value (RTFILEAIOCTX_MAGIC). */
    uint32_t                u32Magic;
    /** Flag whether the thread was woken up due to an internal event. */
    volatile bool           fWokenUpInternal;
    /** List of new requests which need to be inserted into apReqs by the
     *  waiting thread. */
    volatile PRTFILEAIOREQINTERNAL apReqsNewHead[5];
    /** Special entry for requests which are canceled. Because only one
     *  request can be canceled at a time and the thread canceling the request
     *  has to wait, we need only one entry. */
    volatile PRTFILEAIOREQINTERNAL pReqToCancel;
    /** Event semaphore the canceling thread waits on for completion of
     *  the operation. */
    RTSEMEVENT              SemEventCancel;
    /** Head of submitted elements waiting to get into the array. */
    PRTFILEAIOREQINTERNAL   pReqsWaitHead;
    /** Tail of submitted elements waiting to get into the array. */
    PRTFILEAIOREQINTERNAL   pReqsWaitTail;
    /** Maximum number of elements in the waiting array. */
    unsigned                cReqsWaitMax;
    /** First free slot in the waiting list. */
    unsigned                iFirstFree;
    /** List of requests we are currently waiting on.
     *  Size depends on cMaxRequests and AIO_LISTIO_MAX. */
    volatile PRTFILEAIOREQINTERNAL apReqs[1];
} RTFILEAIOCTXINTERNAL, *PRTFILEAIOCTXINTERNAL;

/**
 * Internal worker for waking up the waiting thread.
 */
static void rtFileAioCtxWakeup(PRTFILEAIOCTXINTERNAL pCtxInt)
{
    /*
     * Read the thread handle before the status flag.
     * If we read the handle after the flag we might end up with
     * an invalid handle because the thread waiting in RTFileAioCtxWait()
     * might get scheduled and return before we read the flag.
     * We can ensure that the handle is valid if fWaiting is true
     * when reading the handle before the status flag.
     */
    RTTHREAD hThread;
    ASMAtomicReadHandle(&pCtxInt->hThreadWait, &hThread);
    bool fWaiting = ASMAtomicReadBool(&pCtxInt->fWaiting);
    if (fWaiting)
    {
        /*
         * If a thread waits the handle must be valid.
         * It is possible that the thread returns from
         * aio_suspend() before the signal is sent.
         * This is no problem because we already set fWokenUp
         * to true, which will let the thread return VERR_INTERRUPTED,
         * and the next call to RTFileAioCtxWait() will not
         * return VERR_INTERRUPTED because signals are not saved
         * and will simply vanish if the destination thread can't
         * receive them.
         */
        Assert(hThread != NIL_RTTHREAD);
        RTThreadPoke(hThread);
    }
}

/**
 * Internal worker processing events and inserting new requests into the waiting list.
 */
static int rtFileAioCtxProcessEvents(PRTFILEAIOCTXINTERNAL pCtxInt)
{
    int rc = VINF_SUCCESS;

    /* Process new requests first. */
    bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, false);
    if (fWokenUp)
    {
        for (unsigned iSlot = 0; iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead); iSlot++)
        {
            PRTFILEAIOREQINTERNAL pReqHead = (PRTFILEAIOREQINTERNAL)ASMAtomicXchgPtr((void* volatile*)&pCtxInt->apReqsNewHead[iSlot],
                                                                                     NULL);

            while (   (pCtxInt->iFirstFree < pCtxInt->cReqsWaitMax)
                   && pReqHead)
            {
                pCtxInt->apReqs[pCtxInt->iFirstFree] = pReqHead;
                pReqHead->iWaitingList = pCtxInt->iFirstFree;
                pReqHead = pReqHead->pNext;

                /* Clear pointer to next and previous element just for safety. */
                pCtxInt->apReqs[pCtxInt->iFirstFree]->pNext = NULL;
                pCtxInt->apReqs[pCtxInt->iFirstFree]->pPrev = NULL;
                pCtxInt->iFirstFree++;

                Assert(   (pCtxInt->iFirstFree <= pCtxInt->cMaxRequests)
                       && (pCtxInt->iFirstFree <= pCtxInt->cReqsWaitMax));
            }

            /* Append the rest to the wait list. */
            if (pReqHead)
            {
                if (!pCtxInt->pReqsWaitHead)
                {
                    Assert(!pCtxInt->pReqsWaitTail);
                    pCtxInt->pReqsWaitHead = pReqHead;
                    pReqHead->pPrev = NULL;
                }
                else
                {
                    AssertPtr(pCtxInt->pReqsWaitTail);

                    pCtxInt->pReqsWaitTail->pNext = pReqHead;
                    pReqHead->pPrev = pCtxInt->pReqsWaitTail;
                }

                /* Update tail. */
                while (pReqHead->pNext)
                    pReqHead = pReqHead->pNext;

                pCtxInt->pReqsWaitTail = pReqHead;
                pCtxInt->pReqsWaitTail->pNext = NULL;
            }
        }

        /* Check if a request needs to be canceled. */
        PRTFILEAIOREQINTERNAL pReqToCancel = (PRTFILEAIOREQINTERNAL)ASMAtomicReadPtr((void* volatile*)&pCtxInt->pReqToCancel);
        if (pReqToCancel)
        {
            /* The request can be in the array waiting for completion or still in the list because it is full. */
            if (pReqToCancel->iWaitingList != RTFILEAIOCTX_WAIT_ENTRY_INVALID)
            {
                /* Put it out of the waiting list. */
                pCtxInt->apReqs[pReqToCancel->iWaitingList] = pCtxInt->apReqs[--pCtxInt->iFirstFree];
                pCtxInt->apReqs[pReqToCancel->iWaitingList]->iWaitingList = pReqToCancel->iWaitingList;
            }
            else
            {
                /* Unlink from the waiting list. */
                PRTFILEAIOREQINTERNAL pPrev = pReqToCancel->pPrev;
                PRTFILEAIOREQINTERNAL pNext = pReqToCancel->pNext;

                if (pNext)
                    pNext->pPrev = pPrev;
                else
                {
                    /* We canceled the tail. */
                    pCtxInt->pReqsWaitTail = pPrev;
                }

                if (pPrev)
                    pPrev->pNext = pNext;
                else
                {
                    /* We canceled the head. */
                    pCtxInt->pReqsWaitHead = pNext;
                }
            }

            ASMAtomicDecS32(&pCtxInt->cRequests);
            AssertMsg(pCtxInt->cRequests >= 0, ("Canceled a request which is not in this context\n"));
            RTSemEventSignal(pCtxInt->SemEventCancel);
        }
    }
    else
    {
        if (ASMAtomicXchgBool(&pCtxInt->fWokenUp, false))
            rc = VERR_INTERRUPTED;
    }

    return rc;
}

RTR3DECL(int) RTFileAioGetLimits(PRTFILEAIOLIMITS pAioLimits)
{
    int rcBSD = 0;
    AssertPtrReturn(pAioLimits, VERR_INVALID_POINTER);

#if defined(RT_OS_DARWIN)
    int cReqsOutstandingMax = 0;
    size_t cbParameter = sizeof(int);

    rcBSD = sysctlbyname("kern.aioprocmax",    /* name */
                         &cReqsOutstandingMax, /* Where to store the old value. */
                         &cbParameter,         /* Size of the memory pointed to. */
                         NULL,                 /* Where the new value is located. */
                         NULL);                /* Where the size of the new value is stored. */
    if (rcBSD == -1)
        return RTErrConvertFromErrno(errno);

    pAioLimits->cReqsOutstandingMax = cReqsOutstandingMax;
    pAioLimits->cbBufferAlignment   = 0;
#elif defined(RT_OS_FREEBSD)
    /*
     * The AIO API is implemented in a kernel module which is not
     * loaded by default.
     * If it is loaded there are additional sysctl parameters.
     */
    int cReqsOutstandingMax = 0;
    size_t cbParameter = sizeof(int);

    rcBSD = sysctlbyname("vfs.aio.max_aio_per_proc", /* name */
                         &cReqsOutstandingMax,       /* Where to store the old value. */
                         &cbParameter,               /* Size of the memory pointed to. */
                         NULL,                       /* Where the new value is located. */
                         NULL);                      /* Where the size of the new value is stored. */
    if (rcBSD == -1)
    {
        /* ENOENT means the value is unknown, thus the module is not loaded. */
        if (errno == ENOENT)
            return VERR_NOT_SUPPORTED;
        else
            return RTErrConvertFromErrno(errno);
    }

    pAioLimits->cReqsOutstandingMax = cReqsOutstandingMax;
    pAioLimits->cbBufferAlignment   = 0;
#else
    pAioLimits->cReqsOutstandingMax = RTFILEAIO_UNLIMITED_REQS;
    pAioLimits->cbBufferAlignment   = 0;
#endif

    return VINF_SUCCESS;
}
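
/*
 * Minimal usage sketch for the limits query above: call RTFileAioGetLimits()
 * once at startup and size contexts from the result. The sketch assumes
 * iprt/stream.h's RTPrintf() for output; it is illustrative only and not part
 * of the original file or the IPRT build.
 */
#if 0 /* illustrative only */
# include <iprt/stream.h>

static void rtFileAioShowLimitsExample(void)
{
    RTFILEAIOLIMITS AioLimits;
    int rc = RTFileAioGetLimits(&AioLimits);
    if (RT_SUCCESS(rc))
        RTPrintf("Max outstanding requests: %u (RTFILEAIO_UNLIMITED_REQS means no limit), buffer alignment: %u\n",
                 (unsigned)AioLimits.cReqsOutstandingMax, (unsigned)AioLimits.cbBufferAlignment);
    else
        RTPrintf("RTFileAioGetLimits failed: %Rrc\n", rc);
}
#endif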

RTR3DECL(int) RTFileAioReqCreate(PRTFILEAIOREQ phReq)
{
    AssertPtrReturn(phReq, VERR_INVALID_POINTER);

    PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)RTMemAllocZ(sizeof(RTFILEAIOREQINTERNAL));
    if (RT_UNLIKELY(!pReqInt))
        return VERR_NO_MEMORY;

    pReqInt->pCtxInt      = NULL;
    pReqInt->u32Magic     = RTFILEAIOREQ_MAGIC;
    pReqInt->iWaitingList = RTFILEAIOCTX_WAIT_ENTRY_INVALID;
    RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);

    *phReq = (RTFILEAIOREQ)pReqInt;

    return VINF_SUCCESS;
}


RTDECL(int) RTFileAioReqDestroy(RTFILEAIOREQ hReq)
{
    /*
     * Validate the handle and ignore nil.
     */
    if (hReq == NIL_RTFILEAIOREQ)
        return VINF_SUCCESS;
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);

    /*
     * Trash the magic and free it.
     */
    ASMAtomicUoWriteU32(&pReqInt->u32Magic, ~RTFILEAIOREQ_MAGIC);
    RTMemFree(pReqInt);
    return VINF_SUCCESS;
}

/**
 * Worker setting up the request.
 */
DECLINLINE(int) rtFileAioReqPrepareTransfer(RTFILEAIOREQ hReq, RTFILE hFile,
                                            unsigned uTransferDirection,
                                            RTFOFF off, void *pvBuf, size_t cbTransfer,
                                            void *pvUser)
{
    /*
     * Validate the input.
     */
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
    Assert(hFile != NIL_RTFILE);
    AssertPtr(pvBuf);
    Assert(off >= 0);
    Assert(cbTransfer > 0);

    memset(&pReqInt->AioCB, 0, sizeof(struct aiocb));
    pReqInt->AioCB.aio_lio_opcode = uTransferDirection;
    pReqInt->AioCB.aio_fildes     = (int)hFile;
    pReqInt->AioCB.aio_offset     = off;
    pReqInt->AioCB.aio_nbytes     = cbTransfer;
    pReqInt->AioCB.aio_buf        = pvBuf;
    pReqInt->pvUser               = pvUser;
    pReqInt->pCtxInt              = NULL;
    pReqInt->Rc                   = VERR_FILE_AIO_IN_PROGRESS;
    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);

    return VINF_SUCCESS;
}


RTDECL(int) RTFileAioReqPrepareRead(RTFILEAIOREQ hReq, RTFILE hFile, RTFOFF off,
                                    void *pvBuf, size_t cbRead, void *pvUser)
{
    return rtFileAioReqPrepareTransfer(hReq, hFile, LIO_READ,
                                       off, pvBuf, cbRead, pvUser);
}


RTDECL(int) RTFileAioReqPrepareWrite(RTFILEAIOREQ hReq, RTFILE hFile, RTFOFF off,
                                     void const *pvBuf, size_t cbWrite, void *pvUser)
{
    return rtFileAioReqPrepareTransfer(hReq, hFile, LIO_WRITE,
                                       off, (void *)pvBuf, cbWrite, pvUser);
}


RTDECL(int) RTFileAioReqPrepareFlush(RTFILEAIOREQ hReq, RTFILE hFile, void *pvUser)
{
    PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)hReq;

    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
    Assert(hFile != NIL_RTFILE);

    pReqInt->fFlush           = true;
    pReqInt->AioCB.aio_fildes = (int)hFile;
    pReqInt->pvUser           = pvUser;
    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);

    return VINF_SUCCESS;
}
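
/*
 * Minimal sketch of creating and preparing requests with the APIs above: one
 * read and one flush request against an already opened file. It assumes an
 * RTFILE handle obtained elsewhere (e.g. via RTFileOpen()) and does no real
 * error handling; illustrative only, not part of the original file.
 */
#if 0 /* illustrative only */
static int rtFileAioPrepareExample(RTFILE hFile, void *pvBuf, size_t cbBuf)
{
    RTFILEAIOREQ hReqRead  = NIL_RTFILEAIOREQ;
    RTFILEAIOREQ hReqFlush = NIL_RTFILEAIOREQ;

    /* Allocate the request handles; they can be reused for many transfers. */
    int rc = RTFileAioReqCreate(&hReqRead);
    if (RT_SUCCESS(rc))
        rc = RTFileAioReqCreate(&hReqFlush);

    if (RT_SUCCESS(rc))
    {
        /* Read cbBuf bytes from offset 0; the last argument is opaque user data. */
        rc = RTFileAioReqPrepareRead(hReqRead, hFile, 0 /*off*/, pvBuf, cbBuf, NULL /*pvUser*/);
        if (RT_SUCCESS(rc))
            rc = RTFileAioReqPrepareFlush(hReqFlush, hFile, NULL /*pvUser*/);
    }

    /* The prepared requests would now be handed to RTFileAioCtxSubmit(); see further below. */
    RTFileAioReqDestroy(hReqRead);
    RTFileAioReqDestroy(hReqFlush);
    return rc;
}
#endif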


RTDECL(void *) RTFileAioReqGetUser(RTFILEAIOREQ hReq)
{
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN_RC(pReqInt, NULL);

    return pReqInt->pvUser;
}


RTDECL(int) RTFileAioReqCancel(RTFILEAIOREQ hReq)
{
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    RTFILEAIOREQ_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_NOT_SUBMITTED);

    ASMAtomicXchgBool(&pReqInt->fCanceled, true);

    int rcPosix = aio_cancel(pReqInt->AioCB.aio_fildes, &pReqInt->AioCB);

    if (rcPosix == AIO_CANCELED)
    {
        PRTFILEAIOCTXINTERNAL pCtxInt = pReqInt->pCtxInt;
        /*
         * Notify the waiting thread that the request was canceled.
         */
        AssertMsg(VALID_PTR(pCtxInt),
                  ("Invalid state. Request was canceled but wasn't submitted\n"));

        Assert(!pCtxInt->pReqToCancel);
        ASMAtomicWritePtr((void* volatile*)&pCtxInt->pReqToCancel, pReqInt);
        rtFileAioCtxWakeup(pCtxInt);

        /* Wait for acknowledge. */
        int rc = RTSemEventWait(pCtxInt->SemEventCancel, RT_INDEFINITE_WAIT);
        AssertRC(rc);

        ASMAtomicWritePtr((void* volatile*)&pCtxInt->pReqToCancel, NULL);
        pReqInt->Rc = VERR_FILE_AIO_CANCELED;
        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
        return VINF_SUCCESS;
    }
    else if (rcPosix == AIO_ALLDONE)
        return VERR_FILE_AIO_COMPLETED;
    else if (rcPosix == AIO_NOTCANCELED)
        return VERR_FILE_AIO_IN_PROGRESS;
    else
        return RTErrConvertFromErrno(errno);
}


RTDECL(int) RTFileAioReqGetRC(RTFILEAIOREQ hReq, size_t *pcbTransfered)
{
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
    RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, PREPARED, VERR_FILE_AIO_NOT_SUBMITTED);
    AssertPtrNull(pcbTransfered);

    if (   (RT_SUCCESS(pReqInt->Rc))
        && (pcbTransfered))
        *pcbTransfered = pReqInt->cbTransfered;

    return pReqInt->Rc;
}


RTDECL(int) RTFileAioCtxCreate(PRTFILEAIOCTX phAioCtx, uint32_t cAioReqsMax)
{
    PRTFILEAIOCTXINTERNAL pCtxInt;
    unsigned cReqsWaitMax;

    AssertPtrReturn(phAioCtx, VERR_INVALID_POINTER);

    if (cAioReqsMax == RTFILEAIO_UNLIMITED_REQS)
        return VERR_OUT_OF_RANGE;

    cReqsWaitMax = RT_MIN(cAioReqsMax, AIO_LISTIO_MAX);

    pCtxInt = (PRTFILEAIOCTXINTERNAL)RTMemAllocZ(  sizeof(RTFILEAIOCTXINTERNAL)
                                                 + cReqsWaitMax * sizeof(PRTFILEAIOREQINTERNAL));
    if (RT_UNLIKELY(!pCtxInt))
        return VERR_NO_MEMORY;

    /* Create event semaphore. */
    int rc = RTSemEventCreate(&pCtxInt->SemEventCancel);
    if (RT_FAILURE(rc))
    {
        RTMemFree(pCtxInt);
        return rc;
    }

    pCtxInt->u32Magic     = RTFILEAIOCTX_MAGIC;
    pCtxInt->cMaxRequests = cAioReqsMax;
    pCtxInt->cReqsWaitMax = cReqsWaitMax;
    *phAioCtx = (RTFILEAIOCTX)pCtxInt;

    return VINF_SUCCESS;
}
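
/*
 * Minimal sketch of creating and destroying a completion context sized from
 * the host limits queried via RTFileAioGetLimits(). Uses only APIs defined in
 * this file; illustrative only, not part of the original file.
 */
#if 0 /* illustrative only */
static int rtFileAioCtxExample(void)
{
    RTFILEAIOLIMITS AioLimits;
    int rc = RTFileAioGetLimits(&AioLimits);
    if (RT_FAILURE(rc))
        return rc;

    /* RTFileAioCtxCreate() rejects RTFILEAIO_UNLIMITED_REQS, so pick a finite cap. */
    uint32_t cReqsMax = AioLimits.cReqsOutstandingMax != RTFILEAIO_UNLIMITED_REQS
                      ? AioLimits.cReqsOutstandingMax
                      : 128;

    RTFILEAIOCTX hAioCtx = NIL_RTFILEAIOCTX;
    rc = RTFileAioCtxCreate(&hAioCtx, cReqsMax);
    if (RT_SUCCESS(rc))
    {
        /* ... prepare, submit and wait for requests here ... */
        rc = RTFileAioCtxDestroy(hAioCtx); /* fails with VERR_FILE_AIO_BUSY while requests are active */
    }
    return rc;
}
#endif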


RTDECL(int) RTFileAioCtxDestroy(RTFILEAIOCTX hAioCtx)
{
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;

    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);

    if (RT_UNLIKELY(pCtxInt->cRequests))
        return VERR_FILE_AIO_BUSY;

    RTSemEventDestroy(pCtxInt->SemEventCancel);
    RTMemFree(pCtxInt);

    return VINF_SUCCESS;
}


RTDECL(uint32_t) RTFileAioCtxGetMaxReqCount(RTFILEAIOCTX hAioCtx)
{
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;

    if (hAioCtx == NIL_RTFILEAIOCTX)
        return RTFILEAIO_UNLIMITED_REQS;
    else
        return pCtxInt->cMaxRequests;
}

RTDECL(int) RTFileAioCtxAssociateWithFile(RTFILEAIOCTX hAioCtx, RTFILE hFile)
{
    return VINF_SUCCESS;
}

RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;

    /* Parameter checks */
    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
    AssertReturn(cReqs != 0, VERR_INVALID_POINTER);
    AssertPtrReturn(pahReqs, VERR_INVALID_PARAMETER);

    /* Check that we don't exceed the limit */
    if (ASMAtomicUoReadS32(&pCtxInt->cRequests) + cReqs > pCtxInt->cMaxRequests)
        return VERR_FILE_AIO_LIMIT_EXCEEDED;

    PRTFILEAIOREQINTERNAL pHead = NULL;

    do
    {
        int rcPosix = 0;
        size_t cReqsSubmit = 0;
        size_t i = 0;
        PRTFILEAIOREQINTERNAL pReqInt;

        while (   (i < cReqs)
               && (i < AIO_LISTIO_MAX))
        {
            pReqInt = pahReqs[i];
            if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
            {
                /* Undo everything and stop submitting. */
                for (size_t iUndo = 0; iUndo < i; iUndo++)
                {
                    pReqInt = pahReqs[iUndo];
                    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    pReqInt->pCtxInt = NULL;

                    /* Unlink from the list again. */
                    PRTFILEAIOREQINTERNAL pNext, pPrev;
                    pNext = pReqInt->pNext;
                    pPrev = pReqInt->pPrev;
                    if (pNext)
                        pNext->pPrev = pPrev;
                    if (pPrev)
                        pPrev->pNext = pNext;
                    else
                        pHead = pNext;
                }
                rc = VERR_INVALID_HANDLE;
                break;
            }

            pReqInt->pCtxInt = pCtxInt;

            /* Link them together. */
            pReqInt->pNext = pHead;
            if (pHead)
                pHead->pPrev = pReqInt;
            pReqInt->pPrev = NULL;
            pHead = pReqInt;
            RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

            if (pReqInt->fFlush)
                break;

            cReqsSubmit++;
            i++;
        }

        if (cReqsSubmit)
        {
            rcPosix = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
            if (RT_UNLIKELY(rcPosix < 0))
            {
                size_t cReqsSubmitted = cReqsSubmit;

                if (errno == EAGAIN)
                    rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                else
                    rc = RTErrConvertFromErrno(errno);

                /* Check which ones were not submitted. */
                for (i = 0; i < cReqsSubmit; i++)
                {
                    pReqInt = pahReqs[i];

                    rcPosix = aio_error(&pReqInt->AioCB);

                    if ((rcPosix != EINPROGRESS) && (rcPosix != 0))
                    {
                        cReqsSubmitted--;

#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                        if (errno == EINVAL)
#else
                        if (rcPosix == EINVAL)
#endif
                        {
                            /* Was not submitted. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        }
                        else
                        {
                            /* An error occurred. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);

                            /*
                             * Looks like Apple and glibc interpret the standard in different ways.
                             * glibc returns the error code which would be in errno but Apple returns
                             * -1 and sets errno to the appropriate value.
                             */
#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                            Assert(rcPosix == -1);
                            pReqInt->Rc = RTErrConvertFromErrno(errno);
#elif defined(RT_OS_LINUX)
                            pReqInt->Rc = RTErrConvertFromErrno(rcPosix);
#endif
                            pReqInt->cbTransfered = 0;
                        }
                        /* Unlink from the list. */
                        PRTFILEAIOREQINTERNAL pNext, pPrev;
                        pNext = pReqInt->pNext;
                        pPrev = pReqInt->pPrev;
                        if (pNext)
                            pNext->pPrev = pPrev;
                        if (pPrev)
                            pPrev->pNext = pNext;
                        else
                            pHead = pNext;

                        pReqInt->pNext = NULL;
                        pReqInt->pPrev = NULL;
                    }
                }
                ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmitted);
                AssertMsg(pCtxInt->cRequests > 0, ("Adding requests resulted in overflow\n"));
                break;
            }

            ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
            AssertMsg(pCtxInt->cRequests > 0, ("Adding requests resulted in overflow\n"));
            cReqs   -= cReqsSubmit;
            pahReqs += cReqsSubmit;
        }

        /*
         * Check if we have a flush request now.
         * If not, we hit the AIO_LISTIO_MAX limit
         * and will continue submitting requests
         * above.
         */
        if (cReqs && RT_SUCCESS_NP(rc))
        {
            pReqInt = pahReqs[0];
            RTFILEAIOREQ_VALID_RETURN(pReqInt);

            if (pReqInt->fFlush)
            {
                /*
                 * lio_listio does not work with flush requests so
                 * we have to use aio_fsync directly.
                 */
                rcPosix = aio_fsync(O_SYNC, &pReqInt->AioCB);
                if (RT_UNLIKELY(rcPosix < 0))
                {
                    rc = RTErrConvertFromErrno(errno);
                    RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                    pReqInt->Rc = rc;
                    pReqInt->cbTransfered = 0;

                    /* Unlink from the list. */
                    PRTFILEAIOREQINTERNAL pNext, pPrev;
                    pNext = pReqInt->pNext;
                    pPrev = pReqInt->pPrev;
                    if (pNext)
                        pNext->pPrev = pPrev;
                    if (pPrev)
                        pPrev->pNext = pNext;
                    else
                        pHead = pNext;
                    break;
                }

                ASMAtomicIncS32(&pCtxInt->cRequests);
                AssertMsg(pCtxInt->cRequests > 0, ("Adding requests resulted in overflow\n"));
                cReqs--;
                pahReqs++;
            }
        }
    } while (   cReqs
             && RT_SUCCESS_NP(rc));

    if (pHead)
    {
        /*
         * Forward successfully submitted requests to the thread waiting for requests.
         * We search for a free slot first and if we don't find one
         * we will grab the first one and append our list to the existing entries.
         */
        unsigned iSlot = 0;
        while (   (iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead))
               && !ASMAtomicCmpXchgPtr((void * volatile *)&pCtxInt->apReqsNewHead[iSlot], pHead, NULL))
            iSlot++;

        if (iSlot == RT_ELEMENTS(pCtxInt->apReqsNewHead))
        {
            /* Nothing found. */
            PRTFILEAIOREQINTERNAL pOldHead = (PRTFILEAIOREQINTERNAL)ASMAtomicXchgPtr((void * volatile *)&pCtxInt->apReqsNewHead[0],
                                                                                     NULL);

            /* Find the end of the current head and link the old list to the current. */
            PRTFILEAIOREQINTERNAL pTail = pHead;
            while (pTail->pNext)
                pTail = pTail->pNext;

            pTail->pNext = pOldHead;

            ASMAtomicXchgPtr((void * volatile *)&pCtxInt->apReqsNewHead[0], pHead);
        }

        /* Set the internal wakeup flag and wakeup the thread if possible. */
        bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, true);
        if (!fWokenUp)
            rtFileAioCtxWakeup(pCtxInt);
    }

    return rc;
}
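
/*
 * Minimal sketch of submitting prepared requests on a context. The array may
 * mix reads, writes and flushes; everything before a flush is pushed out in
 * one lio_listio() batch and the flush itself is issued with aio_fsync(), as
 * implemented above. Assumes the requests were prepared with the
 * RTFileAioReqPrepare* APIs; illustrative only, not part of the original file.
 */
#if 0 /* illustrative only */
static int rtFileAioSubmitExample(RTFILEAIOCTX hAioCtx, RTFILEAIOREQ hReqRead, RTFILEAIOREQ hReqFlush)
{
    RTFILEAIOREQ ahReqs[2];
    ahReqs[0] = hReqRead;
    ahReqs[1] = hReqFlush;

    int rc = RTFileAioCtxSubmit(hAioCtx, &ahReqs[0], RT_ELEMENTS(ahReqs));
    if (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES)
    {
        /* The host ran out of AIO resources; a caller could retry with a smaller batch. */
    }
    return rc;
}
#endif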


RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;
    PRTFILEAIOCTXINTERNAL pCtxInt = (PRTFILEAIOCTXINTERNAL)hAioCtx;
    struct timespec Timeout;
    struct timespec *pTimeout = NULL;
    uint64_t StartNanoTS = 0;

    /* Check parameters. */
    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    int32_t cRequestsWaiting = ASMAtomicReadS32(&pCtxInt->cRequests);

    if (RT_UNLIKELY(cRequestsWaiting <= 0))
        return VERR_FILE_AIO_NO_REQUEST;

    if (RT_UNLIKELY(cMinReqs > (uint32_t)cRequestsWaiting))
        return VERR_INVALID_PARAMETER;

    if (cMillies != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillies / 1000;
        Timeout.tv_nsec = (cMillies % 1000) * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /* For the wakeup call. */
    Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());

    /* Update the waiting list once before we enter the loop. */
    rc = rtFileAioCtxProcessEvents(pCtxInt);

    while (   cMinReqs
           && RT_SUCCESS_NP(rc))
    {
#ifdef RT_STRICT
        if (RT_UNLIKELY(!pCtxInt->iFirstFree))
        {
            for (unsigned i = 0; i < pCtxInt->cReqsWaitMax; i++)
                RTAssertMsg2Weak("wait[%d] = %#p\n", i, pCtxInt->apReqs[i]);

            AssertMsgFailed(("No request to wait for. pReqsWaitHead=%#p pReqsWaitTail=%#p\n",
                             pCtxInt->pReqsWaitHead, pCtxInt->pReqsWaitTail));
        }
#endif

        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        int rcPosix = aio_suspend((const struct aiocb * const *)pCtxInt->apReqs,
                                  pCtxInt->iFirstFree, pTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);
        if (rcPosix < 0)
        {
            /* Check that this is an external wakeup event. */
            if (errno == EINTR)
                rc = rtFileAioCtxProcessEvents(pCtxInt);
            else
                rc = RTErrConvertFromErrno(errno);
        }
        else
        {
            /* Requests finished. */
            unsigned iReqCurr = 0;
            unsigned cDone = 0;

            /* Remove completed requests from the waiting list. */
            while (   (iReqCurr < pCtxInt->iFirstFree)
                   && (cDone < cReqs))
            {
                PRTFILEAIOREQINTERNAL pReq = pCtxInt->apReqs[iReqCurr];
                int rcReq = aio_error(&pReq->AioCB);

                if (rcReq != EINPROGRESS)
                {
                    /* Completed; store the return code. */
                    if (rcReq == 0)
                    {
                        pReq->Rc = VINF_SUCCESS;
                        /* Call aio_return() to free resources. */
                        pReq->cbTransfered = aio_return(&pReq->AioCB);
                    }
                    else
                    {
#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                        pReq->Rc = RTErrConvertFromErrno(errno);
#else
                        pReq->Rc = RTErrConvertFromErrno(rcReq);
#endif
                    }

                    /* Mark the request as finished. */
                    RTFILEAIOREQ_SET_STATE(pReq, COMPLETED);
                    cDone++;

                    /* If there are other entries waiting put the head into the now free entry. */
                    if (pCtxInt->pReqsWaitHead)
                    {
                        PRTFILEAIOREQINTERNAL pReqInsert = pCtxInt->pReqsWaitHead;

                        pCtxInt->pReqsWaitHead = pReqInsert->pNext;
                        if (!pCtxInt->pReqsWaitHead)
                        {
                            /* List is empty now. Clear tail too. */
                            pCtxInt->pReqsWaitTail = NULL;
                        }

                        pReqInsert->iWaitingList = pReq->iWaitingList;
                        pCtxInt->apReqs[pReqInsert->iWaitingList] = pReqInsert;
                        iReqCurr++;
                    }
                    else
                    {
                        /*
                         * Move the last entry into the current position to avoid holes
                         * but only if it is not the last element already.
                         */
                        if (pReq->iWaitingList < pCtxInt->iFirstFree - 1)
                        {
                            pCtxInt->apReqs[pReq->iWaitingList] = pCtxInt->apReqs[--pCtxInt->iFirstFree];
                            pCtxInt->apReqs[pReq->iWaitingList]->iWaitingList = pReq->iWaitingList;
                        }
                        else
                            pCtxInt->iFirstFree--;

                        pCtxInt->apReqs[pCtxInt->iFirstFree] = NULL;
                    }

                    /* Put the request into the completed list. */
                    pahReqs[cRequestsCompleted++] = pReq;
                    pReq->iWaitingList = RTFILEAIOCTX_WAIT_ENTRY_INVALID;
                }
                else
                    iReqCurr++;
            }

            AssertMsg((cDone <= cReqs), ("Overflow cReqs=%u cMinReqs=%u cDone=%u\n",
                                         cReqs, cMinReqs, cDone));
            cReqs    -= cDone;
            cMinReqs  = RT_MAX(cMinReqs, cDone) - cDone;
            ASMAtomicSubS32(&pCtxInt->cRequests, cDone);

            AssertMsg(pCtxInt->cRequests >= 0, ("Finished more requests than currently active\n"));

            if (!cMinReqs)
                break;

            if (cMillies != RT_INDEFINITE_WAIT)
            {
                uint64_t TimeDiff;

                /* Recalculate the timeout (TimeDiff is in nanoseconds). */
                TimeDiff = RTTimeNanoTS() - StartNanoTS;
                Timeout.tv_sec  = Timeout.tv_sec  - (TimeDiff / 1000000000);
                Timeout.tv_nsec = Timeout.tv_nsec - (TimeDiff % 1000000000);
            }

            /* Check for new elements. */
            rc = rtFileAioCtxProcessEvents(pCtxInt);
        }
    }

    *pcReqs = cRequestsCompleted;
    Assert(pCtxInt->hThreadWait == RTThreadSelf());
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);

    return rc;
}
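
/*
 * Minimal sketch of a completion loop built on RTFileAioCtxWait(): submit a
 * batch, then collect completions until every request has reported back.
 * Error handling is reduced to the bare minimum; illustrative only, not part
 * of the original file.
 */
#if 0 /* illustrative only */
static int rtFileAioWaitExample(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    int rc = RTFileAioCtxSubmit(hAioCtx, pahReqs, cReqs);
    size_t cLeft = cReqs;

    while (RT_SUCCESS(rc) && cLeft)
    {
        RTFILEAIOREQ ahCompleted[16];
        uint32_t cCompleted = 0;

        /* Wait up to 30 seconds for at least one request to complete. */
        rc = RTFileAioCtxWait(hAioCtx, 1 /*cMinReqs*/, 30000 /*cMillies*/,
                              &ahCompleted[0], RT_ELEMENTS(ahCompleted), &cCompleted);
        if (rc == VERR_INTERRUPTED)
        {
            /* Woken up by RTFileAioCtxWakeup(); just wait again. */
            rc = VINF_SUCCESS;
            continue;
        }

        for (uint32_t i = 0; RT_SUCCESS(rc) && i < cCompleted; i++)
        {
            size_t cbTransferred = 0;
            int rcReq = RTFileAioReqGetRC(ahCompleted[i], &cbTransferred);
            NOREF(rcReq); NOREF(cbTransferred); /* ... process the per-request result here ... */
            cLeft--;
        }
    }
    return rc;
}
#endif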


RTDECL(int) RTFileAioCtxWakeup(RTFILEAIOCTX hAioCtx)
{
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);

    /** @todo r=bird: Define the protocol for how to resume work after calling
     *        this function. */

    bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUp, true);
    if (!fWokenUp)
        rtFileAioCtxWakeup(pCtxInt);

    return VINF_SUCCESS;
}
