VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/posix/rtmempage-exec-mmap-heap-posix.cpp@ 40306

Last change on this file since 40306 was 40306, checked in by vboxsync, 13 years ago

build fix

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 23.1 KB
Line 
1/* $Id: rtmempage-exec-mmap-heap-posix.cpp 40306 2012-03-01 01:16:36Z vboxsync $ */
2/** @file
3 * IPRT - RTMemPage*, POSIX with heap.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#include "internal/iprt.h"
32#include <iprt/mem.h>
33
34#include <iprt/asm.h>
35#include <iprt/assert.h>
36#include <iprt/avl.h>
37#include <iprt/critsect.h>
38#include <iprt/err.h>
39#include <iprt/once.h>
40#include <iprt/param.h>
41#include <iprt/string.h>
42#include "internal/mem.h"
43
44#include <stdlib.h>
45#include <errno.h>
46#include <sys/mman.h>
47#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
48# define MAP_ANONYMOUS MAP_ANON
49#endif
50
51
52/*******************************************************************************
53* Defined Constants And Macros *
54*******************************************************************************/
55/** Threshold at which we switch to simply calling mmap. */
56#define RTMEMPAGEPOSIX_MMAP_THRESHOLD _128K
57/** The size of a heap block (power of two) - in bytes. */
58#define RTMEMPAGEPOSIX_BLOCK_SIZE _2M
59AssertCompile(RTMEMPAGEPOSIX_BLOCK_SIZE == (RTMEMPAGEPOSIX_BLOCK_SIZE / PAGE_SIZE) * PAGE_SIZE);
60/** The number of pages per heap block. */
61#define RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT (RTMEMPAGEPOSIX_BLOCK_SIZE / PAGE_SIZE)
62
63
64/*******************************************************************************
65* Structures and Typedefs *
66*******************************************************************************/
67/** Pointer to a page heap block. */
68typedef struct RTHEAPPAGEBLOCK *PRTHEAPPAGEBLOCK;
69
/**
 * A simple page heap.
 */
typedef struct RTHEAPPAGE
{
    /** Magic number (RTHEAPPAGE_MAGIC).  Set last during init so the heap
     * only looks valid once fully initialized. */
    uint32_t            u32Magic;
    /** The number of pages in the heap (in BlockTree). */
    uint32_t            cHeapPages;
    /** The number of currently free pages. */
    uint32_t            cFreePages;
    /** Number of successful allocation calls. */
    uint32_t            cAllocCalls;
    /** Number of successful free calls. */
    uint32_t            cFreeCalls;
    /** The free call number at which we last tried to minimize the heap. */
    uint32_t            uLastMinimizeCall;
    /** Tree of heap blocks, keyed by the block's address range. */
    AVLRPVTREE          BlockTree;
    /** Allocation hint no 1 (last freed). */
    PRTHEAPPAGEBLOCK    pHint1;
    /** Allocation hint no 2 (last alloc). */
    PRTHEAPPAGEBLOCK    pHint2;
    /** Critical section protecting the heap. */
    RTCRITSECT          CritSect;
    /** Set if the memory must be allocated with execute access. */
    bool                fExec;
} RTHEAPPAGE;
/** Magic value for RTHEAPPAGE::u32Magic (live heap). */
#define RTHEAPPAGE_MAGIC     UINT32_C(0xfeedface)
/** Pointer to a page heap. */
typedef RTHEAPPAGE *PRTHEAPPAGE;
101
102
/**
 * Describes a page heap block (RTMEMPAGEPOSIX_BLOCK_SIZE bytes of pages).
 */
typedef struct RTHEAPPAGEBLOCK
{
    /** The AVL tree node core (void pointer range covering the block memory). */
    AVLRPVNODECORE      Core;
    /** Allocation bitmap.  Set bits mark allocated pages. */
    uint32_t            bmAlloc[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    /** Allocation boundary bitmap.  Set bits mark the start of
     * allocations. */
    uint32_t            bmFirst[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    /** The number of free pages. */
    uint32_t            cFreePages;
    /** Pointer back to the heap owning this block. */
    PRTHEAPPAGE         pHeap;
} RTHEAPPAGEBLOCK;
120
121
/**
 * Argument package for rtHeapPageAllocCallback.
 */
typedef struct RTHEAPPAGEALLOCARGS
{
    /** The number of pages to allocate. */
    size_t          cPages;
    /** Non-null on success; NULL while no block has satisfied the request. */
    void           *pvAlloc;
    /** Whether the pages should be zeroed or not. */
    bool            fZero;
} RTHEAPPAGEALLOCARGS;
134
135
136/*******************************************************************************
137* Global Variables *
138*******************************************************************************/
139/** Initialize once structure. */
140static RTONCE g_MemPagePosixInitOnce = RTONCE_INITIALIZER;
141/** The page heap. */
142static RTHEAPPAGE g_MemPagePosixHeap;
143/** The exec page heap. */
144static RTHEAPPAGE g_MemExecPosixHeap;
145
146
#ifdef RT_OS_OS2
/*
 * A quick mmap/munmap mockup for OS/2 to avoid duplicating lots of good code.
 */
# define INCL_BASE
# include <os2.h>
# define MAP_PRIVATE    0
# define MAP_ANONYMOUS  0
# define MAP_FAILED     (void *)-1
# undef  mmap
# define mmap   iprt_mmap
# undef  munmap
# define munmap iprt_munmap

/**
 * Minimal mmap replacement implemented on top of DosAllocMem.
 *
 * @returns Pointer to the allocated memory, MAP_FAILED (and errno = ENOMEM)
 *          on failure.
 * @param   pvWhere     Ignored; no fixed mappings are supported.
 * @param   cb          The number of bytes to allocate.
 * @param   fProt       PROT_XXX protection flags.
 * @param   fFlags      Ignored; allocations are always anonymous + private.
 * @param   fd          Ignored.
 * @param   off         Ignored.
 */
static void *mmap(void *pvWhere, size_t cb, int fProt, int fFlags, int fd, off_t off)
{
    NOREF(pvWhere); NOREF(fFlags); NOREF(fd); NOREF(off); /* fFlags added to silence unused-parameter warnings. */
    void *pv = NULL;
    ULONG fAlloc = OBJ_ANY | PAG_COMMIT;
    if (fProt & PROT_EXEC)
        fAlloc |= PAG_EXECUTE;
    if (fProt & PROT_READ)
        fAlloc |= PAG_READ;
    if (fProt & PROT_WRITE)
        fAlloc |= PAG_WRITE;
    APIRET rc = DosAllocMem(&pv, cb, fAlloc);
    if (rc == NO_ERROR)
        return pv;
    errno = ENOMEM;
    return MAP_FAILED;
}

/**
 * Minimal munmap replacement implemented on top of DosFreeMem.
 *
 * @returns 0 on success, -1 (and errno = EINVAL) on failure.
 * @param   pv          The base address of the mapping to free.
 * @param   cb          Ignored; DosFreeMem always frees the whole allocation.
 */
static int munmap(void *pv, size_t cb)
{
    NOREF(cb); /* Added to silence the unused-parameter warning. */
    APIRET rc = DosFreeMem(pv);
    if (rc == NO_ERROR)
        return 0;
    errno = EINVAL;
    return -1;
}

#endif
189
190/**
191 * Initializes the heap.
192 *
193 * @returns IPRT status code.
194 * @param pHeap The page heap to initialize.
195 * @param fExec Whether the heap memory should be marked as
196 * executable or not.
197 */
198int RTHeapPageInit(PRTHEAPPAGE pHeap, bool fExec)
199{
200 int rc = RTCritSectInitEx(&pHeap->CritSect,
201 RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_BOOTSTRAP_HACK,
202 NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
203 if (RT_SUCCESS(rc))
204 {
205 pHeap->cHeapPages = 0;
206 pHeap->cFreePages = 0;
207 pHeap->cAllocCalls = 0;
208 pHeap->cFreeCalls = 0;
209 pHeap->uLastMinimizeCall = 0;
210 pHeap->BlockTree = NULL;
211 pHeap->fExec = fExec;
212 pHeap->u32Magic = RTHEAPPAGE_MAGIC;
213 }
214 return rc;
215}
216
217
218/**
219 * Deletes the heap and all the memory it tracks.
220 *
221 * @returns IPRT status code.
222 * @param pHeap The page heap to delete.
223 */
224int RTHeapPageDelete(PRTHEAPPAGE pHeap)
225{
226 NOREF(pHeap);
227 return VERR_NOT_IMPLEMENTED;
228}
229
230
231/**
232 * Avoids some gotos in rtHeapPageAllocFromBlock.
233 *
234 * @returns VINF_SUCCESS.
235 * @param pBlock The block.
236 * @param iPage The page to start allocating at.
237 * @param cPages The number of pages.
238 * @param fZero Whether to clear them.
239 * @param ppv Where to return the allocation address.
240 */
241DECLINLINE(int) rtHeapPageAllocFromBlockSuccess(PRTHEAPPAGEBLOCK pBlock, uint32_t iPage, size_t cPages, bool fZero, void **ppv)
242{
243 PRTHEAPPAGE pHeap = pBlock->pHeap;
244
245 ASMBitSet(&pBlock->bmFirst[0], iPage);
246 pBlock->cFreePages -= cPages;
247 pHeap->cFreePages -= cPages;
248 if (!pHeap->pHint2 || pHeap->pHint2->cFreePages < pBlock->cFreePages)
249 pHeap->pHint2 = pBlock;
250 pHeap->cAllocCalls++;
251
252 void *pv = (uint8_t *)pBlock->Core.Key + (iPage << PAGE_SHIFT);
253 *ppv = pv;
254 if (fZero)
255 RT_BZERO(pv, cPages << PAGE_SHIFT);
256
257 return VINF_SUCCESS;
258}
259
260
261/**
262 * Checks if a page range is free in the specified block.
263 *
264 * @returns @c true if the range is free, @c false if not.
265 * @param pBlock The block.
266 * @param iFirst The first page to check.
267 * @param cPages The number of pages to check.
268 */
269DECLINLINE(bool) rtHeapPageIsPageRangeFree(PRTHEAPPAGEBLOCK pBlock, uint32_t iFirst, uint32_t cPages)
270{
271 uint32_t i = iFirst + cPages;
272 while (i-- > iFirst)
273 {
274 if (ASMBitTest(&pBlock->bmAlloc[0], i))
275 return false;
276 Assert(!ASMBitTest(&pBlock->bmFirst[0], i));
277 }
278 return true;
279}
280
281
/**
 * Tries to allocate a chunk of pages from a heap block.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if the allocation failed.
 * @param   pBlock      The block to allocate from.
 * @param   cPages      The size of the allocation.
 * @param   fZero       Whether it should be zeroed or not.
 * @param   ppv         Where to return the allocation address on success.
 */
DECLINLINE(int) rtHeapPageAllocFromBlock(PRTHEAPPAGEBLOCK pBlock, size_t cPages, bool fZero, void **ppv)
{
    if (pBlock->cFreePages >= cPages)
    {
        /* cFreePages >= cPages (> 0) guarantees at least one clear bit. */
        int iPage = ASMBitFirstClear(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT);
        Assert(iPage >= 0);

        /* special case: single page. */
        if (cPages == 1)
        {
            ASMBitSet(&pBlock->bmAlloc[0], iPage);
            return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fZero, ppv);
        }

        /* Multi-page case: iPage is always at the start of a clear run here;
           check whether the cPages - 1 pages following it are free too. */
        while (   iPage >= 0
               && (unsigned)iPage <= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT - cPages)
        {
            if (rtHeapPageIsPageRangeFree(pBlock, iPage + 1, cPages - 1))
            {
                ASMBitSetRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fZero, ppv);
            }

            /* next: skip past the allocated run we just hit, then to the
               start of the next clear run (if any). */
            iPage = ASMBitNextSet(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT, iPage);
            if (iPage < 0 || iPage >= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT - 1)
                break;
            iPage = ASMBitNextClear(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT, iPage);
        }
    }

    return VERR_NO_MEMORY;
}
325
326
327/**
328 * RTAvlrPVDoWithAll callback.
329 *
330 * @returns 0 to continue the enum, non-zero to quit it.
331 * @param pNode The node.
332 * @param pvUser The user argument.
333 */
334static DECLCALLBACK(int) rtHeapPageAllocCallback(PAVLRPVNODECORE pNode, void *pvUser)
335{
336 PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
337 RTHEAPPAGEALLOCARGS *pArgs = (RTHEAPPAGEALLOCARGS *)pvUser;
338 int rc = rtHeapPageAllocFromBlock(pBlock, pArgs->cPages, pArgs->fZero, &pArgs->pvAlloc);
339 return RT_SUCCESS(rc) ? 1 : 0;
340}
341
342
/**
 * Worker for RTHeapPageAlloc.
 *
 * @returns IPRT status code
 * @param   pHeap       The heap - locked on entry and on return, but the
 *                      lock is dropped temporarily when growing the heap.
 * @param   cPages      The page count.
 * @param   pszTag      The tag (currently unused).
 * @param   fZero       Whether to zero the memory.
 * @param   ppv         Where to return the address of the allocation
 *                      on success.
 */
static int rtHeapPageAllocLocked(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, bool fZero, void **ppv)
{
    int rc;
    NOREF(pszTag);

    /*
     * Use the hints first.
     */
    if (pHeap->pHint1)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint1, cPages, fZero, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }
    if (pHeap->pHint2)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint2, cPages, fZero, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }

    /*
     * Search the heap for a block with enough free space.
     *
     * N.B. This search algorithm is not optimal at all. What (hopefully) saves
     *      it are the two hints above.
     */
    if (pHeap->cFreePages >= cPages)
    {
        RTHEAPPAGEALLOCARGS Args;
        Args.cPages  = cPages;
        Args.pvAlloc = NULL;
        Args.fZero   = fZero;
        RTAvlrPVDoWithAll(&pHeap->BlockTree, true /*fFromLeft*/, rtHeapPageAllocCallback, &Args);
        if (Args.pvAlloc)
        {
            *ppv = Args.pvAlloc;
            return VINF_SUCCESS;
        }
    }

    /*
     * Didn't find anything, so expand the heap with a new block.  The mmap
     * and rtMemBaseAlloc calls are done with the critical section left so we
     * don't hold the lock across (potentially slow) system calls; note that
     * other threads may allocate and free while it is dropped.
     */
    RTCritSectLeave(&pHeap->CritSect);
    void *pvPages;
    pvPages = mmap(NULL, RTMEMPAGEPOSIX_BLOCK_SIZE,
                   PROT_READ | PROT_WRITE | (pHeap->fExec ? PROT_EXEC : 0),
                   MAP_PRIVATE | MAP_ANONYMOUS,
                   -1, 0);
    if (pvPages == MAP_FAILED)
    {
        RTCritSectEnter(&pHeap->CritSect);
        return RTErrConvertFromErrno(errno);

    }
    /** @todo Eliminate this rtMemBaseAlloc dependency! */
    PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)rtMemBaseAlloc(sizeof(*pBlock));
    if (!pBlock)
    {
        munmap(pvPages, RTMEMPAGEPOSIX_BLOCK_SIZE);
        RTCritSectEnter(&pHeap->CritSect);
        return VERR_NO_MEMORY;
    }

    RT_ZERO(*pBlock);
    pBlock->Core.Key     = pvPages;
    pBlock->Core.KeyLast = (uint8_t *)pvPages + RTMEMPAGEPOSIX_BLOCK_SIZE - 1;
    pBlock->cFreePages   = RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
    pBlock->pHeap        = pHeap;

    RTCritSectEnter(&pHeap->CritSect);

    /* Link the new block into the tree and the accounting. */
    bool fRc = RTAvlrPVInsert(&pHeap->BlockTree, &pBlock->Core); Assert(fRc); NOREF(fRc);
    pHeap->cFreePages += RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
    pHeap->cHeapPages += RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;

    /*
     * Grab memory from the new block (cannot fail).
     */
    rc = rtHeapPageAllocFromBlock(pBlock, cPages, fZero, ppv);
    Assert(rc == VINF_SUCCESS);

    return rc;
}
439
440
441/**
442 * Allocates one or more pages off the heap.
443 *
444 * @returns IPRT status code.
445 * @param pHeap The page heap.
446 * @param cPages The number of pages to allocate.
447 * @param pszTag The allocation tag.
448 * @param fZero Set if the pages should be zeroed or not.
449 * @param ppv Where to return the pointer to the pages.
450 */
451int RTHeapPageAlloc(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, bool fZero, void **ppv)
452{
453 /*
454 * Validate input.
455 */
456 AssertPtr(ppv);
457 *ppv = NULL;
458 AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
459 AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);
460 AssertMsgReturn(cPages < RTMEMPAGEPOSIX_BLOCK_SIZE, ("%#zx\n", cPages), VERR_OUT_OF_RANGE);
461
462 /*
463 * Grab the lock and call a worker with many returns.
464 */
465 int rc = RTCritSectEnter(&pHeap->CritSect);
466 if (RT_SUCCESS(rc))
467 {
468 rc = rtHeapPageAllocLocked(pHeap, cPages, pszTag, fZero, ppv);
469 RTCritSectLeave(&pHeap->CritSect);
470 }
471
472 return rc;
473}
474
475
476/**
477 * RTAvlrPVDoWithAll callback.
478 *
479 * @returns 0 to continue the enum, non-zero to quit it.
480 * @param pNode The node.
481 * @param pvUser Pointer to a block pointer variable. For returning
482 * the address of the block to be freed.
483 */
484static DECLCALLBACK(int) rtHeapPageFindUnusedBlockCallback(PAVLRPVNODECORE pNode, void *pvUser)
485{
486 PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
487 if (pBlock->cFreePages == RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT)
488 {
489 *(PRTHEAPPAGEBLOCK *)pvUser = pBlock;
490 return 1;
491 }
492 return 0;
493}
494
495
/**
 * Frees one or more pages previously allocated from the heap.
 *
 * @returns IPRT status code.
 * @param   pHeap       The page heap.
 * @param   pv          Pointer to what RTHeapPageAlloc returned.  NULL is
 *                      quietly ignored.
 * @param   cPages      The number of pages that was allocated.
 */
int RTHeapPageFree(PRTHEAPPAGE pHeap, void *pv, size_t cPages)
{
    /*
     * Validate input.
     */
    if (!pv)
        return VINF_SUCCESS;
    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Grab the lock and look up the page.
     */
    int rc = RTCritSectEnter(&pHeap->CritSect);
    if (RT_SUCCESS(rc))
    {
        PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)RTAvlrPVRangeGet(&pHeap->BlockTree, pv);
        if (pBlock)
        {
            /*
             * Validate the specified address range against the allocation
             * bitmaps before touching anything.
             */
            uint32_t const iPage = (uint32_t)(((uintptr_t)pv - (uintptr_t)pBlock->Core.Key) >> PAGE_SHIFT);
            /* Check the range is within the block. */
            bool fOk = iPage + cPages <= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
            /* Check that it's the start of an allocation. */
            fOk = fOk && ASMBitTest(&pBlock->bmFirst[0], iPage);
            /* Check that the range ends at an allocation boundary. */
            fOk = fOk && (   iPage + cPages == RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT
                          || ASMBitTest(&pBlock->bmFirst[0], iPage + cPages)
                          || !ASMBitTest(&pBlock->bmAlloc[0], iPage + cPages));
            /* Check the other pages: allocated but not allocation starts. */
            uint32_t const iLastPage = iPage + cPages - 1;
            for (uint32_t i = iPage + 1; i < iLastPage && fOk; i++)
                fOk = ASMBitTest(&pBlock->bmAlloc[0], i)
                   && !ASMBitTest(&pBlock->bmFirst[0], i);
            if (fOk)
            {
                /*
                 * Free the memory.
                 */
                ASMBitClearRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                ASMBitClear(&pBlock->bmFirst[0], iPage);
                pBlock->cFreePages += cPages;
                pHeap->cFreePages  += cPages;
                pHeap->cFreeCalls++;
                if (!pHeap->pHint1 || pHeap->pHint1->cFreePages < pBlock->cFreePages)
                    pHeap->pHint1 = pBlock;

                /*
                 * Shrink the heap. Not very efficient because of the AVL tree.
                 */
                if (   pHeap->cFreePages >= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT * 3
                    && pHeap->cFreePages >= pHeap->cHeapPages / 2 /* 50% free */
                    && pHeap->cFreeCalls - pHeap->uLastMinimizeCall > RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT
                   )
                {
                    uint32_t cFreePageTarget = pHeap->cHeapPages / 4; /* 25% free */
                    while (pHeap->cFreePages > cFreePageTarget)
                    {
                        pHeap->uLastMinimizeCall = pHeap->cFreeCalls;

                        pBlock = NULL;
                        RTAvlrPVDoWithAll(&pHeap->BlockTree, false /*fFromLeft*/,
                                          rtHeapPageFindUnusedBlockCallback, &pBlock);
                        if (!pBlock)
                            break;

                        /* Unlink the block and invalidate the hints first, then
                           drop the lock while doing the (potentially slow)
                           munmap system call. */
                        void *pv2 = RTAvlrPVRemove(&pHeap->BlockTree, pBlock->Core.Key); Assert(pv2); NOREF(pv2);
                        pHeap->cHeapPages -= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
                        pHeap->cFreePages -= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
                        pHeap->pHint1      = NULL;
                        pHeap->pHint2      = NULL;
                        RTCritSectLeave(&pHeap->CritSect);

                        munmap(pBlock->Core.Key, RTMEMPAGEPOSIX_BLOCK_SIZE);
                        pBlock->Core.Key = pBlock->Core.KeyLast = NULL;
                        pBlock->cFreePages = 0;
                        rtMemBaseFree(pBlock);

                        RTCritSectEnter(&pHeap->CritSect);
                    }
                }
            }
            else
                rc = VERR_INVALID_POINTER;
        }
        else
            rc = VERR_INVALID_POINTER;

        RTCritSectLeave(&pHeap->CritSect);
    }

    return rc;
}
599
600
601/**
602 * Initializes the heap.
603 *
604 * @returns IPRT status code
605 * @param pvUser1 Unused.
606 * @param pvUser2 Unused.
607 */
608static DECLCALLBACK(int) rtMemPagePosixInitOnce(void *pvUser1, void *pvUser2)
609{
610 NOREF(pvUser1); NOREF(pvUser2);
611 int rc = RTHeapPageInit(&g_MemPagePosixHeap, false /*fExec*/);
612 if (RT_SUCCESS(rc))
613 {
614 rc = RTHeapPageInit(&g_MemExecPosixHeap, true /*fExec*/);
615 if (RT_SUCCESS(rc))
616 return rc;
617 RTHeapPageDelete(&g_MemPagePosixHeap);
618 }
619 return rc;
620}
621
622
623/**
624 * Allocates memory from the specified heap.
625 *
626 * @returns Address of the allocated memory.
627 * @param cb The number of bytes to allocate.
628 * @param pszTag The tag.
629 * @param fZero Whether to zero the memory or not.
630 * @param pHeap The heap to use.
631 */
632static void *rtMemPagePosixAlloc(size_t cb, const char *pszTag, bool fZero, PRTHEAPPAGE pHeap)
633{
634 /*
635 * Validate & adjust the input.
636 */
637 Assert(cb > 0);
638 NOREF(pszTag);
639 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
640
641 /*
642 * If the allocation is relatively large, we use mmap/munmap directly.
643 */
644 void *pv;
645 if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
646 {
647
648 pv = mmap(NULL, cb,
649 PROT_READ | PROT_WRITE | (pHeap == &g_MemExecPosixHeap ? PROT_EXEC : 0),
650 MAP_PRIVATE | MAP_ANONYMOUS,
651 -1, 0);
652 if (pv != MAP_FAILED)
653 {
654 AssertPtr(pv);
655 if (fZero)
656 RT_BZERO(pv, cb);
657 }
658 else
659 pv = NULL;
660 }
661 else
662 {
663 int rc = RTOnce(&g_MemPagePosixInitOnce, rtMemPagePosixInitOnce, NULL, NULL);
664 if (RT_SUCCESS(rc))
665 rc = RTHeapPageAlloc(pHeap, cb >> PAGE_SHIFT, pszTag, fZero, &pv);
666 if (RT_FAILURE(rc))
667 pv = NULL;
668 }
669
670 return pv;
671}
672
673
674/**
675 * Free memory allocated by rtMemPagePosixAlloc.
676 *
677 * @param pv The address of the memory to free.
678 * @param cb The size.
679 * @param pHeap The heap.
680 */
681static void rtMemPagePosixFree(void *pv, size_t cb, PRTHEAPPAGE pHeap)
682{
683 /*
684 * Validate & adjust the input.
685 */
686 if (!pv)
687 return;
688 AssertPtr(pv);
689 Assert(cb > 0);
690 Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
691 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
692
693 /*
694 * If the allocation is relatively large, we use mmap/munmap directly.
695 */
696 if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
697 {
698 int rc = munmap(pv, cb);
699 AssertMsg(rc == 0, ("rc=%d pv=%p cb=%#zx\n", rc, pv, cb)); NOREF(rc);
700 }
701 else
702 {
703 int rc = RTHeapPageFree(pHeap, pv, cb >> PAGE_SHIFT);
704 AssertRC(rc);
705 }
706}
707
708
709
710
711
712RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW
713{
714 return rtMemPagePosixAlloc(cb, pszTag, false /*fZero*/, &g_MemPagePosixHeap);
715}
716
717
718RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW
719{
720 return rtMemPagePosixAlloc(cb, pszTag, true /*fZero*/, &g_MemPagePosixHeap);
721}
722
723
724RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW
725{
726 return rtMemPagePosixFree(pv, cb, &g_MemPagePosixHeap);
727}
728
729
730
731
732
733RTDECL(void *) RTMemExecAllocTag(size_t cb, const char *pszTag) RT_NO_THROW
734{
735 return rtMemPagePosixAlloc(cb, pszTag, false /*fZero*/, &g_MemExecPosixHeap);
736}
737
738
739RTDECL(void) RTMemExecFree(void *pv, size_t cb) RT_NO_THROW
740{
741 return rtMemPagePosixFree(pv, cb, &g_MemExecPosixHeap);
742}
743
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette