VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/posix/rtmempage-exec-mmap-heap-posix.cpp@ 39443

Last change on this file since 39443 was 39083, checked in by vboxsync, 14 years ago

IPRT: -Wunused-parameter.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.1 KB
/* $Id: rtmempage-exec-mmap-heap-posix.cpp 39083 2011-10-22 00:28:46Z vboxsync $ */
/** @file
 * IPRT - RTMemPage*, POSIX with heap.
 */

/*
 * Copyright (C) 2006-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "internal/iprt.h"
#include <iprt/mem.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/avl.h>
#include <iprt/critsect.h>
#include <iprt/err.h>
#include <iprt/once.h>
#include <iprt/param.h>
#include <iprt/string.h>
#include "internal/mem.h"

#include <stdlib.h>
#include <errno.h>
#include <sys/mman.h>
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
# define MAP_ANONYMOUS MAP_ANON
#endif



/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Threshold at which we switch to simply calling mmap. */
#define RTMEMPAGEPOSIX_MMAP_THRESHOLD   _128K
/** The size of a heap block (power of two) - in bytes. */
#define RTMEMPAGEPOSIX_BLOCK_SIZE       _2M
AssertCompile(RTMEMPAGEPOSIX_BLOCK_SIZE == (RTMEMPAGEPOSIX_BLOCK_SIZE / PAGE_SIZE) * PAGE_SIZE);
/** The number of pages per heap block. */
#define RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT (RTMEMPAGEPOSIX_BLOCK_SIZE / PAGE_SIZE)
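/* Note: with the usual 4K PAGE_SIZE this gives 2M / 4K = 512 pages per heap
   block, so the per-block allocation bitmaps below need 512 / 32 = 16 uint32_t
   entries each, while requests of _128K (32 pages) and up skip the heap and go
   straight to mmap. */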


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/** Pointer to a page heap block. */
typedef struct RTHEAPPAGEBLOCK *PRTHEAPPAGEBLOCK;

/**
 * A simple page heap.
 */
typedef struct RTHEAPPAGE
{
    /** Magic number (RTHEAPPAGE_MAGIC). */
    uint32_t            u32Magic;
    /** The number of pages in the heap (in BlockTree). */
    uint32_t            cHeapPages;
    /** The number of currently free pages. */
    uint32_t            cFreePages;
    /** Number of successful allocation calls. */
    uint32_t            cAllocCalls;
    /** Number of successful free calls. */
    uint32_t            cFreeCalls;
    /** The free call number at which we last tried to minimize the heap. */
    uint32_t            uLastMinimizeCall;
    /** Tree of heap blocks. */
    AVLRPVTREE          BlockTree;
    /** Allocation hint no 1 (last freed). */
    PRTHEAPPAGEBLOCK    pHint1;
    /** Allocation hint no 2 (last alloc). */
    PRTHEAPPAGEBLOCK    pHint2;
    /** Critical section protecting the heap. */
    RTCRITSECT          CritSect;
    /** Set if the memory must be allocated with execute access. */
    bool                fExec;
} RTHEAPPAGE;
#define RTHEAPPAGE_MAGIC    UINT32_C(0xfeedface)
/** Pointer to a page heap. */
typedef RTHEAPPAGE *PRTHEAPPAGE;


/**
 * Describes a page heap block.
 */
typedef struct RTHEAPPAGEBLOCK
{
    /** The AVL tree node core (void pointer range). */
    AVLRPVNODECORE      Core;
    /** Allocation bitmap.  Set bits mark allocated pages. */
    uint32_t            bmAlloc[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    /** Allocation boundary bitmap.  Set bits mark the start of
     * allocations. */
    uint32_t            bmFirst[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    /** The number of free pages. */
    uint32_t            cFreePages;
    /** Pointer back to the heap. */
    PRTHEAPPAGE         pHeap;
} RTHEAPPAGEBLOCK;
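/* Note: the two bitmaps encode both allocation state and allocation starts.
   E.g. a three page allocation beginning at page 5 sets bits 5..7 in bmAlloc
   but only bit 5 in bmFirst; RTHeapPageFree later uses the bmFirst bit to
   verify that the caller passes back the start of an allocation. */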


/**
 * Argument package for rtHeapPageAllocCallback.
 */
typedef struct RTHEAPPAGEALLOCARGS
{
    /** The number of pages to allocate. */
    size_t              cPages;
    /** Non-null on success. */
    void               *pvAlloc;
    /** Whether the pages should be zeroed or not. */
    bool                fZero;
} RTHEAPPAGEALLOCARGS;


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Initialize once structure. */
static RTONCE       g_MemPagePosixInitOnce = RTONCE_INITIALIZER;
/** The page heap. */
static RTHEAPPAGE   g_MemPagePosixHeap;
/** The exec page heap. */
static RTHEAPPAGE   g_MemExecPosixHeap;


/**
 * Initializes the heap.
 *
 * @returns IPRT status code.
 * @param   pHeap       The page heap to initialize.
 * @param   fExec       Whether the heap memory should be marked as
 *                      executable or not.
 */
int RTHeapPageInit(PRTHEAPPAGE pHeap, bool fExec)
{
    int rc = RTCritSectInitEx(&pHeap->CritSect,
                              RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_BOOTSTRAP_HACK,
                              NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
    if (RT_SUCCESS(rc))
    {
        pHeap->cHeapPages        = 0;
        pHeap->cFreePages        = 0;
        pHeap->cAllocCalls       = 0;
        pHeap->cFreeCalls        = 0;
        pHeap->uLastMinimizeCall = 0;
        pHeap->BlockTree         = NULL;
        pHeap->fExec             = fExec;
        pHeap->u32Magic          = RTHEAPPAGE_MAGIC;
    }
    return rc;
}


/**
 * Deletes the heap and all the memory it tracks.
 *
 * @returns IPRT status code.
 * @param   pHeap       The page heap to delete.
 */
int RTHeapPageDelete(PRTHEAPPAGE pHeap)
{
    NOREF(pHeap);
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Avoids some gotos in rtHeapPageAllocFromBlock.
 *
 * @returns VINF_SUCCESS.
 * @param   pBlock      The block.
 * @param   iPage       The page to start allocating at.
 * @param   cPages      The number of pages.
 * @param   fZero       Whether to clear them.
 * @param   ppv         Where to return the allocation address.
 */
DECLINLINE(int) rtHeapPageAllocFromBlockSuccess(PRTHEAPPAGEBLOCK pBlock, uint32_t iPage, size_t cPages, bool fZero, void **ppv)
{
    PRTHEAPPAGE pHeap = pBlock->pHeap;

    ASMBitSet(&pBlock->bmFirst[0], iPage);
    pBlock->cFreePages -= cPages;
    pHeap->cFreePages  -= cPages;
    if (!pHeap->pHint2 || pHeap->pHint2->cFreePages < pBlock->cFreePages)
        pHeap->pHint2 = pBlock;
    pHeap->cAllocCalls++;

    void *pv = (uint8_t *)pBlock->Core.Key + (iPage << PAGE_SHIFT);
    *ppv = pv;
    if (fZero)
        RT_BZERO(pv, cPages << PAGE_SHIFT);

    return VINF_SUCCESS;
}


/**
 * Checks if a page range is free in the specified block.
 *
 * @returns @c true if the range is free, @c false if not.
 * @param   pBlock      The block.
 * @param   iFirst      The first page to check.
 * @param   cPages      The number of pages to check.
 */
DECLINLINE(bool) rtHeapPageIsPageRangeFree(PRTHEAPPAGEBLOCK pBlock, uint32_t iFirst, uint32_t cPages)
{
    uint32_t i = iFirst + cPages;
    while (i-- > iFirst)
    {
        if (ASMBitTest(&pBlock->bmAlloc[0], i))
            return false;
        Assert(!ASMBitTest(&pBlock->bmFirst[0], i));
    }
    return true;
}


/**
 * Tries to allocate a chunk of pages from a heap block.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if the allocation failed.
 * @param   pBlock      The block to allocate from.
 * @param   cPages      The size of the allocation.
 * @param   fZero       Whether it should be zeroed or not.
 * @param   ppv         Where to return the allocation address on success.
 */
DECLINLINE(int) rtHeapPageAllocFromBlock(PRTHEAPPAGEBLOCK pBlock, size_t cPages, bool fZero, void **ppv)
{
    if (pBlock->cFreePages >= cPages)
    {
        int iPage = ASMBitFirstClear(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT);
        Assert(iPage >= 0);

        /* special case: single page. */
        if (cPages == 1)
        {
            ASMBitSet(&pBlock->bmAlloc[0], iPage);
            return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fZero, ppv);
        }

        while (   iPage >= 0
               && (unsigned)iPage <= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT - cPages)
        {
            if (rtHeapPageIsPageRangeFree(pBlock, iPage + 1, cPages - 1))
            {
                ASMBitSetRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fZero, ppv);
            }

            /* next */
            iPage = ASMBitNextSet(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT, iPage);
            if (iPage < 0 || iPage >= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT - 1)
                break;
            iPage = ASMBitNextClear(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT, iPage);
        }
    }

    return VERR_NO_MEMORY;
}
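/* Example of the first-fit scan above: with cPages = 2 and the first eight
   bmAlloc bits being 1,1,1,0,1,1,0,0 (pages 0..7), ASMBitFirstClear returns
   page 3, rtHeapPageIsPageRangeFree fails because page 4 is taken, the
   ASMBitNextSet/ASMBitNextClear pair skips ahead to page 6, and pages 6..7
   are then allocated. */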


/**
 * RTAvlrPVDoWithAll callback.
 *
 * @returns 0 to continue the enum, non-zero to quit it.
 * @param   pNode       The node.
 * @param   pvUser      The user argument.
 */
static DECLCALLBACK(int) rtHeapPageAllocCallback(PAVLRPVNODECORE pNode, void *pvUser)
{
    PRTHEAPPAGEBLOCK        pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
    RTHEAPPAGEALLOCARGS    *pArgs  = (RTHEAPPAGEALLOCARGS *)pvUser;
    int rc = rtHeapPageAllocFromBlock(pBlock, pArgs->cPages, pArgs->fZero, &pArgs->pvAlloc);
    return RT_SUCCESS(rc) ? 1 : 0;
}


/**
 * Worker for RTHeapPageAlloc.
 *
 * @returns IPRT status code
 * @param   pHeap       The heap - locked.
 * @param   cPages      The page count.
 * @param   pszTag      The tag.
 * @param   fZero       Whether to zero the memory.
 * @param   ppv         Where to return the address of the allocation
 *                      on success.
 */
static int rtHeapPageAllocLocked(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, bool fZero, void **ppv)
{
    int rc;
    NOREF(pszTag);

    /*
     * Use the hints first.
     */
    if (pHeap->pHint1)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint1, cPages, fZero, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }
    if (pHeap->pHint2)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint2, cPages, fZero, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }

    /*
     * Search the heap for a block with enough free space.
     *
     * N.B. This search algorithm is not optimal at all. What (hopefully) saves
     *      it are the two hints above.
     */
    if (pHeap->cFreePages >= cPages)
    {
        RTHEAPPAGEALLOCARGS Args;
        Args.cPages  = cPages;
        Args.pvAlloc = NULL;
        Args.fZero   = fZero;
        RTAvlrPVDoWithAll(&pHeap->BlockTree, true /*fFromLeft*/, rtHeapPageAllocCallback, &Args);
        if (Args.pvAlloc)
        {
            *ppv = Args.pvAlloc;
            return VINF_SUCCESS;
        }
    }

    /*
     * Didn't find anything, so expand the heap with a new block.
     */
    RTCritSectLeave(&pHeap->CritSect);
    void *pvPages;
    pvPages = mmap(NULL, RTMEMPAGEPOSIX_BLOCK_SIZE,
                   PROT_READ | PROT_WRITE | (pHeap->fExec ? PROT_EXEC : 0),
                   MAP_PRIVATE | MAP_ANONYMOUS,
                   -1, 0);
    if (pvPages == MAP_FAILED)
    {
        RTCritSectEnter(&pHeap->CritSect);
        return RTErrConvertFromErrno(errno);
    }

    /** @todo Eliminate this rtMemBaseAlloc dependency! */
    PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)rtMemBaseAlloc(sizeof(*pBlock));
    if (!pBlock)
    {
        munmap(pvPages, RTMEMPAGEPOSIX_BLOCK_SIZE);
        RTCritSectEnter(&pHeap->CritSect);
        return VERR_NO_MEMORY;
    }

    RT_ZERO(*pBlock);
    pBlock->Core.Key     = pvPages;
    pBlock->Core.KeyLast = (uint8_t *)pvPages + RTMEMPAGEPOSIX_BLOCK_SIZE - 1;
    pBlock->cFreePages   = RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
    pBlock->pHeap        = pHeap;

    RTCritSectEnter(&pHeap->CritSect);

    bool fRc = RTAvlrPVInsert(&pHeap->BlockTree, &pBlock->Core); Assert(fRc); NOREF(fRc);
    pHeap->cFreePages += RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
    pHeap->cHeapPages += RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;

    /*
     * Grab memory from the new block (cannot fail).
     */
    rc = rtHeapPageAllocFromBlock(pBlock, cPages, fZero, ppv);
    Assert(rc == VINF_SUCCESS);

    return rc;
}


/**
 * Allocates one or more pages off the heap.
 *
 * @returns IPRT status code.
 * @param   pHeap       The page heap.
 * @param   cPages      The number of pages to allocate.
 * @param   pszTag      The allocation tag.
 * @param   fZero       Whether the pages should be zeroed or not.
 * @param   ppv         Where to return the pointer to the pages.
 */
int RTHeapPageAlloc(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, bool fZero, void **ppv)
{
    /*
     * Validate input.
     */
    AssertPtr(ppv);
    *ppv = NULL;
    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);
    AssertMsgReturn(cPages < RTMEMPAGEPOSIX_BLOCK_SIZE, ("%#zx\n", cPages), VERR_OUT_OF_RANGE);

    /*
     * Grab the lock and call a worker with many returns.
     */
    int rc = RTCritSectEnter(&pHeap->CritSect);
    if (RT_SUCCESS(rc))
    {
        rc = rtHeapPageAllocLocked(pHeap, cPages, pszTag, fZero, ppv);
        RTCritSectLeave(&pHeap->CritSect);
    }

    return rc;
}
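/* Usage sketch for a private heap (illustrative only, error handling trimmed;
   the RTMemPage* API further down normally drives the two global heaps instead):

       RTHEAPPAGE MyHeap;
       int rc = RTHeapPageInit(&MyHeap, false); // fExec = false
       if (RT_SUCCESS(rc))
       {
           void *pv = NULL;
           rc = RTHeapPageAlloc(&MyHeap, 4, "tag", true, &pv); // 4 zeroed pages
           if (RT_SUCCESS(rc))
               RTHeapPageFree(&MyHeap, pv, 4);
       }

   Note that RTHeapPageDelete is not implemented yet, so such a heap cannot be
   torn down again. */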


/**
 * RTAvlrPVDoWithAll callback.
 *
 * @returns 0 to continue the enum, non-zero to quit it.
 * @param   pNode       The node.
 * @param   pvUser      Pointer to a block pointer variable.  For returning
 *                      the address of the block to be freed.
 */
static DECLCALLBACK(int) rtHeapPageFindUnusedBlockCallback(PAVLRPVNODECORE pNode, void *pvUser)
{
    PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
    if (pBlock->cFreePages == RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT)
    {
        *(PRTHEAPPAGEBLOCK *)pvUser = pBlock;
        return 1;
    }
    return 0;
}


/**
 * Frees one or more pages previously allocated from the heap.
 *
 * @returns IPRT status code.
 * @param   pHeap       The page heap.
 * @param   pv          Pointer to what RTHeapPageAlloc returned.
 * @param   cPages      The number of pages that were allocated.
 */
int RTHeapPageFree(PRTHEAPPAGE pHeap, void *pv, size_t cPages)
{
    /*
     * Validate input.
     */
    if (!pv)
        return VINF_SUCCESS;
    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Grab the lock and look up the page.
     */
    int rc = RTCritSectEnter(&pHeap->CritSect);
    if (RT_SUCCESS(rc))
    {
        PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)RTAvlrPVRangeGet(&pHeap->BlockTree, pv);
        if (pBlock)
        {
            /*
             * Validate the specified address range.
             */
            uint32_t const iPage = (uint32_t)(((uintptr_t)pv - (uintptr_t)pBlock->Core.Key) >> PAGE_SHIFT);
            /* Check the range is within the block. */
            bool fOk = iPage + cPages <= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
            /* Check that it's the start of an allocation. */
            fOk = fOk && ASMBitTest(&pBlock->bmFirst[0], iPage);
            /* Check that the range ends at an allocation boundary. */
            fOk = fOk && (   iPage + cPages == RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT
                          || ASMBitTest(&pBlock->bmFirst[0], iPage + cPages)
                          || !ASMBitTest(&pBlock->bmAlloc[0], iPage + cPages));
            /* Check the other pages. */
            uint32_t const iLastPage = iPage + cPages - 1;
            for (uint32_t i = iPage + 1; i < iLastPage && fOk; i++)
                fOk = ASMBitTest(&pBlock->bmAlloc[0], i)
                   && !ASMBitTest(&pBlock->bmFirst[0], i);
            if (fOk)
            {
                /*
                 * Free the memory.
                 */
                ASMBitClearRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                ASMBitClear(&pBlock->bmFirst[0], iPage);
                pBlock->cFreePages += cPages;
                pHeap->cFreePages  += cPages;
                pHeap->cFreeCalls++;
                if (!pHeap->pHint1 || pHeap->pHint1->cFreePages < pBlock->cFreePages)
                    pHeap->pHint1 = pBlock;

                /*
                 * Shrink the heap.  Not very efficient because of the AVL tree.
                 */
                if (   pHeap->cFreePages >= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT * 3
                    && pHeap->cFreePages >= pHeap->cHeapPages / 2 /* 50% free */
                    && pHeap->cFreeCalls - pHeap->uLastMinimizeCall > RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT
                   )
                {
                    uint32_t cFreePageTarget = pHeap->cHeapPages / 4; /* 25% free */
                    while (pHeap->cFreePages > cFreePageTarget)
                    {
                        pHeap->uLastMinimizeCall = pHeap->cFreeCalls;

                        pBlock = NULL;
                        RTAvlrPVDoWithAll(&pHeap->BlockTree, false /*fFromLeft*/,
                                          rtHeapPageFindUnusedBlockCallback, &pBlock);
                        if (!pBlock)
                            break;

                        void *pv2 = RTAvlrPVRemove(&pHeap->BlockTree, pBlock->Core.Key); Assert(pv2); NOREF(pv2);
                        pHeap->cHeapPages -= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
                        pHeap->cFreePages -= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
                        pHeap->pHint1      = NULL;
                        pHeap->pHint2      = NULL;
                        RTCritSectLeave(&pHeap->CritSect);

                        munmap(pBlock->Core.Key, RTMEMPAGEPOSIX_BLOCK_SIZE);
                        pBlock->Core.Key = pBlock->Core.KeyLast = NULL;
                        pBlock->cFreePages = 0;
                        rtMemBaseFree(pBlock);

                        RTCritSectEnter(&pHeap->CritSect);
                    }
                }
            }
            else
                rc = VERR_INVALID_POINTER;
        }
        else
            rc = VERR_INVALID_POINTER;

        RTCritSectLeave(&pHeap->CritSect);
    }

    return rc;
}
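/* Illustrative numbers for the shrink heuristic above (assuming 4K pages, i.e.
   512 pages per block): trimming only starts once at least 1536 pages (three
   blocks worth) are free, at least half of all heap pages are free, and more
   than 512 free calls have happened since the last minimize; completely free
   blocks are then unmapped until the free page count drops to roughly a
   quarter of the heap size at the point the trimming started. */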


/**
 * Initializes the heap.
 *
 * @returns IPRT status code
 * @param   pvUser1     Unused.
 * @param   pvUser2     Unused.
 */
static DECLCALLBACK(int) rtMemPagePosixInitOnce(void *pvUser1, void *pvUser2)
{
    NOREF(pvUser1); NOREF(pvUser2);
    int rc = RTHeapPageInit(&g_MemPagePosixHeap, false /*fExec*/);
    if (RT_SUCCESS(rc))
    {
        rc = RTHeapPageInit(&g_MemExecPosixHeap, true /*fExec*/);
        if (RT_SUCCESS(rc))
            return rc;
        RTHeapPageDelete(&g_MemPagePosixHeap);
    }
    return rc;
}


/**
 * Allocates memory from the specified heap.
 *
 * @returns Address of the allocated memory.
 * @param   cb          The number of bytes to allocate.
 * @param   pszTag      The tag.
 * @param   fZero       Whether to zero the memory or not.
 * @param   pHeap       The heap to use.
 */
static void *rtMemPagePosixAlloc(size_t cb, const char *pszTag, bool fZero, PRTHEAPPAGE pHeap)
{
    /*
     * Validate & adjust the input.
     */
    Assert(cb > 0);
    NOREF(pszTag);
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);

    /*
     * If the allocation is relatively large, we use mmap/munmap directly.
     */
    void *pv;
    if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
    {
        pv = mmap(NULL, cb,
                  PROT_READ | PROT_WRITE | (pHeap == &g_MemExecPosixHeap ? PROT_EXEC : 0),
                  MAP_PRIVATE | MAP_ANONYMOUS,
                  -1, 0);
        if (pv != MAP_FAILED)
        {
            AssertPtr(pv);
            if (fZero)
                RT_BZERO(pv, cb);
        }
        else
            pv = NULL;
    }
    else
    {
        int rc = RTOnce(&g_MemPagePosixInitOnce, rtMemPagePosixInitOnce, NULL, NULL);
        if (RT_SUCCESS(rc))
            rc = RTHeapPageAlloc(pHeap, cb >> PAGE_SHIFT, pszTag, fZero, &pv);
        if (RT_FAILURE(rc))
            pv = NULL;
    }

    return pv;
}


/**
 * Free memory allocated by rtMemPagePosixAlloc.
 *
 * @param   pv          The address of the memory to free.
 * @param   cb          The size.
 * @param   pHeap       The heap.
 */
static void rtMemPagePosixFree(void *pv, size_t cb, PRTHEAPPAGE pHeap)
{
    /*
     * Validate & adjust the input.
     */
    if (!pv)
        return;
    AssertPtr(pv);
    Assert(cb > 0);
    Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);

    /*
     * If the allocation is relatively large, we use mmap/munmap directly.
     */
    if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
    {
        int rc = munmap(pv, cb);
        AssertMsg(rc == 0, ("rc=%d pv=%p cb=%#zx\n", rc, pv, cb)); NOREF(rc);
    }
    else
    {
        int rc = RTHeapPageFree(pHeap, pv, cb >> PAGE_SHIFT);
        AssertRC(rc);
    }
}




RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW
{
    return rtMemPagePosixAlloc(cb, pszTag, false /*fZero*/, &g_MemPagePosixHeap);
}


RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW
{
    return rtMemPagePosixAlloc(cb, pszTag, true /*fZero*/, &g_MemPagePosixHeap);
}


RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW
{
    return rtMemPagePosixFree(pv, cb, &g_MemPagePosixHeap);
}




RTDECL(void *) RTMemExecAllocTag(size_t cb, const char *pszTag) RT_NO_THROW
{
    return rtMemPagePosixAlloc(cb, pszTag, false /*fZero*/, &g_MemExecPosixHeap);
}


RTDECL(void) RTMemExecFree(void *pv, size_t cb) RT_NO_THROW
{
    return rtMemPagePosixFree(pv, cb, &g_MemExecPosixHeap);
}
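/* Usage sketch for the public API above (illustrative only; the tag string and
   size are made up):

       void *pv = RTMemPageAllocZTag(3 * PAGE_SIZE, "example");
       if (pv)
       {
           // ... use the three zeroed, page-aligned pages ...
           RTMemPageFree(pv, 3 * PAGE_SIZE);
       }

   Sizes are rounded up to a PAGE_SIZE multiple internally, and requests of
   RTMEMPAGEPOSIX_MMAP_THRESHOLD (_128K) or more bypass the page heap and use
   mmap/munmap directly. */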
