VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/MMAllHyper.cpp@19666

Last change on this file was revision 19666, checked in by vboxsync on 2009-05-13:

Deal with VMMCALLHOST_MMHYPER_LOCK correctly.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 47.2 KB
1/* $Id: MMAllHyper.cpp 19666 2009-05-13 15:31:31Z vboxsync $ */
2/** @file
3 * MM - Memory Manager - Hypervisor Memory Area, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_MM_HYPER_HEAP
27#include <VBox/mm.h>
28#include <VBox/stam.h>
29#include "MMInternal.h"
30#include <VBox/vm.h>
31
32#include <VBox/err.h>
33#include <VBox/param.h>
34#include <iprt/assert.h>
35#include <VBox/log.h>
36#include <iprt/asm.h>
37#include <iprt/string.h>
38
39
40/*******************************************************************************
41* Defined Constants And Macros *
42*******************************************************************************/
43#define ASSERT_L(u1, u2) AssertMsg((u1) < (u2), ("u1=%#x u2=%#x\n", u1, u2))
44#define ASSERT_LE(u1, u2) AssertMsg((u1) <= (u2), ("u1=%#x u2=%#x\n", u1, u2))
45#define ASSERT_GE(u1, u2) AssertMsg((u1) >= (u2), ("u1=%#x u2=%#x\n", u1, u2))
46#define ASSERT_ALIGN(u1) AssertMsg(!((u1) & (MMHYPER_HEAP_ALIGN_MIN - 1)), ("u1=%#x (%d)\n", u1, u1))
47
48#define ASSERT_OFFPREV(pHeap, pChunk) \
49 do { Assert(MMHYPERCHUNK_GET_OFFPREV(pChunk) <= 0); \
50 Assert(MMHYPERCHUNK_GET_OFFPREV(pChunk) >= (intptr_t)(pHeap)->CTX_SUFF(pbHeap) - (intptr_t)(pChunk)); \
51 AssertMsg( MMHYPERCHUNK_GET_OFFPREV(pChunk) != 0 \
52 || (uint8_t *)(pChunk) == (pHeap)->CTX_SUFF(pbHeap), \
53 ("pChunk=%p pvHyperHeap=%p\n", (pChunk), (pHeap)->CTX_SUFF(pbHeap))); \
54 } while (0)
55
56#define ASSERT_OFFNEXT(pHeap, pChunk) \
57 do { ASSERT_ALIGN((pChunk)->offNext); \
58 ASSERT_L((pChunk)->offNext, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
59 } while (0)
60
61#define ASSERT_OFFHEAP(pHeap, pChunk) \
62 do { Assert((pChunk)->offHeap); \
63 AssertMsg((PMMHYPERHEAP)((pChunk)->offHeap + (uintptr_t)pChunk) == (pHeap), \
64 ("offHeap=%RX32 pChunk=%p pHeap=%p\n", (pChunk)->offHeap, (pChunk), (pHeap))); \
65 Assert((pHeap)->u32Magic == MMHYPERHEAP_MAGIC); \
66 } while (0)
67
68#ifdef VBOX_WITH_STATISTICS
69#define ASSERT_OFFSTAT(pHeap, pChunk) \
70 do { if (MMHYPERCHUNK_ISFREE(pChunk)) \
71 Assert(!(pChunk)->offStat); \
72 else if ((pChunk)->offStat) \
73 { \
74 Assert((pChunk)->offStat); \
75 AssertMsg(!((pChunk)->offStat & (MMHYPER_HEAP_ALIGN_MIN - 1)), ("offStat=%RX32\n", (pChunk)->offStat)); \
76 uintptr_t uPtr = (uintptr_t)(pChunk)->offStat + (uintptr_t)pChunk; NOREF(uPtr); \
77 AssertMsg(uPtr - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) < (pHeap)->offPageAligned, \
78 ("%p - %p < %RX32\n", uPtr, (pHeap)->CTX_SUFF(pbHeap), (pHeap)->offPageAligned)); \
79 } \
80 } while (0)
81#else
82#define ASSERT_OFFSTAT(pHeap, pChunk) \
83 do { Assert(!(pChunk)->offStat); \
84 } while (0)
85#endif
86
87#define ASSERT_CHUNK(pHeap, pChunk) \
88 do { ASSERT_OFFNEXT(pHeap, pChunk); \
89 ASSERT_OFFPREV(pHeap, pChunk); \
90 ASSERT_OFFHEAP(pHeap, pChunk); \
91 ASSERT_OFFSTAT(pHeap, pChunk); \
92 } while (0)
93#define ASSERT_CHUNK_USED(pHeap, pChunk) \
94 do { ASSERT_OFFNEXT(pHeap, pChunk); \
95 ASSERT_OFFPREV(pHeap, pChunk); \
96 Assert(MMHYPERCHUNK_ISUSED(pChunk)); \
97 } while (0)
98
99#define ASSERT_FREE_OFFPREV(pHeap, pChunk) \
100 do { ASSERT_ALIGN((pChunk)->offPrev); \
101 ASSERT_GE(((pChunk)->offPrev & (MMHYPER_HEAP_ALIGN_MIN - 1)), (intptr_t)(pHeap)->CTX_SUFF(pbHeap) - (intptr_t)(pChunk)); \
102 Assert((pChunk)->offPrev != MMHYPERCHUNK_GET_OFFPREV(&(pChunk)->core) || !(pChunk)->offPrev); \
103 AssertMsg( (pChunk)->offPrev \
104 || (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) == (pHeap)->offFreeHead, \
105 ("pChunk=%p offChunk=%#x offFreeHead=%#x\n", (pChunk), (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap),\
106 (pHeap)->offFreeHead)); \
107 } while (0)
108
109#define ASSERT_FREE_OFFNEXT(pHeap, pChunk) \
110 do { ASSERT_ALIGN((pChunk)->offNext); \
111 ASSERT_L((pChunk)->offNext, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
112 Assert((pChunk)->offNext != (pChunk)->core.offNext || !(pChunk)->offNext); \
113 AssertMsg( (pChunk)->offNext \
114 || (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) == (pHeap)->offFreeTail, \
115 ("pChunk=%p offChunk=%#x offFreeTail=%#x\n", (pChunk), (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap), \
116 (pHeap)->offFreeTail)); \
117 } while (0)
118
119#define ASSERT_FREE_CB(pHeap, pChunk) \
120 do { ASSERT_ALIGN((pChunk)->cb); \
121 Assert((pChunk)->cb > 0); \
122 if ((pChunk)->core.offNext) \
123 AssertMsg((pChunk)->cb == ((pChunk)->core.offNext - sizeof(MMHYPERCHUNK)), \
124 ("cb=%d offNext=%d\n", (pChunk)->cb, (pChunk)->core.offNext)); \
125 else \
126 ASSERT_LE((pChunk)->cb, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
127 } while (0)
128
129#define ASSERT_CHUNK_FREE(pHeap, pChunk) \
130 do { ASSERT_CHUNK(pHeap, &(pChunk)->core); \
131 Assert(MMHYPERCHUNK_ISFREE(pChunk)); \
132 ASSERT_FREE_OFFNEXT(pHeap, pChunk); \
133 ASSERT_FREE_OFFPREV(pHeap, pChunk); \
134 ASSERT_FREE_CB(pHeap, pChunk); \
135 } while (0)
136
137
138/*******************************************************************************
139* Internal Functions *
140*******************************************************************************/
141static PMMHYPERCHUNK mmHyperAllocChunk(PMMHYPERHEAP pHeap, uint32_t cb, unsigned uAlignment);
142static void *mmHyperAllocPages(PMMHYPERHEAP pHeap, uint32_t cb);
143#ifdef VBOX_WITH_STATISTICS
144static PMMHYPERSTAT mmHyperStat(PMMHYPERHEAP pHeap, MMTAG enmTag);
145#ifdef IN_RING3
146static void mmR3HyperStatRegisterOne(PVM pVM, PMMHYPERSTAT pStat);
147#endif
148#endif
149static int mmHyperFree(PMMHYPERHEAP pHeap, PMMHYPERCHUNK pChunk);
150#ifdef MMHYPER_HEAP_STRICT
151static void mmHyperHeapCheck(PMMHYPERHEAP pHeap);
152#endif
153
154/**
155 * Locks the hypervisor heap.
156 * This might call back to Ring-3 in order to deal with lock contention in GC and R0.
157 *
158 * @param pVM The VM handle.
159 */
160static int mmHyperLock(PVM pVM)
161{
162 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
163
164#ifdef IN_RING3
165 if (!PDMCritSectIsInitialized(&pHeap->Lock))
166 return VINF_SUCCESS; /* early init */
167
168 int rc = PDMCritSectEnter(&pHeap->Lock, VERR_INTERNAL_ERROR);
169#else
170 Assert(PDMCritSectIsInitialized(&pHeap->Lock));
171 int rc = PDMCritSectEnter(&pHeap->Lock, VERR_GENERAL_FAILURE);
172 if (rc == VERR_GENERAL_FAILURE)
173 {
174# ifdef IN_RC
175 rc = VMMGCCallHost(pVM, VMMCALLHOST_MMHYPER_LOCK, 0);
176# else
177 rc = VMMR0CallHost(pVM, VMMCALLHOST_MMHYPER_LOCK, 0);
178# endif
179 }
180#endif
181 AssertRC(rc);
182 return rc;
183}
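/* Descriptive note (added for illustration, based only on the code above): in
 * the RC and R0 builds PDMCritSectEnter cannot block, so it returns the
 * supplied VERR_GENERAL_FAILURE when the critical section is contended; the
 * function then raises a VMMCALLHOST_MMHYPER_LOCK request so the ring-3 side
 * can sort out the contention before execution continues here. */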
184
185
186/**
187 * Unlocks the hypervisor heap.
188 *
189 * @param pVM The VM handle.
190 */
191static void mmHyperUnlock(PVM pVM)
192{
193 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
194
195#ifdef IN_RING3
196 if (!PDMCritSectIsInitialized(&pHeap->Lock))
197 return; /* early init */
198#endif
199 Assert(PDMCritSectIsInitialized(&pHeap->Lock));
200 PDMCritSectLeave(&pHeap->Lock);
201}
202
203/**
204 * Allocates memory in the Hypervisor (RC VMM) area.
205 * The returned memory is of course zeroed.
206 *
207 * @returns VBox status code.
208 * @param pVM The VM to operate on.
209 * @param cb Number of bytes to allocate.
210 * @param uAlignment Required memory alignment in bytes.
211 * Values are 0,8,16,32 and PAGE_SIZE.
212 * 0 -> default alignment, i.e. 8 bytes.
213 * @param enmTag The statistics tag.
214 * @param ppv Where to store the address to the allocated
215 * memory.
216 */
217static int mmHyperAllocInternal(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
218{
219 AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
220
221 /*
222 * Validate input and adjust it to reasonable values.
223 */
224 if (!uAlignment || uAlignment < MMHYPER_HEAP_ALIGN_MIN)
225 uAlignment = MMHYPER_HEAP_ALIGN_MIN;
226 uint32_t cbAligned;
227 switch (uAlignment)
228 {
229 case 8:
230 case 16:
231 case 32:
232 cbAligned = RT_ALIGN_32(cb, MMHYPER_HEAP_ALIGN_MIN);
233 if (!cbAligned || cbAligned < cb)
234 {
235 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
236 AssertMsgFailed(("Nice try.\n"));
237 return VERR_INVALID_PARAMETER;
238 }
239 break;
240
241 case PAGE_SIZE:
242 AssertMsg(RT_ALIGN_32(cb, PAGE_SIZE) == cb, ("The size isn't page aligned. (cb=%#x)\n", cb));
243 cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
244 if (!cbAligned)
245 {
246 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
247 AssertMsgFailed(("Nice try.\n"));
248 return VERR_INVALID_PARAMETER;
249 }
250 break;
251
252 default:
253 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
254 AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
255 return VERR_INVALID_PARAMETER;
256 }
257
258
259 /*
260 * Get the heap and statistics.
261 */
262 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
263#ifdef VBOX_WITH_STATISTICS
264 PMMHYPERSTAT pStat = mmHyperStat(pHeap, enmTag);
265 if (!pStat)
266 {
267 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_MM_HYPER_NO_MEMORY\n", cb, uAlignment));
268 AssertMsgFailed(("Failed to allocate statistics!\n"));
269 return VERR_MM_HYPER_NO_MEMORY;
270 }
271#endif
272 if (uAlignment < PAGE_SIZE)
273 {
274 /*
275 * Allocate a chunk.
276 */
277 PMMHYPERCHUNK pChunk = mmHyperAllocChunk(pHeap, cbAligned, uAlignment);
278 if (pChunk)
279 {
280#ifdef VBOX_WITH_STATISTICS
281 const uint32_t cbChunk = pChunk->offNext
282 ? pChunk->offNext
283 : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
284 pStat->cbAllocated += (uint32_t)cbChunk;
285 pStat->cbCurAllocated += (uint32_t)cbChunk;
286 if (pStat->cbCurAllocated > pStat->cbMaxAllocated)
287 pStat->cbMaxAllocated = pStat->cbCurAllocated;
288 pStat->cAllocations++;
289 pChunk->offStat = (uintptr_t)pStat - (uintptr_t)pChunk;
290#else
291 pChunk->offStat = 0;
292#endif
293 void *pv = pChunk + 1;
294 *ppv = pv;
295 ASMMemZero32(pv, cbAligned);
296 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, pv));
297 return VINF_SUCCESS;
298 }
299 }
300 else
301 {
302 /*
303 * Allocate page aligned memory.
304 */
305 void *pv = mmHyperAllocPages(pHeap, cbAligned);
306 if (pv)
307 {
308#ifdef VBOX_WITH_STATISTICS
309 pStat->cbAllocated += cbAligned;
310 pStat->cbCurAllocated += cbAligned;
311 if (pStat->cbCurAllocated > pStat->cbMaxAllocated)
312 pStat->cbMaxAllocated = pStat->cbCurAllocated;
313 pStat->cAllocations++;
314#endif
315 *ppv = pv;
316 /* ASMMemZero32(pv, cbAligned); - not required since memory is alloc-only and SUPPageAlloc zeros it. */
317 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, pv));
318 return VINF_SUCCESS;
319 }
320 }
321
322#ifdef VBOX_WITH_STATISTICS
323 pStat->cAllocations++;
324 pStat->cFailures++;
325#endif
326 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_MM_HYPER_NO_MEMORY\n", cb, uAlignment));
327 AssertMsgFailed(("Failed to allocate %d bytes!\n", cb));
328 return VERR_MM_HYPER_NO_MEMORY;
329}
330
331/**
332 * Wrapper for mmHyperAllocInternal
333 */
334VMMDECL(int) MMHyperAlloc(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
335{
336 int rc;
337
338 rc = mmHyperLock(pVM);
339 AssertRCReturn(rc, rc);
340
341 rc = mmHyperAllocInternal(pVM, cb, uAlignment, enmTag, ppv);
342
343 mmHyperUnlock(pVM);
344 return rc;
345}
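/* Illustrative usage sketch, not part of the original file: how a caller
 * might allocate a zeroed block from the hypervisor heap and release it
 * again. MM_TAG_MM is assumed to be a valid MMTAG value; a real caller
 * would use the tag of its own subsystem. Compiled out via #if 0. */
#if 0
static int exampleHyperHeapUsage(PVM pVM)
{
    void *pv;
    /* uAlignment = 0 requests the default 8 byte alignment; the block comes back zeroed. */
    int rc = MMHyperAlloc(pVM, 256, 0, MM_TAG_MM, &pv);
    if (RT_SUCCESS(rc))
    {
        /* ... use the block ... */
        rc = MMHyperFree(pVM, pv);
    }
    return rc;
}
#endif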
346
347/**
348 * Allocates a chunk of memory from the specified heap.
349 * The caller validates the parameters of this request.
350 *
351 * @returns Pointer to the allocated chunk.
352 * @returns NULL on failure.
353 * @param pHeap The heap.
354 * @param cb Size of the memory block to allocate.
355 * @param uAlignment The alignment specifications for the allocated block.
356 * @internal
357 */
358static PMMHYPERCHUNK mmHyperAllocChunk(PMMHYPERHEAP pHeap, uint32_t cb, unsigned uAlignment)
359{
360 Log3(("mmHyperAllocChunk: Enter cb=%#x uAlignment=%#x\n", cb, uAlignment));
361#ifdef MMHYPER_HEAP_STRICT
362 mmHyperHeapCheck(pHeap);
363#endif
364#ifdef MMHYPER_HEAP_STRICT_FENCE
365 uint32_t cbFence = RT_MAX(MMHYPER_HEAP_STRICT_FENCE_SIZE, uAlignment);
366 cb += cbFence;
367#endif
368
369 /*
370 * Check if there are any free chunks. (NIL_OFFSET use/not-use forces this check)
371 */
372 if (pHeap->offFreeHead == NIL_OFFSET)
373 return NULL;
374
375 /*
376 * Small alignments - from the front of the heap.
377 *
378 * Must split off free chunks at the end to avoid disturbing the last
379 * free node, from whose top the page aligned memory is taken.
380 */
381 PMMHYPERCHUNK pRet = NULL;
382 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeHead);
383 while (pFree)
384 {
385 ASSERT_CHUNK_FREE(pHeap, pFree);
386 if (pFree->cb >= cb)
387 {
388 unsigned offAlign = (uintptr_t)(&pFree->core + 1) & (uAlignment - 1);
389 if (offAlign)
390 offAlign = uAlignment - offAlign;
391 if (!offAlign || pFree->cb - offAlign >= cb)
392 {
393 Log3(("mmHyperAllocChunk: Using pFree=%p pFree->cb=%d offAlign=%d\n", pFree, pFree->cb, offAlign));
394
395 /*
396 * Adjust the node in front.
397 * Because of multiple alignments we need to special case allocation of the first block.
398 */
399 if (offAlign)
400 {
401 MMHYPERCHUNKFREE Free = *pFree;
402 if (MMHYPERCHUNK_GET_OFFPREV(&pFree->core))
403 {
404 /* just add a bit of memory to it. */
405 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + MMHYPERCHUNK_GET_OFFPREV(&Free.core));
406 pPrev->core.offNext += offAlign;
407 AssertMsg(!MMHYPERCHUNK_ISFREE(&pPrev->core), ("Impossible!\n"));
408 Log3(("mmHyperAllocChunk: Added %d bytes to %p\n", offAlign, pPrev));
409 }
410 else
411 {
412 /* make a new head node, mark it USED for simplicity. */
413 PMMHYPERCHUNK pPrev = (PMMHYPERCHUNK)pHeap->CTX_SUFF(pbHeap);
414 Assert(pPrev == &pFree->core);
415 pPrev->offPrev = 0;
416 MMHYPERCHUNK_SET_TYPE(pPrev, MMHYPERCHUNK_FLAGS_USED);
417 pPrev->offNext = offAlign;
418 Log3(("mmHyperAllocChunk: Created new first node of %d bytes\n", offAlign));
419
420 }
421 Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - offAlign, -(int)offAlign));
422 pHeap->cbFree -= offAlign;
423
424 /* Recreate the pFree node and adjust everything... */
425 pFree = (PMMHYPERCHUNKFREE)((char *)pFree + offAlign);
426 *pFree = Free;
427
428 pFree->cb -= offAlign;
429 if (pFree->core.offNext)
430 {
431 pFree->core.offNext -= offAlign;
432 PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pFree + pFree->core.offNext);
433 MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pFree->core.offNext);
434 ASSERT_CHUNK(pHeap, pNext);
435 }
436 if (MMHYPERCHUNK_GET_OFFPREV(&pFree->core))
437 MMHYPERCHUNK_SET_OFFPREV(&pFree->core, MMHYPERCHUNK_GET_OFFPREV(&pFree->core) - offAlign);
438
439 if (pFree->offNext)
440 {
441 pFree->offNext -= offAlign;
442 PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext);
443 pNext->offPrev = -(int32_t)pFree->offNext;
444 ASSERT_CHUNK_FREE(pHeap, pNext);
445 }
446 else
447 pHeap->offFreeTail += offAlign;
448 if (pFree->offPrev)
449 {
450 pFree->offPrev -= offAlign;
451 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
452 pPrev->offNext = -pFree->offPrev;
453 ASSERT_CHUNK_FREE(pHeap, pPrev);
454 }
455 else
456 pHeap->offFreeHead += offAlign;
457 pFree->core.offHeap = (uintptr_t)pHeap - (uintptr_t)pFree;
458 pFree->core.offStat = 0;
459 ASSERT_CHUNK_FREE(pHeap, pFree);
460 Log3(("mmHyperAllocChunk: Realigned pFree=%p\n", pFree));
461 }
462
463 /*
464 * Split off a new FREE chunk?
465 */
466 if (pFree->cb >= cb + RT_ALIGN(sizeof(MMHYPERCHUNKFREE), MMHYPER_HEAP_ALIGN_MIN))
467 {
468 /*
469 * Move the FREE chunk up to make room for the new USED chunk.
470 */
471 const int off = cb + sizeof(MMHYPERCHUNK);
472 PMMHYPERCHUNKFREE pNew = (PMMHYPERCHUNKFREE)((char *)&pFree->core + off);
473 *pNew = *pFree;
474 pNew->cb -= off;
475 if (pNew->core.offNext)
476 {
477 pNew->core.offNext -= off;
478 PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pNew + pNew->core.offNext);
479 MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pNew->core.offNext);
480 ASSERT_CHUNK(pHeap, pNext);
481 }
482 pNew->core.offPrev = -off;
483 MMHYPERCHUNK_SET_TYPE(pNew, MMHYPERCHUNK_FLAGS_FREE);
484
485 if (pNew->offNext)
486 {
487 pNew->offNext -= off;
488 PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pNew + pNew->offNext);
489 pNext->offPrev = -(int32_t)pNew->offNext;
490 ASSERT_CHUNK_FREE(pHeap, pNext);
491 }
492 else
493 pHeap->offFreeTail += off;
494 if (pNew->offPrev)
495 {
496 pNew->offPrev -= off;
497 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pNew + pNew->offPrev);
498 pPrev->offNext = -pNew->offPrev;
499 ASSERT_CHUNK_FREE(pHeap, pPrev);
500 }
501 else
502 pHeap->offFreeHead += off;
503 pNew->core.offHeap = (uintptr_t)pHeap - (uintptr_t)pNew;
504 pNew->core.offStat = 0;
505 ASSERT_CHUNK_FREE(pHeap, pNew);
506
507 /*
508 * Update the old FREE node making it a USED node.
509 */
510 pFree->core.offNext = off;
511 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_USED);
512
513
514 Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree,
515 pHeap->cbFree - (cb + sizeof(MMHYPERCHUNK)), -(int)(cb + sizeof(MMHYPERCHUNK))));
516 pHeap->cbFree -= (uint32_t)(cb + sizeof(MMHYPERCHUNK));
517 pRet = &pFree->core;
518 ASSERT_CHUNK(pHeap, &pFree->core);
519 Log3(("mmHyperAllocChunk: Created free chunk pNew=%p cb=%d\n", pNew, pNew->cb));
520 }
521 else
522 {
523 /*
524 * Link out of free list.
525 */
526 if (pFree->offNext)
527 {
528 PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext);
529 if (pFree->offPrev)
530 {
531 pNext->offPrev += pFree->offPrev;
532 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
533 pPrev->offNext += pFree->offNext;
534 ASSERT_CHUNK_FREE(pHeap, pPrev);
535 }
536 else
537 {
538 pHeap->offFreeHead += pFree->offNext;
539 pNext->offPrev = 0;
540 }
541 ASSERT_CHUNK_FREE(pHeap, pNext);
542 }
543 else
544 {
545 if (pFree->offPrev)
546 {
547 pHeap->offFreeTail += pFree->offPrev;
548 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
549 pPrev->offNext = 0;
550 ASSERT_CHUNK_FREE(pHeap, pPrev);
551 }
552 else
553 {
554 pHeap->offFreeHead = NIL_OFFSET;
555 pHeap->offFreeTail = NIL_OFFSET;
556 }
557 }
558
559 Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree,
560 pHeap->cbFree - pFree->cb, -(int32_t)pFree->cb));
561 pHeap->cbFree -= pFree->cb;
562 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_USED);
563 pRet = &pFree->core;
564 ASSERT_CHUNK(pHeap, &pFree->core);
565 Log3(("mmHyperAllocChunk: Converted free chunk %p to used chunk.\n", pFree));
566 }
567 Log3(("mmHyperAllocChunk: Returning %p\n", pRet));
568 break;
569 }
570 }
571
572 /* next */
573 pFree = pFree->offNext ? (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext) : NULL;
574 }
575
576#ifdef MMHYPER_HEAP_STRICT_FENCE
577 uint32_t *pu32End = (uint32_t *)((uint8_t *)(pRet + 1) + cb);
578 uint32_t *pu32EndReal = pRet->offNext
579 ? (uint32_t *)((uint8_t *)pRet + pRet->offNext)
580 : (uint32_t *)(pHeap->CTX_SUFF(pbHeap) + pHeap->cbHeap);
581 cbFence += (uintptr_t)pu32EndReal - (uintptr_t)pu32End; Assert(!(cbFence & 0x3));
582 ASMMemFill32((uint8_t *)pu32EndReal - cbFence, cbFence, MMHYPER_HEAP_STRICT_FENCE_U32);
583 pu32EndReal[-1] = cbFence;
584#endif
585#ifdef MMHYPER_HEAP_STRICT
586 mmHyperHeapCheck(pHeap);
587#endif
588 return pRet;
589}
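/* Worked example of the split path above (illustrative; concrete sizes are
 * assumed): a request of cb=0x40 that lands on a free chunk with
 * pFree->cb=0x100 takes off = 0x40 + sizeof(MMHYPERCHUNK) bytes off the
 * front for the USED chunk and leaves a FREE chunk of 0x100 - off bytes at
 * (char *)pFree + off, with the offNext/offPrev offsets of both the chunk
 * chain and the free list shifted by -off as done in the code. */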
590
591
592/**
593 * Allocates one or more pages of memory from the specified heap.
594 * The caller validates the parameters of this request.
595 *
596 * @returns Pointer to the allocated chunk.
597 * @returns NULL on failure.
598 * @param pHeap The heap.
599 * @param cb Size of the memory block to allocate.
600 * @internal
601 */
602static void *mmHyperAllocPages(PMMHYPERHEAP pHeap, uint32_t cb)
603{
604 Log3(("mmHyperAllocPages: Enter cb=%#x\n", cb));
605
606#ifdef MMHYPER_HEAP_STRICT
607 mmHyperHeapCheck(pHeap);
608#endif
609
610 /*
611 * Check if there are any free chunks. (NIL_OFFSET use/not-use forces this check)
612 */
613 if (pHeap->offFreeHead == NIL_OFFSET)
614 return NULL;
615
616 /*
617 * Page aligned chunks.
618 *
619 * Page aligned chunks can only be allocated from the last FREE chunk.
620 * This keeps things simple and limits fragmentation. Page aligned memory
621 * must also be allocated in page aligned sizes. Page aligned memory cannot
622 * be freed either.
623 *
624 * So, for this to work, the last FREE chunk needs to end on a page aligned
625 * boundary.
626 */
627 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeTail);
628 ASSERT_CHUNK_FREE(pHeap, pFree);
629 if ( (((uintptr_t)(&pFree->core + 1) + pFree->cb) & (PAGE_OFFSET_MASK - 1))
630 || pFree->cb + sizeof(MMHYPERCHUNK) < cb)
631 {
632 Log3(("mmHyperAllocPages: Not enough/no page aligned memory!\n"));
633 return NULL;
634 }
635
636 void *pvRet;
637 if (pFree->cb > cb)
638 {
639 /*
640 * Simple, just cut the top of the free node and return it.
641 */
642 pFree->cb -= cb;
643 pvRet = (char *)(&pFree->core + 1) + pFree->cb;
644 AssertMsg(RT_ALIGN_P(pvRet, PAGE_SIZE) == pvRet, ("pvRet=%p cb=%#x pFree=%p pFree->cb=%#x\n", pvRet, cb, pFree, pFree->cb));
645 Log3(("mmHyperAllocPages: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - cb, -(int)cb));
646 pHeap->cbFree -= cb;
647 ASSERT_CHUNK_FREE(pHeap, pFree);
648 Log3(("mmHyperAllocPages: Allocated from pFree=%p new pFree->cb=%d\n", pFree, pFree->cb));
649 }
650 else
651 {
652 /*
653 * Unlink the FREE node.
654 */
655 pvRet = (char *)(&pFree->core + 1) + pFree->cb - cb;
656 Log3(("mmHyperAllocPages: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pFree->cb, -(int32_t)pFree->cb));
657 pHeap->cbFree -= pFree->cb;
658
659 /* a scrap of spare memory (unlikely)? add it to the previous chunk. */
660 if (pvRet != (void *)pFree)
661 {
662 AssertMsg(MMHYPERCHUNK_GET_OFFPREV(&pFree->core), ("How the *beep* did someone manage to allocate all of the heap with page aligned memory?!?\n"));
663 PMMHYPERCHUNK pPrev = (PMMHYPERCHUNK)((char *)pFree + MMHYPERCHUNK_GET_OFFPREV(&pFree->core));
664 pPrev->offNext += (uintptr_t)pvRet - (uintptr_t)pFree;
665 AssertMsg(!MMHYPERCHUNK_ISFREE(pPrev), ("Free bug?\n"));
666#ifdef VBOX_WITH_STATISTICS
667 PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pPrev + pPrev->offStat);
668 pStat->cbAllocated += (uintptr_t)pvRet - (uintptr_t)pFree;
669 pStat->cbCurAllocated += (uintptr_t)pvRet - (uintptr_t)pFree;
670#endif
671 Log3(("mmHyperAllocPages: Added %d to %p (page align)\n", (uintptr_t)pvRet - (uintptr_t)pFree, pFree));
672 }
673
674 /* unlink from FREE chain. */
675 if (pFree->offPrev)
676 {
677 pHeap->offFreeTail += pFree->offPrev;
678 ((PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev))->offNext = 0;
679 }
680 else
681 {
682 pHeap->offFreeTail = NIL_OFFSET;
683 pHeap->offFreeHead = NIL_OFFSET;
684 }
685 Log3(("mmHyperAllocPages: Unlinked pFree=%p\n", pFree));
686 }
687 pHeap->offPageAligned = (uintptr_t)pvRet - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
688 Log3(("mmHyperAllocPages: Returning %p (page aligned)\n", pvRet));
689
690#ifdef MMHYPER_HEAP_STRICT
691 mmHyperHeapCheck(pHeap);
692#endif
693 return pvRet;
694}
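/* Illustrative example of the simple path above (sizes assumed): with a tail
 * FREE chunk of pFree->cb=0x3000 ending on a page boundary, a PAGE_SIZE
 * request shrinks that chunk to 0x2000 bytes and returns its page aligned
 * top, i.e. (char *)(&pFree->core + 1) + pFree->cb after the shrink; the
 * free list itself is left untouched in this case. */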
695
696#ifdef VBOX_WITH_STATISTICS
697
698/**
699 * Get the statistic record for a tag.
700 *
701 * @returns Pointer to a stat record.
702 * @returns NULL on failure.
703 * @param pHeap The heap.
704 * @param enmTag The tag.
705 */
706static PMMHYPERSTAT mmHyperStat(PMMHYPERHEAP pHeap, MMTAG enmTag)
707{
708 /* try to look it up first. */
709 PMMHYPERSTAT pStat = (PMMHYPERSTAT)RTAvloGCPhysGet(&pHeap->HyperHeapStatTree, enmTag);
710 if (!pStat)
711 {
712 /* try to allocate a new one */
713 PMMHYPERCHUNK pChunk = mmHyperAllocChunk(pHeap, RT_ALIGN(sizeof(*pStat), MMHYPER_HEAP_ALIGN_MIN), MMHYPER_HEAP_ALIGN_MIN);
714 if (!pChunk)
715 return NULL;
716 pStat = (PMMHYPERSTAT)(pChunk + 1);
717 pChunk->offStat = (uintptr_t)pStat - (uintptr_t)pChunk;
718
719 ASMMemZero32(pStat, sizeof(*pStat));
720 pStat->Core.Key = enmTag;
721 RTAvloGCPhysInsert(&pHeap->HyperHeapStatTree, &pStat->Core);
722 }
723 if (!pStat->fRegistered)
724 {
725# ifdef IN_RING3
726 mmR3HyperStatRegisterOne(pHeap->pVMR3, pStat);
727# else
728 /** @todo schedule a R3 action. */
729# endif
730 }
731 return pStat;
732}
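/* Descriptive note (added): statistic records are carved out of the hyper
 * heap itself, and a fresh record sits immediately after its chunk header,
 * so that chunk's offStat equals sizeof(MMHYPERCHUNK); this is the condition
 * mmHyperHeapDumpOne later uses to label a chunk as a " stat record". */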
733
734
735# ifdef IN_RING3
736/**
737 * Registers statistics with STAM.
738 *
739 */
740static void mmR3HyperStatRegisterOne(PVM pVM, PMMHYPERSTAT pStat)
741{
742 if (pStat->fRegistered)
743 return;
744 const char *pszTag = mmR3GetTagName((MMTAG)pStat->Core.Key);
745 STAMR3RegisterF(pVM, &pStat->cbCurAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Number of bytes currently allocated.", "/MM/HyperHeap/%s", pszTag);
746 STAMR3RegisterF(pVM, &pStat->cAllocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of alloc calls.", "/MM/HyperHeap/%s/cAllocations", pszTag);
747 STAMR3RegisterF(pVM, &pStat->cFrees, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of free calls.", "/MM/HyperHeap/%s/cFrees", pszTag);
748 STAMR3RegisterF(pVM, &pStat->cFailures, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of failures.", "/MM/HyperHeap/%s/cFailures", pszTag);
749 STAMR3RegisterF(pVM, &pStat->cbAllocated, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of allocated bytes.", "/MM/HyperHeap/%s/cbAllocated", pszTag);
750 STAMR3RegisterF(pVM, &pStat->cbFreed, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of freed bytes.", "/MM/HyperHeap/%s/cbFreed", pszTag);
751 STAMR3RegisterF(pVM, &pStat->cbMaxAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Max number of bytes allocated at the same time.","/MM/HyperHeap/%s/cbMaxAllocated", pszTag);
752 pStat->fRegistered = true;
753}
754# endif /* IN_RING3 */
755
756#endif /* VBOX_WITH_STATISTICS */
757
758
759/**
760 * Free memory allocated using MMHyperAlloc().
761 * The caller validates the parameters of this request.
762 *
763 * @returns VBox status code.
764 * @param pVM The VM to operate on.
765 * @param pv The memory to free.
766 * @remark Try to avoid freeing hyper memory.
767 */
768static int mmHyperFreeInternal(PVM pVM, void *pv)
769{
770 Log2(("MMHyperFree: pv=%p\n", pv));
771 if (!pv)
772 return VINF_SUCCESS;
773 AssertMsgReturn(RT_ALIGN_P(pv, MMHYPER_HEAP_ALIGN_MIN) == pv,
774 ("Invalid pointer %p!\n", pv),
775 VERR_INVALID_POINTER);
776
777 /*
778 * Get the heap and stats.
779 * Validate the chunk at the same time.
780 */
781 PMMHYPERCHUNK pChunk = (PMMHYPERCHUNK)((PMMHYPERCHUNK)pv - 1);
782
783 AssertMsgReturn( (uintptr_t)pChunk + pChunk->offNext >= (uintptr_t)pChunk
784 || RT_ALIGN_32(pChunk->offNext, MMHYPER_HEAP_ALIGN_MIN) != pChunk->offNext,
785 ("%p: offNext=%#RX32\n", pv, pChunk->offNext),
786 VERR_INVALID_POINTER);
787
788 AssertMsgReturn(MMHYPERCHUNK_ISUSED(pChunk),
789 ("%p: Not used!\n", pv),
790 VERR_INVALID_POINTER);
791
792 int32_t offPrev = MMHYPERCHUNK_GET_OFFPREV(pChunk);
793 AssertMsgReturn( (uintptr_t)pChunk + offPrev <= (uintptr_t)pChunk
794 && !((uint32_t)-offPrev & (MMHYPER_HEAP_ALIGN_MIN - 1)),
795 ("%p: offPrev=%#RX32!\n", pv, offPrev),
796 VERR_INVALID_POINTER);
797
798 /* statistics */
799#ifdef VBOX_WITH_STATISTICS
800 PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pChunk + pChunk->offStat);
801 AssertMsgReturn( RT_ALIGN_P(pStat, MMHYPER_HEAP_ALIGN_MIN) == (void *)pStat
802 && pChunk->offStat,
803 ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
804 VERR_INVALID_POINTER);
805#else
806 AssertMsgReturn(!pChunk->offStat,
807 ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
808 VERR_INVALID_POINTER);
809#endif
810
811 /* The heap structure. */
812 PMMHYPERHEAP pHeap = (PMMHYPERHEAP)((uintptr_t)pChunk + pChunk->offHeap);
813 AssertMsgReturn( !((uintptr_t)pHeap & PAGE_OFFSET_MASK)
814 && pChunk->offHeap,
815 ("%p: pHeap=%#x offHeap=%RX32\n", pv, pHeap->u32Magic, pChunk->offHeap),
816 VERR_INVALID_POINTER);
817
818 AssertMsgReturn(pHeap->u32Magic == MMHYPERHEAP_MAGIC,
819 ("%p: u32Magic=%#x\n", pv, pHeap->u32Magic),
820 VERR_INVALID_POINTER);
821 Assert(pHeap == pVM->mm.s.CTX_SUFF(pHyperHeap));
822
823 /* Some more verifications using additional info from pHeap. */
824 AssertMsgReturn((uintptr_t)pChunk + offPrev >= (uintptr_t)pHeap->CTX_SUFF(pbHeap),
825 ("%p: offPrev=%#RX32!\n", pv, offPrev),
826 VERR_INVALID_POINTER);
827
828 AssertMsgReturn(pChunk->offNext < pHeap->cbHeap,
829 ("%p: offNext=%#RX32!\n", pv, pChunk->offNext),
830 VERR_INVALID_POINTER);
831
832 AssertMsgReturn( (uintptr_t)pv - (uintptr_t)pHeap->CTX_SUFF(pbHeap) <= pHeap->offPageAligned,
833 ("Invalid pointer %p! (heap: %p-%p)\n", pv, pHeap->CTX_SUFF(pbHeap),
834 (char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned),
835 VERR_INVALID_POINTER);
836
837#ifdef MMHYPER_HEAP_STRICT
838 mmHyperHeapCheck(pHeap);
839#endif
840
841#if defined(VBOX_WITH_STATISTICS) || defined(MMHYPER_HEAP_FREE_POISON)
842 /* calc block size. */
843 const uint32_t cbChunk = pChunk->offNext
844 ? pChunk->offNext
845 : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
846#endif
847#ifdef MMHYPER_HEAP_FREE_POISON
848 /* poison the block */
849 memset(pChunk + 1, MMHYPER_HEAP_FREE_POISON, cbChunk - sizeof(*pChunk));
850#endif
851
852#ifdef MMHYPER_HEAP_FREE_DELAY
853# ifdef MMHYPER_HEAP_FREE_POISON
854 /*
855 * Check poison.
856 */
857 unsigned i = RT_ELEMENTS(pHeap->aDelayedFrees);
858 while (i-- > 0)
859 if (pHeap->aDelayedFrees[i].offChunk)
860 {
861 PMMHYPERCHUNK pCur = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[i].offChunk);
862 const size_t cb = pCur->offNext
863 ? pCur->offNext - sizeof(*pCur)
864 : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pCur - sizeof(*pCur);
865 uint8_t *pab = (uint8_t *)(pCur + 1);
866 for (unsigned off = 0; off < cb; off++)
867 AssertReleaseMsg(pab[off] == 0xCB,
868 ("caller=%RTptr cb=%#zx off=%#x: %.*Rhxs\n",
869 pHeap->aDelayedFrees[i].uCaller, cb, off, RT_MIN(cb - off, 32), &pab[off]));
870 }
871# endif /* MMHYPER_HEAP_FREE_POISON */
872
873 /*
874 * Delayed freeing.
875 */
876 int rc = VINF_SUCCESS;
877 if (pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk)
878 {
879 PMMHYPERCHUNK pChunkFree = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk);
880 rc = mmHyperFree(pHeap, pChunkFree);
881 }
882 pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk = (uintptr_t)pChunk - (uintptr_t)pHeap;
883 pHeap->aDelayedFrees[pHeap->iDelayedFree].uCaller = (uintptr_t)ASMReturnAddress();
884 pHeap->iDelayedFree = (pHeap->iDelayedFree + 1) % RT_ELEMENTS(pHeap->aDelayedFrees);
885
886#else /* !MMHYPER_HEAP_FREE_POISON */
887 /*
888 * Call the worker.
889 */
890 int rc = mmHyperFree(pHeap, pChunk);
891#endif /* !MMHYPER_HEAP_FREE_POISON */
892
893 /*
894 * Update statistics.
895 */
896#ifdef VBOX_WITH_STATISTICS
897 pStat->cFrees++;
898 if (RT_SUCCESS(rc))
899 {
900 pStat->cbFreed += cbChunk;
901 pStat->cbCurAllocated -= cbChunk;
902 }
903 else
904 pStat->cFailures++;
905#endif
906
907 return rc;
908}
909
910
911/**
912 * Wrapper for mmHyperFreeInternal
913 */
914VMMDECL(int) MMHyperFree(PVM pVM, void *pv)
915{
916 int rc;
917
918 rc = mmHyperLock(pVM);
919 AssertRCReturn(rc, rc);
920
921 rc = mmHyperFreeInternal(pVM, pv);
922
923 mmHyperUnlock(pVM);
924 return rc;
925}
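/* Usage note (illustrative): mmHyperFreeInternal treats a NULL pointer as a
 * successful no-op, so callers may free unconditionally, e.g.:
 *
 *     MMHyperFree(pVM, pv);
 *     pv = NULL;
 */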
926
927
928/**
929 * Frees a memory chunk.
930 *
931 * @returns VBox status code.
932 * @param pHeap The heap.
933 * @param pChunk The memory chunk to free.
934 */
935static int mmHyperFree(PMMHYPERHEAP pHeap, PMMHYPERCHUNK pChunk)
936{
937 Log3(("mmHyperFree: Enter pHeap=%p pChunk=%p\n", pHeap, pChunk));
938 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pChunk;
939
940 /*
941 * Insert into the free list (which is sorted on address).
942 *
943 * We'll search towards the end of the heap to locate the
944 * closest FREE chunk.
945 */
946 PMMHYPERCHUNKFREE pLeft = NULL;
947 PMMHYPERCHUNKFREE pRight = NULL;
948 if (pHeap->offFreeTail != NIL_OFFSET)
949 {
950 if (pFree->core.offNext)
951 {
952 pRight = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->core.offNext);
953 ASSERT_CHUNK(pHeap, &pRight->core);
954 while (!MMHYPERCHUNK_ISFREE(&pRight->core))
955 {
956 if (!pRight->core.offNext)
957 {
958 pRight = NULL;
959 break;
960 }
961 pRight = (PMMHYPERCHUNKFREE)((char *)pRight + pRight->core.offNext);
962 ASSERT_CHUNK(pHeap, &pRight->core);
963 }
964 }
965 if (!pRight)
966 pRight = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeTail); /** @todo this can't be correct! 'pLeft = .. ; else' I think */
967 if (pRight)
968 {
969 ASSERT_CHUNK_FREE(pHeap, pRight);
970 if (pRight->offPrev)
971 {
972 pLeft = (PMMHYPERCHUNKFREE)((char *)pRight + pRight->offPrev);
973 ASSERT_CHUNK_FREE(pHeap, pLeft);
974 }
975 }
976 }
977 if (pLeft == pFree)
978 {
979 AssertMsgFailed(("Freed twice! pv=%p (pChunk=%p)\n", pChunk + 1, pChunk));
980 return VERR_INVALID_POINTER;
981 }
982 pChunk->offStat = 0;
983
984 /*
985 * Head free chunk list?
986 */
987 if (!pLeft)
988 {
989 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
990 pFree->offPrev = 0;
991 pHeap->offFreeHead = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
992 if (pRight)
993 {
994 pFree->offNext = (uintptr_t)pRight - (uintptr_t)pFree;
995 pRight->offPrev = -(int32_t)pFree->offNext;
996 }
997 else
998 {
999 pFree->offNext = 0;
1000 pHeap->offFreeTail = pHeap->offFreeHead;
1001 }
1002 Log3(("mmHyperFree: Inserted %p at head of free chain.\n", pFree));
1003 }
1004 else
1005 {
1006 /*
1007 * Can we merge with left hand free chunk?
1008 */
1009 if ((char *)pLeft + pLeft->core.offNext == (char *)pFree)
1010 {
1011 if (pFree->core.offNext)
1012 {
1013 pLeft->core.offNext = pLeft->core.offNext + pFree->core.offNext;
1014 MMHYPERCHUNK_SET_OFFPREV(((PMMHYPERCHUNK)((char *)pLeft + pLeft->core.offNext)), -(int32_t)pLeft->core.offNext);
1015 }
1016 else
1017 pLeft->core.offNext = 0;
1018 pFree = pLeft;
1019 Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pLeft->cb, -(int32_t)pLeft->cb));
1020 pHeap->cbFree -= pLeft->cb;
1021 Log3(("mmHyperFree: Merging %p into %p (cb=%d).\n", pFree, pLeft, pLeft->cb));
1022 }
1023 /*
1024 * No, just link it into the free list then.
1025 */
1026 else
1027 {
1028 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
1029 pFree->offPrev = (uintptr_t)pLeft - (uintptr_t)pFree;
1030 pLeft->offNext = -pFree->offPrev;
1031 if (pRight)
1032 {
1033 pFree->offNext = (uintptr_t)pRight - (uintptr_t)pFree;
1034 pRight->offPrev = -(int32_t)pFree->offNext;
1035 }
1036 else
1037 {
1038 pFree->offNext = 0;
1039 pHeap->offFreeTail = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
1040 }
1041 Log3(("mmHyperFree: Inserted %p after %p in free list.\n", pFree, pLeft));
1042 }
1043 }
1044
1045 /*
1046 * Can we merge with right hand free chunk?
1047 */
1048 if (pRight && (char *)pRight == (char *)pFree + pFree->core.offNext)
1049 {
1050 /* core */
1051 if (pRight->core.offNext)
1052 {
1053 pFree->core.offNext += pRight->core.offNext;
1054 PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pFree + pFree->core.offNext);
1055 MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pFree->core.offNext);
1056 ASSERT_CHUNK(pHeap, pNext);
1057 }
1058 else
1059 pFree->core.offNext = 0;
1060
1061 /* free */
1062 if (pRight->offNext)
1063 {
1064 pFree->offNext += pRight->offNext;
1065 ((PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext))->offPrev = -(int32_t)pFree->offNext;
1066 }
1067 else
1068 {
1069 pFree->offNext = 0;
1070 pHeap->offFreeTail = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
1071 }
1072 Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pRight->cb, -(int32_t)pRight->cb));
1073 pHeap->cbFree -= pRight->cb;
1074 Log3(("mmHyperFree: Merged %p (cb=%d) into %p.\n", pRight, pRight->cb, pFree));
1075 }
1076
1077 /* calculate the size. */
1078 if (pFree->core.offNext)
1079 pFree->cb = pFree->core.offNext - sizeof(MMHYPERCHUNK);
1080 else
1081 pFree->cb = pHeap->offPageAligned - ((uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap)) - sizeof(MMHYPERCHUNK);
1082 Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree + pFree->cb, pFree->cb));
1083 pHeap->cbFree += pFree->cb;
1084 ASSERT_CHUNK_FREE(pHeap, pFree);
1085
1086#ifdef MMHYPER_HEAP_STRICT
1087 mmHyperHeapCheck(pHeap);
1088#endif
1089 return VINF_SUCCESS;
1090}
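/* Coalescing sketch (illustrative): freeing the middle chunk of a
 * [FREE pLeft][USED pChunk][FREE pRight] sequence merges all three into one
 * FREE chunk headed at pLeft; its cb is then recomputed from core.offNext,
 * or from offPageAligned when it has become the last chunk in the heap. */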
1091
1092
1093#if defined(DEBUG) || defined(MMHYPER_HEAP_STRICT)
1094/**
1095 * Dumps a heap chunk to the log.
1096 *
1097 * @param pHeap Pointer to the heap.
1098 * @param pCur Pointer to the chunk.
1099 */
1100static void mmHyperHeapDumpOne(PMMHYPERHEAP pHeap, PMMHYPERCHUNKFREE pCur)
1101{
1102 if (MMHYPERCHUNK_ISUSED(&pCur->core))
1103 {
1104 if (pCur->core.offStat)
1105 {
1106 PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pCur + pCur->core.offStat);
1107 const char *pszSelf = pCur->core.offStat == sizeof(MMHYPERCHUNK) ? " stat record" : "";
1108#ifdef IN_RING3
1109 Log(("%p %06x USED offNext=%06x offPrev=-%06x %s%s\n",
1110 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1111 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
1112 mmR3GetTagName((MMTAG)pStat->Core.Key), pszSelf));
1113#else
1114 Log(("%p %06x USED offNext=%06x offPrev=-%06x %d%s\n",
1115 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1116 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
1117 (MMTAG)pStat->Core.Key, pszSelf));
1118#endif
1119 }
1120 else
1121 Log(("%p %06x USED offNext=%06x offPrev=-%06x\n",
1122 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1123 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core)));
1124 }
1125 else
1126 Log(("%p %06x FREE offNext=%06x offPrev=-%06x : cb=%06x offNext=%06x offPrev=-%06x\n",
1127 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1128 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core), pCur->cb, pCur->offNext, pCur->offPrev));
1129}
1130#endif /* DEBUG || MMHYPER_HEAP_STRICT */
1131
1132
1133#ifdef MMHYPER_HEAP_STRICT
1134/**
1135 * Internal consistency check.
1136 */
1137static void mmHyperHeapCheck(PMMHYPERHEAP pHeap)
1138{
1139 PMMHYPERCHUNKFREE pPrev = NULL;
1140 PMMHYPERCHUNKFREE pCur = (PMMHYPERCHUNKFREE)pHeap->CTX_SUFF(pbHeap);
1141 for (;;)
1142 {
1143 if (MMHYPERCHUNK_ISUSED(&pCur->core))
1144 ASSERT_CHUNK_USED(pHeap, &pCur->core);
1145 else
1146 ASSERT_CHUNK_FREE(pHeap, pCur);
1147 if (pPrev)
1148 AssertMsg((int32_t)pPrev->core.offNext == -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
1149 ("pPrev->core.offNext=%d offPrev=%d\n", pPrev->core.offNext, MMHYPERCHUNK_GET_OFFPREV(&pCur->core)));
1150
1151# ifdef MMHYPER_HEAP_STRICT_FENCE
1152 uint32_t off = (uint8_t *)pCur - pHeap->CTX_SUFF(pbHeap);
1153 if ( MMHYPERCHUNK_ISUSED(&pCur->core)
1154 && off < pHeap->offPageAligned)
1155 {
1156 uint32_t cbCur = pCur->core.offNext
1157 ? pCur->core.offNext
1158 : pHeap->cbHeap - off;
1159 uint32_t *pu32End = ((uint32_t *)((uint8_t *)pCur + cbCur));
1160 uint32_t cbFence = pu32End[-1];
1161 if (RT_UNLIKELY( cbFence >= cbCur - sizeof(*pCur)
1162 || cbFence < MMHYPER_HEAP_STRICT_FENCE_SIZE))
1163 {
1164 mmHyperHeapDumpOne(pHeap, pCur);
1165 Assert(cbFence < cbCur - sizeof(*pCur));
1166 Assert(cbFence >= MMHYPER_HEAP_STRICT_FENCE_SIZE);
1167 }
1168
1169 uint32_t *pu32Bad = ASMMemIsAllU32((uint8_t *)pu32End - cbFence, cbFence - sizeof(uint32_t), MMHYPER_HEAP_STRICT_FENCE_U32);
1170 if (RT_UNLIKELY(pu32Bad))
1171 {
1172 mmHyperHeapDumpOne(pHeap, pCur);
1173 Assert(!pu32Bad);
1174 }
1175 }
1176# endif
1177
1178 /* next */
1179 if (!pCur->core.offNext)
1180 break;
1181 pPrev = pCur;
1182 pCur = (PMMHYPERCHUNKFREE)((char *)pCur + pCur->core.offNext);
1183 }
1184}
1185#endif
1186
1187
1188/**
1189 * Performs consistency checks on the heap if MMHYPER_HEAP_STRICT was
1190 * defined at build time.
1191 *
1192 * @param pVM Pointer to the shared VM structure.
1193 */
1194VMMDECL(void) MMHyperHeapCheck(PVM pVM)
1195{
1196#ifdef MMHYPER_HEAP_STRICT
1197 int rc;
1198
1199 rc = mmHyperLock(pVM);
1200 AssertRC(rc);
1201 mmHyperHeapCheck(pVM->mm.s.CTX_SUFF(pHyperHeap));
1202 mmHyperUnlock(pVM);
1203#endif
1204}
1205
1206
1207#ifdef DEBUG
1208/**
1209 * Dumps the hypervisor heap to Log.
1210 * @param pVM VM Handle.
1211 */
1212VMMDECL(void) MMHyperHeapDump(PVM pVM)
1213{
1214 Log(("MMHyperHeapDump: *** heap dump - start ***\n"));
1215 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
1216 PMMHYPERCHUNKFREE pCur = (PMMHYPERCHUNKFREE)pHeap->CTX_SUFF(pbHeap);
1217 for (;;)
1218 {
1219 mmHyperHeapDumpOne(pHeap, pCur);
1220
1221 /* next */
1222 if (!pCur->core.offNext)
1223 break;
1224 pCur = (PMMHYPERCHUNKFREE)((char *)pCur + pCur->core.offNext);
1225 }
1226 Log(("MMHyperHeapDump: *** heap dump - end ***\n"));
1227}
1228#endif
1229
1230
1231/**
1232 * Query the amount of free memory in the hypervisor heap.
1233 *
1234 * @returns Number of free bytes in the hypervisor heap.
1235 */
1236VMMDECL(size_t) MMHyperHeapGetFreeSize(PVM pVM)
1237{
1238 return pVM->mm.s.CTX_SUFF(pHyperHeap)->cbFree;
1239}
1240
1241/**
1242 * Query the size of the hypervisor heap.
1243 *
1244 * @returns The size of the hypervisor heap in bytes.
1245 */
1246VMMDECL(size_t) MMHyperHeapGetSize(PVM pVM)
1247{
1248 return pVM->mm.s.CTX_SUFF(pHyperHeap)->cbHeap;
1249}
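/* Illustrative note (not part of the original file): the two queries above
 * can be combined to estimate current usage, e.g.
 *
 *     size_t cbUsed = MMHyperHeapGetSize(pVM) - MMHyperHeapGetFreeSize(pVM);
 *
 * which counts chunk headers and page aligned allocations as used. */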
1250
1251
1252/**
1253 * Query the address and size of the hypervisor memory area.
1254 *
1255 * @returns Base address of the hypervisor area.
1256 * @param pVM VM Handle.
1257 * @param pcb Where to store the size of the hypervisor area. (out)
1258 */
1259VMMDECL(RTGCPTR) MMHyperGetArea(PVM pVM, size_t *pcb)
1260{
1261 if (pcb)
1262 *pcb = pVM->mm.s.cbHyperArea;
1263 return pVM->mm.s.pvHyperAreaGC;
1264}
1265
1266
1267/**
1268 * Checks if an address is within the hypervisor memory area.
1269 *
1270 * @returns true if inside.
1271 * @returns false if outside.
1272 * @param pVM VM handle.
1273 * @param GCPtr The pointer to check.
1274 */
1275VMMDECL(bool) MMHyperIsInsideArea(PVM pVM, RTGCPTR GCPtr)
1276{
1277 return (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pVM->mm.s.pvHyperAreaGC < pVM->mm.s.cbHyperArea;
1278}
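/* Note on the range check above: subtracting the area base from GCPtr in
 * unsigned arithmetic folds the "below base" case into a huge value, so the
 * single '< cbHyperArea' comparison covers both bounds. For example, with
 * base 0xa0000000 and cbHyperArea 0x400000, GCPtr 0xa0001000 yields 0x1000
 * (inside) while GCPtr 0x9ffff000 wraps around and fails the check. */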
1279