VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/MMPagePool.cpp@82557

Last change on this file since 82557 was 82557, checked in by vboxsync, 5 years ago

PGMPool,MM: Use ring-0 mapping while in ring-0, so let the page pool do its own allocations rather than going through MMPage*. The MMPage* code is mostly dead code, but we still need it for a dummy page allocation. I'll address this tomorrow. [build fix] bugref:9528

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 19.0 KB
/* $Id: MMPagePool.cpp 82557 2019-12-12 00:35:39Z vboxsync $ */
/** @file
 * MM - Memory Manager - Page Pool.
 */

/*
 * Copyright (C) 2006-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_MM_POOL
#include <VBox/vmm/mm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/stam.h>
#include "MMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/param.h>
#include <iprt/errcore.h>
#include <VBox/log.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#define USE_INLINE_ASM_BIT_OPS
#ifdef USE_INLINE_ASM_BIT_OPS
# include <iprt/asm.h>
#endif
#include <iprt/string.h>



/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
#ifdef IN_RING3
static void *   mmR3PagePoolAlloc(PMMPAGEPOOL pPool);
#if 0
static void     mmR3PagePoolFree(PMMPAGEPOOL pPool, void *pv);
#endif
#endif


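/*
 * Structure overview:  each page pool (the default pool and the <4GB "low"
 * pool) keeps a chain of MMPAGESUBPOOL blocks.  A subpool owns a ring-3
 * mapping of locked host pages (pvPages), an allocation bitmap (auBitmap)
 * and the host physical address of each page (paPhysPages).  Two AVL trees
 * on the pool, pLookupPhys (keyed on host physical address) and pLookupVirt
 * (keyed on the subpool's virtual base), back the pointer <-> physical
 * address translations.
 */

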
/**
 * Initializes the page pool
 *
 * @return  VBox status code.
 * @param   pVM     The cross context VM structure.
 * @thread  The Emulation Thread.
 */
int mmR3PagePoolInit(PVM pVM)
{
    AssertMsg(!pVM->mm.s.pPagePoolR3, ("Already initialized!\n"));

    /*
     * Allocate the pool structures.
     */
    /** @todo @bugref{1865},@bugref{3202}: mapping the page pool page into ring-0.
     * Need to change the ways we allocate it... */
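    /* Both pool descriptors live in the single page allocated just below: the
       default pool sits at the start and the <4GB pool directly after it (see
       the pPagePoolLowR3 assignment further down). */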
    AssertReleaseReturn(sizeof(*pVM->mm.s.pPagePoolR3) + sizeof(*pVM->mm.s.pPagePoolLowR3) < PAGE_SIZE, VERR_INTERNAL_ERROR);
    int rc = SUPR3PageAllocEx(1, 0 /*fFlags*/, (void **)&pVM->mm.s.pPagePoolR3, NULL /*pR0Ptr*/, NULL /*paPages*/);
    if (RT_FAILURE(rc))
        return rc;
    memset(pVM->mm.s.pPagePoolR3, 0, PAGE_SIZE);
    pVM->mm.s.pPagePoolR3->pVM = pVM;
    STAM_REG(pVM, &pVM->mm.s.pPagePoolR3->cPages, STAMTYPE_U32, "/MM/Page/Def/cPages", STAMUNIT_PAGES, "Number of pages in the default pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolR3->cFreePages, STAMTYPE_U32, "/MM/Page/Def/cFreePages", STAMUNIT_PAGES, "Number of free pages in the default pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolR3->cSubPools, STAMTYPE_U32, "/MM/Page/Def/cSubPools", STAMUNIT_COUNT, "Number of sub pools in the default pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolR3->cAllocCalls, STAMTYPE_COUNTER, "/MM/Page/Def/cAllocCalls", STAMUNIT_CALLS, "Number of MMR3PageAlloc() calls for the default pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolR3->cFreeCalls, STAMTYPE_COUNTER, "/MM/Page/Def/cFreeCalls", STAMUNIT_CALLS, "Number of MMR3PageFree()+MMR3PageFreeByPhys() calls for the default pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolR3->cToPhysCalls, STAMTYPE_COUNTER, "/MM/Page/Def/cToPhysCalls", STAMUNIT_CALLS, "Number of MMR3Page2Phys() calls for this pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolR3->cToVirtCalls, STAMTYPE_COUNTER, "/MM/Page/Def/cToVirtCalls", STAMUNIT_CALLS, "Number of MMR3PagePhys2Page()+MMR3PageFreeByPhys() calls for the default pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolR3->cErrors, STAMTYPE_COUNTER, "/MM/Page/Def/cErrors", STAMUNIT_ERRORS, "Number of errors for the default pool.");

    pVM->mm.s.pPagePoolLowR3 = pVM->mm.s.pPagePoolR3 + 1;
    pVM->mm.s.pPagePoolLowR3->pVM = pVM;
    pVM->mm.s.pPagePoolLowR3->fLow = true;
    STAM_REG(pVM, &pVM->mm.s.pPagePoolLowR3->cPages, STAMTYPE_U32, "/MM/Page/Low/cPages", STAMUNIT_PAGES, "Number of pages in the <4GB pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolLowR3->cFreePages, STAMTYPE_U32, "/MM/Page/Low/cFreePages", STAMUNIT_PAGES, "Number of free pages in the <4GB pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolLowR3->cSubPools, STAMTYPE_U32, "/MM/Page/Low/cSubPools", STAMUNIT_COUNT, "Number of sub pools in the <4GB pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolLowR3->cAllocCalls, STAMTYPE_COUNTER, "/MM/Page/Low/cAllocCalls", STAMUNIT_CALLS, "Number of MMR3PageAllocLow() calls for the <4GB pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolLowR3->cFreeCalls, STAMTYPE_COUNTER, "/MM/Page/Low/cFreeCalls", STAMUNIT_CALLS, "Number of MMR3PageFreeLow()+MMR3PageFreeByPhys() calls for the <4GB pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolLowR3->cToPhysCalls, STAMTYPE_COUNTER, "/MM/Page/Low/cToPhysCalls", STAMUNIT_CALLS, "Number of MMR3Page2Phys() calls for the <4GB pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolLowR3->cToVirtCalls, STAMTYPE_COUNTER, "/MM/Page/Low/cToVirtCalls", STAMUNIT_CALLS, "Number of MMR3PagePhys2Page()+MMR3PageFreeByPhys() calls for the <4GB pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolLowR3->cErrors, STAMTYPE_COUNTER, "/MM/Page/Low/cErrors", STAMUNIT_ERRORS, "Number of errors for the <4GB pool.");

#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->mm.s.pPagePoolR0 = (uintptr_t)pVM->mm.s.pPagePoolR3;
    pVM->mm.s.pPagePoolLowR0 = (uintptr_t)pVM->mm.s.pPagePoolLowR3;
#endif

    /** @todo init a mutex? */
    return VINF_SUCCESS;
}


/**
 * Release all locks and free the allocated memory.
 *
 * @param   pVM     The cross context VM structure.
 * @thread  The Emulation Thread.
 */
void mmR3PagePoolTerm(PVM pVM)
{
    if (pVM->mm.s.pPagePoolR3)
    {
        /*
         * Unlock all memory held by subpools and free the memory.
         * (The MM Heap will free the memory used for internal stuff.)
         */
        Assert(!pVM->mm.s.pPagePoolR3->fLow);
        PMMPAGESUBPOOL pSubPool = pVM->mm.s.pPagePoolR3->pHead;
        while (pSubPool)
        {
            int rc = SUPR3PageFreeEx(pSubPool->pvPages, pSubPool->cPages);
            AssertMsgRC(rc, ("SUPR3PageFreeEx(%p) failed with rc=%Rrc\n", pSubPool->pvPages, rc));
            pSubPool->pvPages = NULL;

            /* next */
            pSubPool = pSubPool->pNext;
        }
        pVM->mm.s.pPagePoolR3 = NULL;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
        pVM->mm.s.pPagePoolR0 = NIL_RTR0PTR;
#endif
    }

    if (pVM->mm.s.pPagePoolLowR3)
    {
        /*
         * Free the memory.
         */
        Assert(pVM->mm.s.pPagePoolLowR3->fLow);
        PMMPAGESUBPOOL pSubPool = pVM->mm.s.pPagePoolLowR3->pHead;
        while (pSubPool)
        {
            int rc = SUPR3LowFree(pSubPool->pvPages, pSubPool->cPages);
            AssertMsgRC(rc, ("SUPR3LowFree(%p) failed with rc=%d\n", pSubPool->pvPages, rc));
            pSubPool->pvPages = NULL;

            /* next */
            pSubPool = pSubPool->pNext;
        }
        pVM->mm.s.pPagePoolLowR3 = NULL;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
        pVM->mm.s.pPagePoolLowR0 = NIL_RTR0PTR;
#endif
    }
}


/**
 * Allocates a page from the page pool.
 *
 * @returns Pointer to allocated page(s).
 * @returns NULL on failure.
 * @param   pPool   Pointer to the page pool.
 * @thread  The Emulation Thread.
 */
DECLINLINE(void *) mmR3PagePoolAlloc(PMMPAGEPOOL pPool)
{
    VM_ASSERT_EMT(pPool->pVM);
    STAM_COUNTER_INC(&pPool->cAllocCalls);

    /*
     * Walk free list.
     */
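    /* pHeadFree only links subpools that still have unallocated pages; a subpool
       is unlinked below once its last free page is handed out, and the free path
       re-links it when a page is returned. */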
    if (pPool->pHeadFree)
    {
        PMMPAGESUBPOOL pSub = pPool->pHeadFree;
        /* decrement free count and unlink if no more free entries. */
        if (!--pSub->cPagesFree)
            pPool->pHeadFree = pSub->pNextFree;
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages--;
#endif

        /* find free spot in bitmap. */
#ifdef USE_INLINE_ASM_BIT_OPS
        const int iPage = ASMBitFirstClear(pSub->auBitmap, pSub->cPages);
        if (iPage >= 0)
        {
            Assert(!ASMBitTest(pSub->auBitmap, iPage));
            ASMBitSet(pSub->auBitmap, iPage);
            return (uint8_t *)pSub->pvPages + PAGE_SIZE * iPage;
        }
#else
        unsigned   *pu = &pSub->auBitmap[0];
        unsigned   *puEnd = &pSub->auBitmap[pSub->cPages / (sizeof(pSub->auBitmap) * 8)];
        while (pu < puEnd)
        {
            unsigned u;
            if ((u = *pu) != ~0U)
            {
                unsigned iBit = 0;
                unsigned uMask = 1;
                while (iBit < sizeof(pSub->auBitmap[0]) * 8)
                {
                    if (!(u & uMask))
                    {
                        *pu |= uMask;
                        return (uint8_t *)pSub->pvPages
                             + PAGE_SIZE * (iBit + ((uint8_t *)pu - (uint8_t *)&pSub->auBitmap[0]) * 8);
                    }
                    iBit++;
                    uMask <<= 1;
                }
                STAM_COUNTER_INC(&pPool->cErrors);
                AssertMsgFailed(("how odd, expected to find a free bit in %#x, but didn't\n", u));
            }
            /* next */
            pu++;
        }
#endif
        STAM_COUNTER_INC(&pPool->cErrors);
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages++;
#endif
        AssertMsgFailed(("how strange, expected to find a free bit in %p, but didn't (%d pages supposed to be free!)\n", pSub, pSub->cPagesFree + 1));
    }

    /*
     * Allocate new subpool.
     */
    unsigned        cPages = !pPool->fLow ? 128 : 32;
    PMMPAGESUBPOOL  pSub;
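    /* One MMHyperAlloc block carries everything a subpool needs: the MMPAGESUBPOOL
       descriptor with its allocation bitmap, a SUPPAGE array that receives the host
       physical addresses, one MMPPLOOKUPHCPHYS AVL node per page, and a single
       MMPPLOOKUPHCPTR node for the virtual address lookup. */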
    int rc = MMHyperAlloc(pPool->pVM,
                          RT_UOFFSETOF_DYN(MMPAGESUBPOOL, auBitmap[cPages / (sizeof(pSub->auBitmap[0]) * 8)])
                          + (sizeof(SUPPAGE) + sizeof(MMPPLOOKUPHCPHYS)) * cPages
                          + sizeof(MMPPLOOKUPHCPTR),
                          0,
                          MM_TAG_MM_PAGE,
                          (void **)&pSub);
    if (RT_FAILURE(rc))
        return NULL;

    PSUPPAGE paPhysPages = (PSUPPAGE)&pSub->auBitmap[cPages / (sizeof(pSub->auBitmap[0]) * 8)];
    Assert((uintptr_t)paPhysPages >= (uintptr_t)&pSub->auBitmap[1]);
    if (!pPool->fLow)
    {
        rc = SUPR3PageAllocEx(cPages,
                              0 /* fFlags */,
                              &pSub->pvPages,
                              NULL,
                              paPhysPages);
        if (RT_FAILURE(rc))
            rc = VMSetError(pPool->pVM, rc, RT_SRC_POS,
                            N_("Failed to lock host %zd bytes of memory (out of memory)"), (size_t)cPages << PAGE_SHIFT);
    }
    else
        rc = SUPR3LowAlloc(cPages, &pSub->pvPages, NULL, paPhysPages);
    if (RT_SUCCESS(rc))
    {
        /*
         * Setup the sub structure and allocate the requested page.
         */
        pSub->cPages = cPages;
        pSub->cPagesFree = cPages - 1;
        pSub->paPhysPages = paPhysPages;
        memset(pSub->auBitmap, 0, cPages / 8);
        /* allocate first page. */
        pSub->auBitmap[0] |= 1;
        /* link into free chain. */
        pSub->pNextFree = pPool->pHeadFree;
        pPool->pHeadFree = pSub;
        /* link into main chain. */
        pSub->pNext = pPool->pHead;
        pPool->pHead = pSub;
        /* update pool statistics. */
        pPool->cSubPools++;
        pPool->cPages += cPages;
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages += cPages - 1;
#endif

        /*
         * Initialize the physical pages with backpointer to subpool.
         */
        unsigned i = cPages;
        while (i-- > 0)
        {
            AssertMsg(paPhysPages[i].Phys && !(paPhysPages[i].Phys & PAGE_OFFSET_MASK),
                      ("i=%d Phys=%d\n", i, paPhysPages[i].Phys));
            paPhysPages[i].uReserved = (RTHCUINTPTR)pSub;
        }

        /*
         * Initialize the physical lookup record with backpointers to the physical pages.
         */
        PMMPPLOOKUPHCPHYS paLookupPhys = (PMMPPLOOKUPHCPHYS)&paPhysPages[cPages];
        i = cPages;
        while (i-- > 0)
        {
            paLookupPhys[i].pPhysPage = &paPhysPages[i];
            paLookupPhys[i].Core.Key = paPhysPages[i].Phys;
            RTAvlHCPhysInsert(&pPool->pLookupPhys, &paLookupPhys[i].Core);
        }

        /*
         * And the one record for virtual memory lookup.
         */
        PMMPPLOOKUPHCPTR pLookupVirt = (PMMPPLOOKUPHCPTR)&paLookupPhys[cPages];
        pLookupVirt->pSubPool = pSub;
        pLookupVirt->Core.Key = pSub->pvPages;
        RTAvlPVInsert(&pPool->pLookupVirt, &pLookupVirt->Core);

        /* return allocated page (first). */
        return pSub->pvPages;
    }

    MMHyperFree(pPool->pVM, pSub);
    STAM_COUNTER_INC(&pPool->cErrors);
    if (pPool->fLow)
        VMSetError(pPool->pVM, rc, RT_SRC_POS,
                   N_("Failed to expand page pool for memory below 4GB. Current size: %d pages"),
                   pPool->cPages);
    AssertMsgFailed(("Failed to expand pool%s. rc=%Rrc poolsize=%d\n",
                     pPool->fLow ? " (<4GB)" : "", rc, pPool->cPages));
    return NULL;
}

#if 0
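/* Note: the per-page allocation API below is currently compiled out; as the
   change description above notes, PGM's page pool now does its own allocations
   and only the dummy page helpers at the end of this file are still needed
   from MMPage*. */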

/**
 * Frees a page from the page pool.
 *
 * @param   pPool   Pointer to the page pool.
 * @param   pv      Pointer to the page to free.
 *                  I.e. pointer returned by mmR3PagePoolAlloc().
 * @thread  The Emulation Thread.
 */
DECLINLINE(void) mmR3PagePoolFree(PMMPAGEPOOL pPool, void *pv)
{
    VM_ASSERT_EMT(pPool->pVM);
    STAM_COUNTER_INC(&pPool->cFreeCalls);

    /*
     * Lookup the virtual address.
     */
    PMMPPLOOKUPHCPTR pLookup = (PMMPPLOOKUPHCPTR)RTAvlPVGetBestFit(&pPool->pLookupVirt, pv, false);
    if (    !pLookup
        ||  (uint8_t *)pv >= (uint8_t *)pLookup->pSubPool->pvPages + (pLookup->pSubPool->cPages << PAGE_SHIFT)
        )
    {
        STAM_COUNTER_INC(&pPool->cErrors);
        AssertMsgFailed(("invalid pointer %p\n", pv));
        return;
    }

    /*
     * Free the page.
     */
    PMMPAGESUBPOOL pSubPool = pLookup->pSubPool;
    /* clear bitmap bit */
    const unsigned iPage = ((uint8_t *)pv - (uint8_t *)pSubPool->pvPages) >> PAGE_SHIFT;
#ifdef USE_INLINE_ASM_BIT_OPS
    Assert(ASMBitTest(pSubPool->auBitmap, iPage));
    ASMBitClear(pSubPool->auBitmap, iPage);
#else
    unsigned    iBit = iPage % (sizeof(pSubPool->auBitmap[0]) * 8);
    unsigned    iIndex = iPage / (sizeof(pSubPool->auBitmap[0]) * 8);
    pSubPool->auBitmap[iIndex] &= ~(1 << iBit);
#endif
    /* update stats. */
    pSubPool->cPagesFree++;
#ifdef VBOX_WITH_STATISTICS
    pPool->cFreePages++;
#endif
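    /* The subpool just went from fully allocated back to having a free page,
       so put it back on the pool's free chain. */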
    if (pSubPool->cPagesFree == 1)
    {
        pSubPool->pNextFree = pPool->pHeadFree;
        pPool->pHeadFree = pSubPool;
    }
}


/**
 * Allocates a page from the page pool.
 *
 * This function may return pages whose physical addresses are located
 * anywhere. If you require a page to be within the first 4GB of physical
 * memory, use MMR3PageAllocLow().
 *
 * @returns Pointer to the allocated page.
 * @returns NULL on failure.
 * @param   pVM     The cross context VM structure.
 * @thread  The Emulation Thread.
 */
VMMR3DECL(void *) MMR3PageAlloc(PVM pVM)
{
    /* Note: unprotected by locks; currently fine as it's used during init or under the PGM lock */
    return mmR3PagePoolAlloc(pVM->mm.s.pPagePoolR3);
}


/**
 * Allocates a page from the page pool and returns its physical address.
 *
 * This function may return pages whose physical addresses are located
 * anywhere. If you require a page to be within the first 4GB of physical
 * memory, use MMR3PageAllocLow().
 *
 * @returns The physical address of the allocated page.
 * @returns NIL_RTHCPHYS on failure.
 * @param   pVM     The cross context VM structure.
 * @thread  The Emulation Thread.
 */
VMMR3DECL(RTHCPHYS) MMR3PageAllocPhys(PVM pVM)
{
    /* Note: unprotected by locks; currently fine as it's used during init or under the PGM lock */
    /** @todo optimize this, it's the most common case now. */
    void *pv = mmR3PagePoolAlloc(pVM->mm.s.pPagePoolR3);
    if (pv)
        return mmPagePoolPtr2Phys(pVM->mm.s.pPagePoolR3, pv);
    return NIL_RTHCPHYS;
}


/**
 * Frees a page allocated from the page pool by MMR3PageAlloc() or
 * MMR3PageAllocPhys().
 *
 * @param   pVM     The cross context VM structure.
 * @param   pvPage  Pointer to the page.
 * @thread  The Emulation Thread.
 */
VMMR3DECL(void) MMR3PageFree(PVM pVM, void *pvPage)
{
    mmR3PagePoolFree(pVM->mm.s.pPagePoolR3, pvPage);
}


/**
 * Allocates a page from the low page pool.
 *
 * @returns Pointer to the allocated page.
 * @returns NULL on failure.
 * @param   pVM     The cross context VM structure.
 * @thread  The Emulation Thread.
 */
VMMR3DECL(void *) MMR3PageAllocLow(PVM pVM)
{
    return mmR3PagePoolAlloc(pVM->mm.s.pPagePoolLowR3);
}


/**
 * Frees a page allocated from the page pool by MMR3PageAllocLow().
 *
 * @param   pVM     The cross context VM structure.
 * @param   pvPage  Pointer to the page.
 * @thread  The Emulation Thread.
 */
VMMR3DECL(void) MMR3PageFreeLow(PVM pVM, void *pvPage)
{
    mmR3PagePoolFree(pVM->mm.s.pPagePoolLowR3, pvPage);
}


/**
 * Free a page allocated from the page pool by physical address.
 * This works for pages allocated by MMR3PageAlloc(), MMR3PageAllocPhys()
 * and MMR3PageAllocLow().
 *
 * @param   pVM         The cross context VM structure.
 * @param   HCPhysPage  The physical address of the page to be freed.
 * @thread  The Emulation Thread.
 */
VMMR3DECL(void) MMR3PageFreeByPhys(PVM pVM, RTHCPHYS HCPhysPage)
{
    void *pvPage = mmPagePoolPhys2Ptr(pVM->mm.s.pPagePoolR3, HCPhysPage);
    if (pvPage)
        mmR3PagePoolFree(pVM->mm.s.pPagePoolR3, pvPage);
    else
    {
        pvPage = mmPagePoolPhys2Ptr(pVM->mm.s.pPagePoolLowR3, HCPhysPage);
        if (pvPage)
            mmR3PagePoolFree(pVM->mm.s.pPagePoolLowR3, pvPage);
        else
            AssertMsgFailed(("Invalid address HCPhysPT=%#x\n", HCPhysPage));
    }
}

#endif

/**
 * Gets the HC pointer to the dummy page.
 *
 * The dummy page is used as a placeholder to prevent potential bugs
 * from doing really bad things to the system.
 *
 * @returns Pointer to the dummy page.
 * @param   pVM     The cross context VM structure.
 * @thread  The Emulation Thread.
 */
VMMR3DECL(void *) MMR3PageDummyHCPtr(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    if (!pVM->mm.s.pvDummyPage)
    {
        pVM->mm.s.pvDummyPage = mmR3PagePoolAlloc(pVM->mm.s.pPagePoolR3);
        AssertRelease(pVM->mm.s.pvDummyPage);
        pVM->mm.s.HCPhysDummyPage = mmPagePoolPtr2Phys(pVM->mm.s.pPagePoolR3, pVM->mm.s.pvDummyPage);
        AssertRelease(!(pVM->mm.s.HCPhysDummyPage & ~X86_PTE_PAE_PG_MASK));
    }
    return pVM->mm.s.pvDummyPage;
}


/**
 * Gets the HC Phys to the dummy page.
 *
 * The dummy page is used as a placeholder to prevent potential bugs
 * from doing really bad things to the system.
 *
 * @returns Physical address of the dummy page.
 * @param   pVM     The cross context VM structure.
 * @thread  The Emulation Thread.
 */
VMMR3DECL(RTHCPHYS) MMR3PageDummyHCPhys(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    if (!pVM->mm.s.pvDummyPage)
        MMR3PageDummyHCPtr(pVM);
    return pVM->mm.s.HCPhysDummyPage;
}

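
/*
 * Minimal usage sketch for the dummy page helpers (illustrative only; the
 * helper name and the PAE PTE mapping below are assumptions, not code from
 * this module):
 *
 *     // Point a not-yet-backed PAE page table entry at the harmless dummy page.
 *     static void exampleBackPteWithDummyPage(PVM pVM, PX86PTEPAE pPte)
 *     {
 *         RTHCPHYS HCPhysDummy = MMR3PageDummyHCPhys(pVM);
 *         pPte->u = HCPhysDummy | X86_PTE_P;
 *     }
 */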