VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp @ 20278

Last change on this file since 20278 was 20278, checked in by vboxsync, 16 years ago

Shadow page mode exit fix for AMD-V nested paging. (not called with type nested)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 33.5 KB
/* $Id: PGMAllMap.cpp 20278 2009-06-04 13:14:37Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <VBox/err.h>


/**
 * Maps a range of physical pages at a given virtual address
 * in the guest context.
 *
 * The GC virtual address range must be within an existing mapping.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   GCPtr       Where to map the page(s). Must be page aligned.
 * @param   HCPhys      Start of the range of physical pages. Must be page aligned.
 * @param   cbPages     Number of bytes to map. Must be page aligned.
 * @param   fFlags      Page flags (X86_PTE_*).
 */
VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
{
    AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));

    /*
     * Validate input.
     */
    AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
    AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
    AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));

    /* hypervisor defaults */
    if (!fFlags)
        fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        if (GCPtr - pCur->GCPtr < pCur->cb)
        {
            if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
            {
                AssertMsgFailed(("Invalid range!!\n"));
                return VERR_INVALID_PARAMETER;
            }

            /*
             * Setup PTE.
             */
            X86PTEPAE Pte;
            Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);

            /*
             * Update the page tables.
             */
            for (;;)
            {
                RTGCUINTPTR off = GCPtr - pCur->GCPtr;
                const unsigned iPT     = off >> X86_PD_SHIFT;
                const unsigned iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;
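                /* Each mapping slot spans 4 MB: one 1024-entry 32-bit page table,
                   shadowed by two 512-entry PAE page tables in paPaePTs.  Hence the
                   PAE table below is picked with iPageNo / 512 and the entry within
                   it with iPageNo % 512. */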

                /* 32-bit */
                pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u;      /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */

                /* pae */
                pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512].u = Pte.u;

                /* next */
                cbPages -= PAGE_SIZE;
                if (!cbPages)
                    break;
                GCPtr += PAGE_SIZE;
                Pte.u += PAGE_SIZE;
            }

            return VINF_SUCCESS;
        }

        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("GCPtr=%#x was not found in any mapping ranges!\n", GCPtr));
    return VERR_INVALID_PARAMETER;
}
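
/*
 * Usage sketch (illustrative only; GCPtrIntoMapping and HCPhysStart are made-up
 * names, not taken from an actual caller): map two host pages read/write into an
 * existing hypervisor mapping in the guest context.
 *
 *      int rc = PGMMap(pVM, GCPtrIntoMapping, HCPhysStart, 2 * PAGE_SIZE,
 *                      X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D);
 *      AssertRC(rc);
 *
 * Passing fFlags=0 falls back to the hypervisor defaults set above: present,
 * accessed and dirty only, i.e. not writable and supervisor-only.
 */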


/**
 * Sets (replaces) the page flags for a range of pages in a mapping.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags)
{
    return PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0);
}
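
/*
 * Note on the wrapper above: passing fMask=0 to PGMMapModifyPage clears every
 * flag bit outside the physical address mask before ORing in fFlags, so this is
 * a full replace rather than a merge.  Illustrative flags only:
 *
 *      PGMMapSetPage(pVM, GCPtr, cb, X86_PTE_P | X86_PTE_RW);
 *      // is equivalent to:
 *      PGMMapModifyPage(pVM, GCPtr, cb, X86_PTE_P | X86_PTE_RW, 0);
 */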


/**
 * Modify page flags for a range of pages in a mapping.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#x\n", fFlags));
    Assert(cb);

    /*
     * Align the input.
     */
    cb    += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb     = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr  = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
        if (off < pCur->cb)
        {
            AssertMsgReturn(off + cb <= pCur->cb,
                            ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
                             GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
                            VERR_INVALID_PARAMETER);

            /*
             * Perform the requested operation.
             */
            while (cb > 0)
            {
                unsigned iPT  = off >> X86_PD_SHIFT;
                unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
                while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
                {
                    /* 32-Bit */
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u &= fMask | X86_PTE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u |= fFlags & ~X86_PTE_PG_MASK;

                    /* PAE */
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u &= fMask | X86_PTE_PAE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u |= fFlags & ~X86_PTE_PAE_PG_MASK;

                    /* invalidate tlb */
                    PGM_INVL_PG(VMMGetCpu(pVM), (RTGCUINTPTR)pCur->GCPtr + off);

                    /* next */
                    iPTE++;
                    cb  -= PAGE_SIZE;
                    off += PAGE_SIZE;
                }
            }

            return VINF_SUCCESS;
        }
        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("Page range %#x LB%#x not found\n", GCPtr, cb));
    return VERR_INVALID_PARAMETER;
}
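
/*
 * Usage sketch (illustrative only; GCPtrRange and cbRange are hypothetical values,
 * not real call sites): make an already mapped range read-only by clearing
 * X86_PTE_RW while leaving the other flags and the physical addresses untouched.
 *
 *      int rc = PGMMapModifyPage(pVM, GCPtrRange, cbRange,
 *                                0,                          // fFlags: nothing to OR in
 *                                ~(uint64_t)X86_PTE_RW);     // fMask:  AND away only the RW bit
 *      AssertRC(rc);
 */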


#ifndef IN_RING0
/**
 * Sets all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pMap        Pointer to the mapping in question.
 * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
{
    Log4(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    if (!pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
        return;    /* too early */

    PGMMODE enmShadowMode = PGMGetShadowMode(pVCpu);
    Assert(enmShadowMode <= PGMMODE_PAE_NX);
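    /* Only the 32-bit and PAE shadow modes are expected here: this path is raw-mode
       only and, per the commit note above, is not called for the nested paging type. */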

    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

    /*
     * Insert the page tables into the shadow page directories.
     */
    unsigned i = pMap->cPTs;
    iNewPDE += i;
    while (i-- > 0)
    {
        iNewPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVCpu->pgm.s);
                AssertFatal(pShw32BitPd);
#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShw32BitPd);
#endif
                /* Free any previous user, unless it's us. */
                Assert(   (pShw32BitPd->a[iNewPDE].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
                       || (pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPT);
                if (    pShw32BitPd->a[iNewPDE].n.u1Present
                    &&  !(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING))
                    pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);

                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags. */
                pShw32BitPd->a[iNewPDE].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                          | (uint32_t)pMap->aPTs[i].HCPhysPT;
#ifdef IN_RC
                /* Unlock dynamic mappings again. */
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShw32BitPd);
#endif
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const uint32_t iPdPt = iNewPDE / 256;
                unsigned iPaePde = iNewPDE * 2 % 512;
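                /* iNewPDE indexes 4 MB slots in the 32-bit PD (1024 entries).  The same
                   slot lands in PAE page directory iNewPDE / 256 (each PAE PD covers
                   256 such slots, i.e. 1 GB) and occupies two consecutive 2 MB PAE PDEs
                   starting at (iNewPDE * 2) % 512. */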
                PX86PDPT pShwPdpt = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);
                Assert(pShwPdpt);
#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmShwSyncPaePDPtr. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShwPdpt);
#endif

                /*
                 * Get the shadow PD.
                 * If no PD, sync it (PAE guest) or fake (not present or 32-bit guest).
                 * Note! The RW, US and A bits are reserved for PAE PDPTEs. Setting the
                 *       accessed bit causes invalid VT-x guest state errors.
                 */
                PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVCpu->pgm.s, iPdPt << X86_PDPT_SHIFT);
                if (!pShwPaePd)
                {
                    X86PDPE GstPdpe;
                    if (PGMGetGuestMode(pVCpu) < PGMMODE_PAE)
                        GstPdpe.u = X86_PDPE_P;
                    else
                    {
                        PX86PDPE pGstPdpe = pgmGstGetPaePDPEPtr(&pVCpu->pgm.s, iPdPt << X86_PDPT_SHIFT);
                        if (pGstPdpe)
                            GstPdpe = *pGstPdpe;
                        else
                            GstPdpe.u = X86_PDPE_P;
                    }
                    int rc = pgmShwSyncPaePDPtr(pVCpu, iPdPt << X86_PDPT_SHIFT, &GstPdpe, &pShwPaePd);
                    AssertFatalRC(rc);
                }
                Assert(pShwPaePd);
#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShwPaePd);
#endif

                /*
                 * Mark the page as locked; disallow flushing.
                 */
                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPage(pPool, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);
                if (!pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                    pgmPoolLockPage(pPool, pPoolPagePd);
#ifdef VBOX_STRICT
                else if (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING)
                {
                    Assert(PGMGetGuestMode(pVCpu) >= PGMMODE_PAE); /** @todo We may hit this during reset, will fix later. */
                    AssertFatalMsg(   (pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0
                                   || !PGMMODE_WITH_PAGING(PGMGetGuestMode(pVCpu)),
                                   ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT0));
                    Assert(pShwPaePd->a[iPaePde+1].u & PGM_PDFLAGS_MAPPING);
                    AssertFatalMsg(   (pShwPaePd->a[iPaePde+1].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1
                                   || !PGMMODE_WITH_PAGING(PGMGetGuestMode(pVCpu)),
                                   ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde+1].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1));
                }
#endif

                /*
                 * Insert our first PT, freeing anything we might be replacing unless it's a mapping (i.e. us).
                 */
                Assert(   (pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
                       || (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0);
                if (    pShwPaePd->a[iPaePde].n.u1Present
                    &&  !(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING))
                {
                    Assert(!(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
                    pgmPoolFree(pVM, pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPaePde);
                }
                pShwPaePd->a[iPaePde].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                        | pMap->aPTs[i].HCPhysPaePT0;

                /* 2nd 2 MB PDE of the 4 MB region, same as above. */
                iPaePde++;
                AssertFatal(iPaePde < 512);
                Assert(   (pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
                       || (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1);
                if (    pShwPaePd->a[iPaePde].n.u1Present
                    &&  !(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING))
                    pgmPoolFree(pVM, pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPaePde);
                pShwPaePd->a[iPaePde].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                        | pMap->aPTs[i].HCPhysPaePT1;

                /*
                 * Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode)
                 */
                pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;

#ifdef IN_RC
                /* Unlock dynamic mappings again. */
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPaePd);
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPdpt);
#endif
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}


/**
 * Clears all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM             The VM handle.
 * @param   pShwPageCR3     CR3 root page
 * @param   pMap            Pointer to the mapping in question.
 * @param   iOldPDE         The index of the 32-bit PDE corresponding to the base of the mapping.
 * @param   fDeactivateCR3  Set when called from pgmMapDeactivateCR3.
 */
void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3)
{
    Log(("pgmMapClearShadowPDEs: old pde %x (cPTs=%x) (mappings enabled %d) fDeactivateCR3=%RTbool\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled(&pVM->pgm.s), fDeactivateCR3));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    Assert(pShwPageCR3);

    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = VMMGetCpu0(pVM);
# ifdef IN_RC
    Assert(pShwPageCR3 != pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

    PX86PDPT pCurrentShwPdpt = NULL;
    if (    PGMGetGuestMode(pVCpu) >= PGMMODE_PAE
        &&  pShwPageCR3 != pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        pCurrentShwPdpt = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);
#ifdef IN_RC    /* Lock mapping to prevent it from being reused (currently not possible). */
        if (pCurrentShwPdpt)
            PGMDynLockHCPage(pVM, (uint8_t *)pCurrentShwPdpt);
#endif
    }

    unsigned i = pMap->cPTs;
    PGMMODE enmShadowMode = PGMGetShadowMode(pVCpu);

    iOldPDE += i;
    while (i-- > 0)
    {
        iOldPDE--;

        switch(enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                AssertFatal(pShw32BitPd);

                Assert(!pShw32BitPd->a[iOldPDE].n.u1Present || (pShw32BitPd->a[iOldPDE].u & PGM_PDFLAGS_MAPPING));
                pShw32BitPd->a[iOldPDE].u = 0;
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const unsigned iPdpt = iOldPDE / 256;         /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
                unsigned iPaePde = iOldPDE * 2 % 512;
                PX86PDPT pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVCpu->pgm.s, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));

                /*
                 * Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode)
                 */
                if (fDeactivateCR3)
                    pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
                else if (pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING)
                {
                    /* See if there are any other mappings here. This is suboptimal code. */
                    pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
                    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
                        if (    pCur != pMap
                            &&  (   (pCur->GCPtr >> X86_PDPT_SHIFT) == iPdpt
                                 || (pCur->GCPtrLast >> X86_PDPT_SHIFT) == iPdpt))
                        {
                            pShwPdpt->a[iPdpt].u |= PGM_PLXFLAGS_MAPPING;
                            break;
                        }
                }

                /*
                 * If the page directory of the old CR3 is reused in the new one, then don't
                 * clear the hypervisor mappings.
                 */
                if (    pCurrentShwPdpt
                    &&  (pCurrentShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) == (pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) )
                {
                    LogFlow(("pgmMapClearShadowPDEs: Pdpe %d reused -> don't clear hypervisor mappings!\n", iPdpt));
                    break;
                }

                /*
                 * Clear the mappings in the PD.
                 */
                AssertFatal(pShwPaePd);
                Assert(!pShwPaePd->a[iPaePde].n.u1Present || (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPaePde].u = 0;

                iPaePde++;
                AssertFatal(iPaePde < 512);
                Assert(!pShwPaePd->a[iPaePde].n.u1Present || (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPaePde].u = 0;

                /*
                 * Unlock the shadow pool PD page if the PDPTE no longer holds any mappings.
                 */
                if (    fDeactivateCR3
                    ||  !(pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
                {
                    PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPage(pPool, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
                    AssertFatal(pPoolPagePd);
                    if (pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                        pgmPoolUnlockPage(pPool, pPoolPagePd);
                }
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
#ifdef IN_RC
    /* Unlock dynamic mappings again. */
    if (pCurrentShwPdpt)
        PGMDynUnlockHCPage(pVM, (uint8_t *)pCurrentShwPdpt);
#endif
}
#endif /* !IN_RING0 */

#if defined(VBOX_STRICT) && !defined(IN_RING0)
/**
 * Checks that all PDEs involved with the mapping in the shadow page table are as expected.
 *
 * @param   pVM             The VM handle.
 * @param   pVCpu           The VMCPU handle.
 * @param   pShwPageCR3     CR3 root page
 * @param   pMap            Pointer to the mapping in question.
 * @param   iPDE            The index of the 32-bit PDE corresponding to the base of the mapping.
 */
static void pgmMapCheckShadowPDEs(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
{
    Assert(pShwPageCR3);

    uint32_t i = pMap->cPTs;
    PGMMODE enmShadowMode = PGMGetShadowMode(pVCpu);
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

    iPDE += i;
    while (i-- > 0)
    {
        iPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PCX86PD pShw32BitPd = (PCX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                AssertFatal(pShw32BitPd);

                AssertMsg(pShw32BitPd->a[iPDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
                          ("Expected %x vs %x; iPDE=%#x %RGv %s\n",
                           pShw32BitPd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
                           iPDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const unsigned iPdpt = iPDE / 256;          /* iPDE * 2 / 512; iPDE is in 4 MB pages */
                unsigned iPaePDE = iPDE * 2 % 512;
                PX86PDPT pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                PCX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVCpu->pgm.s, pShwPdpt, iPdpt << X86_PDPT_SHIFT);
                AssertFatal(pShwPaePd);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
                          ("Expected %RX64 vs %RX64; iPDE=%#x iPdpt=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));

                iPaePDE++;
                AssertFatal(iPaePDE < 512);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
                          ("Expected %RX64 vs %RX64; iPDE=%#x iPdpt=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));

                AssertMsg(pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING,
                          ("%RX64; iPdpt=%#x iPDE=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPdpt->a[iPdpt].u,
                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));

                PCPGMPOOLPAGE pPoolPagePd = pgmPoolGetPage(pPool, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);
                AssertMsg(pPoolPagePd->cLocked, (".idx=%d .type=%d\n", pPoolPagePd->idx, pPoolPagePd->enmKind));
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}


/**
 * Check the hypervisor mappings in the active CR3.
 *
 * @param   pVM     The virtual machine.
 */
VMMDECL(void) PGMMapCheck(PVM pVM)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    Assert(pVM->cCPUs == 1);

    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapCheckShadowPDEs(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
    }
}
#endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */

#ifndef IN_RING0

/**
 * Apply the hypervisor mappings to the active CR3.
 *
 * @returns VBox status code.
 * @param   pVM             The virtual machine.
 * @param   pShwPageCR3     CR3 root page
 */
int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return VINF_SUCCESS;

    /* Note! A log flush (in RC) can cause problems when called from MapCR3 (inconsistent state will trigger assertions). */
    Log4(("pgmMapActivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));

#ifdef VBOX_STRICT
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    Assert(pShwPageCR3 && pShwPageCR3 == pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
#endif

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapSetShadowPDEs(pVM, pCur, iPDE);
    }
    return VINF_SUCCESS;
}


/**
 * Remove the hypervisor mappings from the specified CR3.
 *
 * @returns VBox status code.
 * @param   pVM             The virtual machine.
 * @param   pShwPageCR3     CR3 root page
 */
int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return VINF_SUCCESS;

    Assert(pShwPageCR3);
    Log4(("pgmMapDeactivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE, true /*fDeactivateCR3*/);
    }
    return VINF_SUCCESS;
}


/**
 * Checks guest PD for conflicts with VMM GC mappings.
 *
 * @returns true if conflict detected.
 * @returns false if not.
 * @param   pVM     The virtual machine.
 */
VMMDECL(bool) PGMMapHasConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return false;

    Assert(pVM->cCPUs == 1);

    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVCpu);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    /*
     * Iterate mappings.
     */
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT = pCur->cPTs;
            while (iPT-- > 0)
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMMapHasConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                    iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#else
                    Log(("PGMMapHasConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                    iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#endif
                    return true;
                }
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR GCPtr = pCur->GCPtr;

            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVCpu->pgm.s, GCPtr);

                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMMapHasConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                    PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
#else
                    Log(("PGMMapHasConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                    PDE=%016RX64.\n",
                         GCPtr, Pde.u));
#endif
                    return true;
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
        }
    }
    else
        AssertFailed();

    return false;
}
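
/*
 * Minimal usage sketch (illustrative only, not lifted from an actual call site):
 * detect a guest conflict and let the resolver below relocate the affected mapping.
 *
 *      if (PGMMapHasConflicts(pVM))
 *      {
 *          int rc = PGMMapResolveConflicts(pVM);   // ring-3 relocates the mapping;
 *          AssertRC(rc);                           // other contexts get VINF_PGM_SYNC_CR3
 *      }
 */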


/**
 * Checks and resolves (ring 3 only) guest conflicts with VMM GC mappings.
 *
 * @returns VBox status code.
 * @param   pVM     The virtual machine.
 */
VMMDECL(int) PGMMapResolveConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;

    Assert(pVM->cCPUs == 1);

    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVCpu);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
        Assert(pPD);

        /*
         * Iterate mappings.
         */
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; )
        {
            PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT = pCur->cPTs;
            while (iPT-- > 0)
            {
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (   pVM->fRawR0Enabled
                         || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMMapResolveConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT);
                    AssertRCReturn(rc, rc);
                    break;
#else
                    Log(("PGMMapResolveConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
            }
            pCur = pNext;
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        /*
         * Iterate mappings.
         */
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur;)
        {
            PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
            RTGCPTR  GCPtr = pCur->GCPtr;
            unsigned iPT   = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVCpu->pgm.s, GCPtr);

                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMMapResolveConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
                    int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, pCur->GCPtr);
                    AssertRCReturn(rc, rc);
                    break;
#else
                    Log(("PGMMapResolveConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
            pCur = pNext;
        }
    }
    else
        AssertFailed();

    Assert(!PGMMapHasConflicts(pVM));
    return VINF_SUCCESS;
}

#endif /* !IN_RING0 */
