VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IOMR3Mmio.cpp@ 104840

Last change on this file since 104840 was 104840, checked in by vboxsync, 12 months ago

VMM/PGM: Refactored RAM ranges, MMIO2 ranges and ROM ranges and added MMIO ranges (to PGM) so we can safely access RAM ranges at runtime w/o fear of them ever being freed up. It is now only possible to create these during VM creation and loading, and they will live till VM destruction (except for MMIO2 which could be destroyed during loading (PCNet fun)). The lookup handling is by table instead of pointer tree. No more ring-0 pointers in shared data. bugref:10687 bugref:10093

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 28.6 KB
Line 
1/* $Id: IOMR3Mmio.cpp 104840 2024-06-05 00:59:51Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor, MMIO related APIs.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.215389.xyz.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IOM_MMIO
33#include <VBox/vmm/iom.h>
34#include <VBox/sup.h>
35#include <VBox/vmm/mm.h>
36#include <VBox/vmm/stam.h>
37#include <VBox/vmm/dbgf.h>
38#include <VBox/vmm/pdmapi.h>
39#include <VBox/vmm/pdmdev.h>
40#include "IOMInternal.h"
41#include <VBox/vmm/vm.h>
42
43#include <VBox/param.h>
44#include <iprt/assert.h>
45#include <iprt/mem.h>
46#include <iprt/string.h>
47#include <VBox/log.h>
48#include <VBox/err.h>
49
50#include "IOMInline.h"
51
52
53#ifdef VBOX_WITH_STATISTICS
54
55/**
56 * Register statistics for a MMIO entry.
57 */
58void iomR3MmioRegStats(PVM pVM, PIOMMMIOENTRYR3 pRegEntry)
59{
60 bool const fDoRZ = pRegEntry->fRing0 || pRegEntry->fRawMode;
61 PIOMMMIOSTATSENTRY pStats = &pVM->iom.s.paMmioStats[pRegEntry->idxStats];
62
63 /* Format the prefix: */
64 char szName[80];
65 size_t cchPrefix = RTStrPrintf(szName, sizeof(szName), "/IOM/MmioRegions/%RGp-%RGp",
66 pRegEntry->GCPhysMapping, pRegEntry->GCPhysMapping + pRegEntry->cbRegion - 1);
67
68 /* Mangle the description if this isn't the first device instance: */
69 const char *pszDesc = pRegEntry->pszDesc;
70 char *pszFreeDesc = NULL;
71 if (pRegEntry->pDevIns && pRegEntry->pDevIns->iInstance > 0 && pszDesc)
72 pszDesc = pszFreeDesc = RTStrAPrintf2("%u / %s", pRegEntry->pDevIns->iInstance, pszDesc);
73
74 /* Register statistics: */
75 int rc = STAMR3Register(pVM, &pRegEntry->idxSelf, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_NONE, pszDesc); AssertRC(rc);
76 RTStrFree(pszFreeDesc);
77
78# define SET_NM_SUFFIX(a_sz) memcpy(&szName[cchPrefix], a_sz, sizeof(a_sz))
79 SET_NM_SUFFIX("/Read-Complicated");
80 rc = STAMR3Register(pVM, &pStats->ComplicatedReads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
81 SET_NM_SUFFIX("/Read-FFor00");
82 rc = STAMR3Register(pVM, &pStats->FFor00Reads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
83 SET_NM_SUFFIX("/Read-R3");
84 rc = STAMR3Register(pVM, &pStats->ProfReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
85 if (fDoRZ)
86 {
87 SET_NM_SUFFIX("/Read-RZ");
88 rc = STAMR3Register(pVM, &pStats->ProfReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
89 SET_NM_SUFFIX("/Read-RZtoR3");
90 rc = STAMR3Register(pVM, &pStats->ReadRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
91 }
92 SET_NM_SUFFIX("/Read-Total");
93 rc = STAMR3Register(pVM, &pStats->Reads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
94
95 SET_NM_SUFFIX("/Write-Complicated");
96 rc = STAMR3Register(pVM, &pStats->ComplicatedWrites, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
97 SET_NM_SUFFIX("/Write-R3");
98 rc = STAMR3Register(pVM, &pStats->ProfWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
99 if (fDoRZ)
100 {
101 SET_NM_SUFFIX("/Write-RZ");
102 rc = STAMR3Register(pVM, &pStats->ProfWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
103 SET_NM_SUFFIX("/Write-RZtoR3");
104 rc = STAMR3Register(pVM, &pStats->WriteRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
105 SET_NM_SUFFIX("/Write-RZtoR3-Commit");
106 rc = STAMR3Register(pVM, &pStats->CommitRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
107 }
108 SET_NM_SUFFIX("/Write-Total");
109 rc = STAMR3Register(pVM, &pStats->Writes, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
110}
111
112
113/**
114 * Deregister statistics for a MMIO entry.
115 */
116static void iomR3MmioDeregStats(PVM pVM, PIOMMMIOENTRYR3 pRegEntry, RTGCPHYS GCPhys)
117{
118 char szPrefix[80];
119 RTStrPrintf(szPrefix, sizeof(szPrefix), "/IOM/MmioRegions/%RGp-%RGp", GCPhys, GCPhys + pRegEntry->cbRegion - 1);
120 STAMR3DeregisterByPrefix(pVM->pUVM, szPrefix);
121}
122
123
/**
 * Grows the statistics table.
 *
 * @returns VBox status code.
 * @param   pVM          The cross context VM structure.
 * @param   cNewEntries  The minimum number of new entries.
 * @see     IOMR0IoPortGrowStatisticsTable
 */
static int iomR3MmioGrowStatisticsTable(PVM pVM, uint32_t cNewEntries)
{
    /* Statistics indexes are stored as uint16_t, so cap the table at 64K entries. */
    AssertReturn(cNewEntries <= _64K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);

    int rc;
    if (!SUPR3IsDriverless())
    {
        /* With a support driver present, ring-0 owns the table; ask it to grow it. */
        rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_IOM_GROW_MMIO_STATS, cNewEntries, NULL);
        AssertLogRelRCReturn(rc, rc);
        AssertReturn(cNewEntries <= pVM->iom.s.cMmioStatsAllocation, VERR_IOM_MMIO_IPE_2);
    }
    else
    {
        /*
         * Driverless mode: grow the ring-3 table ourselves.
         * Validate input and state.
         */
        uint32_t const cOldEntries = pVM->iom.s.cMmioStatsAllocation;
        AssertReturn(cNewEntries > cOldEntries, VERR_IOM_MMIO_IPE_1);
        AssertReturn(pVM->iom.s.cMmioStats <= cOldEntries, VERR_IOM_MMIO_IPE_2);

        /*
         * Calc size and allocate a new table.  The byte size is rounded up to a
         * whole host page and the slack is used for extra entries.
         */
        uint32_t const cbNew = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOSTATSENTRY), HOST_PAGE_SIZE);
        cNewEntries = cbNew / sizeof(IOMMMIOSTATSENTRY);

        PIOMMMIOSTATSENTRY const paMmioStats = (PIOMMMIOSTATSENTRY)RTMemPageAllocZ(cbNew);
        if (paMmioStats)
        {
            /*
             * Anything to copy over, update and free the old one.
             */
            PIOMMMIOSTATSENTRY const pOldMmioStats = pVM->iom.s.paMmioStats;
            if (pOldMmioStats)
                memcpy(paMmioStats, pOldMmioStats, cOldEntries * sizeof(IOMMMIOSTATSENTRY));

            /* Publish the new table before freeing the old one. */
            pVM->iom.s.paMmioStats = paMmioStats;
            pVM->iom.s.cMmioStatsAllocation = cNewEntries;

            RTMemPageFree(pOldMmioStats, RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOSTATSENTRY), HOST_PAGE_SIZE));

            rc = VINF_SUCCESS;
        }
        else
            rc = VERR_NO_PAGE_MEMORY;
    }

    return rc;
}
181
182#endif /* VBOX_WITH_STATISTICS */
183
/**
 * Grows the MMIO registration tables.
 *
 * (Note: the old heading said "I/O port registration statistics table", which
 * was a copy-paste error - this grows the MMIO registration and lookup
 * tables.)
 *
 * @returns VBox status code.
 * @param   pVM          The cross context VM structure.
 * @param   cNewEntries  The minimum number of new entries.
 * @see     IOMR0MmioGrowRegistrationTables
 */
static int iomR3MmioGrowTable(PVM pVM, uint32_t cNewEntries)
{
    AssertReturn(cNewEntries <= _4K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);

    int rc;
    if (!SUPR3IsDriverless())
    {
        /* With a support driver present, ring-0 owns the tables; ask it to grow them. */
        rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_IOM_GROW_MMIO_REGS, cNewEntries, NULL);
        AssertLogRelRCReturn(rc, rc);
        AssertReturn(cNewEntries <= pVM->iom.s.cMmioAlloc, VERR_IOM_MMIO_IPE_2);
    }
    else
    {
        /*
         * Driverless mode: grow the ring-3 tables ourselves.
         * Validate input and state.
         */
        uint32_t const cOldEntries = pVM->iom.s.cMmioAlloc;
        AssertReturn(cNewEntries >= cOldEntries, VERR_IOM_MMIO_IPE_1);

        /*
         * Allocate the new tables.  We use a single page allocation covering both
         * the ring-3 entry table and the lookup table, each part page aligned.
         */
        uint32_t const cbRing3  = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOENTRYR3),     HOST_PAGE_SIZE);
        uint32_t const cbShared = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOLOOKUPENTRY), HOST_PAGE_SIZE);
        uint32_t const cbNew    = cbRing3 + cbShared;

        /* Use the rounded up space as best we can. */
        cNewEntries = RT_MIN(cbRing3 / sizeof(IOMMMIOENTRYR3), cbShared / sizeof(IOMMMIOLOOKUPENTRY));

        PIOMMMIOENTRYR3 const paRing3 = (PIOMMMIOENTRYR3)RTMemPageAllocZ(cbNew);
        if (paRing3)
        {
            /* The lookup table lives right after the ring-3 entry table: */
            PIOMMMIOLOOKUPENTRY const paLookup = (PIOMMMIOLOOKUPENTRY)((uintptr_t)paRing3 + cbRing3);

            /*
             * Copy over the old info and initialize the idxSelf and idxStats members.
             */
            if (pVM->iom.s.paMmioRegs != NULL)
            {
                memcpy(paRing3,  pVM->iom.s.paMmioRegs,   sizeof(paRing3[0])  * cOldEntries);
                memcpy(paLookup, pVM->iom.s.paMmioLookup, sizeof(paLookup[0]) * cOldEntries);
            }

            /* Initialize the brand new entries (everything above cOldEntries): */
            size_t i = cbRing3 / sizeof(*paRing3);
            while (i-- > cOldEntries)
            {
                paRing3[i].idxSelf  = (uint16_t)i;
                paRing3[i].idxStats = UINT16_MAX;
            }

            /*
             * Update the variables and free the old memory.
             */
            void * const pvFree = pVM->iom.s.paMmioRegs;

            pVM->iom.s.paMmioRegs   = paRing3;
            pVM->iom.s.paMmioLookup = paLookup;
            pVM->iom.s.cMmioAlloc   = cNewEntries;

            RTMemPageFree(pvFree,
                          RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOENTRYR3), HOST_PAGE_SIZE)
                          + RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOLOOKUPENTRY), HOST_PAGE_SIZE));

            rc = VINF_SUCCESS;
        }
        else
            rc = VERR_NO_PAGE_MEMORY;
    }
    return rc;
}
263
264
/**
 * Worker for PDMDEVHLPR3::pfnMmioCreateEx.
 *
 * Creates (but does not map) an MMIO region during VM creation, growing the
 * registration and statistics tables as needed and reserving a matching PGM
 * RAM range ID for it.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device instance registering the region.
 * @param   cbRegion    Size of the region in bytes (guest-page aligned,
 *                      max MM_MMIO_64_MAX).
 * @param   fFlags      IOMMMIO_FLAGS_XXX.
 * @param   pPciDev     Associated PCI device, optional.
 * @param   iPciRegion  PCI region number (paired with @a pPciDev).
 * @param   pfnWrite    Write callback, optional if @a pfnRead is given.
 * @param   pfnRead     Read callback, optional if @a pfnWrite is given.
 * @param   pfnFill     Fill callback, optional.
 * @param   pvUser      User argument for the callbacks.
 * @param   pszDesc     Description (shorter than 128 chars); the pointer is
 *                      retained, so it must stay valid for the VM's lifetime.
 * @param   phRegion    Where to return the region handle.
 */
VMMR3_INT_DECL(int) IOMR3MmioCreate(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS cbRegion, uint32_t fFlags, PPDMPCIDEV pPciDev,
                                    uint32_t iPciRegion, PFNIOMMMIONEWWRITE pfnWrite, PFNIOMMMIONEWREAD pfnRead,
                                    PFNIOMMMIONEWFILL pfnFill, void *pvUser, const char *pszDesc, PIOMMMIOHANDLE phRegion)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(phRegion, VERR_INVALID_POINTER);
    *phRegion = UINT32_MAX;
    /* Only EMT(0) during VM creation may register regions: */
    PVMCPU const pVCpu = VMMGetCpu(pVM);
    AssertReturn(pVCpu && pVCpu->idCpu == 0, VERR_VM_THREAD_NOT_EMT);
    VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
    AssertReturn(!pVM->iom.s.fMmioFrozen, VERR_WRONG_ORDER);

    AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);

    AssertMsgReturn(cbRegion > 0 && cbRegion <= MM_MMIO_64_MAX, ("cbRegion=%#RGp (max %#RGp)\n", cbRegion, MM_MMIO_64_MAX),
                    VERR_OUT_OF_RANGE);
    AssertMsgReturn(!(cbRegion & GUEST_PAGE_OFFSET_MASK), ("cbRegion=%#RGp\n", cbRegion), VERR_UNSUPPORTED_ALIGNMENT);

    AssertMsgReturn(   !(fFlags & ~IOMMMIO_FLAGS_VALID_MASK)
                    && (fFlags & IOMMMIO_FLAGS_READ_MODE)  <= IOMMMIO_FLAGS_READ_DWORD_QWORD
                    && (fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD,
                    ("%#x\n", fFlags),
                    VERR_INVALID_FLAGS);

    /* At least one of read/write must be supplied: */
    AssertReturn(pfnWrite || pfnRead, VERR_INVALID_PARAMETER);
    AssertPtrNullReturn(pfnWrite, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnRead, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnFill, VERR_INVALID_POINTER);

    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    AssertReturn(*pszDesc != '\0', VERR_INVALID_POINTER);
    AssertReturn(strlen(pszDesc) < 128, VERR_INVALID_POINTER);

    /*
     * Ensure that we've got table space for it.
     */
#ifndef VBOX_WITH_STATISTICS
    uint16_t const idxStats        = UINT16_MAX;
#else
    uint32_t const idxStats        = pVM->iom.s.cMmioStats;
    uint32_t const cNewMmioStats   = idxStats + 1;
    AssertReturn(cNewMmioStats <= _64K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);
    if (cNewMmioStats > pVM->iom.s.cMmioStatsAllocation)
    {
        int rc = iomR3MmioGrowStatisticsTable(pVM, cNewMmioStats);
        AssertRCReturn(rc, rc);
        /* Growing must not have raced another registration: */
        AssertReturn(idxStats == pVM->iom.s.cMmioStats, VERR_IOM_MMIO_IPE_1);
    }
#endif

    uint32_t idx = pVM->iom.s.cMmioRegs;
    if (idx >= pVM->iom.s.cMmioAlloc)
    {
        int rc = iomR3MmioGrowTable(pVM, pVM->iom.s.cMmioAlloc + 1);
        AssertRCReturn(rc, rc);
        AssertReturn(idx == pVM->iom.s.cMmioRegs, VERR_IOM_MMIO_IPE_1);
    }

    /*
     * Create a matching ad-hoc RAM range for this MMIO region.
     */
    uint16_t idRamRange = 0;
    int rc = PGMR3PhysMmioRegister(pVM, pVCpu, cbRegion, pszDesc, &idRamRange);
    AssertRCReturn(rc, rc);

    /*
     * Enter it.  The region starts out unmapped (GCPhysMapping = NIL).
     */
    pVM->iom.s.paMmioRegs[idx].cbRegion         = cbRegion;
    pVM->iom.s.paMmioRegs[idx].GCPhysMapping    = NIL_RTGCPHYS;
    pVM->iom.s.paMmioRegs[idx].pvUser           = pvUser;
    pVM->iom.s.paMmioRegs[idx].pDevIns          = pDevIns;
    pVM->iom.s.paMmioRegs[idx].pfnWriteCallback = pfnWrite;
    pVM->iom.s.paMmioRegs[idx].pfnReadCallback  = pfnRead;
    pVM->iom.s.paMmioRegs[idx].pfnFillCallback  = pfnFill;
    pVM->iom.s.paMmioRegs[idx].pszDesc          = pszDesc;
    pVM->iom.s.paMmioRegs[idx].pPciDev          = pPciDev;
    pVM->iom.s.paMmioRegs[idx].iPciRegion       = iPciRegion;
    pVM->iom.s.paMmioRegs[idx].idxStats         = (uint16_t)idxStats;
    pVM->iom.s.paMmioRegs[idx].fMapped          = false;
    pVM->iom.s.paMmioRegs[idx].fFlags           = fFlags;
    pVM->iom.s.paMmioRegs[idx].idRamRange       = idRamRange;
    pVM->iom.s.paMmioRegs[idx].idxSelf          = idx;

    /* Commit the counts only after the entry is fully initialized: */
    pVM->iom.s.cMmioRegs = idx + 1;
#ifdef VBOX_WITH_STATISTICS
    pVM->iom.s.cMmioStats = cNewMmioStats;
#endif
    *phRegion = idx;
    return VINF_SUCCESS;
}
361
362
/**
 * Worker for PDMDEVHLPR3::pfnMmioMap.
 *
 * Maps a previously created MMIO region at @a GCPhys: registers it with PGM
 * and inserts it into the address-sorted lookup table.
 *
 * @returns VBox status code.
 * @param   pVM      The cross context VM structure.
 * @param   pVCpu    The cross context virtual CPU structure of the calling EMT.
 * @param   pDevIns  The device instance owning @a hRegion.
 * @param   hRegion  The handle of the region to map.
 * @param   GCPhys   The guest physical address to map it at (guest-page
 *                   aligned).
 */
VMMR3_INT_DECL(int) IOMR3MmioMap(PVM pVM, PVMCPU pVCpu, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS GCPhys)
{
    /*
     * Validate input and state.
     */
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
    AssertReturn(hRegion < pVM->iom.s.cMmioRegs, VERR_IOM_INVALID_MMIO_HANDLE);
    PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
    AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);

    RTGCPHYS const cbRegion = pRegEntry->cbRegion;
    AssertMsgReturn(cbRegion > 0 && cbRegion <= MM_MMIO_64_MAX, ("cbRegion=%RGp\n", cbRegion), VERR_IOM_MMIO_IPE_1);
    RTGCPHYS const GCPhysLast = GCPhys + cbRegion - 1;

    AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK),
                          ("Misaligned! GCPhys=%RGp LB %RGp %s (%s[#%u])\n",
                           GCPhys, cbRegion, pRegEntry->pszDesc, pDevIns->pReg->szName, pDevIns->iInstance),
                          VERR_IOM_INVALID_MMIO_RANGE);
    AssertLogRelMsgReturn(GCPhysLast > GCPhys,
                          ("Wrapped! GCPhys=%RGp LB %RGp %s (%s[#%u])\n",
                           GCPhys, cbRegion, pRegEntry->pszDesc, pDevIns->pReg->szName, pDevIns->iInstance),
                          VERR_IOM_INVALID_MMIO_RANGE);

    /*
     * Do the mapping.
     */
    int rc = VINF_SUCCESS;
    IOM_LOCK_EXCL(pVM);

    if (!pRegEntry->fMapped)
    {
        uint32_t const cEntries = RT_MIN(pVM->iom.s.cMmioLookupEntries, pVM->iom.s.cMmioRegs);
        Assert(pVM->iom.s.cMmioLookupEntries == cEntries);

        PIOMMMIOLOOKUPENTRY paEntries = pVM->iom.s.paMmioLookup;
        PIOMMMIOLOOKUPENTRY pEntry;
        if (cEntries > 0)
        {
            /* Binary search for the insertion point; the lookup table is kept
               sorted by guest physical address.  The search ends either by
               finding the exact insert position (insert + break) or by
               detecting an overlap (conflict error). */
            uint32_t iFirst = 0;
            uint32_t iEnd   = cEntries;
            uint32_t i      = cEntries / 2;
            for (;;)
            {
                pEntry = &paEntries[i];
                if (pEntry->GCPhysLast < GCPhys)
                {
                    /* New region lies entirely above entry i: search upper half. */
                    i += 1;
                    if (i < iEnd)
                        iFirst = i;
                    else
                    {
                        /* Register with PGM before we shuffle the array: */
                        ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
                        rc = PGMR3PhysMmioMap(pVM, pVCpu, GCPhys, cbRegion, pRegEntry->idRamRange,
                                              pVM->iom.s.hNewMmioHandlerType, hRegion);
                        AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);

                        /* Insert after the entry we just considered: */
                        pEntry += 1;
                        if (i < cEntries)
                            memmove(pEntry + 1, pEntry, sizeof(*pEntry) * (cEntries - i));
                        break;
                    }
                }
                else if (pEntry->GCPhysFirst > GCPhysLast)
                {
                    /* New region lies entirely below entry i: search lower half. */
                    if (i > iFirst)
                        iEnd = i;
                    else
                    {
                        /* Register with PGM before we shuffle the array: */
                        ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
                        rc = PGMR3PhysMmioMap(pVM, pVCpu, GCPhys, cbRegion, pRegEntry->idRamRange,
                                              pVM->iom.s.hNewMmioHandlerType, hRegion);
                        AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);

                        /* Insert at the entry we just considered: */
                        if (i < cEntries)
                            memmove(pEntry + 1, pEntry, sizeof(*pEntry) * (cEntries - i));
                        break;
                    }
                }
                else
                {
                    /* Oops! We've got a conflict. */
                    AssertLogRelMsgFailed(("%RGp..%RGp (%s) conflicts with existing mapping %RGp..%RGp (%s)\n",
                                           GCPhys, GCPhysLast, pRegEntry->pszDesc,
                                           pEntry->GCPhysFirst, pEntry->GCPhysLast, pVM->iom.s.paMmioRegs[pEntry->idx].pszDesc));
                    IOM_UNLOCK_EXCL(pVM);
                    return VERR_IOM_MMIO_RANGE_CONFLICT;
                }

                i = iFirst + (iEnd - iFirst) / 2;
            }
        }
        else
        {
            /* First entry in the lookup table: */
            ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
            rc = PGMR3PhysMmioMap(pVM, pVCpu, GCPhys, cbRegion, pRegEntry->idRamRange,
                                  pVM->iom.s.hNewMmioHandlerType, hRegion);
            AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);

            pEntry = paEntries;
        }

        /*
         * Fill in the entry and bump the table size.
         */
        pRegEntry->fMapped = true;
        pEntry->idx = hRegion;
        pEntry->GCPhysFirst = GCPhys;
        pEntry->GCPhysLast = GCPhysLast;
        pVM->iom.s.cMmioLookupEntries = cEntries + 1;

#ifdef VBOX_WITH_STATISTICS
        /* Don't register stats here when we're creating the VM as the
           statistics table may still be reallocated. */
        if (pVM->enmVMState >= VMSTATE_CREATED)
            iomR3MmioRegStats(pVM, pRegEntry);
#endif

#ifdef VBOX_STRICT
        /*
         * Assert table sanity: entries well-formed, strictly ascending, and
         * referencing valid registrations.
         */
        AssertMsg(paEntries[0].GCPhysLast >= paEntries[0].GCPhysFirst, ("%RGp %RGp\n", paEntries[0].GCPhysLast, paEntries[0].GCPhysFirst));
        AssertMsg(paEntries[0].idx < pVM->iom.s.cMmioRegs, ("%#x %#x\n", paEntries[0].idx, pVM->iom.s.cMmioRegs));

        RTGCPHYS GCPhysPrev = paEntries[0].GCPhysLast;
        for (size_t i = 1; i <= cEntries; i++)
        {
            AssertMsg(paEntries[i].GCPhysLast >= paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, paEntries[i].GCPhysLast, paEntries[i].GCPhysFirst));
            AssertMsg(paEntries[i].idx < pVM->iom.s.cMmioRegs, ("%u: %#x %#x\n", i, paEntries[i].idx, pVM->iom.s.cMmioRegs));
            AssertMsg(GCPhysPrev < paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, GCPhysPrev, paEntries[i].GCPhysFirst));
            GCPhysPrev = paEntries[i].GCPhysLast;
        }
#endif
    }
    else
    {
        AssertFailed();
        rc = VERR_IOM_MMIO_REGION_ALREADY_MAPPED;
    }

    IOM_UNLOCK_EXCL(pVM);
    return rc;
}
515
516
517/**
518 * Worker for PDMDEVHLPR3::pfnMmioUnmap.
519 */
520VMMR3_INT_DECL(int) IOMR3MmioUnmap(PVM pVM, PVMCPU pVCpu, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
521{
522 /*
523 * Validate input and state.
524 */
525 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
526 AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
527 AssertReturn(hRegion < pVM->iom.s.cMmioRegs, VERR_IOM_INVALID_MMIO_HANDLE);
528 PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
529 AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);
530
531 /*
532 * Do the mapping.
533 */
534 int rc;
535 IOM_LOCK_EXCL(pVM);
536
537 if (pRegEntry->fMapped)
538 {
539 RTGCPHYS const GCPhys = pRegEntry->GCPhysMapping;
540 RTGCPHYS const GCPhysLast = GCPhys + pRegEntry->cbRegion - 1;
541 uint32_t const cEntries = RT_MIN(pVM->iom.s.cMmioLookupEntries, pVM->iom.s.cMmioRegs);
542 Assert(pVM->iom.s.cMmioLookupEntries == cEntries);
543 Assert(cEntries > 0);
544
545 PIOMMMIOLOOKUPENTRY paEntries = pVM->iom.s.paMmioLookup;
546 uint32_t iFirst = 0;
547 uint32_t iEnd = cEntries;
548 uint32_t i = cEntries / 2;
549 for (;;)
550 {
551 PIOMMMIOLOOKUPENTRY pEntry = &paEntries[i];
552 if (pEntry->GCPhysLast < GCPhys)
553 {
554 i += 1;
555 if (i < iEnd)
556 iFirst = i;
557 else
558 {
559 rc = VERR_IOM_MMIO_IPE_1;
560 AssertLogRelMsgFailedBreak(("%RGp..%RGp (%s) not found!\n", GCPhys, GCPhysLast, pRegEntry->pszDesc));
561 }
562 }
563 else if (pEntry->GCPhysFirst > GCPhysLast)
564 {
565 if (i > iFirst)
566 iEnd = i;
567 else
568 {
569 rc = VERR_IOM_MMIO_IPE_1;
570 AssertLogRelMsgFailedBreak(("%RGp..%RGp (%s) not found!\n", GCPhys, GCPhysLast, pRegEntry->pszDesc));
571 }
572 }
573 else if (pEntry->idx == hRegion)
574 {
575 Assert(pEntry->GCPhysFirst == GCPhys);
576 Assert(pEntry->GCPhysLast == GCPhysLast);
577#ifdef VBOX_WITH_STATISTICS
578 iomR3MmioDeregStats(pVM, pRegEntry, GCPhys);
579#endif
580 if (i + 1 < cEntries)
581 memmove(pEntry, pEntry + 1, sizeof(*pEntry) * (cEntries - i - 1));
582 pVM->iom.s.cMmioLookupEntries = cEntries - 1;
583
584 rc = PGMR3PhysMmioUnmap(pVM, pVCpu, GCPhys, pRegEntry->cbRegion, pRegEntry->idRamRange);
585 AssertRC(rc);
586
587 pRegEntry->fMapped = false;
588 ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS);
589 break;
590 }
591 else
592 {
593 AssertLogRelMsgFailed(("Lookig for %RGp..%RGp (%s), found %RGp..%RGp (%s) instead!\n",
594 GCPhys, GCPhysLast, pRegEntry->pszDesc,
595 pEntry->GCPhysFirst, pEntry->GCPhysLast, pVM->iom.s.paMmioRegs[pEntry->idx].pszDesc));
596 rc = VERR_IOM_MMIO_IPE_1;
597 break;
598 }
599
600 i = iFirst + (iEnd - iFirst) / 2;
601 }
602
603#ifdef VBOX_STRICT
604 /*
605 * Assert table sanity.
606 */
607 AssertMsg(paEntries[0].GCPhysLast >= paEntries[0].GCPhysFirst, ("%RGp %RGp\n", paEntries[0].GCPhysLast, paEntries[0].GCPhysFirst));
608 AssertMsg(paEntries[0].idx < pVM->iom.s.cMmioRegs, ("%#x %#x\n", paEntries[0].idx, pVM->iom.s.cMmioRegs));
609
610 RTGCPHYS GCPhysPrev = paEntries[0].GCPhysLast;
611 for (i = 1; i < cEntries - 1; i++)
612 {
613 AssertMsg(paEntries[i].GCPhysLast >= paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, paEntries[i].GCPhysLast, paEntries[i].GCPhysFirst));
614 AssertMsg(paEntries[i].idx < pVM->iom.s.cMmioRegs, ("%u: %#x %#x\n", i, paEntries[i].idx, pVM->iom.s.cMmioRegs));
615 AssertMsg(GCPhysPrev < paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, GCPhysPrev, paEntries[i].GCPhysFirst));
616 GCPhysPrev = paEntries[i].GCPhysLast;
617 }
618#endif
619 }
620 else
621 {
622 AssertFailed();
623 rc = VERR_IOM_MMIO_REGION_NOT_MAPPED;
624 }
625
626 IOM_UNLOCK_EXCL(pVM);
627 return rc;
628}
629
630
/**
 * Reduces the size of an MMIO region - not implemented.
 *
 * @returns VERR_NOT_IMPLEMENTED (always; asserts in strict builds).
 * @param   pVM       The cross context VM structure (unused).
 * @param   pDevIns   The device instance owning @a hRegion (unused).
 * @param   hRegion   The handle of the region to reduce (unused).
 * @param   cbRegion  The new, smaller region size (unused).
 *
 * @note    Presumably the worker for PDMDEVHLPR3::pfnMmioReduce, matching the
 *          sibling workers above - confirm against the PDM device helpers.
 */
VMMR3_INT_DECL(int) IOMR3MmioReduce(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS cbRegion)
{
    RT_NOREF(pVM, pDevIns, hRegion, cbRegion);
    AssertFailed();
    return VERR_NOT_IMPLEMENTED;
}
637
638
639/**
640 * Validates @a hRegion, making sure it belongs to @a pDevIns.
641 *
642 * @returns VBox status code.
643 * @param pVM The cross context VM structure.
644 * @param pDevIns The device which allegedly owns @a hRegion.
645 * @param hRegion The handle to validate.
646 */
647VMMR3_INT_DECL(int) IOMR3MmioValidateHandle(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
648{
649 AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
650 AssertReturn(hRegion < RT_MIN(pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc), VERR_IOM_INVALID_MMIO_HANDLE);
651 PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
652 AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);
653 return VINF_SUCCESS;
654}
655
656
657/**
658 * Gets the mapping address of MMIO region @a hRegion.
659 *
660 * @returns Mapping address if mapped, NIL_RTGCPHYS if not mapped or invalid
661 * input.
662 * @param pVM The cross context VM structure.
663 * @param pDevIns The device which allegedly owns @a hRegion.
664 * @param hRegion The handle to validate.
665 */
666VMMR3_INT_DECL(RTGCPHYS) IOMR3MmioGetMappingAddress(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
667{
668 AssertPtrReturn(pDevIns, NIL_RTGCPHYS);
669 AssertReturn(hRegion < RT_MIN(pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc), NIL_RTGCPHYS);
670 PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
671 AssertReturn(pRegEntry->pDevIns == pDevIns, NIL_RTGCPHYS);
672 return pRegEntry->GCPhysMapping;
673}
674
675
676/**
677 * Display all registered MMIO ranges.
678 *
679 * @param pVM The cross context VM structure.
680 * @param pHlp The info helpers.
681 * @param pszArgs Arguments, ignored.
682 */
683DECLCALLBACK(void) iomR3MmioInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
684{
685 RT_NOREF(pszArgs);
686
687 /* No locking needed here as registerations are only happening during VMSTATE_CREATING. */
688 pHlp->pfnPrintf(pHlp,
689 "MMIO registrations: %u (%u allocated)\n"
690 " ## Ctx %.*s %.*s PCI Description\n",
691 pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc,
692 sizeof(RTGCPHYS) * 2, "Size",
693 sizeof(RTGCPHYS) * 2 * 2 + 1, "Mapping");
694 PIOMMMIOENTRYR3 paRegs = pVM->iom.s.paMmioRegs;
695 for (uint32_t i = 0; i < pVM->iom.s.cMmioRegs; i++)
696 {
697 const char * const pszRing = paRegs[i].fRing0 ? paRegs[i].fRawMode ? "+0+C" : "+0 "
698 : paRegs[i].fRawMode ? "+C " : " ";
699 if (paRegs[i].fMapped && paRegs[i].pPciDev)
700 pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %RGp-%RGp pci%u/%u %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
701 paRegs[i].GCPhysMapping, paRegs[i].GCPhysMapping + paRegs[i].cbRegion - 1,
702 paRegs[i].pPciDev->idxSubDev, paRegs[i].iPciRegion, paRegs[i].pszDesc);
703 else if (paRegs[i].fMapped && !paRegs[i].pPciDev)
704 pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %RGp-%RGp %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
705 paRegs[i].GCPhysMapping, paRegs[i].GCPhysMapping + paRegs[i].cbRegion - 1, paRegs[i].pszDesc);
706 else if (paRegs[i].pPciDev)
707 pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %.*s pci%u/%u %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
708 sizeof(RTGCPHYS) * 2, "unmapped", paRegs[i].pPciDev->idxSubDev, paRegs[i].iPciRegion, paRegs[i].pszDesc);
709 else
710 pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %.*s %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
711 sizeof(RTGCPHYS) * 2, "unmapped", paRegs[i].pszDesc);
712 }
713}
714
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette