VirtualBox

source: vbox/trunk/src/VBox/VMM/VMM.cpp@4811

Last change on this file since 4811 was 4811, checked in by vboxsync, 18 years ago

Split VMMR0Entry into VMMR0EntryInt, VMMR0EntryFast and VMMR0EntryEx. This will prevent the SUPCallVMMR0Ex path from causing harm and messing up the paths that have to be optimized.

/* $Id: VMM.cpp 4811 2007-09-14 17:53:56Z vboxsync $ */
/** @file
 * VMM - The Virtual Machine Monitor Core.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 */
//#define NO_SUPCALLR0VMM

/** @page pg_vmm     VMM - The Virtual Machine Monitor
 *
 * !Revise this! It's already incorrect!
 *
 * The Virtual Machine Monitor (VMM) is the core of the virtual machine. It
 * manages the alternate reality; controlling the virtualization, managing
 * resources, tracking CPU state, its resources and so on...
 *
 * We will split the VMM into smaller entities:
 *
 *      - Virtual Machine Core Monitor (VMCM), whose purpose is to
 *        provide ring and world switching, including routing
 *        interrupts to the host OS and traps to the appropriate trap
 *        handlers. It will implement an external interface for
 *        managing trap handlers.
 *
 *      - CPU Monitor (CM), tracking the state of the CPU (in the alternate
 *        reality) and implementing external interfaces to read and change
 *        the state.
 *
 *      - Memory Monitor (MM), whose purpose is to virtualize physical
 *        pages, segment descriptor tables, interrupt descriptor tables, task
 *        segments, and keep track of all memory, providing external interfaces
 *        to access content and map pages. (Internally split into smaller entities!)
 *
 *      - IO Monitor (IOM), which virtualizes in and out I/O operations. It
 *        interacts with the MM to implement memory mapped I/O. External
 *        interfaces for adding and removing I/O ranges are implemented.
 *
 *      - External Interrupt Monitor (EIM), whose purpose is to manage
 *        interrupts generated by virtual devices. This monitor provides
 *        an interface for raising interrupts which is accessible at any
 *        time and from all threads.
 *        <p>
 *        A subentity of the EIM is the virtual Programmable Interrupt
 *        Controller Device (VPICD), and perhaps a virtual I/O Advanced
 *        Programmable Interrupt Controller Device (VAPICD).
 *
 *      - Direct Memory Access Monitor (DMAM), whose purpose is to support
 *        virtual devices using the DMA controller. Its interfaces must, like
 *        the EIM interfaces, be independent and threadable.
 *        <p>
 *        A subentity of the DMAM is a virtual DMA Controller Device (VDMACD).
 *
 *
 * Entities working on a higher level:
 *
 *      - Device Manager (DM), which is a support facility for virtualized
 *        hardware. This provides generic facilities for efficient device
 *        virtualization. It will manage device attaching and detaching,
 *        cooperating with EIM and IOM.
 *
 *      - Debugger Facility (DBGF) provides the basic features for
 *        debugging the alternate reality execution.
 *
 *
 *
 * @section pg_vmm_s_use_cases     Use Cases
 *
 * @subsection pg_vmm_s_use_case_boot     Bootstrap
 *
 *      - Basic Init:
 *          - Init SUPDRV.
 *
 *      - Init Virtual Machine Instance:
 *          - Load settings.
 *          - Check resource requirements (memory, com, stuff).
 *
 *      - Init Host Ring 3 part:
 *          - Init Core code.
 *          - Load Pluggable Components.
 *          - Init Pluggable Components.
 *
 *      - Init Host Ring 0 part:
 *          - Load Core (core = core components like VMM, RMI, CA, and so on) code.
 *          - Init Core code.
 *          - Load Pluggable Component code.
 *          - Init Pluggable Component code.
 *
 *      - Allocate first chunk of memory and pin it down. This block of memory
 *        will fit the following pieces:
 *          - Virtual Machine Instance data. (Config, CPU state, VMM state, ++)
 *            (This is available from everywhere (at different addresses though)).
 *          - VMM Guest Context code.
 *          - Pluggable devices Guest Context code.
 *          - Page tables (directory and everything) for the VMM Guest.
 *
 *      - Setup Guest (Ring 0) part:
 *          - Setup initial page tables (i.e. directory all the stuff).
 *          - Load Core Guest Context code.
 *          - Load Pluggable Devices Guest Context code.
 *
 *
 */
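
/*
 * Illustrative sketch only, grounded in the code later in this file: both
 * VMMR3InitR0() and VMMR3InitGC() below follow one common pattern - call into
 * the other context, flush its logger scratch buffer, and service any
 * VINF_VMM_CALL_HOST requests until a final status code comes back:
 *
 *      for (;;)
 *      {
 *          rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_VMMR0_INIT, (void *)VBOX_VERSION);
 *          if (rc != VINF_VMM_CALL_HOST)
 *              break;
 *          rc = vmmR3ServiceCallHostRequest(pVM);
 *          if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
 *              break;
 *      }
 */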


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/vmm.h>
#include <VBox/vmapi.h>
#include <VBox/pgm.h>
#include <VBox/cfgm.h>
#include <VBox/pdmqueue.h>
#include <VBox/pdmapi.h>
#include <VBox/cpum.h>
#include <VBox/mm.h>
#include <VBox/iom.h>
#include <VBox/trpm.h>
#include <VBox/selm.h>
#include <VBox/em.h>
#include <VBox/sup.h>
#include <VBox/dbgf.h>
#include <VBox/csam.h>
#include <VBox/patm.h>
#include <VBox/rem.h>
#include <VBox/ssm.h>
#include <VBox/tm.h>
#include "VMMInternal.h"
#include "VMMSwitcher/VMMSwitcher.h"
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/version.h>
#include <VBox/x86.h>
#include <VBox/hwaccm.h>
#include <iprt/assert.h>
#include <iprt/alloc.h>
#include <iprt/asm.h>
#include <iprt/time.h>
#include <iprt/stream.h>
#include <iprt/string.h>
#include <iprt/stdarg.h>
#include <iprt/ctype.h>



/** The saved state version. */
#define VMM_SAVED_STATE_VERSION     3


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int)    vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int)    vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
static DECLCALLBACK(void)   vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
static int                  vmmR3ServiceCallHostRequest(PVM pVM);
static DECLCALLBACK(void)   vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Array of switcher definitions.
 * The type and index shall match!
 */
static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
{
    NULL, /* invalid entry */
#ifndef RT_ARCH_AMD64
    &vmmR3Switcher32BitTo32Bit_Def,
    &vmmR3Switcher32BitToPAE_Def,
    NULL,   //&vmmR3Switcher32BitToAMD64_Def,
    &vmmR3SwitcherPAETo32Bit_Def,
    &vmmR3SwitcherPAEToPAE_Def,
    NULL,   //&vmmR3SwitcherPAEToAMD64_Def,
# ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
    &vmmR3SwitcherAMD64ToPAE_Def,
# else
    NULL,   //&vmmR3SwitcherAMD64ToPAE_Def,
# endif
    NULL    //&vmmR3SwitcherAMD64ToAMD64_Def,
#else
    NULL,   //&vmmR3Switcher32BitTo32Bit_Def,
    NULL,   //&vmmR3Switcher32BitToPAE_Def,
    NULL,   //&vmmR3Switcher32BitToAMD64_Def,
    NULL,   //&vmmR3SwitcherPAETo32Bit_Def,
    NULL,   //&vmmR3SwitcherPAEToPAE_Def,
    NULL,   //&vmmR3SwitcherPAEToAMD64_Def,
    &vmmR3SwitcherAMD64ToPAE_Def,
    NULL    //&vmmR3SwitcherAMD64ToAMD64_Def,
#endif
};
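/* The table above is indexed by VMMSWITCHER_* enumeration values;
   vmmR3InitCoreCode() asserts that each entry's enmType equals its index. */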


/**
 * Initializes the core code.
 *
 * This is per-VM core code which might need fixups and, for ease of use,
 * is placed on linearly contiguous backing.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to VM structure.
 */
static int vmmR3InitCoreCode(PVM pVM)
{
    /*
     * Calc the size.
     */
    unsigned cbCoreCode = 0;
    for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
    {
        pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
        PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
        if (pSwitcher)
        {
            AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
            cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
        }
    }

    /*
     * Allocate contiguous pages for switchers and deal with
     * conflicts in the intermediate mapping of the code.
     */
    pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
    pVM->vmm.s.pvHCCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvHCCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
    int rc = VERR_NO_MEMORY;
    if (pVM->vmm.s.pvHCCoreCodeR3)
    {
        rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
        if (rc == VERR_PGM_MAPPINGS_FIX_CONFLICT)
        {
            /* try more allocations. */
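            /* Each conflicting allocation is parked in aBadTries below so the
               allocator cannot hand back the same physical range; we retry a
               bounded number of times and free all failed attempts afterwards. */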
            struct
            {
                RTR0PTR  pvR0;
                void    *pvR3;
                RTHCPHYS HCPhys;
                RTUINT   cb;
            } aBadTries[16];
            unsigned i = 0;
            do
            {
                aBadTries[i].pvR3   = pVM->vmm.s.pvHCCoreCodeR3;
                aBadTries[i].pvR0   = pVM->vmm.s.pvHCCoreCodeR0;
                aBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
                i++;
                pVM->vmm.s.pvHCCoreCodeR0 = NIL_RTR0PTR;
                pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
                pVM->vmm.s.pvHCCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvHCCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
                if (!pVM->vmm.s.pvHCCoreCodeR3)
                    break;
                rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
            } while (   rc == VERR_PGM_MAPPINGS_FIX_CONFLICT
                     && i < ELEMENTS(aBadTries) - 1);

            /* cleanup */
            if (VBOX_FAILURE(rc))
            {
                aBadTries[i].pvR3   = pVM->vmm.s.pvHCCoreCodeR3;
                aBadTries[i].pvR0   = pVM->vmm.s.pvHCCoreCodeR0;
                aBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
                aBadTries[i].cb     = pVM->vmm.s.cbCoreCode;
                i++;
                LogRel(("Failed to allocate and map core code: rc=%Vrc\n", rc));
            }
            while (i-- > 0)
            {
                LogRel(("Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%VHp\n",
                        i, aBadTries[i].pvR3, aBadTries[i].pvR0, aBadTries[i].HCPhys));
                SUPContFree(aBadTries[i].pvR3, aBadTries[i].cb >> PAGE_SHIFT);
            }
        }
    }
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Copy the code.
         */
        for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
        {
            PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
            if (pSwitcher)
                memcpy((uint8_t *)pVM->vmm.s.pvHCCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
                       pSwitcher->pvCode, pSwitcher->cbCode);
        }

        /*
         * Map the code into the GC address space.
         */
        rc = MMR3HyperMapHCPhys(pVM, pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, "Core Code", &pVM->vmm.s.pvGCCoreCode);
        if (VBOX_SUCCESS(rc))
        {
            MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
            LogRel(("CoreCode: R3=%VHv R0=%VHv GC=%VGv Phys=%VHp cb=%#x\n",
                    pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.pvGCCoreCode, pVM->vmm.s.HCPhysCoreCode, pVM->vmm.s.cbCoreCode));

            /*
             * Finally, PGM has probably already selected a switcher, but we
             * need to get hold of the addresses, so we'll reselect it.
             * This may legally fail, so we ignore the rc.
             */
            VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
            return rc;
        }

        /* shit */
        AssertMsgFailed(("PGMR3Map(,%VGv, %VGp, %#x, 0) failed with rc=%Vrc\n", pVM->vmm.s.pvGCCoreCode, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
        SUPContFree(pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.cbCoreCode >> PAGE_SHIFT);
    }
    else
        VMSetError(pVM, rc, RT_SRC_POS,
                   N_("Failed to allocate %d bytes of contiguous memory for the world switcher code."),
                   cbCoreCode);

    pVM->vmm.s.pvHCCoreCodeR3 = NULL;
    pVM->vmm.s.pvHCCoreCodeR0 = NIL_RTR0PTR;
    pVM->vmm.s.pvGCCoreCode = 0;
    return rc;
}


/**
 * Initializes the VMM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) VMMR3Init(PVM pVM)
{
    LogFlow(("VMMR3Init\n"));

    /*
     * Assert alignment, sizes and order.
     */
    AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
    AssertMsg(sizeof(pVM->vmm.padding) >= sizeof(pVM->vmm.s),
              ("pVM->vmm.padding is too small! vmm.padding %d while vmm.s is %d\n",
               sizeof(pVM->vmm.padding), sizeof(pVM->vmm.s)));

    /*
     * Init basic VM VMM members.
     */
    pVM->vmm.s.offVM = RT_OFFSETOF(VM, vmm);
    int rc = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies);
    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        pVM->vmm.s.cYieldEveryMillies = 23; /* Value arrived at after experimenting with the grub boot prompt. */
        //pVM->vmm.s.cYieldEveryMillies = 8; //debugging
    else
        AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Vrc\n", rc), rc);

    /* GC switchers are enabled by default. Turned off by HWACCM. */
    pVM->vmm.s.fSwitcherDisabled = false;

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
                               NULL, vmmR3Save, NULL,
                               NULL, vmmR3Load, NULL);
    if (VBOX_FAILURE(rc))
        return rc;

    /*
     * Register the Ring-0 VM handle with the session for fast ioctl calls.
     */
    rc = SUPSetVMForFastIOCtl(pVM->pVMR0);
    if (VBOX_FAILURE(rc))
        return rc;

    /*
     * Init core code.
     */
    rc = vmmR3InitCoreCode(pVM);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Allocate & init VMM GC stack.
         * The stack pages are also used by the VMM R0 when VMMR0CallHost is invoked.
         * (The page protection is modified during R3 init completion.)
         */
#ifdef VBOX_STRICT_VMM_STACK
        rc = MMHyperAlloc(pVM, VMM_STACK_SIZE + PAGE_SIZE + PAGE_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbHCStack);
#else
        rc = MMHyperAlloc(pVM, VMM_STACK_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbHCStack);
#endif
        if (VBOX_SUCCESS(rc))
        {
            /* Set HC and GC stack pointers to top of stack. */
            pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack = (RTR0PTR)pVM->vmm.s.pbHCStack;
            pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack);
            pVM->vmm.s.pbGCStackBottom = pVM->vmm.s.pbGCStack + VMM_STACK_SIZE;
            AssertRelease(pVM->vmm.s.pbGCStack);

            /* Set hypervisor esp. */
            CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStack);

            /*
             * Allocate GC & R0 Logger instances (they are finalized in the relocator).
             */
#ifdef LOG_ENABLED
            PRTLOGGER pLogger = RTLogDefaultInstance();
            if (pLogger)
            {
                pVM->vmm.s.cbLoggerGC = RT_OFFSETOF(RTLOGGERGC, afGroups[pLogger->cGroups]);
                rc = MMHyperAlloc(pVM, pVM->vmm.s.cbLoggerGC, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pLoggerHC);
                if (VBOX_SUCCESS(rc))
                {
                    pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);

/*
 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup), so
 * you have to sign up here by adding your defined(DEBUG_<userid>) to the #if.
 *
 * If you want to log in non-debug modes, you'll have to remember to change SUPDRVShared.c
 * to not stub all the log functions.
 *
 * You might also wish to enable the AssertMsg1/2 overrides in VMMR0.cpp when enabling this.
 */
# if defined(DEBUG_sandervl) || defined(DEBUG_frank)
                    rc = MMHyperAlloc(pVM, RT_OFFSETOF(VMMR0LOGGER, Logger.afGroups[pLogger->cGroups]),
                                      0, MM_TAG_VMM, (void **)&pVM->vmm.s.pR0Logger);
                    if (VBOX_SUCCESS(rc))
                    {
                        pVM->vmm.s.pR0Logger->pVM = pVM;
                        //pVM->vmm.s.pR0Logger->fCreated = false;
                        pVM->vmm.s.pR0Logger->cbLogger = RT_OFFSETOF(RTLOGGER, afGroups[pLogger->cGroups]);
                    }
# endif
                }
            }
#endif /* LOG_ENABLED */

#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
            /*
             * Allocate GC Release Logger instances (finalized in the relocator).
             */
            if (VBOX_SUCCESS(rc))
            {
                PRTLOGGER pRelLogger = RTLogRelDefaultInstance();
                if (pRelLogger)
                {
                    pVM->vmm.s.cbRelLoggerGC = RT_OFFSETOF(RTLOGGERGC, afGroups[pRelLogger->cGroups]);
                    rc = MMHyperAlloc(pVM, pVM->vmm.s.cbRelLoggerGC, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRelLoggerHC);
                    if (VBOX_SUCCESS(rc))
                        pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
                }
            }
#endif /* VBOX_WITH_GC_AND_R0_RELEASE_LOG */

#ifdef VBOX_WITH_NMI
            /*
             * Allocate mapping for the host APIC.
             */
            if (VBOX_SUCCESS(rc))
            {
                rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
                AssertRC(rc);
            }
#endif
            if (VBOX_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pVM->vmm.s.CritSectVMLock);
                if (VBOX_SUCCESS(rc))
                {
                    /*
                     * Debug info.
                     */
                    DBGFR3InfoRegisterInternal(pVM, "ff", "Displays the current Forced actions Flags.", vmmR3InfoFF);

                    /*
                     * Statistics.
                     */
                    STAM_REG(pVM, &pVM->vmm.s.StatRunGC,                    STAMTYPE_COUNTER, "/VMM/RunGC",                     STAMUNIT_OCCURENCES, "Number of context switches.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetNormal,              STAMTYPE_COUNTER, "/VMM/GCRet/Normal",              STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterrupt,           STAMTYPE_COUNTER, "/VMM/GCRet/Interrupt",           STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterruptHyper,      STAMTYPE_COUNTER, "/VMM/GCRet/InterruptHyper",      STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetGuestTrap,           STAMTYPE_COUNTER, "/VMM/GCRet/GuestTrap",           STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetRingSwitch,          STAMTYPE_COUNTER, "/VMM/GCRet/RingSwitch",          STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetRingSwitchInt,       STAMTYPE_COUNTER, "/VMM/GCRet/RingSwitchInt",       STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetExceptionPrivilege,  STAMTYPE_COUNTER, "/VMM/GCRet/ExceptionPrivilege",  STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EXCEPTION_PRIVILEGED returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetStaleSelector,       STAMTYPE_COUNTER, "/VMM/GCRet/StaleSelector",       STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetIRETTrap,            STAMTYPE_COUNTER, "/VMM/GCRet/IRETTrap",            STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetEmulate,             STAMTYPE_COUNTER, "/VMM/GCRet/Emulate",             STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchEmulate,        STAMTYPE_COUNTER, "/VMM/GCRet/PatchEmulate",        STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetIORead,              STAMTYPE_COUNTER, "/VMM/GCRet/IORead",              STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_READ returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetIOWrite,             STAMTYPE_COUNTER, "/VMM/GCRet/IOWrite",             STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_WRITE returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIORead,            STAMTYPE_COUNTER, "/VMM/GCRet/MMIORead",            STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOWrite,           STAMTYPE_COUNTER, "/VMM/GCRet/MMIOWrite",           STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_WRITE returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOReadWrite,       STAMTYPE_COUNTER, "/VMM/GCRet/MMIOReadWrite",       STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ_WRITE returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOPatchRead,       STAMTYPE_COUNTER, "/VMM/GCRet/MMIOPatchRead",       STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOPatchWrite,      STAMTYPE_COUNTER, "/VMM/GCRet/MMIOPatchWrite",      STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetLDTFault,            STAMTYPE_COUNTER, "/VMM/GCRet/LDTFault",            STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetGDTFault,            STAMTYPE_COUNTER, "/VMM/GCRet/GDTFault",            STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetIDTFault,            STAMTYPE_COUNTER, "/VMM/GCRet/IDTFault",            STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetTSSFault,            STAMTYPE_COUNTER, "/VMM/GCRet/TSSFault",            STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDFault,             STAMTYPE_COUNTER, "/VMM/GCRet/PDFault",             STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_PD_FAULT returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetCSAMTask,            STAMTYPE_COUNTER, "/VMM/GCRet/CSAMTask",            STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetSyncCR3,             STAMTYPE_COUNTER, "/VMM/GCRet/SyncCR",              STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetMisc,                STAMTYPE_COUNTER, "/VMM/GCRet/Misc",                STAMUNIT_OCCURENCES, "Number of misc returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchInt3,           STAMTYPE_COUNTER, "/VMM/GCRet/PatchInt3",           STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchPF,             STAMTYPE_COUNTER, "/VMM/GCRet/PatchPF",             STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchGP,             STAMTYPE_COUNTER, "/VMM/GCRet/PatchGP",             STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchIretIRQ,        STAMTYPE_COUNTER, "/VMM/GCRet/PatchIret",           STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPageOverflow,        STAMTYPE_COUNTER, "/VMM/GCRet/InvlpgOverflow",      STAMUNIT_OCCURENCES, "Number of VERR_REM_FLUSHED_PAGES_OVERFLOW returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetRescheduleREM,       STAMTYPE_COUNTER, "/VMM/GCRet/ScheduleREM",         STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetToR3,                STAMTYPE_COUNTER, "/VMM/GCRet/ToR3",                STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetTimerPending,        STAMTYPE_COUNTER, "/VMM/GCRet/TimerPending",        STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterruptPending,    STAMTYPE_COUNTER, "/VMM/GCRet/InterruptPending",    STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetCallHost,            STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/Misc",       STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMGrowRAM,          STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/GrowRAM",    STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDMLock,             STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PDMLock",    STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetLogFlush,            STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/LogFlush",   STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDMQueueFlush,       STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/QueueFlush", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMPoolGrow,         STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PGMPoolGrow",STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetRemReplay,           STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/REMReplay",  STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetVMSetError,          STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/VMSetError", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMLock,             STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PGMLock",    STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPATMDuplicateFn,     STAMTYPE_COUNTER, "/VMM/GCRet/PATMDuplicateFn",     STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMChangeMode,       STAMTYPE_COUNTER, "/VMM/GCRet/PGMChangeMode",       STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetEmulHlt,             STAMTYPE_COUNTER, "/VMM/GCRet/EmulHlt",             STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_INSTR_HLT returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPendingRequest,      STAMTYPE_COUNTER, "/VMM/GCRet/PendingRequest",      STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");

                    return VINF_SUCCESS;
                }
                AssertRC(rc);
            }
        }
        /** @todo: Need failure cleanup. */

        //more todo in here?
        //if (VBOX_SUCCESS(rc))
        //{
        //}
        //int rc2 = vmmR3TermCoreCode(pVM);
        //AssertRC(rc2));
    }

    return rc;
}


/**
 * Ring-3 init finalizing.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(int) VMMR3InitFinalize(PVM pVM)
{
#ifdef VBOX_STRICT_VMM_STACK
    /*
     * Two inaccessible pages, one on each side of the stack, to catch over/under-flows.
     */
    memset(pVM->vmm.s.pbHCStack - PAGE_SIZE, 0xcc, PAGE_SIZE);
    PGMMapSetPage(pVM, MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack - PAGE_SIZE), PAGE_SIZE, 0);
    RTMemProtect(pVM->vmm.s.pbHCStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);

    memset(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
    PGMMapSetPage(pVM, MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack + VMM_STACK_SIZE), PAGE_SIZE, 0);
    RTMemProtect(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
#endif

    /*
     * Set page attributes to r/w for stack pages.
     */
    int rc = PGMMapSetPage(pVM, pVM->vmm.s.pbGCStack, VMM_STACK_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
    AssertRC(rc);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Create the EMT yield timer.
         */
        rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, "EMT Yielder", &pVM->vmm.s.pYieldTimer);
        if (VBOX_SUCCESS(rc))
            rc = TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldEveryMillies);
    }
#ifdef VBOX_WITH_NMI
    /*
     * Map the host APIC into GC - this may be host OS specific!
     */
    if (VBOX_SUCCESS(rc))
        rc = PGMMap(pVM, pVM->vmm.s.GCPtrApicBase, 0xfee00000, PAGE_SIZE,
                    X86_PTE_P | X86_PTE_RW | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | X86_PTE_D);
#endif
    return rc;
}


/**
 * Initializes the R0 VMM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) VMMR3InitR0(PVM pVM)
{
    int rc;

    /*
     * Initialize the ring-0 logger if we haven't done so yet.
     */
    if (    pVM->vmm.s.pR0Logger
        &&  !pVM->vmm.s.pR0Logger->fCreated)
    {
        rc = VMMR3UpdateLoggers(pVM);
        if (VBOX_FAILURE(rc))
            return rc;
    }

    /*
     * Call Ring-0 entry with init code.
     */
    for (;;)
    {
#ifdef NO_SUPCALLR0VMM
        //rc = VERR_GENERAL_FAILURE;
        rc = VINF_SUCCESS;
#else
        rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_VMMR0_INIT, (void *)VBOX_VERSION);
#endif
        if (    pVM->vmm.s.pR0Logger
            &&  pVM->vmm.s.pR0Logger->Logger.offScratch > 0)
            RTLogFlushToLogger(&pVM->vmm.s.pR0Logger->Logger, NULL);
        if (rc != VINF_VMM_CALL_HOST)
            break;
        rc = vmmR3ServiceCallHostRequest(pVM);
        if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
            break;
        break; // remove this when we do setjmp for all ring-0 stuff.
    }

    if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    {
        LogRel(("R0 init failed, rc=%Vra\n", rc));
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
            rc = VERR_INTERNAL_ERROR;
    }
    return rc;
}


/**
 * Initializes the GC VMM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) VMMR3InitGC(PVM pVM)
{
    /* In VMX mode, there's no need to init GC. */
    if (pVM->vmm.s.fSwitcherDisabled)
        return VINF_SUCCESS;

    /*
     * Call VMMGCInit():
     *      -# resolve the address.
     *      -# setup stackframe and EIP to use the trampoline.
     *      -# do a generic hypervisor call.
     */
    RTGCPTR GCPtrEP;
    int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
    if (VBOX_SUCCESS(rc))
    {
        CPUMHyperSetCtxCore(pVM, NULL);
        CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Clear the stack. */
        uint64_t u64TS = RTTimeProgramStartNanoTS();
#if GC_ARCH_BITS == 32
        CPUMPushHyper(pVM, (uint32_t)(u64TS >> 32));    /* Param 3: The program startup TS - Hi. */
        CPUMPushHyper(pVM, (uint32_t)u64TS);            /* Param 3: The program startup TS - Lo. */
#else /* 64-bit GC */
        CPUMPushHyper(pVM, u64TS);                      /* Param 3: The program startup TS. */
#endif
        CPUMPushHyper(pVM, VBOX_VERSION);               /* Param 2: Version argument. */
        CPUMPushHyper(pVM, VMMGC_DO_VMMGC_INIT);        /* Param 1: Operation. */
        CPUMPushHyper(pVM, pVM->pVMGC);                 /* Param 0: pVM */
        CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR));        /* trampoline param: stacksize. */
        CPUMPushHyper(pVM, GCPtrEP);                    /* Call EIP. */
        CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);

        for (;;)
        {
#ifdef NO_SUPCALLR0VMM
            //rc = VERR_GENERAL_FAILURE;
            rc = VINF_SUCCESS;
#else
            rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_CALL_HYPERVISOR, NULL);
#endif
#ifdef LOG_ENABLED
            PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
            if (    pLogger
                &&  pLogger->offScratch > 0)
                RTLogFlushGC(NULL, pLogger);
#endif
#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
            PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
            if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
                RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
#endif
            if (rc != VINF_VMM_CALL_HOST)
                break;
            rc = vmmR3ServiceCallHostRequest(pVM);
            if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
                break;
        }

        if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
        {
            VMMR3FatalDump(pVM, rc);
            if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
                rc = VERR_INTERNAL_ERROR;
        }
        AssertRC(rc);
    }
    return rc;
}


/**
 * Terminate the VMM bits.
 *
 * @returns VINF_SUCCESS.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(int) VMMR3Term(PVM pVM)
{
    /** @todo must call ring-0 so the logger thread instance can be properly removed. */

#ifdef VBOX_STRICT_VMM_STACK
    /*
     * Make the two stack guard pages present again.
     */
    RTMemProtect(pVM->vmm.s.pbHCStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    RTMemProtect(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
#endif
    return VINF_SUCCESS;
}


/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * The VMM will need to apply relocations to the core code.
 *
 * @param   pVM         The VM handle.
 * @param   offDelta    The relocation delta.
 */
VMMR3DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
    LogFlow(("VMMR3Relocate: offDelta=%VGv\n", offDelta));

    /*
     * Recalc the GC address.
     */
    pVM->vmm.s.pvGCCoreCode = MMHyperHC2GC(pVM, pVM->vmm.s.pvHCCoreCodeR3);

    /*
     * The stack.
     */
    CPUMSetHyperESP(pVM, CPUMGetHyperESP(pVM) + offDelta);
    pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack);
    pVM->vmm.s.pbGCStackBottom = pVM->vmm.s.pbGCStack + VMM_STACK_SIZE;

    /*
     * All the switchers.
     */
    for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
    {
        PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
        if (pSwitcher && pSwitcher->pfnRelocate)
        {
            unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher];
            pSwitcher->pfnRelocate(pVM,
                                   pSwitcher,
                                   (uint8_t *)pVM->vmm.s.pvHCCoreCodeR0 + off,
                                   (uint8_t *)pVM->vmm.s.pvHCCoreCodeR3 + off,
                                   pVM->vmm.s.pvGCCoreCode + off,
                                   pVM->vmm.s.HCPhysCoreCode + off);
        }
    }

    /*
     * Recalc the GC address for the current switcher.
     */
    PVMMSWITCHERDEF pSwitcher = s_apSwitchers[pVM->vmm.s.enmSwitcher];
    RTGCPTR GCPtr = pVM->vmm.s.pvGCCoreCode + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
    pVM->vmm.s.pfnGCGuestToHost         = GCPtr + pSwitcher->offGCGuestToHost;
    pVM->vmm.s.pfnGCCallTrampoline      = GCPtr + pSwitcher->offGCCallTrampoline;
    pVM->pfnVMMGCGuestToHostAsm         = GCPtr + pSwitcher->offGCGuestToHostAsm;
    pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
    pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;

    /*
     * Get other GC entry points.
     */
    int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMGCResumeGuest);
    AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Vra\n", rc));

    rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMGCResumeGuestV86);
    AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Vra\n", rc));

    /*
     * Update the logger.
     */
    VMMR3UpdateLoggers(pVM);
}


/**
 * Updates the settings for the GC and R0 loggers.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(int) VMMR3UpdateLoggers(PVM pVM)
{
    /*
     * Simply clone the logger instance (for GC).
     */
    int rc = VINF_SUCCESS;
    RTGCPTR GCPtrLoggerFlush = 0;

    if (pVM->vmm.s.pLoggerHC
#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
        || pVM->vmm.s.pRelLoggerHC
#endif
       )
    {
        rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerFlush", &GCPtrLoggerFlush);
        AssertReleaseMsgRC(rc, ("vmmGCLoggerFlush not found! rc=%Vra\n", rc));
    }

    if (pVM->vmm.s.pLoggerHC)
    {
        RTGCPTR GCPtrLoggerWrapper = 0;
        rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &GCPtrLoggerWrapper);
        AssertReleaseMsgRC(rc, ("vmmGCLoggerWrapper not found! rc=%Vra\n", rc));
        pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
        rc = RTLogCloneGC(NULL /* default */, pVM->vmm.s.pLoggerHC, pVM->vmm.s.cbLoggerGC,
                          GCPtrLoggerWrapper, GCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
        AssertReleaseMsgRC(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc));
    }

#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
    if (pVM->vmm.s.pRelLoggerHC)
    {
        RTGCPTR GCPtrLoggerWrapper = 0;
        rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &GCPtrLoggerWrapper);
        AssertReleaseMsgRC(rc, ("vmmGCRelLoggerWrapper not found! rc=%Vra\n", rc));
        pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
        rc = RTLogCloneGC(RTLogRelDefaultInstance(), pVM->vmm.s.pRelLoggerHC, pVM->vmm.s.cbRelLoggerGC,
                          GCPtrLoggerWrapper, GCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
        AssertReleaseMsgRC(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc));
    }
#endif /* VBOX_WITH_GC_AND_R0_RELEASE_LOG */

    /*
     * For the ring-0 EMT logger, we use a per-thread logger
     * instance in ring-0. Only initialize it once.
     */
    PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
    if (pR0Logger)
    {
        if (!pR0Logger->fCreated)
        {
            RTR0PTR pfnLoggerWrapper = NIL_RTR0PTR;
            rc = PDMR3GetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerWrapper", &pfnLoggerWrapper);
            AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerWrapper not found! rc=%Vra\n", rc), rc);

            RTR0PTR pfnLoggerFlush = NIL_RTR0PTR;
            rc = PDMR3GetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush);
            AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerFlush not found! rc=%Vra\n", rc), rc);

            rc = RTLogCreateForR0(&pR0Logger->Logger, pR0Logger->cbLogger,
                                  *(PFNRTLOGGER *)&pfnLoggerWrapper, *(PFNRTLOGFLUSH *)&pfnLoggerFlush,
                                  RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY);
            AssertReleaseMsgRCReturn(rc, ("RTLogCreateForR0 failed! rc=%Vra\n", rc), rc);
            pR0Logger->fCreated = true;
        }

        rc = RTLogCopyGroupsAndFlags(&pR0Logger->Logger, NULL /* default */, RTLOGFLAGS_BUFFERED, 0);
        AssertRC(rc);
    }

    return rc;
}


/**
 * Generic switch code relocator.
 *
 * @param   pVM         The VM handle.
 * @param   pSwitcher   The switcher definition.
 * @param   pu8CodeR0   Pointer to the core code block for the switcher, ring-0 mapping.
 * @param   pu8CodeR3   Pointer to the core code block for the switcher, ring-3 mapping.
 * @param   GCPtrCode   The guest context address corresponding to pu8Code.
 * @param   u32IDCode   The identity mapped (ID) address corresponding to pu8Code.
 * @param   SelCS       The hypervisor CS selector.
 * @param   SelDS       The hypervisor DS selector.
 * @param   SelTSS      The hypervisor TSS selector.
 * @param   GCPtrGDT    The GC address of the hypervisor GDT.
 * @param   SelCS64     The 64-bit mode hypervisor CS selector.
 */
static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
                                         RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64)
{
    union
    {
        const uint8_t  *pu8;
        const uint16_t *pu16;
        const uint32_t *pu32;
        const uint64_t *pu64;
        const void     *pv;
        uintptr_t       u;
    } u;
    u.pv = pSwitcher->pvFixups;

    /*
     * Process fixups.
     */
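    /* Each fixup record in the pvFixups byte stream consists of a FIX_* type
       byte followed by a 32-bit source offset into the switcher code and, for
       most types, a 32-bit target operand; FIX_THE_END terminates the stream
       (see the cases below). */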
    uint8_t u8;
    while ((u8 = *u.pu8++) != FIX_THE_END)
    {
        /*
         * Get the source (where to write the fixup).
         */
        uint32_t offSrc = *u.pu32++;
        Assert(offSrc < pSwitcher->cbCode);
        union
        {
            uint8_t  *pu8;
            uint16_t *pu16;
            uint32_t *pu32;
            uint64_t *pu64;
            uintptr_t u;
        } uSrc;
        uSrc.pu8 = pu8CodeR3 + offSrc;

        /* The fixup target and method depend on the type. */
        switch (u8)
        {
            /*
             * 32-bit relative, source in HC and target in GC.
             */
            case FIX_HC_2_GC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (uSrc.u + 4));
                break;
            }

            /*
             * 32-bit relative, source in HC and target in ID.
             */
            case FIX_HC_2_ID_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (uSrc.u + 4));
                break;
            }

            /*
             * 32-bit relative, source in GC and target in HC.
             */
            case FIX_GC_2_HC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu32 = (uint32_t)(((uintptr_t)pu8CodeR0 + offTrg) - (GCPtrCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in GC and target in ID.
             */
            case FIX_GC_2_ID_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (GCPtrCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in ID and target in HC.
             */
            case FIX_ID_2_HC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu32 = (uint32_t)(((uintptr_t)pu8CodeR0 + offTrg) - (u32IDCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in ID and target in GC.
             */
            case FIX_ID_2_GC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (u32IDCode + offSrc + 4));
                break;
            }

            /*
             * 16:32 far jump, target in GC.
             */
            case FIX_GC_FAR32:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                *uSrc.pu32++ = (uint32_t)(GCPtrCode + offTrg);
                *uSrc.pu16++ = SelCS;
                break;
            }

            /*
             * Make 32-bit GC pointer given CPUM offset.
             */
            case FIX_GC_CPUM_OFF:
            {
                uint32_t offCPUM = *u.pu32++;
                Assert(offCPUM < sizeof(pVM->cpum));
                *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, &pVM->cpum) + offCPUM);
                break;
            }

            /*
             * Make 32-bit GC pointer given VM offset.
             */
            case FIX_GC_VM_OFF:
            {
                uint32_t offVM = *u.pu32++;
                Assert(offVM < sizeof(VM));
                *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, pVM) + offVM);
                break;
            }

            /*
             * Make 32-bit HC pointer given CPUM offset.
             */
            case FIX_HC_CPUM_OFF:
            {
                uint32_t offCPUM = *u.pu32++;
                Assert(offCPUM < sizeof(pVM->cpum));
                *uSrc.pu32 = (uint32_t)pVM->pVMR0 + RT_OFFSETOF(VM, cpum) + offCPUM;
                break;
            }

            /*
             * Make 32-bit R0 pointer given VM offset.
             */
            case FIX_HC_VM_OFF:
            {
                uint32_t offVM = *u.pu32++;
                Assert(offVM < sizeof(VM));
                *uSrc.pu32 = (uint32_t)pVM->pVMR0 + offVM;
                break;
            }

            /*
             * Store the 32-Bit CR3 (32-bit) for the intermediate memory context.
             */
            case FIX_INTER_32BIT_CR3:
            {
                *uSrc.pu32 = PGMGetInter32BitCR3(pVM);
                break;
            }

            /*
             * Store the PAE CR3 (32-bit) for the intermediate memory context.
             */
            case FIX_INTER_PAE_CR3:
            {
                *uSrc.pu32 = PGMGetInterPaeCR3(pVM);
                break;
            }

            /*
             * Store the AMD64 CR3 (32-bit) for the intermediate memory context.
             */
            case FIX_INTER_AMD64_CR3:
            {
                *uSrc.pu32 = PGMGetInterAmd64CR3(pVM);
                break;
            }

            /*
             * Store the 32-Bit CR3 (32-bit) for the hypervisor (shadow) memory context.
             */
            case FIX_HYPER_32BIT_CR3:
            {
                *uSrc.pu32 = PGMGetHyper32BitCR3(pVM);
                break;
            }

            /*
             * Store the PAE CR3 (32-bit) for the hypervisor (shadow) memory context.
             */
            case FIX_HYPER_PAE_CR3:
            {
                *uSrc.pu32 = PGMGetHyperPaeCR3(pVM);
                break;
            }

            /*
             * Store the AMD64 CR3 (32-bit) for the hypervisor (shadow) memory context.
             */
            case FIX_HYPER_AMD64_CR3:
            {
                *uSrc.pu32 = PGMGetHyperAmd64CR3(pVM);
                break;
            }

            /*
             * Store Hypervisor CS (16-bit).
             */
            case FIX_HYPER_CS:
            {
                *uSrc.pu16 = SelCS;
                break;
            }

            /*
             * Store Hypervisor DS (16-bit).
             */
            case FIX_HYPER_DS:
            {
                *uSrc.pu16 = SelDS;
                break;
            }

            /*
             * Store Hypervisor TSS (16-bit).
             */
            case FIX_HYPER_TSS:
            {
                *uSrc.pu16 = SelTSS;
                break;
            }

            /*
             * Store the 32-bit GC address of the 2nd dword of the TSS descriptor (in the GDT).
             */
            case FIX_GC_TSS_GDTE_DW2:
            {
                RTGCPTR GCPtr = GCPtrGDT + (SelTSS & ~7) + 4;
                *uSrc.pu32 = (uint32_t)GCPtr;
                break;
            }


            ///@todo case FIX_CR4_MASK:
            ///@todo case FIX_CR4_OSFSXR:

            /*
             * Insert relative jump to specified target if FXSAVE/FXRSTOR isn't supported by the cpu.
             */
            case FIX_NO_FXSAVE_JMP:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg < pSwitcher->cbCode);
                if (!CPUMSupportsFXSR(pVM))
                {
                    *uSrc.pu8++ = 0xe9; /* jmp rel32 */
                    *uSrc.pu32++ = offTrg - (offSrc + 5);
                }
                else
                {
                    *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
                    *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
                }
                break;
            }

            /*
             * Insert relative jump to specified target if SYSENTER isn't used by the host.
             */
            case FIX_NO_SYSENTER_JMP:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg < pSwitcher->cbCode);
                if (!CPUMIsHostUsingSysEnter(pVM))
                {
                    *uSrc.pu8++ = 0xe9; /* jmp rel32 */
                    *uSrc.pu32++ = offTrg - (offSrc + 5);
                }
                else
                {
                    *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
                    *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
                }
                break;
            }

            /*
             * Insert relative jump to specified target if SYSCALL isn't used by the host.
             */
            case FIX_NO_SYSCALL_JMP:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg < pSwitcher->cbCode);
                if (!CPUMIsHostUsingSysCall(pVM))
                {
                    *uSrc.pu8++ = 0xe9; /* jmp rel32 */
                    *uSrc.pu32++ = offTrg - (offSrc + 5);
                }
                else
                {
                    *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
                    *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
                }
                break;
            }

            /*
             * 32-bit HC pointer fixup to (HC) target within the code (32-bit offset).
             */
            case FIX_HC_32BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu32 = (uintptr_t)pu8CodeR0 + offTrg;
                break;
            }

#if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
            /*
             * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
             */
            case FIX_HC_64BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu64 = (uintptr_t)pu8CodeR0 + offTrg;
                break;
            }

            /*
             * 64-bit HC Code Selector (no argument).
             */
            case FIX_HC_64BIT_CS:
            {
                Assert(offSrc < pSwitcher->cbCode);
#if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
                *uSrc.pu16 = 0x80; /* KERNEL64_CS from i386/seg.h */
#else
                AssertFatalMsgFailed(("FIX_HC_64BIT_CS not implemented for this host\n"));
#endif
                break;
            }

            /*
             * 64-bit HC pointer to the CPUM instance data (no argument).
             */
            case FIX_HC_64BIT_CPUM:
            {
                Assert(offSrc < pSwitcher->cbCode);
                *uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum);
                break;
            }
#endif

            /*
             * 32-bit ID pointer to (ID) target within the code (32-bit offset).
             */
            case FIX_ID_32BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32 = u32IDCode + offTrg;
                break;
            }

            /*
             * 64-bit ID pointer to (ID) target within the code (32-bit offset).
             */
            case FIX_ID_64BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu64 = u32IDCode + offTrg;
                break;
            }

            /*
             * Far 16:32 ID pointer to 64-bit mode (ID) target within the code (32-bit offset).
             */
            case FIX_ID_FAR32_TO_64BIT_MODE:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32++ = u32IDCode + offTrg;
                *uSrc.pu16 = SelCS64;
                AssertRelease(SelCS64);
                break;
            }

#ifdef VBOX_WITH_NMI
            /*
             * 32-bit address to the APIC base.
             */
            case FIX_GC_APIC_BASE_32BIT:
            {
                *uSrc.pu32 = pVM->vmm.s.GCPtrApicBase;
                break;
            }
#endif

            default:
                AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc));
                break;
        }
    }

#ifdef LOG_ENABLED
    /*
     * If Log2 is enabled disassemble the switcher code.
     *
     * The switcher code has 1-2 HC parts, 1 GC part and 0-2 ID parts.
     */
    if (LogIs2Enabled())
    {
        RTLogPrintf("*** Disassembly of switcher %d '%s' %#x bytes ***\n"
                    "   pu8CodeR0   = %p\n"
                    "   pu8CodeR3   = %p\n"
                    "   GCPtrCode   = %VGv\n"
                    "   u32IDCode   = %08x\n"
                    "   pVMGC       = %VGv\n"
                    "   pCPUMGC     = %VGv\n"
                    "   pVMHC       = %p\n"
                    "   pCPUMHC     = %p\n"
                    "   GCPtrGDT    = %VGv\n"
                    "   InterCR3s   = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
                    "   HyperCR3s   = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
                    "   SelCS       = %04x\n"
                    "   SelDS       = %04x\n"
                    "   SelCS64     = %04x\n"
                    "   SelTSS      = %04x\n",
                    pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode,
                    pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode, VM_GUEST_ADDR(pVM, pVM),
                    VM_GUEST_ADDR(pVM, &pVM->cpum), pVM, &pVM->cpum,
                    GCPtrGDT,
                    PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
                    PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM),
                    SelCS, SelDS, SelCS64, SelTSS);

        uint32_t offCode = 0;
        while (offCode < pSwitcher->cbCode)
        {
            /*
             * Figure out where this is.
             */
            const char *pszDesc = NULL;
            RTUINTPTR   uBase;
            uint32_t    cbCode;
            if (offCode - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0)
            {
                pszDesc = "HCCode0";
                uBase   = (RTUINTPTR)pu8CodeR0;
                offCode = pSwitcher->offHCCode0;
                cbCode  = pSwitcher->cbHCCode0;
            }
            else if (offCode - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1)
            {
                pszDesc = "HCCode1";
                uBase   = (RTUINTPTR)pu8CodeR0;
                offCode = pSwitcher->offHCCode1;
                cbCode  = pSwitcher->cbHCCode1;
            }
            else if (offCode - pSwitcher->offGCCode < pSwitcher->cbGCCode)
            {
                pszDesc = "GCCode";
                uBase   = GCPtrCode;
                offCode = pSwitcher->offGCCode;
                cbCode  = pSwitcher->cbGCCode;
            }
            else if (offCode - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0)
            {
                pszDesc = "IDCode0";
                uBase   = u32IDCode;
                offCode = pSwitcher->offIDCode0;
                cbCode  = pSwitcher->cbIDCode0;
            }
            else if (offCode - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1)
            {
                pszDesc = "IDCode1";
                uBase   = u32IDCode;
                offCode = pSwitcher->offIDCode1;
                cbCode  = pSwitcher->cbIDCode1;
            }
            else
            {
                RTLogPrintf("  %04x: %02x '%c' (nowhere)\n",
                            offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
                offCode++;
                continue;
            }

            /*
             * Disassemble it.
             */
            RTLogPrintf("  %s: offCode=%#x cbCode=%#x\n", pszDesc, offCode, cbCode);
            DISCPUSTATE Cpu = {0};
            Cpu.mode = CPUMODE_32BIT;
            while (cbCode > 0)
            {
                /* try label it */
                if (pSwitcher->offR0HostToGuest == offCode)
                    RTLogPrintf("    *R0HostToGuest:\n");
                if (pSwitcher->offGCGuestToHost == offCode)
                    RTLogPrintf("    *GCGuestToHost:\n");
                if (pSwitcher->offGCCallTrampoline == offCode)
                    RTLogPrintf("    *GCCallTrampoline:\n");
                if (pSwitcher->offGCGuestToHostAsm == offCode)
                    RTLogPrintf("    *GCGuestToHostAsm:\n");
                if (pSwitcher->offGCGuestToHostAsmHyperCtx == offCode)
                    RTLogPrintf("    *GCGuestToHostAsmHyperCtx:\n");
                if (pSwitcher->offGCGuestToHostAsmGuestCtx == offCode)
                    RTLogPrintf("    *GCGuestToHostAsmGuestCtx:\n");

                /* disas */
                uint32_t cbInstr = 0;
                char szDisas[256];
                if (DISInstr(&Cpu, (RTUINTPTR)pu8CodeR3 + offCode, uBase - (RTUINTPTR)pu8CodeR3, &cbInstr, szDisas))
                    RTLogPrintf("  %04x: %s", offCode, szDisas); //for whatever reason szDisas includes '\n'.
                else
                {
                    RTLogPrintf("  %04x: %02x '%c'\n",
                                offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
                    cbInstr = 1;
                }
                offCode += cbInstr;
                cbCode -= RT_MIN(cbInstr, cbCode);
            }
        }
    }
#endif
}


/**
 * Relocator for the 32-Bit to 32-Bit world switcher.
 */
DECLCALLBACK(void) vmmR3Switcher32BitTo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}


/**
 * Relocator for the 32-Bit to PAE world switcher.
 */
DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}


/**
 * Relocator for the PAE to 32-Bit world switcher.
 */
DECLCALLBACK(void) vmmR3SwitcherPAETo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}


/**
 * Relocator for the PAE to PAE world switcher.
 */
DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}


/**
 * Relocator for the AMD64 to PAE world switcher.
 */
DECLCALLBACK(void) vmmR3SwitcherAMD64ToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
}


/**
 * Gets the pointer to g_szRTAssertMsg1 in GC.
 * @returns Pointer to VMMGC::g_szRTAssertMsg1.
 *          Returns NULL if not present.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(const char *) VMMR3GetGCAssertMsg1(PVM pVM)
{
    RTGCPTR GCPtr;
    int rc = PDMR3GetSymbolGC(pVM, NULL, "g_szRTAssertMsg1", &GCPtr);
    if (VBOX_SUCCESS(rc))
        return (const char *)MMHyperGC2HC(pVM, GCPtr);
    return NULL;
}


/**
 * Gets the pointer to g_szRTAssertMsg2 in GC.
 * @returns Pointer to VMMGC::g_szRTAssertMsg2.
 *          Returns NULL if not present.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(const char *) VMMR3GetGCAssertMsg2(PVM pVM)
{
    RTGCPTR GCPtr;
    int rc = PDMR3GetSymbolGC(pVM, NULL, "g_szRTAssertMsg2", &GCPtr);
    if (VBOX_SUCCESS(rc))
        return (const char *)MMHyperGC2HC(pVM, GCPtr);
    return NULL;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    LogFlow(("vmmR3Save:\n"));

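    /* Saved state layout (version 3), as written by the puts below: the GC
       stack bottom pointer, the hypervisor ESP, the raw VMM_STACK_SIZE stack
       bytes, and a ~0 dword terminator. */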
1570 /*
1571 * The hypervisor stack.
1572 */
1573 SSMR3PutGCPtr(pSSM, pVM->vmm.s.pbGCStackBottom);
1574 RTGCPTR GCPtrESP = CPUMGetHyperESP(pVM);
1575 Assert(pVM->vmm.s.pbGCStackBottom - GCPtrESP <= VMM_STACK_SIZE);
1576 SSMR3PutGCPtr(pSSM, GCPtrESP);
1577 SSMR3PutMem(pSSM, pVM->vmm.s.pbHCStack, VMM_STACK_SIZE);
1578 return SSMR3PutU32(pSSM, ~0); /* terminator */
1579}
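/* The saved-state unit written above has the following layout, reconstructed
 * from the SSMR3Put* calls (the field names are illustrative only):
 * @code
 *      RTGCPTR     GCPtrStackBottom;           // pVM->vmm.s.pbGCStackBottom
 *      RTGCPTR     GCPtrESP;                   // hypervisor ESP at save time
 *      uint8_t     abStack[VMM_STACK_SIZE];    // raw copy of the hypervisor stack
 *      uint32_t    u32Terminator;              // always ~0U
 * @endcode
 * vmmR3Load() below reads the fields back in the same order.
 */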
1580
1581
1582/**
1583 * Execute state load operation.
1584 *
1585 * @returns VBox status code.
1586 * @param pVM VM Handle.
1587 * @param pSSM SSM operation handle.
1588 * @param u32Version Data layout version.
1589 */
1590static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
1591{
1592 LogFlow(("vmmR3Load:\n"));
1593
1594 /*
1595 * Validate version.
1596 */
1597 if (u32Version != VMM_SAVED_STATE_VERSION)
1598 {
1599 Log(("vmmR3Load: Invalid version u32Version=%d!\n", u32Version));
1600 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1601 }
1602
1603 /*
1604     * Check that the stack is in the same place, or that it's fairly empty.
1605 */
1606 RTGCPTR GCPtrStackBottom;
1607 SSMR3GetGCPtr(pSSM, &GCPtrStackBottom);
1608 RTGCPTR GCPtrESP;
1609 int rc = SSMR3GetGCPtr(pSSM, &GCPtrESP);
1610 if (VBOX_FAILURE(rc))
1611 return rc;
1612 if ( GCPtrStackBottom == pVM->vmm.s.pbGCStackBottom
1613        || (GCPtrStackBottom - GCPtrESP < 32)) /** @todo This will break if we start preempting the hypervisor. */
1614 {
1615 /*
1616 * We *must* set the ESP because the CPUM load + PGM load relocations will render
1617 * the ESP in CPUM fatally invalid.
1618 */
1619 CPUMSetHyperESP(pVM, GCPtrESP);
1620
1621 /* restore the stack. */
1622 SSMR3GetMem(pSSM, pVM->vmm.s.pbHCStack, VMM_STACK_SIZE);
1623
1624 /* terminator */
1625 uint32_t u32;
1626 rc = SSMR3GetU32(pSSM, &u32);
1627 if (VBOX_FAILURE(rc))
1628 return rc;
1629 if (u32 != ~0U)
1630 {
1631 AssertMsgFailed(("u32=%#x\n", u32));
1632 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1633 }
1634 return VINF_SUCCESS;
1635 }
1636
1637 LogRel(("The stack is not in the same place and it's not empty! GCPtrStackBottom=%VGv pbGCStackBottom=%VGv ESP=%VGv\n",
1638 GCPtrStackBottom, pVM->vmm.s.pbGCStackBottom, GCPtrESP));
1639 if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
1640 return VINF_SUCCESS; /* ignore this */
1641 AssertFailed();
1642 return VERR_SSM_LOAD_CONFIG_MISMATCH;
1643}
1644
1645
1646/**
1647 * Selects the switcher to be used for switching to GC.
1648 *
1649 * @returns VBox status code.
1650 * @param pVM VM handle.
1651 * @param enmSwitcher The new switcher.
1652 * @remark This function may be called before the VMM is initialized.
1653 */
1654VMMR3DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
1655{
1656 /*
1657 * Validate input.
1658 */
1659 if ( enmSwitcher < VMMSWITCHER_INVALID
1660 || enmSwitcher >= VMMSWITCHER_MAX)
1661 {
1662 AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
1663 return VERR_INVALID_PARAMETER;
1664 }
1665
1666 /*
1667 * Select the new switcher.
1668 */
1669 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
1670 if (pSwitcher)
1671 {
1672 Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
1673 pVM->vmm.s.enmSwitcher = enmSwitcher;
1674
1675 RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvHCCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvHCCoreCodeR0 type */
1676 pVM->vmm.s.pfnR0HostToGuest = pbCodeR0 + pSwitcher->offR0HostToGuest;
1677
1678 RTGCPTR GCPtr = pVM->vmm.s.pvGCCoreCode + pVM->vmm.s.aoffSwitchers[enmSwitcher];
1679 pVM->vmm.s.pfnGCGuestToHost = GCPtr + pSwitcher->offGCGuestToHost;
1680 pVM->vmm.s.pfnGCCallTrampoline = GCPtr + pSwitcher->offGCCallTrampoline;
1681 pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
1682 pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
1683 pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
1684 return VINF_SUCCESS;
1685 }
1686 return VERR_NOT_IMPLEMENTED;
1687}
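/* A minimal usage sketch (hypothetical caller; the enum value assumes the
 * VMMSWITCHER_PAE_TO_PAE naming matching the relocators above):
 * @code
 *      int rc = VMMR3SelectSwitcher(pVM, VMMSWITCHER_PAE_TO_PAE);
 *      if (VBOX_FAILURE(rc))
 *          return rc;  // VERR_NOT_IMPLEMENTED if that switcher isn't compiled in.
 * @endcode
 */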
1688
1689/**
1690 * Disable the switcher logic permanently.
1691 *
1692 * @returns VBox status code.
1693 * @param pVM VM handle.
1694 */
1695VMMR3DECL(int) VMMR3DisableSwitcher(PVM pVM)
1696{
1697/** @todo r=bird: I would suggest that we create a dummy switcher which just does something like:
1698 * @code
1699 * mov eax, VERR_INTERNAL_ERROR
1700 * ret
1701 * @endcode
1702 * And then check for fSwitcherDisabled in VMMR3SelectSwitcher() in order to prevent it from being removed.
1703 */
1704 pVM->vmm.s.fSwitcherDisabled = true;
1705 return VINF_SUCCESS;
1706}
1707
1708
1709/**
1710 * Resolve a builtin GC symbol.
1711 * Called by PDM when loading or relocating GC modules.
1712 *
1713 * @returns VBox status code.
1714 * @param pVM VM Handle.
1715 * @param pszSymbol Symbol to resolve.
1716 * @param pGCPtrValue Where to store the symbol value.
1717 * @remark This has to work before VMMR3Relocate() is called.
1718 */
1719VMMR3DECL(int) VMMR3GetImportGC(PVM pVM, const char *pszSymbol, PRTGCPTR pGCPtrValue)
1720{
1721 if (!strcmp(pszSymbol, "g_Logger"))
1722 {
1723 if (pVM->vmm.s.pLoggerHC)
1724 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
1725 *pGCPtrValue = pVM->vmm.s.pLoggerGC;
1726 }
1727 else if (!strcmp(pszSymbol, "g_RelLogger"))
1728 {
1729#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
1730 if (pVM->vmm.s.pRelLoggerHC)
1731 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
1732 *pGCPtrValue = pVM->vmm.s.pRelLoggerGC;
1733#else
1734 *pGCPtrValue = NIL_RTGCPTR;
1735#endif
1736 }
1737 else
1738 return VERR_SYMBOL_NOT_FOUND;
1739 return VINF_SUCCESS;
1740}
1741
1742
1743/**
1744 * Suspends the CPU yielder.
1745 *
1746 * @param pVM The VM handle.
1747 */
1748VMMR3DECL(void) VMMR3YieldSuspend(PVM pVM)
1749{
1750 if (!pVM->vmm.s.cYieldResumeMillies)
1751 {
1752 uint64_t u64Now = TMTimerGet(pVM->vmm.s.pYieldTimer);
1753 uint64_t u64Expire = TMTimerGetExpire(pVM->vmm.s.pYieldTimer);
1754 if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
1755 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1756 else
1757 pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM->vmm.s.pYieldTimer, u64Expire - u64Now);
1758 TMTimerStop(pVM->vmm.s.pYieldTimer);
1759 }
1760 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1761}
1762
1763
1764/**
1765 * Stops the CPU yielder.
1766 *
1767 * @param pVM The VM handle.
1768 */
1769VMMR3DECL(void) VMMR3YieldStop(PVM pVM)
1770{
1771 if (!pVM->vmm.s.cYieldResumeMillies)
1772 TMTimerStop(pVM->vmm.s.pYieldTimer);
1773 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1774 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1775}
1776
1777
1778/**
1779 * Resumes the CPU yielder when it has been suspended or stopped.
1780 *
1781 * @param pVM The VM handle.
1782 */
1783VMMR3DECL(void) VMMR3YieldResume(PVM pVM)
1784{
1785 if (pVM->vmm.s.cYieldResumeMillies)
1786 {
1787 TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldResumeMillies);
1788 pVM->vmm.s.cYieldResumeMillies = 0;
1789 }
1790}
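/* Intended usage of the yielder calls above, sketched for a hypothetical caller:
 * @code
 *      VMMR3YieldSuspend(pVM);     // stop the timer, remember the remaining interval
 *      // ... section during which EMT yielding is unwanted ...
 *      VMMR3YieldResume(pVM);      // re-arm the timer with the remembered interval
 * @endcode
 * VMMR3YieldStop() differs from suspend in that resuming always restarts with
 * the full cYieldEveryMillies interval.
 */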
1791
1792
1793/**
1794 * Internal timer callback function.
1795 *
1796 * @param pVM The VM.
1797 * @param pTimer The timer handle.
1798 * @param pvUser User argument specified upon timer creation.
1799 */
1800static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
1801{
1802 /*
1803     * This really needs some careful tuning. While we shouldn't be too greedy since
1804     * that'll cause the rest of the system to stall, we shouldn't be too nice either
1805     * because that'll cause us to stall.
1806     *
1807     * The current logic is to use the default interval when there is no lag worth
1808     * mentioning (under 50ms), to keep yielding under moderate lag (under 1s) only
1809     * if the last yield was less than 500ms ago, and to not yield at all beyond that.
1810     * (This depends on TMCLOCK_VIRTUAL_SYNC being scheduled before TMCLOCK_REAL
1811     * so the lag is up to date.)
1812 */
1813 const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
1814 if ( u64Lag < 50000000 /* 50ms */
1815 || ( u64Lag < 1000000000 /* 1s */
1816 && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */)
1817 )
1818 {
1819 uint64_t u64Elapsed = RTTimeNanoTS();
1820 pVM->vmm.s.u64LastYield = u64Elapsed;
1821
1822 RTThreadYield();
1823
1824#ifdef LOG_ENABLED
1825 u64Elapsed = RTTimeNanoTS() - u64Elapsed;
1826 Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed));
1827#endif
1828 }
1829 TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies);
1830}
1831
1832
1833/**
1834 * Acquire global VM lock.
1835 *
1836 * @returns VBox status code
1837 * @param pVM The VM to operate on.
1838 */
1839VMMR3DECL(int) VMMR3Lock(PVM pVM)
1840{
1841 return RTCritSectEnter(&pVM->vmm.s.CritSectVMLock);
1842}
1843
1844
1845/**
1846 * Release global VM lock.
1847 *
1848 * @returns VBox status code
1849 * @param pVM The VM to operate on.
1850 */
1851VMMR3DECL(int) VMMR3Unlock(PVM pVM)
1852{
1853 return RTCritSectLeave(&pVM->vmm.s.CritSectVMLock);
1854}
1855
1856
1857/**
1858 * Return global VM lock owner.
1859 *
1860 * @returns Thread id of owner.
1861 * @returns NIL_RTTHREAD if no owner.
1862 * @param pVM The VM to operate on.
1863 */
1864VMMR3DECL(RTNATIVETHREAD) VMMR3LockGetOwner(PVM pVM)
1865{
1866 return RTCritSectGetOwner(&pVM->vmm.s.CritSectVMLock);
1867}
1868
1869
1870/**
1871 * Checks if the current thread is the owner of the global VM lock.
1872 *
1873 * @returns true if owner.
1874 * @returns false if not owner.
1875 * @param pVM The VM to operate on.
1876 */
1877VMMR3DECL(bool) VMMR3LockIsOwner(PVM pVM)
1878{
1879 return RTCritSectIsOwner(&pVM->vmm.s.CritSectVMLock);
1880}
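/* Typical use of the global VM lock (hypothetical caller); the underlying
 * RTCritSect is recursive, so nested Lock/Unlock pairs on one thread are fine:
 * @code
 *      VMMR3Lock(pVM);
 *      Assert(VMMR3LockIsOwner(pVM));
 *      // ... access state guarded by the global VM lock ...
 *      VMMR3Unlock(pVM);
 * @endcode
 */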
1881
1882
1883/**
1884 * Executes guest code.
1885 *
1886 * @param pVM VM handle.
1887 */
1888VMMR3DECL(int) VMMR3RawRunGC(PVM pVM)
1889{
1890 Log2(("VMMR3RawRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1891
1892 /*
1893 * Set the EIP and ESP.
1894 */
1895 CPUMSetHyperEIP(pVM, CPUMGetGuestEFlags(pVM) & X86_EFL_VM
1896 ? pVM->vmm.s.pfnCPUMGCResumeGuestV86
1897 : pVM->vmm.s.pfnCPUMGCResumeGuest);
1898 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom);
1899
1900 /*
1901 * We hide log flushes (outer) and hypervisor interrupts (inner).
1902 */
1903 for (;;)
1904 {
1905 int rc;
1906 do
1907 {
1908#ifdef NO_SUPCALLR0VMM
1909 rc = VERR_GENERAL_FAILURE;
1910#else
1911 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
1912#endif
1913 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1914
1915 /*
1916 * Flush the logs.
1917 */
1918#ifdef LOG_ENABLED
1919 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
1920 if ( pLogger
1921 && pLogger->offScratch > 0)
1922 RTLogFlushGC(NULL, pLogger);
1923#endif
1924#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
1925 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
1926 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
1927 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
1928#endif
1929 if (rc != VINF_VMM_CALL_HOST)
1930 {
1931 Log2(("VMMR3RawRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1932 return rc;
1933 }
1934 rc = vmmR3ServiceCallHostRequest(pVM);
1935 if (VBOX_FAILURE(rc))
1936 return rc;
1937 /* Resume GC */
1938 }
1939}
1940
1941
1942/**
1943 * Executes guest code (Intel VMX and AMD SVM).
1944 *
1945 * @param pVM VM handle.
1946 */
1947VMMR3DECL(int) VMMR3HwAccRunGC(PVM pVM)
1948{
1949 Log2(("VMMR3HwAccRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1950
1951 for (;;)
1952 {
1953 int rc;
1954 do
1955 {
1956#ifdef NO_SUPCALLR0VMM
1957 rc = VERR_GENERAL_FAILURE;
1958#else
1959 //rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_RUN, NULL, 0);
1960# if !defined(RT_OS_LINUX) /* Alternative for debugging - currently untested on linux. */
1961 rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN);
1962# else
1963 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_HWACC_RUN, NULL);
1964# endif
1965#endif
1966 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1967
1968#ifdef LOG_ENABLED
1969 /*
1970 * Flush the log
1971 */
1972 PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
1973 if ( pR0Logger
1974 && pR0Logger->Logger.offScratch > 0)
1975 RTLogFlushToLogger(&pR0Logger->Logger, NULL);
1976#endif /* LOG_ENABLED */
1977 if (rc != VINF_VMM_CALL_HOST)
1978 {
1979 Log2(("VMMR3HwAccRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1980 return rc;
1981 }
1982 rc = vmmR3ServiceCallHostRequest(pVM);
1983 if (VBOX_FAILURE(rc))
1984 return rc;
1985 /* Resume R0 */
1986 }
1987}
1988
1989/**
1990 * Calls a GC function.
1991 *
1992 * @param pVM The VM handle.
1993 * @param GCPtrEntry The GC function address.
1994 * @param cArgs The number of arguments in the ellipsis (...).
1995 * @param ... Arguments to the function.
1996 */
1997VMMR3DECL(int) VMMR3CallGC(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, ...)
1998{
1999 va_list args;
2000 va_start(args, cArgs);
2001 int rc = VMMR3CallGCV(pVM, GCPtrEntry, cArgs, args);
2002 va_end(args);
2003 return rc;
2004}
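/* A usage sketch; "MyGCFunction" and the arguments are illustrative. Arguments
 * are passed as RTGCUINTPTR values on the hypervisor stack (see VMMR3CallGCV):
 * @code
 *      RTGCPTR GCPtrEntry;
 *      int rc = PDMR3GetSymbolGC(pVM, NULL, "MyGCFunction", &GCPtrEntry);
 *      if (VBOX_SUCCESS(rc))
 *          rc = VMMR3CallGC(pVM, GCPtrEntry, 2, (RTGCUINTPTR)uArg0, (RTGCUINTPTR)uArg1);
 * @endcode
 */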
2005
2006
2007/**
2008 * Calls a GC function.
2009 *
2010 * @param pVM The VM handle.
2011 * @param GCPtrEntry The GC function address.
2012 * @param cArgs The number of arguments in the args list.
2013 * @param args Arguments to the function.
2014 */
2015VMMR3DECL(int) VMMR3CallGCV(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, va_list args)
2016{
2017 Log2(("VMMR3CallGCV: GCPtrEntry=%VGv cArgs=%d\n", GCPtrEntry, cArgs));
2018
2019 /*
2020 * Setup the call frame using the trampoline.
2021 */
2022 CPUMHyperSetCtxCore(pVM, NULL);
2023    memset(pVM->vmm.s.pbHCStack, 0xaa, VMM_STACK_SIZE); /* Poison the stack. */
2024 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom - cArgs * sizeof(RTGCUINTPTR));
2025 PRTGCUINTPTR pFrame = (PRTGCUINTPTR)(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE) - cArgs;
2026 int i = cArgs;
2027 while (i-- > 0)
2028 *pFrame++ = va_arg(args, RTGCUINTPTR);
2029
2030 CPUMPushHyper(pVM, cArgs * sizeof(RTGCUINTPTR)); /* stack frame size */
2031 CPUMPushHyper(pVM, GCPtrEntry); /* what to call */
2032 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
2033
2034 /*
2035 * We hide log flushes (outer) and hypervisor interrupts (inner).
2036 */
2037 for (;;)
2038 {
2039 int rc;
2040 do
2041 {
2042#ifdef NO_SUPCALLR0VMM
2043 rc = VERR_GENERAL_FAILURE;
2044#else
2045 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
2046#endif
2047 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2048
2049 /*
2050 * Flush the logs.
2051 */
2052#ifdef LOG_ENABLED
2053 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
2054 if ( pLogger
2055 && pLogger->offScratch > 0)
2056 RTLogFlushGC(NULL, pLogger);
2057#endif
2058#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
2059 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
2060 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2061 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
2062#endif
2063 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2064 VMMR3FatalDump(pVM, rc);
2065 if (rc != VINF_VMM_CALL_HOST)
2066 {
2067 Log2(("VMMR3CallGCV: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
2068 return rc;
2069 }
2070 rc = vmmR3ServiceCallHostRequest(pVM);
2071 if (VBOX_FAILURE(rc))
2072 return rc;
2073 }
2074}
2075
2076
2077/**
2078 * Resumes executing hypervisor code when interrupted
2079 * by a queue flush or a debug event.
2080 *
2081 * @returns VBox status code.
2082 * @param pVM VM handle.
2083 */
2084VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM)
2085{
2086 Log(("VMMR3ResumeHyper: eip=%VGv esp=%VGv\n", CPUMGetHyperEIP(pVM), CPUMGetHyperESP(pVM)));
2087
2088 /*
2089 * We hide log flushes (outer) and hypervisor interrupts (inner).
2090 */
2091 for (;;)
2092 {
2093 int rc;
2094 do
2095 {
2096#ifdef NO_SUPCALLR0VMM
2097 rc = VERR_GENERAL_FAILURE;
2098#else
2099 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
2100#endif
2101 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2102
2103 /*
2104     * Flush the loggers.
2105 */
2106#ifdef LOG_ENABLED
2107 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
2108 if ( pLogger
2109 && pLogger->offScratch > 0)
2110 RTLogFlushGC(NULL, pLogger);
2111#endif
2112#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
2113 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
2114 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2115 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
2116#endif
2117 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2118 VMMR3FatalDump(pVM, rc);
2119 if (rc != VINF_VMM_CALL_HOST)
2120 {
2121 Log(("VMMR3ResumeHyper: returns %Vrc\n", rc));
2122 return rc;
2123 }
2124 rc = vmmR3ServiceCallHostRequest(pVM);
2125 if (VBOX_FAILURE(rc))
2126 return rc;
2127 }
2128}
2129
2130
2131/**
2132 * Service a call to the ring-3 host code.
2133 *
2134 * @returns VBox status code.
2135 * @param pVM VM handle.
2136 * @remark Careful with critsects.
2137 */
2138static int vmmR3ServiceCallHostRequest(PVM pVM)
2139{
2140 switch (pVM->vmm.s.enmCallHostOperation)
2141 {
2142 /*
2143 * Acquire the PDM lock.
2144 */
2145 case VMMCALLHOST_PDM_LOCK:
2146 {
2147 pVM->vmm.s.rcCallHost = PDMR3LockCall(pVM);
2148 break;
2149 }
2150
2151 /*
2152 * Flush a PDM queue.
2153 */
2154 case VMMCALLHOST_PDM_QUEUE_FLUSH:
2155 {
2156 PDMR3QueueFlushWorker(pVM, NULL);
2157 pVM->vmm.s.rcCallHost = VINF_SUCCESS;
2158 break;
2159 }
2160
2161 /*
2162 * Grow the PGM pool.
2163 */
2164 case VMMCALLHOST_PGM_POOL_GROW:
2165 {
2166 pVM->vmm.s.rcCallHost = PGMR3PoolGrow(pVM);
2167 break;
2168 }
2169
2170 /*
2171         * Maps a page allocation chunk into ring-3 so ring-0 can use it.
2172 */
2173 case VMMCALLHOST_PGM_MAP_CHUNK:
2174 {
2175 pVM->vmm.s.rcCallHost = PGMR3PhysChunkMap(pVM, pVM->vmm.s.u64CallHostArg);
2176 break;
2177 }
2178
2179 /*
2180 * Allocates more handy pages.
2181 */
2182 case VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES:
2183 {
2184 pVM->vmm.s.rcCallHost = PGMR3PhysAllocateHandyPages(pVM);
2185 break;
2186 }
2187#ifndef NEW_PHYS_CODE
2188
2189 case VMMCALLHOST_PGM_RAM_GROW_RANGE:
2190 {
2191 pVM->vmm.s.rcCallHost = PGM3PhysGrowRange(pVM, pVM->vmm.s.u64CallHostArg);
2192 break;
2193 }
2194#endif
2195
2196 /*
2197 * Acquire the PGM lock.
2198 */
2199 case VMMCALLHOST_PGM_LOCK:
2200 {
2201 pVM->vmm.s.rcCallHost = PGMR3LockCall(pVM);
2202 break;
2203 }
2204
2205 /*
2206 * Flush REM handler notifications.
2207 */
2208 case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
2209 {
2210 REMR3ReplayHandlerNotifications(pVM);
2211 break;
2212 }
2213
2214 /*
2215 * This is a noop. We just take this route to avoid unnecessary
2216 * tests in the loops.
2217 */
2218 case VMMCALLHOST_VMM_LOGGER_FLUSH:
2219 break;
2220
2221 /*
2222 * Set the VM error message.
2223 */
2224 case VMMCALLHOST_VM_SET_ERROR:
2225 VMR3SetErrorWorker(pVM);
2226 break;
2227
2228 /*
2229 * Set the VM runtime error message.
2230 */
2231 case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
2232 VMR3SetRuntimeErrorWorker(pVM);
2233 break;
2234
2235 default:
2236 AssertMsgFailed(("enmCallHostOperation=%d\n", pVM->vmm.s.enmCallHostOperation));
2237 return VERR_INTERNAL_ERROR;
2238 }
2239
2240 pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
2241 return VINF_SUCCESS;
2242}
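/* The full round trip, as seen from the run loops above: the GC/R0 side stores
 * the operation in pVM->vmm.s.enmCallHostOperation (argument in u64CallHostArg)
 * and returns VINF_VMM_CALL_HOST; the ring-3 loop then calls this function,
 * which leaves the result for the other side in pVM->vmm.s.rcCallHost. */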
2243
2244
2245
2246/**
2247 * Structure to pass to DBGFR3Info() and for doing all other
2248 * output during fatal dump.
2249 */
2250typedef struct VMMR3FATALDUMPINFOHLP
2251{
2252 /** The helper core. */
2253 DBGFINFOHLP Core;
2254 /** The release logger instance. */
2255 PRTLOGGER pRelLogger;
2256 /** The saved release logger flags. */
2257 RTUINT fRelLoggerFlags;
2258 /** The logger instance. */
2259 PRTLOGGER pLogger;
2260 /** The saved logger flags. */
2261 RTUINT fLoggerFlags;
2262 /** The saved logger destination flags. */
2263 RTUINT fLoggerDestFlags;
2264 /** Whether to output to stderr or not. */
2265 bool fStdErr;
2266} VMMR3FATALDUMPINFOHLP, *PVMMR3FATALDUMPINFOHLP;
2267typedef const VMMR3FATALDUMPINFOHLP *PCVMMR3FATALDUMPINFOHLP;
2268
2269
2270/**
2271 * Print formatted string.
2272 *
2273 * @param pHlp Pointer to this structure.
2274 * @param pszFormat The format string.
2275 * @param ... Arguments.
2276 */
2277static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
2278{
2279 va_list args;
2280 va_start(args, pszFormat);
2281 pHlp->pfnPrintfV(pHlp, pszFormat, args);
2282 va_end(args);
2283}
2284
2285
2286/**
2287 * Print formatted string.
2288 *
2289 * @param pHlp Pointer to this structure.
2290 * @param pszFormat The format string.
2291 * @param args Argument list.
2292 */
2293static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args)
2294{
2295 PCVMMR3FATALDUMPINFOHLP pMyHlp = (PCVMMR3FATALDUMPINFOHLP)pHlp;
2296
2297 if (pMyHlp->pRelLogger)
2298 {
2299 va_list args2;
2300 va_copy(args2, args);
2301 RTLogLoggerV(pMyHlp->pRelLogger, pszFormat, args2);
2302 va_end(args2);
2303 }
2304 if (pMyHlp->pLogger)
2305 {
2306 va_list args2;
2307 va_copy(args2, args);
2308        RTLogLoggerV(pMyHlp->pLogger, pszFormat, args2);
2309 va_end(args2);
2310 }
2311 if (pMyHlp->fStdErr)
2312 {
2313 va_list args2;
2314 va_copy(args2, args);
2315        RTStrmPrintfV(g_pStdErr, pszFormat, args2);
2316 va_end(args2);
2317 }
2318}
2319
2320
2321/**
2322 * Initializes the fatal dump output helper.
2323 *
2324 * @param pHlp The structure to initialize.
2325 */
2326static void vmmR3FatalDumpInfoHlpInit(PVMMR3FATALDUMPINFOHLP pHlp)
2327{
2328 memset(pHlp, 0, sizeof(*pHlp));
2329
2330 pHlp->Core.pfnPrintf = vmmR3FatalDumpInfoHlp_pfnPrintf;
2331 pHlp->Core.pfnPrintfV = vmmR3FatalDumpInfoHlp_pfnPrintfV;
2332
2333 /*
2334 * The loggers.
2335 */
2336 pHlp->pRelLogger = RTLogRelDefaultInstance();
2337#ifndef LOG_ENABLED
2338 if (!pHlp->pRelLogger)
2339#endif
2340 pHlp->pLogger = RTLogDefaultInstance();
2341
2342 if (pHlp->pRelLogger)
2343 {
2344 pHlp->fRelLoggerFlags = pHlp->pRelLogger->fFlags;
2345 pHlp->pRelLogger->fFlags &= ~(RTLOGFLAGS_BUFFERED | RTLOGFLAGS_DISABLED);
2346 }
2347
2348 if (pHlp->pLogger)
2349 {
2350 pHlp->fLoggerFlags = pHlp->pLogger->fFlags;
2351 pHlp->fLoggerDestFlags = pHlp->pLogger->fDestFlags;
2352 pHlp->pLogger->fFlags &= ~(RTLOGFLAGS_BUFFERED | RTLOGFLAGS_DISABLED);
2353#ifndef DEBUG_sandervl
2354 pHlp->pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
2355#endif
2356 }
2357
2358 /*
2359     * Check if we need to write to stderr.
2360 */
2361 pHlp->fStdErr = (!pHlp->pRelLogger || !(pHlp->pRelLogger->fDestFlags & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)))
2362 && (!pHlp->pLogger || !(pHlp->pLogger->fDestFlags & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)));
2363}
2364
2365
2366/**
2367 * Deletes the fatal dump output helper.
2368 *
2369 * @param pHlp The structure to delete.
2370 */
2371static void vmmR3FatalDumpInfoHlpDelete(PVMMR3FATALDUMPINFOHLP pHlp)
2372{
2373 if (pHlp->pRelLogger)
2374 {
2375 RTLogFlush(pHlp->pRelLogger);
2376 pHlp->pRelLogger->fFlags = pHlp->fRelLoggerFlags;
2377 }
2378
2379 if (pHlp->pLogger)
2380 {
2381 RTLogFlush(pHlp->pLogger);
2382 pHlp->pLogger->fFlags = pHlp->fLoggerFlags;
2383 pHlp->pLogger->fDestFlags = pHlp->fLoggerDestFlags;
2384 }
2385}
2386
2387
2388/**
2389 * Dumps the VM state on a fatal error.
2390 *
2391 * @param pVM VM Handle.
2392 * @param rcErr VBox status code.
2393 */
2394VMMR3DECL(void) VMMR3FatalDump(PVM pVM, int rcErr)
2395{
2396 /*
2397 * Create our output helper and sync it with the log settings.
2398 * This helper will be used for all the output.
2399 */
2400 VMMR3FATALDUMPINFOHLP Hlp;
2401 PCDBGFINFOHLP pHlp = &Hlp.Core;
2402 vmmR3FatalDumpInfoHlpInit(&Hlp);
2403
2404 /*
2405 * Header.
2406 */
2407 pHlp->pfnPrintf(pHlp,
2408 "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
2409 "!!\n"
2410 "!! Guru Meditation %d (%Vrc)\n"
2411 "!!\n",
2412 rcErr, rcErr);
2413
2414 /*
2415 * Continue according to context.
2416 */
2417 bool fDoneHyper = false;
2418 switch (rcErr)
2419 {
2420 /*
2421         * Hypervisor errors.
2422 */
2423 case VINF_EM_DBG_HYPER_ASSERTION:
2424 pHlp->pfnPrintf(pHlp, "%s%s!!\n", VMMR3GetGCAssertMsg1(pVM), VMMR3GetGCAssertMsg2(pVM));
2425 /* fall thru */
2426 case VERR_TRPM_DONT_PANIC:
2427 case VERR_TRPM_PANIC:
2428 case VINF_EM_RAW_STALE_SELECTOR:
2429 case VINF_EM_RAW_IRET_TRAP:
2430 case VINF_EM_DBG_HYPER_BREAKPOINT:
2431 case VINF_EM_DBG_HYPER_STEPPED:
2432 {
2433 /* Trap? */
2434 uint32_t uEIP = CPUMGetHyperEIP(pVM);
2435 TRPMEVENT enmType;
2436 uint8_t u8TrapNo = 0xce;
2437 RTGCUINT uErrorCode = 0xdeadface;
2438 RTGCUINTPTR uCR2 = 0xdeadface;
2439 int rc2 = TRPMQueryTrapAll(pVM, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
2440 if (VBOX_SUCCESS(rc2))
2441 pHlp->pfnPrintf(pHlp,
2442 "!! TRAP=%02x ERRCD=%VGv CR2=%VGv EIP=%VGv Type=%d\n",
2443 u8TrapNo, uErrorCode, uCR2, uEIP, enmType);
2444 else
2445 pHlp->pfnPrintf(pHlp,
2446 "!! EIP=%VGv NOTRAP\n",
2447 uEIP);
2448
2449 /*
2450             * Try to figure out where EIP is.
2451 */
2452 /** @todo make query call for core code or move this function to VMM. */
2453 /* core code? */
2454 //if (uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode < pVM->vmm.s.cbCoreCode)
2455 // pHlp->pfnPrintf(pHlp,
2456 // "!! EIP is in CoreCode, offset %#x\n",
2457 // uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode);
2458 //else
2459 { /* ask PDM */
2460 /** @todo ask DBGFR3Sym later. */
2461 char szModName[64];
2462 RTGCPTR GCPtrMod;
2463 char szNearSym1[260];
2464 RTGCPTR GCPtrNearSym1;
2465 char szNearSym2[260];
2466 RTGCPTR GCPtrNearSym2;
2467 int rc = PDMR3QueryModFromEIP(pVM, uEIP,
2468 &szModName[0], sizeof(szModName), &GCPtrMod,
2469 &szNearSym1[0], sizeof(szNearSym1), &GCPtrNearSym1,
2470 &szNearSym2[0], sizeof(szNearSym2), &GCPtrNearSym2);
2471 if (VBOX_SUCCESS(rc))
2472 {
2473 pHlp->pfnPrintf(pHlp,
2474 "!! EIP in %s (%p) at rva %x near symbols:\n"
2475 "!! %VGv rva %VGv off %08x %s\n"
2476 "!! %VGv rva %VGv off -%08x %s\n",
2477 szModName, GCPtrMod, (unsigned)(uEIP - GCPtrMod),
2478 GCPtrNearSym1, GCPtrNearSym1 - GCPtrMod, (unsigned)(uEIP - GCPtrNearSym1), szNearSym1,
2479 GCPtrNearSym2, GCPtrNearSym2 - GCPtrMod, (unsigned)(GCPtrNearSym2 - uEIP), szNearSym2);
2480 }
2481 else
2482 pHlp->pfnPrintf(pHlp,
2483 "!! EIP is not in any code known to VMM!\n");
2484 }
2485
2486 /* Disassemble the instruction. */
2487 char szInstr[256];
2488 rc2 = DBGFR3DisasInstrEx(pVM, 0, 0, DBGF_DISAS_FLAGS_CURRENT_HYPER, &szInstr[0], sizeof(szInstr), NULL);
2489 if (VBOX_SUCCESS(rc2))
2490 pHlp->pfnPrintf(pHlp,
2491 "!! %s\n", szInstr);
2492
2493 /* Dump the hypervisor cpu state. */
2494 pHlp->pfnPrintf(pHlp,
2495 "!!\n"
2496 "!!\n"
2497 "!!\n");
2498 rc2 = DBGFR3Info(pVM, "cpumhyper", "verbose", pHlp);
2499 fDoneHyper = true;
2500
2501 /* Callstack. */
2502 DBGFSTACKFRAME Frame = {0};
2503 rc2 = DBGFR3StackWalkBeginHyper(pVM, &Frame);
2504 if (VBOX_SUCCESS(rc2))
2505 {
2506 pHlp->pfnPrintf(pHlp,
2507 "!!\n"
2508 "!! Call Stack:\n"
2509 "!!\n"
2510 "EBP Ret EBP Ret CS:EIP Arg0 Arg1 Arg2 Arg3 CS:EIP Symbol [line]\n");
2511 do
2512 {
2513 pHlp->pfnPrintf(pHlp,
2514 "%08RX32 %08RX32 %04RX32:%08RX32 %08RX32 %08RX32 %08RX32 %08RX32",
2515 (uint32_t)Frame.AddrFrame.off,
2516 (uint32_t)Frame.AddrReturnFrame.off,
2517 (uint32_t)Frame.AddrReturnPC.Sel,
2518 (uint32_t)Frame.AddrReturnPC.off,
2519 Frame.Args.au32[0],
2520 Frame.Args.au32[1],
2521 Frame.Args.au32[2],
2522 Frame.Args.au32[3]);
2523 pHlp->pfnPrintf(pHlp, " %RTsel:%08RGv", Frame.AddrPC.Sel, Frame.AddrPC.off);
2524 if (Frame.pSymPC)
2525 {
2526 RTGCINTPTR offDisp = Frame.AddrPC.FlatPtr - Frame.pSymPC->Value;
2527 if (offDisp > 0)
2528 pHlp->pfnPrintf(pHlp, " %s+%llx", Frame.pSymPC->szName, (int64_t)offDisp);
2529 else if (offDisp < 0)
2530 pHlp->pfnPrintf(pHlp, " %s-%llx", Frame.pSymPC->szName, -(int64_t)offDisp);
2531 else
2532 pHlp->pfnPrintf(pHlp, " %s", Frame.pSymPC->szName);
2533 }
2534 if (Frame.pLinePC)
2535 pHlp->pfnPrintf(pHlp, " [%s @ 0i%d]", Frame.pLinePC->szFilename, Frame.pLinePC->uLineNo);
2536 pHlp->pfnPrintf(pHlp, "\n");
2537
2538 /* next */
2539 rc2 = DBGFR3StackWalkNext(pVM, &Frame);
2540 } while (VBOX_SUCCESS(rc2));
2541 DBGFR3StackWalkEnd(pVM, &Frame);
2542 }
2543
2544 /* raw stack */
2545 pHlp->pfnPrintf(pHlp,
2546 "!!\n"
2547 "!! Raw stack (mind the direction).\n"
2548 "!!\n"
2549 "%.*Vhxd\n",
2550 VMM_STACK_SIZE, (char *)pVM->vmm.s.pbHCStack);
2551 break;
2552 }
2553
2554 default:
2555 {
2556 break;
2557 }
2558
2559 } /* switch (rcErr) */
2560
2561
2562 /*
2563 * Dump useful state information.
2564 */
2565 /** @todo convert these dumpers to DBGFR3Info() handlers!!! */
2566 pHlp->pfnPrintf(pHlp,
2567 "!!\n"
2568 "!! PGM Access Handlers & Stuff:\n"
2569 "!!\n");
2570 PGMR3DumpMappings(pVM);
2571
2572
2573 /*
2574 * Generic info dumper loop.
2575 */
2576 static struct
2577 {
2578 const char *pszInfo;
2579 const char *pszArgs;
2580 } const aInfo[] =
2581 {
2582 { "hma", NULL },
2583 { "cpumguest", "verbose" },
2584 { "cpumhyper", "verbose" },
2585 { "cpumhost", "verbose" },
2586 { "mode", "all" },
2587 { "cpuid", "verbose" },
2588 { "gdt", NULL },
2589 { "ldt", NULL },
2590 //{ "tss", NULL },
2591 { "ioport", NULL },
2592 { "mmio", NULL },
2593 { "phys", NULL },
2594 //{ "pgmpd", NULL }, - doesn't always work at init time...
2595 { "timers", NULL },
2596 { "activetimers", NULL },
2597 { "handlers", "phys virt stats" },
2598 { "cfgm", NULL },
2599 };
2600 for (unsigned i = 0; i < ELEMENTS(aInfo); i++)
2601 {
2602 if (fDoneHyper && !strcmp(aInfo[i].pszInfo, "cpumhyper"))
2603 continue;
2604 pHlp->pfnPrintf(pHlp,
2605 "!!\n"
2606 "!! {%s, %s}\n"
2607 "!!\n",
2608 aInfo[i].pszInfo, aInfo[i].pszArgs);
2609 DBGFR3Info(pVM, aInfo[i].pszInfo, aInfo[i].pszArgs, pHlp);
2610 }
2611
2612 /* done */
2613 pHlp->pfnPrintf(pHlp,
2614 "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
2615
2616
2617 /*
2618 * Delete the output instance (flushing and restoring of flags).
2619 */
2620 vmmR3FatalDumpInfoHlpDelete(&Hlp);
2621}
2622
2623
2624
2625/**
2626 * Displays the force action flags.
2627 *
2628 * @param pVM The VM handle.
2629 * @param pHlp The output helpers.
2630 * @param pszArgs The additional arguments (ignored).
2631 */
2632static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2633{
2634 const uint32_t fForcedActions = pVM->fForcedActions;
2635
2636 pHlp->pfnPrintf(pHlp, "Forced action Flags: %#RX32", fForcedActions);
2637
2638 /* show the flag mnemonics */
2639 int c = 0;
2640 uint32_t f = fForcedActions;
2641#define PRINT_FLAG(flag) do { \
2642 if (f & (flag)) \
2643 { \
2644 static const char *s_psz = #flag; \
2645 if (!(c % 6)) \
2646 pHlp->pfnPrintf(pHlp, "%s\n %s", c ? "," : "", s_psz + 6); \
2647 else \
2648 pHlp->pfnPrintf(pHlp, ", %s", s_psz + 6); \
2649 c++; \
2650 f &= ~(flag); \
2651 } \
2652 } while (0)
2653 PRINT_FLAG(VM_FF_INTERRUPT_APIC);
2654 PRINT_FLAG(VM_FF_INTERRUPT_PIC);
2655 PRINT_FLAG(VM_FF_TIMER);
2656 PRINT_FLAG(VM_FF_PDM_QUEUES);
2657 PRINT_FLAG(VM_FF_PDM_DMA);
2658 PRINT_FLAG(VM_FF_PDM_CRITSECT);
2659 PRINT_FLAG(VM_FF_DBGF);
2660 PRINT_FLAG(VM_FF_REQUEST);
2661 PRINT_FLAG(VM_FF_TERMINATE);
2662 PRINT_FLAG(VM_FF_RESET);
2663 PRINT_FLAG(VM_FF_PGM_SYNC_CR3);
2664 PRINT_FLAG(VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
2665 PRINT_FLAG(VM_FF_TRPM_SYNC_IDT);
2666 PRINT_FLAG(VM_FF_SELM_SYNC_TSS);
2667 PRINT_FLAG(VM_FF_SELM_SYNC_GDT);
2668 PRINT_FLAG(VM_FF_SELM_SYNC_LDT);
2669 PRINT_FLAG(VM_FF_INHIBIT_INTERRUPTS);
2670 PRINT_FLAG(VM_FF_CSAM_SCAN_PAGE);
2671 PRINT_FLAG(VM_FF_CSAM_PENDING_ACTION);
2672 PRINT_FLAG(VM_FF_TO_R3);
2673 PRINT_FLAG(VM_FF_DEBUG_SUSPEND);
2674 if (f)
2675 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
2676 else
2677 pHlp->pfnPrintf(pHlp, "\n");
2678#undef PRINT_FLAG
2679
2680 /* the groups */
2681 c = 0;
2682#define PRINT_GROUP(grp) do { \
2683 if (fForcedActions & (grp)) \
2684 { \
2685 static const char *s_psz = #grp; \
2686 if (!(c % 5)) \
2687 pHlp->pfnPrintf(pHlp, "%s %s", c ? ",\n" : "Groups:\n", s_psz + 6); \
2688 else \
2689 pHlp->pfnPrintf(pHlp, ", %s", s_psz + 6); \
2690 c++; \
2691 } \
2692 } while (0)
2693 PRINT_GROUP(VM_FF_EXTERNAL_SUSPENDED_MASK);
2694 PRINT_GROUP(VM_FF_EXTERNAL_HALTED_MASK);
2695 PRINT_GROUP(VM_FF_HIGH_PRIORITY_PRE_MASK);
2696 PRINT_GROUP(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK);
2697 PRINT_GROUP(VM_FF_HIGH_PRIORITY_POST_MASK);
2698 PRINT_GROUP(VM_FF_NORMAL_PRIORITY_POST_MASK);
2699 PRINT_GROUP(VM_FF_NORMAL_PRIORITY_MASK);
2700 PRINT_GROUP(VM_FF_RESUME_GUEST_MASK);
2701 PRINT_GROUP(VM_FF_ALL_BUT_RAW_MASK);
2702 if (c)
2703 pHlp->pfnPrintf(pHlp, "\n");
2704#undef PRINT_GROUP
2705}
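/* Illustrative output only (the flag value is hypothetical; names are printed
 * with the "VM_FF_" prefix stripped via the s_psz + 6 trick above):
 * @code
 *      Forced action Flags: 0x00000041
 *       TIMER, REQUEST
 *      Groups:
 *       EXTERNAL_HALTED_MASK, NORMAL_PRIORITY_MASK
 * @endcode
 */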
2706