VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 91243

Last change on this file since 91243 was 91243, checked in by vboxsync, 4 years ago

VMM/PGMPool: Call PGMR0PoolGrow directly from ring-0 instead of going via ring-3. bugref:10093

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 131.6 KB
1/* $Id: VMMR0.cpp 91243 2021-09-15 10:19:31Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mem.h>
58#include <iprt/memobj.h>
59#include <iprt/mp.h>
60#include <iprt/once.h>
61#include <iprt/semaphore.h>
62#include <iprt/spinlock.h>
63#include <iprt/stdarg.h>
64#include <iprt/string.h>
65#include <iprt/thread.h>
66#include <iprt/timer.h>
67#include <iprt/time.h>
68
69#include "dtrace/VBoxVMM.h"
70
71
72#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
73# pragma intrinsic(_AddressOfReturnAddress)
74#endif
75
76#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
77# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
78#endif
79
80
81/*********************************************************************************************************************************
82* Internal Functions *
83*********************************************************************************************************************************/
84RT_C_DECLS_BEGIN
85#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
86extern uint64_t __udivdi3(uint64_t, uint64_t);
87extern uint64_t __umoddi3(uint64_t, uint64_t);
88#endif
89RT_C_DECLS_END
90static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, size_t idxLogger);
91static int vmmR0LogFlusher(PGVM pGVM);
92static int vmmR0LogWaitFlushed(PGVM pGVM, VMCPUID idCpu, size_t idxLogger);
93static int vmmR0InitLoggers(PGVM pGVM);
94static void vmmR0CleanupLoggers(PGVM pGVM);
95
96
97/*********************************************************************************************************************************
98* Global Variables *
99*********************************************************************************************************************************/
100/** Drag in necessary library bits.
101 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
102struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
103{
104 { (PFNRT)RTCrc32 },
105 { (PFNRT)RTOnce },
106#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
107 { (PFNRT)__udivdi3 },
108 { (PFNRT)__umoddi3 },
109#endif
110 { NULL }
111};
112
113#ifdef RT_OS_SOLARIS
114/* Dependency information for the native solaris loader. */
115extern "C" { char _depends_on[] = "vboxdrv"; }
116#endif
117
118
119/**
120 * Initialize the module.
121 * This is called when we're first loaded.
122 *
123 * @returns 0 on success.
124 * @returns VBox status on failure.
125 * @param hMod Image handle for use in APIs.
126 */
127DECLEXPORT(int) ModuleInit(void *hMod)
128{
129#ifdef VBOX_WITH_DTRACE_R0
130 /*
131 * The first thing to do is register the static tracepoints.
132 * (Deregistration is automatic.)
133 */
134 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
135 if (RT_FAILURE(rc2))
136 return rc2;
137#endif
138 LogFlow(("ModuleInit:\n"));
139
140#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
141 /*
142 * Display the CMOS debug code.
143 */
144 ASMOutU8(0x72, 0x03);
145 uint8_t bDebugCode = ASMInU8(0x73);
146 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
147 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
148#endif
149
150 /*
151 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
152 */
153 int rc = vmmInitFormatTypes();
154 if (RT_SUCCESS(rc))
155 {
156 rc = GVMMR0Init();
157 if (RT_SUCCESS(rc))
158 {
159 rc = GMMR0Init();
160 if (RT_SUCCESS(rc))
161 {
162 rc = HMR0Init();
163 if (RT_SUCCESS(rc))
164 {
165 PDMR0Init(hMod);
166
167 rc = PGMRegisterStringFormatTypes();
168 if (RT_SUCCESS(rc))
169 {
170 rc = IntNetR0Init();
171 if (RT_SUCCESS(rc))
172 {
173#ifdef VBOX_WITH_PCI_PASSTHROUGH
174 rc = PciRawR0Init();
175#endif
176 if (RT_SUCCESS(rc))
177 {
178 rc = CPUMR0ModuleInit();
179 if (RT_SUCCESS(rc))
180 {
181#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
182 rc = vmmR0TripleFaultHackInit();
183 if (RT_SUCCESS(rc))
184#endif
185 {
186 if (RT_SUCCESS(rc))
187 {
188 LogFlow(("ModuleInit: returns success\n"));
189 return VINF_SUCCESS;
190 }
191 }
192
193 /*
194 * Bail out.
195 */
196#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
197 vmmR0TripleFaultHackTerm();
198#endif
199 }
200 else
201 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
202#ifdef VBOX_WITH_PCI_PASSTHROUGH
203 PciRawR0Term();
204#endif
205 }
206 else
207 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
208 IntNetR0Term();
209 }
210 else
211 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
212 PGMDeregisterStringFormatTypes();
213 }
214 else
215 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
216 HMR0Term();
217 }
218 else
219 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
220 GMMR0Term();
221 }
222 else
223 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
224 GVMMR0Term();
225 }
226 else
227 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
228 vmmTermFormatTypes();
229 }
230 else
231 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
232
233 LogFlow(("ModuleInit: failed %Rrc\n", rc));
234 return rc;
235}
236
237
238/**
239 * Terminate the module.
240 * This is called when we're finally unloaded.
241 *
242 * @param hMod Image handle for use in APIs.
243 */
244DECLEXPORT(void) ModuleTerm(void *hMod)
245{
246 NOREF(hMod);
247 LogFlow(("ModuleTerm:\n"));
248
249 /*
250 * Terminate the CPUM module (Local APIC cleanup).
251 */
252 CPUMR0ModuleTerm();
253
254 /*
255 * Terminate the internal network service.
256 */
257 IntNetR0Term();
258
259 /*
260 * PGM (Darwin), HM and PciRaw global cleanup.
261 */
262#ifdef VBOX_WITH_PCI_PASSTHROUGH
263 PciRawR0Term();
264#endif
265 PGMDeregisterStringFormatTypes();
266 HMR0Term();
267#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
268 vmmR0TripleFaultHackTerm();
269#endif
270
271 /*
272 * Destroy the GMM and GVMM instances.
273 */
274 GMMR0Term();
275 GVMMR0Term();
276
277 vmmTermFormatTypes();
278
279 LogFlow(("ModuleTerm: returns\n"));
280}
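/*
 * Note on the init/term pairing (informal): ModuleTerm above undoes the work
 * of ModuleInit in roughly reverse order, which is also why ModuleInit unwinds
 * through nested else-branches on failure - every component that initialized
 * successfully gets its matching *Term() call and nothing more.
 */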
281
282
283/**
284 * Initializes VMM specific members when the GVM structure is created,
285 * allocating loggers and stuff.
286 *
287 * The loggers are allocated here so that we can update their settings before
288 * doing VMMR0_DO_VMMR0_INIT and have correct logging at that time.
289 *
290 * @returns VBox status code.
291 * @param pGVM The global (ring-0) VM structure.
292 */
293VMMR0_INT_DECL(int) VMMR0InitPerVMData(PGVM pGVM)
294{
295 AssertCompile(sizeof(pGVM->vmmr0.s) <= sizeof(pGVM->vmmr0.padding));
296
297 /*
298 * Initialize all members first.
299 */
300 pGVM->vmmr0.s.fCalledInitVm = false;
301 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
302 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
303 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
304 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
305 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK;
306 pGVM->vmmr0.s.LogFlusher.hThread = NIL_RTNATIVETHREAD;
307 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT;
308 pGVM->vmmr0.s.LogFlusher.idxRingHead = 0;
309 pGVM->vmmr0.s.LogFlusher.idxRingTail = 0;
310 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false;
311
312 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
313 {
314 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
315 Assert(pGVCpu->idHostCpu == NIL_RTCPUID);
316 Assert(pGVCpu->iHostCpuSet == UINT32_MAX);
317 pGVCpu->vmmr0.s.pPreemptState = NULL;
318 pGVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
319 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
320 pGVCpu->vmmr0.s.u.aLoggers[iLogger].hEventFlushWait = NIL_RTSEMEVENT;
321 }
322
323 /*
324 * Create the loggers.
325 */
326 return vmmR0InitLoggers(pGVM);
327}
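/*
 * Rough call-order sketch (informal, pieced together from the comment above
 * and the request dispatcher further down in this file): the loggers are
 * created here, during GVM creation, so ring-3 can push logger settings down
 * before the real per-VM init runs:
 *
 *   GVM creation                  -> VMMR0InitPerVMData()   (loggers created)
 *   VMMR0_DO_VMMR0_UPDATE_LOGGERS -> vmmR0UpdateLoggers()   (optional)
 *   VMMR0_DO_VMMR0_INIT           -> vmmR0InitVM()          (EMT(0))
 *   VMMR0_DO_VMMR0_INIT_EMT       -> vmmR0InitVMEmt()       (each EMT)
 */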
328
329
330/**
331 * Initializes the R0 driver for a particular VM instance.
332 *
333 * @returns VBox status code.
334 *
335 * @param pGVM The global (ring-0) VM structure.
336 * @param uSvnRev The SVN revision of the ring-3 part.
337 * @param uBuildType Build type indicator.
338 * @thread EMT(0)
339 */
340static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
341{
342 /*
343 * Match the SVN revisions and build type.
344 */
345 if (uSvnRev != VMMGetSvnRev())
346 {
347 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
348 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
349 return VERR_VMM_R0_VERSION_MISMATCH;
350 }
351 if (uBuildType != vmmGetBuildType())
352 {
353 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
354 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
355 return VERR_VMM_R0_VERSION_MISMATCH;
356 }
357
358 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
359 if (RT_FAILURE(rc))
360 return rc;
361
362 /* Don't allow this to be called more than once. */
363 if (!pGVM->vmmr0.s.fCalledInitVm)
364 pGVM->vmmr0.s.fCalledInitVm = true;
365 else
366 return VERR_ALREADY_INITIALIZED;
367
368#ifdef LOG_ENABLED
369
370 /*
371 * Register the EMT R0 logger instance for VCPU 0.
372 */
373 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
374 if (pVCpu->vmmr0.s.u.s.Logger.pLogger)
375 {
376# if 0 /* testing of the logger. */
377 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
378 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
379 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
380 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
381
382 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
383 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
384 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
385 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
386
387 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
388 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
389 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
390 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
391
392 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
393 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
394 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
395 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
396 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
397 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
398
399 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
400 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
401
402 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
403 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
404 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
405# endif
406# ifdef VBOX_WITH_R0_LOGGING
407 Log(("Switching to per-thread logging instance %p (key=%p)\n", pVCpu->vmmr0.s.u.s.Logger.pLogger, pGVM->pSession));
408 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.u.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
409 pVCpu->vmmr0.s.u.s.Logger.fRegistered = true;
410# endif
411 }
412#endif /* LOG_ENABLED */
413
414 /*
415 * Check if the host supports high resolution timers or not.
416 */
417 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
418 && !RTTimerCanDoHighResolution())
419 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
420
421 /*
422 * Initialize the per VM data for GVMM and GMM.
423 */
424 rc = GVMMR0InitVM(pGVM);
425 if (RT_SUCCESS(rc))
426 {
427 /*
428 * Init HM, CPUM and PGM (Darwin only).
429 */
430 rc = HMR0InitVM(pGVM);
431 if (RT_SUCCESS(rc))
432 {
433 rc = CPUMR0InitVM(pGVM);
434 if (RT_SUCCESS(rc))
435 {
436 rc = PGMR0InitVM(pGVM);
437 if (RT_SUCCESS(rc))
438 {
439 rc = EMR0InitVM(pGVM);
440 if (RT_SUCCESS(rc))
441 {
442#ifdef VBOX_WITH_PCI_PASSTHROUGH
443 rc = PciRawR0InitVM(pGVM);
444#endif
445 if (RT_SUCCESS(rc))
446 {
447 rc = GIMR0InitVM(pGVM);
448 if (RT_SUCCESS(rc))
449 {
450 GVMMR0DoneInitVM(pGVM);
451
452 /*
453 * Collect a bit of info for the VM release log.
454 */
455 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
456 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
457 return rc;
458
459 /* bail out */
460 //GIMR0TermVM(pGVM);
461 }
462#ifdef VBOX_WITH_PCI_PASSTHROUGH
463 PciRawR0TermVM(pGVM);
464#endif
465 }
466 }
467 }
468 }
469 HMR0TermVM(pGVM);
470 }
471 }
472
473 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
474 return rc;
475}
476
477
478/**
479 * Does EMT specific VM initialization.
480 *
481 * @returns VBox status code.
482 * @param pGVM The ring-0 VM structure.
483 * @param idCpu The EMT that's calling.
484 */
485static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
486{
487 /* Paranoia (caller checked these already). */
488 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
489 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
490
491#if defined(LOG_ENABLED) && defined(VBOX_WITH_R0_LOGGING)
492 /*
493 * Registration of ring 0 loggers.
494 */
495 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
496 if ( pVCpu->vmmr0.s.u.s.Logger.pLogger
497 && !pVCpu->vmmr0.s.u.s.Logger.fRegistered)
498 {
499 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.u.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
500 pVCpu->vmmr0.s.u.s.Logger.fRegistered = true;
501 }
502#endif
503
504 return VINF_SUCCESS;
505}
506
507
508
509/**
510 * Terminates the R0 bits for a particular VM instance.
511 *
512 * This is normally called by ring-3 as part of the VM termination process, but
513 * may alternatively be called during the support driver session cleanup when
514 * the VM object is destroyed (see GVMM).
515 *
516 * @returns VBox status code.
517 *
518 * @param pGVM The global (ring-0) VM structure.
519 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
520 * thread.
521 * @thread EMT(0) or session clean up thread.
522 */
523VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
524{
525 /*
526 * Check EMT(0) claim if we're called from userland.
527 */
528 if (idCpu != NIL_VMCPUID)
529 {
530 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
531 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
532 if (RT_FAILURE(rc))
533 return rc;
534 }
535
536#ifdef VBOX_WITH_PCI_PASSTHROUGH
537 PciRawR0TermVM(pGVM);
538#endif
539
540 /*
541 * Tell GVMM what we're up to and check that we only do this once.
542 */
543 if (GVMMR0DoingTermVM(pGVM))
544 {
545 GIMR0TermVM(pGVM);
546
547 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
548 * here to make sure we don't leak any shared pages if we crash... */
549 HMR0TermVM(pGVM);
550 }
551
552 /*
553 * Deregister the logger for this EMT.
554 */
555 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
556
557 /*
558 * Start log flusher thread termination.
559 */
560 ASMAtomicWriteBool(&pGVM->vmmr0.s.LogFlusher.fThreadShutdown, true);
561 if (pGVM->vmmr0.s.LogFlusher.hEvent != NIL_RTSEMEVENT)
562 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent);
563
564 return VINF_SUCCESS;
565}
566
567
568/**
569 * This is called at the end of gvmmR0CleanupVM().
570 *
571 * @param pGVM The global (ring-0) VM structure.
572 */
573VMMR0_INT_DECL(void) VMMR0CleanupVM(PGVM pGVM)
574{
575 AssertCompile(NIL_RTTHREADCTXHOOK == (RTTHREADCTXHOOK)0); /* Depends on zero initialized memory working for NIL at the moment. */
576 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
577 {
578 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
579
580 /** @todo Can we busy wait here for all thread-context hooks to be
581 * deregistered before releasing (destroying) it? Only until we find a
582 * solution for not deregistering hooks every time we're leaving HMR0
583 * context. */
584 VMMR0ThreadCtxHookDestroyForEmt(pGVCpu);
585 }
586
587 vmmR0CleanupLoggers(pGVM);
588}
589
590
591/**
592 * An interrupt or unhalt force flag is set, deal with it.
593 *
594 * @returns VINF_SUCCESS (or VINF_EM_HALT).
595 * @param pVCpu The cross context virtual CPU structure.
596 * @param uMWait Result from EMMonitorWaitIsActive().
597 * @param enmInterruptibility Guest CPU interruptibility level.
598 */
599static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
600{
601 Assert(!TRPMHasTrap(pVCpu));
602 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
603 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
604
605 /*
606 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
607 */
608 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
609 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
610 {
611 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
612 {
613 uint8_t u8Interrupt = 0;
614 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
615 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
616 if (RT_SUCCESS(rc))
617 {
618 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
619
620 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
621 AssertRCSuccess(rc);
622 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
623 return rc;
624 }
625 }
626 }
627 /*
628 * SMI is not implemented yet, at least not here.
629 */
630 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
631 {
632 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #3\n", pVCpu->idCpu));
633 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
634 return VINF_EM_HALT;
635 }
636 /*
637 * NMI.
638 */
639 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
640 {
641 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
642 {
643 /** @todo later. */
644 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #2 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
645 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
646 return VINF_EM_HALT;
647 }
648 }
649 /*
650 * Nested-guest virtual interrupt.
651 */
652 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
653 {
654 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
655 {
656 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
657 * here before injecting the virtual interrupt. See emR3ForcedActions
658 * for details. */
659 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #1 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
660 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
661 return VINF_EM_HALT;
662 }
663 }
664
665 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
666 {
667 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
668 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (UNHALT)\n", pVCpu->idCpu));
669 return VINF_SUCCESS;
670 }
671 if (uMWait > 1)
672 {
673 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
674 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (uMWait=%u > 1)\n", pVCpu->idCpu, uMWait));
675 return VINF_SUCCESS;
676 }
677
678 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #0 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
679 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
680 return VINF_EM_HALT;
681}
682
683
684/**
685 * This does one round of vmR3HaltGlobal1Halt().
686 *
687 * The rationale here is that we'll reduce latency in interrupt situations if we
688 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
689 * MWAIT), but do one round of blocking here instead and hope the interrupt is
690 * raised in the meanwhile.
691 *
692 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
693 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
694 * ring-0 call (unless we're too close to a timer event). When the interrupt
695 * wakes us up, we'll return from ring-0 and EM will by instinct do a
696 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
697 * back to VMMR0EntryFast().
698 *
699 * @returns VINF_SUCCESS or VINF_EM_HALT.
700 * @param pGVM The ring-0 VM structure.
701 * @param pGVCpu The ring-0 virtual CPU structure.
702 *
703 * @todo r=bird: All the blocking/waiting and EMT management should move out of
704 * the VM module, probably to VMM. Then this would be more weird wrt
705 * parameters and statistics.
706 */
707static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
708{
709 /*
710 * Do spin stat historization.
711 */
712 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
713 { /* likely */ }
714 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
715 {
716 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
717 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
718 }
719 else
720 {
721 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
722 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
723 }
724
725 /*
726 * Flags that make us go to ring-3.
727 */
728 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
729 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
730 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
731 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
732 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
733 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
734 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
735 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
736
737 /*
738 * Check preconditions.
739 */
740 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
741 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
742 if ( pGVCpu->vmm.s.fMayHaltInRing0
743 && !TRPMHasTrap(pGVCpu)
744 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
745 || uMWait > 1))
746 {
747 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
748 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
749 {
750 /*
751 * Interrupts pending already?
752 */
753 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
754 APICUpdatePendingInterrupts(pGVCpu);
755
756 /*
757 * Flags that wake up from the halted state.
758 */
759 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
760 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
761
762 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
763 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
764 ASMNopPause();
765
766 /*
767 * Check out how long till the next timer event.
768 */
769 uint64_t u64Delta;
770 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
771
772 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
773 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
774 {
775 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
776 APICUpdatePendingInterrupts(pGVCpu);
777
778 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
779 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
780
781 /*
782 * Wait if there is enough time to the next timer event.
783 */
784 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
785 {
786 /* If there are enough other CPU cores around, we will procrastinate a
787 little before going to sleep, hoping for some device raising an
788 interrupt or similar. Though, the best thing here would be to
789 dynamically adjust the spin count according to its usefulness or
790 something... */
791 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
792 && RTMpGetOnlineCount() >= 4)
793 {
794 /** @todo Figure out how we can skip this if it hasn't helped recently...
795 * @bugref{9172#c12} */
796 uint32_t cSpinLoops = 42;
797 while (cSpinLoops-- > 0)
798 {
799 ASMNopPause();
800 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
801 APICUpdatePendingInterrupts(pGVCpu);
802 ASMNopPause();
803 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
804 {
805 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
806 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
807 return VINF_EM_HALT;
808 }
809 ASMNopPause();
810 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
811 {
812 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
813 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
814 return VINF_EM_HALT;
815 }
816 ASMNopPause();
817 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
818 {
819 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
820 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
821 }
822 ASMNopPause();
823 }
824 }
825
826 /*
827 * We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
828 * knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here).
829 * After changing the state we must recheck the force flags of course.
830 */
831 if (VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
832 {
833 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
834 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
835 {
836 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
837 APICUpdatePendingInterrupts(pGVCpu);
838
839 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
840 {
841 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
842 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
843 }
844
845 /* Okay, block! */
846 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
847 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
848 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
849 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
850 Log10(("vmmR0DoHalt: CPU%d: halted %llu ns\n", pGVCpu->idCpu, cNsElapsedSchedHalt));
851
852 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
853 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
854 if ( rc == VINF_SUCCESS
855 || rc == VERR_INTERRUPTED)
856 {
857 /* Keep some stats like ring-3 does. */
858 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
859 if (cNsOverslept > 50000)
860 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
861 else if (cNsOverslept < -50000)
862 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
863 else
864 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
865
866 /*
867 * Recheck whether we can resume execution or have to go to ring-3.
868 */
869 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
870 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
871 {
872 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
873 APICUpdatePendingInterrupts(pGVCpu);
874 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
875 {
876 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
877 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
878 }
879 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostNoInt);
880 Log12(("vmmR0DoHalt: CPU%d post #2 - No pending interrupt\n", pGVCpu->idCpu));
881 }
882 else
883 {
884 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostPendingFF);
885 Log12(("vmmR0DoHalt: CPU%d post #1 - Pending FF\n", pGVCpu->idCpu));
886 }
887 }
888 else
889 {
890 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
891 Log12(("vmmR0DoHalt: CPU%d GVMMR0SchedHalt failed: %Rrc\n", pGVCpu->idCpu, rc));
892 }
893 }
894 else
895 {
896 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
897 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
898 Log12(("vmmR0DoHalt: CPU%d failed #5 - Pending FF\n", pGVCpu->idCpu));
899 }
900 }
901 else
902 {
903 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
904 Log12(("vmmR0DoHalt: CPU%d failed #4 - enmState=%d\n", pGVCpu->idCpu, VMCPU_GET_STATE(pGVCpu)));
905 }
906 }
907 else
908 {
909 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3SmallDelta);
910 Log12(("vmmR0DoHalt: CPU%d failed #3 - delta too small: %RU64\n", pGVCpu->idCpu, u64Delta));
911 }
912 }
913 else
914 {
915 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
916 Log12(("vmmR0DoHalt: CPU%d failed #2 - Pending FF\n", pGVCpu->idCpu));
917 }
918 }
919 else
920 {
921 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
922 Log12(("vmmR0DoHalt: CPU%d failed #1 - Pending FF\n", pGVCpu->idCpu));
923 }
924 }
925 else
926 {
927 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
928 Log12(("vmmR0DoHalt: CPU%d failed #0 - fMayHaltInRing0=%d TRPMHasTrap=%d enmInt=%d uMWait=%u\n",
929 pGVCpu->idCpu, pGVCpu->vmm.s.fMayHaltInRing0, TRPMHasTrap(pGVCpu), enmInterruptibility, uMWait));
930 }
931
932 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
933 return VINF_EM_HALT;
934}
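/*
 * Informal summary of the halt path above: the EMT only blocks in ring-0 when
 * fMayHaltInRing0 is set and no force flag demands ring-3 service. Roughly:
 *   1. update the halt statistics that drive the spin heuristic,
 *   2. return VINF_EM_HALT if any VM/VMCPU force flag in fVmFFs/fCpuFFs is set,
 *   3. check for pending APIC/PIC/NMI/SMI interrupts and hand them to
 *      vmmR0DoHaltInterrupt,
 *   4. ask TMTimerPollGIP for the next timer deadline; if it is closer than
 *      cNsSpinBlockThreshold, go to ring-3 instead of blocking,
 *   5. optionally spin a little (only with >= 4 online CPUs) hoping an
 *      interrupt shows up,
 *   6. switch to VMCPUSTATE_STARTED_HALTED, recheck the force flags, and block
 *      in GVMMR0SchedHalt until the deadline or a wakeup, then recheck again.
 */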
935
936
937/**
938 * VMM ring-0 thread-context callback.
939 *
940 * This does common HM state updating and calls the HM-specific thread-context
941 * callback.
942 *
943 * This is used together with RTThreadCtxHookCreate() on platforms which
944 * support it, and directly from VMMR0EmtPrepareForBlocking() and
945 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
946 *
947 * @param enmEvent The thread-context event.
948 * @param pvUser Opaque pointer to the VMCPU.
949 *
950 * @thread EMT(pvUser)
951 */
952static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
953{
954 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
955
956 switch (enmEvent)
957 {
958 case RTTHREADCTXEVENT_IN:
959 {
960 /*
961 * Linux may call us with preemption enabled (really!) but technically we
962 * cannot get preempted here, otherwise we end up in an infinite recursion
963 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
964 * ad infinitum). Let's just disable preemption for now...
965 */
966 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
967 * preemption after doing the callout (one or two functions up the
968 * call chain). */
969 /** @todo r=ramshankar: See @bugref{5313#c30}. */
970 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
971 RTThreadPreemptDisable(&ParanoidPreemptState);
972
973 /* We need to update the VCPU <-> host CPU mapping. */
974 RTCPUID idHostCpu;
975 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
976 pVCpu->iHostCpuSet = iHostCpuSet;
977 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
978
979 /* In the very unlikely event that the GIP delta for the CPU we're
980 rescheduled onto needs calculating, try to force a return to ring-3.
981 We unfortunately cannot do the measurements right here. */
982 if (RT_LIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
983 { /* likely */ }
984 else
985 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
986
987 /* Invoke the HM-specific thread-context callback. */
988 HMR0ThreadCtxCallback(enmEvent, pvUser);
989
990 /* Restore preemption. */
991 RTThreadPreemptRestore(&ParanoidPreemptState);
992 break;
993 }
994
995 case RTTHREADCTXEVENT_OUT:
996 {
997 /* Invoke the HM-specific thread-context callback. */
998 HMR0ThreadCtxCallback(enmEvent, pvUser);
999
1000 /*
1001 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
1002 * have the same host CPU associated with them.
1003 */
1004 pVCpu->iHostCpuSet = UINT32_MAX;
1005 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1006 break;
1007 }
1008
1009 default:
1010 /* Invoke the HM-specific thread-context callback. */
1011 HMR0ThreadCtxCallback(enmEvent, pvUser);
1012 break;
1013 }
1014}
1015
1016
1017/**
1018 * Creates thread switching hook for the current EMT thread.
1019 *
1020 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
1021 * platform does not implement switcher hooks, no hooks will be created and the
1022 * member is set to NIL_RTTHREADCTXHOOK.
1023 *
1024 * @returns VBox status code.
1025 * @param pVCpu The cross context virtual CPU structure.
1026 * @thread EMT(pVCpu)
1027 */
1028VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
1029{
1030 VMCPU_ASSERT_EMT(pVCpu);
1031 Assert(pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK);
1032
1033#if 1 /* To disable this stuff change to zero. */
1034 int rc = RTThreadCtxHookCreate(&pVCpu->vmmr0.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
1035 if (RT_SUCCESS(rc))
1036 {
1037 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = true;
1038 return rc;
1039 }
1040#else
1041 RT_NOREF(vmmR0ThreadCtxCallback);
1042 int rc = VERR_NOT_SUPPORTED;
1043#endif
1044
1045 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1046 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = false;
1047 if (rc == VERR_NOT_SUPPORTED)
1048 return VINF_SUCCESS;
1049
1050 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
1051 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1052}
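/*
 * Informal lifecycle sketch for the context hook, based on the code in this
 * file: it is created once per EMT here, enabled in VMMR0EntryFast right
 * before HMR0Enter, disabled again before going back to ring-3 (either in
 * VMMR0EntryFast or via VMMR0ThreadCtxHookDisable), and finally destroyed by
 * VMMR0ThreadCtxHookDestroyForEmt from VMMR0CleanupVM.
 */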
1053
1054
1055/**
1056 * Destroys the thread switching hook for the specified VCPU.
1057 *
1058 * @param pVCpu The cross context virtual CPU structure.
1059 * @remarks Can be called from any thread.
1060 */
1061VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
1062{
1063 int rc = RTThreadCtxHookDestroy(pVCpu->vmmr0.s.hCtxHook);
1064 AssertRC(rc);
1065 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1066}
1067
1068
1069/**
1070 * Disables the thread switching hook for this VCPU (if we got one).
1071 *
1072 * @param pVCpu The cross context virtual CPU structure.
1073 * @thread EMT(pVCpu)
1074 *
1075 * @remarks This also clears GVMCPU::idHostCpu, so the mapping is invalid after
1076 * this call. This means you have to be careful with what you do!
1077 */
1078VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1079{
1080 /*
1081 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1082 * @bugref{7726#c19} explains the need for this trick:
1083 *
1084 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1085 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1086 * longjmp & normal return to ring-3, which opens a window where we may be
1087 * rescheduled without changing GVMCPU::idHostCpu and cause confusion if
1088 * the CPU starts executing a different EMT. Both functions first disable
1089 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1090 * an opening for getting preempted.
1091 */
1092 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1093 * all the time. */
1094
1095 /*
1096 * Disable the context hook, if we got one.
1097 */
1098 if (pVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1099 {
1100 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1101 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1102 int rc = RTThreadCtxHookDisable(pVCpu->vmmr0.s.hCtxHook);
1103 AssertRC(rc);
1104 }
1105}
1106
1107
1108/**
1109 * Internal version of VMMR0ThreadCtxHookIsEnabled.
1110 *
1111 * @returns true if enabled, false otherwise.
1112 * @param pVCpu The cross context virtual CPU structure.
1113 */
1114DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1115{
1116 return RTThreadCtxHookIsEnabled(pVCpu->vmmr0.s.hCtxHook);
1117}
1118
1119
1120/**
1121 * Whether thread-context hooks are registered for this VCPU.
1122 *
1123 * @returns true if registered, false otherwise.
1124 * @param pVCpu The cross context virtual CPU structure.
1125 */
1126VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1127{
1128 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1129}
1130
1131
1132/**
1133 * Returns the ring-0 release logger instance.
1134 *
1135 * @returns Pointer to release logger, NULL if not configured.
1136 * @param pVCpu The cross context virtual CPU structure of the caller.
1137 * @thread EMT(pVCpu)
1138 */
1139VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1140{
1141 return pVCpu->vmmr0.s.u.s.RelLogger.pLogger;
1142}
1143
1144
1145#ifdef VBOX_WITH_STATISTICS
1146/**
1147 * Record return code statistics
1148 * @param pVM The cross context VM structure.
1149 * @param pVCpu The cross context virtual CPU structure.
1150 * @param rc The status code.
1151 */
1152static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1153{
1154 /*
1155 * Collect statistics.
1156 */
1157 switch (rc)
1158 {
1159 case VINF_SUCCESS:
1160 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1161 break;
1162 case VINF_EM_RAW_INTERRUPT:
1163 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1164 break;
1165 case VINF_EM_RAW_INTERRUPT_HYPER:
1166 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1167 break;
1168 case VINF_EM_RAW_GUEST_TRAP:
1169 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1170 break;
1171 case VINF_EM_RAW_RING_SWITCH:
1172 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1173 break;
1174 case VINF_EM_RAW_RING_SWITCH_INT:
1175 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1176 break;
1177 case VINF_EM_RAW_STALE_SELECTOR:
1178 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1179 break;
1180 case VINF_EM_RAW_IRET_TRAP:
1181 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1182 break;
1183 case VINF_IOM_R3_IOPORT_READ:
1184 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1185 break;
1186 case VINF_IOM_R3_IOPORT_WRITE:
1187 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1188 break;
1189 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1190 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1191 break;
1192 case VINF_IOM_R3_MMIO_READ:
1193 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1194 break;
1195 case VINF_IOM_R3_MMIO_WRITE:
1196 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1197 break;
1198 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1199 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1200 break;
1201 case VINF_IOM_R3_MMIO_READ_WRITE:
1202 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1203 break;
1204 case VINF_PATM_HC_MMIO_PATCH_READ:
1205 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1206 break;
1207 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1208 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1209 break;
1210 case VINF_CPUM_R3_MSR_READ:
1211 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1212 break;
1213 case VINF_CPUM_R3_MSR_WRITE:
1214 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1215 break;
1216 case VINF_EM_RAW_EMULATE_INSTR:
1217 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1218 break;
1219 case VINF_PATCH_EMULATE_INSTR:
1220 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1221 break;
1222 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1223 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1224 break;
1225 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1226 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1227 break;
1228 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1229 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1230 break;
1231 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1232 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1233 break;
1234 case VINF_CSAM_PENDING_ACTION:
1235 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1236 break;
1237 case VINF_PGM_SYNC_CR3:
1238 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1239 break;
1240 case VINF_PATM_PATCH_INT3:
1241 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1242 break;
1243 case VINF_PATM_PATCH_TRAP_PF:
1244 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1245 break;
1246 case VINF_PATM_PATCH_TRAP_GP:
1247 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1248 break;
1249 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1250 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1251 break;
1252 case VINF_EM_RESCHEDULE_REM:
1253 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1254 break;
1255 case VINF_EM_RAW_TO_R3:
1256 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1257 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1258 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1259 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1260 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1261 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1262 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1263 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1264 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1265 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1266 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1267 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1268 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1269 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1270 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1271 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1272 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1273 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1274 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1275 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1276 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1277 else
1278 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1279 break;
1280
1281 case VINF_EM_RAW_TIMER_PENDING:
1282 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1283 break;
1284 case VINF_EM_RAW_INTERRUPT_PENDING:
1285 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1286 break;
1287 case VINF_VMM_CALL_HOST:
1288 switch (pVCpu->vmm.s.enmCallRing3Operation)
1289 {
1290 case VMMCALLRING3_PGM_MAP_CHUNK:
1291 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1292 break;
1293 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1294 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1295 break;
1296 case VMMCALLRING3_VM_R0_ASSERTION:
1297 default:
1298 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1299 break;
1300 }
1301 break;
1302 case VINF_PATM_DUPLICATE_FUNCTION:
1303 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1304 break;
1305 case VINF_PGM_CHANGE_MODE:
1306 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1307 break;
1308 case VINF_PGM_POOL_FLUSH_PENDING:
1309 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1310 break;
1311 case VINF_EM_PENDING_REQUEST:
1312 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1313 break;
1314 case VINF_EM_HM_PATCH_TPR_INSTR:
1315 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1316 break;
1317 default:
1318 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1319 break;
1320 }
1321}
1322#endif /* VBOX_WITH_STATISTICS */
1323
1324
1325/**
1326 * The Ring 0 entry point, called by the fast-ioctl path.
1327 *
1328 * @param pGVM The global (ring-0) VM structure.
1329 * @param pVMIgnored The cross context VM structure. The return code is
1330 * stored in pVM->vmm.s.iLastGZRc.
1331 * @param idCpu The Virtual CPU ID of the calling EMT.
1332 * @param enmOperation Which operation to execute.
1333 * @remarks Assume called with interrupts _enabled_.
1334 */
1335VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1336{
1337 RT_NOREF(pVMIgnored);
1338
1339 /*
1340 * Validation.
1341 */
1342 if ( idCpu < pGVM->cCpus
1343 && pGVM->cCpus == pGVM->cCpusUnsafe)
1344 { /*likely*/ }
1345 else
1346 {
1347 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1348 return;
1349 }
1350
1351 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1352 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1353 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1354 && pGVCpu->hNativeThreadR0 == hNativeThread))
1355 { /* likely */ }
1356 else
1357 {
1358 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1359 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1360 return;
1361 }
1362
1363 /*
1364 * Perform requested operation.
1365 */
1366 switch (enmOperation)
1367 {
1368 /*
1369 * Run guest code using the available hardware acceleration technology.
1370 */
1371 case VMMR0_DO_HM_RUN:
1372 {
1373 for (;;) /* hlt loop */
1374 {
1375 /*
1376 * Disable ring-3 calls & blocking till we've successfully entered HM.
1377 * Otherwise we sometimes end up blocking at the final Log4 statement
1378 * in VMXR0Enter, while still in a somewhat in-between state.
1379 */
1380 VMMRZCallRing3Disable(pGVCpu);
1381
1382 /*
1383 * Disable preemption.
1384 */
1385 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1386 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1387 RTThreadPreemptDisable(&PreemptState);
1388 pGVCpu->vmmr0.s.pPreemptState = &PreemptState;
1389
1390 /*
1391 * Get the host CPU identifiers, make sure they are valid and that
1392 * we've got a TSC delta for the CPU.
1393 */
1394 RTCPUID idHostCpu;
1395 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1396 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1397 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1398 {
1399 pGVCpu->iHostCpuSet = iHostCpuSet;
1400 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1401
1402 /*
1403 * Update the periodic preemption timer if it's active.
1404 */
1405 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1406 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1407
1408#ifdef VMM_R0_TOUCH_FPU
1409 /*
1410 * Make sure we've got the FPU state loaded so we don't need to clear
1411 * CR0.TS and get out of sync with the host kernel when loading the guest
1412 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1413 */
1414 CPUMR0TouchHostFpu();
1415#endif
1416 int rc;
1417 bool fPreemptRestored = false;
1418 if (!HMR0SuspendPending())
1419 {
1420 /*
1421 * Enable the context switching hook.
1422 */
1423 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1424 {
1425 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmmr0.s.hCtxHook));
1426 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmmr0.s.hCtxHook); AssertRC(rc2);
1427 }
1428
1429 /*
1430 * Enter HM context.
1431 */
1432 rc = HMR0Enter(pGVCpu);
1433 if (RT_SUCCESS(rc))
1434 {
1435 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1436
1437 /*
1438 * When preemption hooks are in place, enable preemption now that
1439 * we're in HM context.
1440 */
1441 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1442 {
1443 fPreemptRestored = true;
1444 pGVCpu->vmmr0.s.pPreemptState = NULL;
1445 RTThreadPreemptRestore(&PreemptState);
1446 }
1447 VMMRZCallRing3Enable(pGVCpu);
1448
1449 /*
1450 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1451 */
1452 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1453
1454 /*
1455 * Assert sanity on the way out. Using manual assertions code here as normal
1456 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1457 */
1458 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1459 && RT_SUCCESS_NP(rc)
1460 && rc != VINF_VMM_CALL_HOST ))
1461 {
1462 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1463 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1464 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1465 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1466 }
1467#if 0
1468 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1469 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1470 {
1471 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1472 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1473 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1474 rc = VERR_VMM_CONTEXT_HOOK_STILL_ENABLED;
1475 }
1476#endif
1477
1478 VMMRZCallRing3Disable(pGVCpu); /* Lazy bird: Simpler just disabling it again... */
1479 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1480 }
1481 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1482
1483 /*
1484 * Invalidate the host CPU identifiers before we disable the context
1485 * hook / restore preemption.
1486 */
1487 pGVCpu->iHostCpuSet = UINT32_MAX;
1488 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1489
1490 /*
1491 * Disable context hooks. Due to unresolved cleanup issues, we
1492 * cannot leave the hooks enabled when we return to ring-3.
1493 *
1494 * Note! At the moment HM may also have disabled the hook
1495 * when we get here, but the IPRT API handles that.
1496 */
1497 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1498 RTThreadCtxHookDisable(pGVCpu->vmmr0.s.hCtxHook);
1499 }
1500 /*
1501 * The system is about to go into suspend mode; go back to ring 3.
1502 */
1503 else
1504 {
1505 pGVCpu->iHostCpuSet = UINT32_MAX;
1506 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1507 rc = VINF_EM_RAW_INTERRUPT;
1508 }
1509
1510 /** @todo When HM stops messing with the context hook state, we'll disable
1511 * preemption again before the RTThreadCtxHookDisable call. */
1512 if (!fPreemptRestored)
1513 {
1514 pGVCpu->vmmr0.s.pPreemptState = NULL;
1515 RTThreadPreemptRestore(&PreemptState);
1516 }
1517
1518 pGVCpu->vmm.s.iLastGZRc = rc;
1519
1520 /* Fire dtrace probe and collect statistics. */
1521 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1522#ifdef VBOX_WITH_STATISTICS
1523 vmmR0RecordRC(pGVM, pGVCpu, rc);
1524#endif
1525 VMMRZCallRing3Enable(pGVCpu);
1526
1527 /*
1528 * If this is a halt.
1529 */
1530 if (rc != VINF_EM_HALT)
1531 { /* we're not in a hurry for a HLT, so prefer this path */ }
1532 else
1533 {
1534 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1535 if (rc == VINF_SUCCESS)
1536 {
1537 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1538 continue;
1539 }
1540 pGVCpu->vmm.s.cR0HaltsToRing3++;
1541 }
1542 }
1543 /*
1544 * Invalid CPU set index or TSC delta in need of measuring.
1545 */
1546 else
1547 {
1548 pGVCpu->vmmr0.s.pPreemptState = NULL;
1549 pGVCpu->iHostCpuSet = UINT32_MAX;
1550 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1551 RTThreadPreemptRestore(&PreemptState);
1552
1553 VMMRZCallRing3Enable(pGVCpu);
1554
1555 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1556 {
1557 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1558 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1559 0 /*default cTries*/);
1560 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1561 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1562 else
1563 pGVCpu->vmm.s.iLastGZRc = rc;
1564 }
1565 else
1566 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1567 }
1568 break;
1569 } /* halt loop. */
1570 break;
1571 }
1572
1573#ifdef VBOX_WITH_NEM_R0
1574# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1575 case VMMR0_DO_NEM_RUN:
1576 {
1577 /*
1578 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1579 */
1580# ifdef VBOXSTRICTRC_STRICT_ENABLED
1581 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1582# else
1583 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1584# endif
1585 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1586
1587 pGVCpu->vmm.s.iLastGZRc = rc;
1588
1589 /*
1590 * Fire dtrace probe and collect statistics.
1591 */
1592 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1593# ifdef VBOX_WITH_STATISTICS
1594 vmmR0RecordRC(pGVM, pGVCpu, rc);
1595# endif
1596 break;
1597 }
1598# endif
1599#endif
1600
1601 /*
1602 * For profiling.
1603 */
1604 case VMMR0_DO_NOP:
1605 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1606 break;
1607
1608 /*
1609 * Shouldn't happen.
1610 */
1611 default:
1612 AssertMsgFailed(("%#x\n", enmOperation));
1613 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1614 break;
1615 }
1616}
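/*
 * Note on the fast path above (informal): VMMR0EntryFast is reached through
 * the support driver's fast I/O control path and therefore cannot return a
 * status code directly; the result of the operation is stored in
 * pGVCpu->vmm.s.iLastGZRc and picked up by ring-3 once the ioctl returns.
 */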
1617
1618
1619/**
1620 * Validates a session or VM session argument.
1621 *
1622 * @returns true / false accordingly.
1623 * @param pGVM The global (ring-0) VM structure.
1624 * @param pClaimedSession The session claim to validate.
1625 * @param pSession The session argument.
1626 */
1627DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1628{
1629 /* This must be set! */
1630 if (!pSession)
1631 return false;
1632
1633 /* Only one out of the two. */
1634 if (pGVM && pClaimedSession)
1635 return false;
1636 if (pGVM)
1637 pClaimedSession = pGVM->pSession;
1638 return pClaimedSession == pSession;
1639}
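/*
 * The accepted combinations above boil down to (illustrative only):
 *   pGVM != NULL && pClaimedSession == NULL  =>  pGVM->pSession must equal pSession
 *   pGVM == NULL && pClaimedSession != NULL  =>  pClaimedSession must equal pSession
 * Anything else (no session, or both a VM and a claimed session) is rejected.
 */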
1640
1641
1642/**
1643 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1644 * called thru a longjmp so we can exit safely on failure.
1645 *
1646 * @returns VBox status code.
1647 * @param pGVM The global (ring-0) VM structure.
1648 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1649 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
1650 * @param enmOperation Which operation to execute.
1651 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1652 * The support driver validates this if it's present.
1653 * @param u64Arg Some simple constant argument.
1654 * @param pSession The session of the caller.
1655 *
1656 * @remarks Assume called with interrupts _enabled_.
1657 */
1658DECL_NO_INLINE(static, int) vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1659 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1660{
1661 /*
1662 * Validate pGVM and idCpu for consistency and validity.
1663 */
1664 if (pGVM != NULL)
1665 {
1666 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1667 { /* likely */ }
1668 else
1669 {
1670 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1671 return VERR_INVALID_POINTER;
1672 }
1673
1674 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1675 { /* likely */ }
1676 else
1677 {
1678 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1679 return VERR_INVALID_PARAMETER;
1680 }
1681
1682 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1683 && pGVM->enmVMState <= VMSTATE_TERMINATED
1684 && pGVM->pSession == pSession
1685 && pGVM->pSelf == pGVM))
1686 { /* likely */ }
1687 else
1688 {
1689 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1690 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1691 return VERR_INVALID_POINTER;
1692 }
1693 }
1694 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1695 { /* likely */ }
1696 else
1697 {
1698 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1699 return VERR_INVALID_PARAMETER;
1700 }
1701
1702 /*
1703 * Process the request.
1704 */
1705 int rc;
1706 switch (enmOperation)
1707 {
1708 /*
1709 * GVM requests
1710 */
1711 case VMMR0_DO_GVMM_CREATE_VM:
1712 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1713 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1714 else
1715 rc = VERR_INVALID_PARAMETER;
1716 break;
1717
1718 case VMMR0_DO_GVMM_DESTROY_VM:
1719 if (pReqHdr == NULL && u64Arg == 0)
1720 rc = GVMMR0DestroyVM(pGVM);
1721 else
1722 rc = VERR_INVALID_PARAMETER;
1723 break;
1724
1725 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1726 if (pGVM != NULL)
1727 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1728 else
1729 rc = VERR_INVALID_PARAMETER;
1730 break;
1731
1732 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1733 if (pGVM != NULL)
1734 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1735 else
1736 rc = VERR_INVALID_PARAMETER;
1737 break;
1738
1739 case VMMR0_DO_GVMM_SCHED_HALT:
1740 if (pReqHdr)
1741 return VERR_INVALID_PARAMETER;
1742 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1743 break;
1744
1745 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1746 if (pReqHdr || u64Arg)
1747 return VERR_INVALID_PARAMETER;
1748 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1749 break;
1750
1751 case VMMR0_DO_GVMM_SCHED_POKE:
1752 if (pReqHdr || u64Arg)
1753 return VERR_INVALID_PARAMETER;
1754 rc = GVMMR0SchedPoke(pGVM, idCpu);
1755 break;
1756
1757 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1758 if (u64Arg)
1759 return VERR_INVALID_PARAMETER;
1760 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1761 break;
1762
1763 case VMMR0_DO_GVMM_SCHED_POLL:
1764 if (pReqHdr || u64Arg > 1)
1765 return VERR_INVALID_PARAMETER;
1766 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1767 break;
1768
1769 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1770 if (u64Arg)
1771 return VERR_INVALID_PARAMETER;
1772 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1773 break;
1774
1775 case VMMR0_DO_GVMM_RESET_STATISTICS:
1776 if (u64Arg)
1777 return VERR_INVALID_PARAMETER;
1778 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1779 break;
1780
1781 /*
1782 * Initialize the R0 part of a VM instance.
1783 */
1784 case VMMR0_DO_VMMR0_INIT:
1785 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1786 break;
1787
1788 /*
1789 * Does EMT specific ring-0 init.
1790 */
1791 case VMMR0_DO_VMMR0_INIT_EMT:
1792 rc = vmmR0InitVMEmt(pGVM, idCpu);
1793 break;
1794
1795 /*
1796 * Terminate the R0 part of a VM instance.
1797 */
1798 case VMMR0_DO_VMMR0_TERM:
1799 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1800 break;
1801
1802 /*
1803 * Update release or debug logger instances.
1804 */
1805 case VMMR0_DO_VMMR0_UPDATE_LOGGERS:
1806 if (idCpu == NIL_VMCPUID)
1807 return VERR_INVALID_CPU_ID;
1808 if (u64Arg < VMMLOGGER_IDX_MAX && pReqHdr != NULL)
1809 rc = vmmR0UpdateLoggers(pGVM, idCpu /*idCpu*/, (PVMMR0UPDATELOGGERSREQ)pReqHdr, (size_t)u64Arg);
1810 else
1811 return VERR_INVALID_PARAMETER;
1812 break;
1813
1814 /*
1815 * Log flusher thread.
1816 */
1817 case VMMR0_DO_VMMR0_LOG_FLUSHER:
1818 if (idCpu != NIL_VMCPUID)
1819 return VERR_INVALID_CPU_ID;
1820 if (pReqHdr == NULL)
1821 rc = vmmR0LogFlusher(pGVM);
1822 else
1823 return VERR_INVALID_PARAMETER;
1824 break;
1825
1826 /*
1827 * Wait for the flush to finish with all the buffers for the given logger.
1828 */
1829 case VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED:
1830 if (idCpu == NIL_VMCPUID)
1831 return VERR_INVALID_CPU_ID;
1832 if (u64Arg < VMMLOGGER_IDX_MAX && pReqHdr == NULL)
1833 rc = vmmR0LogWaitFlushed(pGVM, idCpu /*idCpu*/, (size_t)u64Arg);
1834 else
1835 return VERR_INVALID_PARAMETER;
1836 break;
1837
1838 /*
1839 * Attempt to enable hm mode and check the current setting.
1840 */
1841 case VMMR0_DO_HM_ENABLE:
1842 rc = HMR0EnableAllCpus(pGVM);
1843 break;
1844
1845 /*
1846 * Setup the hardware accelerated session.
1847 */
1848 case VMMR0_DO_HM_SETUP_VM:
1849 rc = HMR0SetupVM(pGVM);
1850 break;
1851
1852 /*
1853 * PGM wrappers.
1854 */
1855 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1856 if (idCpu == NIL_VMCPUID)
1857 return VERR_INVALID_CPU_ID;
1858 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1859 break;
1860
1861 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1862 if (idCpu == NIL_VMCPUID)
1863 return VERR_INVALID_CPU_ID;
1864 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1865 break;
1866
1867 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1868 if (idCpu == NIL_VMCPUID)
1869 return VERR_INVALID_CPU_ID;
1870 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
1871 break;
1872
1873 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1874 if (idCpu != 0)
1875 return VERR_INVALID_CPU_ID;
1876 rc = PGMR0PhysSetupIoMmu(pGVM);
1877 break;
1878
1879 case VMMR0_DO_PGM_POOL_GROW:
1880 if (idCpu == NIL_VMCPUID)
1881 return VERR_INVALID_CPU_ID;
1882 rc = PGMR0PoolGrow(pGVM, idCpu);
1883 break;
1884
1885 /*
1886 * GMM wrappers.
1887 */
1888 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1889 if (u64Arg)
1890 return VERR_INVALID_PARAMETER;
1891 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1892 break;
1893
1894 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1895 if (u64Arg)
1896 return VERR_INVALID_PARAMETER;
1897 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1898 break;
1899
1900 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1901 if (u64Arg)
1902 return VERR_INVALID_PARAMETER;
1903 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1904 break;
1905
1906 case VMMR0_DO_GMM_FREE_PAGES:
1907 if (u64Arg)
1908 return VERR_INVALID_PARAMETER;
1909 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1910 break;
1911
1912 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1913 if (u64Arg)
1914 return VERR_INVALID_PARAMETER;
1915 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1916 break;
1917
1918 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1919 if (u64Arg)
1920 return VERR_INVALID_PARAMETER;
1921 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1922 break;
1923
1924 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1925 if (idCpu == NIL_VMCPUID)
1926 return VERR_INVALID_CPU_ID;
1927 if (u64Arg)
1928 return VERR_INVALID_PARAMETER;
1929 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1930 break;
1931
1932 case VMMR0_DO_GMM_BALLOONED_PAGES:
1933 if (u64Arg)
1934 return VERR_INVALID_PARAMETER;
1935 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1936 break;
1937
1938 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1939 if (u64Arg)
1940 return VERR_INVALID_PARAMETER;
1941 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1942 break;
1943
1944 case VMMR0_DO_GMM_SEED_CHUNK:
1945 if (pReqHdr)
1946 return VERR_INVALID_PARAMETER;
1947 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
1948 break;
1949
1950 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1951 if (idCpu == NIL_VMCPUID)
1952 return VERR_INVALID_CPU_ID;
1953 if (u64Arg)
1954 return VERR_INVALID_PARAMETER;
1955 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1956 break;
1957
1958 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1959 if (idCpu == NIL_VMCPUID)
1960 return VERR_INVALID_CPU_ID;
1961 if (u64Arg)
1962 return VERR_INVALID_PARAMETER;
1963 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1964 break;
1965
1966 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1967 if (idCpu == NIL_VMCPUID)
1968 return VERR_INVALID_CPU_ID;
1969 if ( u64Arg
1970 || pReqHdr)
1971 return VERR_INVALID_PARAMETER;
1972 rc = GMMR0ResetSharedModules(pGVM, idCpu);
1973 break;
1974
1975#ifdef VBOX_WITH_PAGE_SHARING
1976 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1977 {
1978 if (idCpu == NIL_VMCPUID)
1979 return VERR_INVALID_CPU_ID;
1980 if ( u64Arg
1981 || pReqHdr)
1982 return VERR_INVALID_PARAMETER;
1983 rc = GMMR0CheckSharedModules(pGVM, idCpu);
1984 break;
1985 }
1986#endif
1987
1988#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1989 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1990 if (u64Arg)
1991 return VERR_INVALID_PARAMETER;
1992 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1993 break;
1994#endif
1995
1996 case VMMR0_DO_GMM_QUERY_STATISTICS:
1997 if (u64Arg)
1998 return VERR_INVALID_PARAMETER;
1999 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
2000 break;
2001
2002 case VMMR0_DO_GMM_RESET_STATISTICS:
2003 if (u64Arg)
2004 return VERR_INVALID_PARAMETER;
2005 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
2006 break;
2007
2008 /*
2009 * A quick GCFGM mock-up.
2010 */
2011 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2012 case VMMR0_DO_GCFGM_SET_VALUE:
2013 case VMMR0_DO_GCFGM_QUERY_VALUE:
2014 {
2015 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2016 return VERR_INVALID_PARAMETER;
2017 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2018 if (pReq->Hdr.cbReq != sizeof(*pReq))
2019 return VERR_INVALID_PARAMETER;
2020 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2021 {
2022 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2023 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2024 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2025 }
2026 else
2027 {
2028 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2029 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2030 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2031 }
2032 break;
2033 }
2034
2035 /*
2036 * PDM Wrappers.
2037 */
2038 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2039 {
2040 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2041 return VERR_INVALID_PARAMETER;
2042 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2043 break;
2044 }
2045
2046 case VMMR0_DO_PDM_DEVICE_CREATE:
2047 {
2048 if (!pReqHdr || u64Arg || idCpu != 0)
2049 return VERR_INVALID_PARAMETER;
2050 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2051 break;
2052 }
2053
2054 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2055 {
2056 if (!pReqHdr || u64Arg)
2057 return VERR_INVALID_PARAMETER;
2058 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2059 break;
2060 }
2061
2062 /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2063 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2064 {
2065 if (!pReqHdr || u64Arg || idCpu != 0)
2066 return VERR_INVALID_PARAMETER;
2067 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2068 break;
2069 }
2070
2071 /*
2072 * Requests to the internal networking service.
2073 */
2074 case VMMR0_DO_INTNET_OPEN:
2075 {
2076 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2077 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2078 return VERR_INVALID_PARAMETER;
2079 rc = IntNetR0OpenReq(pSession, pReq);
2080 break;
2081 }
2082
2083 case VMMR0_DO_INTNET_IF_CLOSE:
2084 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2085 return VERR_INVALID_PARAMETER;
2086 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2087 break;
2088
2089
2090 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2091 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2092 return VERR_INVALID_PARAMETER;
2093 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2094 break;
2095
2096 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2097 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2098 return VERR_INVALID_PARAMETER;
2099 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2100 break;
2101
2102 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2103 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2104 return VERR_INVALID_PARAMETER;
2105 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2106 break;
2107
2108 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2109 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2110 return VERR_INVALID_PARAMETER;
2111 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2112 break;
2113
2114 case VMMR0_DO_INTNET_IF_SEND:
2115 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2116 return VERR_INVALID_PARAMETER;
2117 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2118 break;
2119
2120 case VMMR0_DO_INTNET_IF_WAIT:
2121 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2122 return VERR_INVALID_PARAMETER;
2123 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2124 break;
2125
2126 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2127 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2128 return VERR_INVALID_PARAMETER;
2129 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2130 break;
2131
2132#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2133 /*
2134 * Requests to host PCI driver service.
2135 */
2136 case VMMR0_DO_PCIRAW_REQ:
2137 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2138 return VERR_INVALID_PARAMETER;
2139 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2140 break;
2141#endif
2142
2143 /*
2144 * NEM requests.
2145 */
2146#ifdef VBOX_WITH_NEM_R0
2147# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2148 case VMMR0_DO_NEM_INIT_VM:
2149 if (u64Arg || pReqHdr || idCpu != 0)
2150 return VERR_INVALID_PARAMETER;
2151 rc = NEMR0InitVM(pGVM);
2152 break;
2153
2154 case VMMR0_DO_NEM_INIT_VM_PART_2:
2155 if (u64Arg || pReqHdr || idCpu != 0)
2156 return VERR_INVALID_PARAMETER;
2157 rc = NEMR0InitVMPart2(pGVM);
2158 break;
2159
2160 case VMMR0_DO_NEM_MAP_PAGES:
2161 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2162 return VERR_INVALID_PARAMETER;
2163 rc = NEMR0MapPages(pGVM, idCpu);
2164 break;
2165
2166 case VMMR0_DO_NEM_UNMAP_PAGES:
2167 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2168 return VERR_INVALID_PARAMETER;
2169 rc = NEMR0UnmapPages(pGVM, idCpu);
2170 break;
2171
2172 case VMMR0_DO_NEM_EXPORT_STATE:
2173 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2174 return VERR_INVALID_PARAMETER;
2175 rc = NEMR0ExportState(pGVM, idCpu);
2176 break;
2177
2178 case VMMR0_DO_NEM_IMPORT_STATE:
2179 if (pReqHdr || idCpu == NIL_VMCPUID)
2180 return VERR_INVALID_PARAMETER;
2181 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2182 break;
2183
2184 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2185 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2186 return VERR_INVALID_PARAMETER;
2187 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2188 break;
2189
2190 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2191 if (pReqHdr || idCpu == NIL_VMCPUID)
2192 return VERR_INVALID_PARAMETER;
2193 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2194 break;
2195
2196 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2197 if (u64Arg || pReqHdr)
2198 return VERR_INVALID_PARAMETER;
2199 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2200 break;
2201
2202# if 1 && defined(DEBUG_bird)
2203 case VMMR0_DO_NEM_EXPERIMENT:
2204 if (pReqHdr)
2205 return VERR_INVALID_PARAMETER;
2206 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2207 break;
2208# endif
2209# endif
2210#endif
2211
2212 /*
2213 * IOM requests.
2214 */
2215 case VMMR0_DO_IOM_GROW_IO_PORTS:
2216 {
2217 if (pReqHdr || idCpu != 0)
2218 return VERR_INVALID_PARAMETER;
2219 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2220 break;
2221 }
2222
2223 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2224 {
2225 if (pReqHdr || idCpu != 0)
2226 return VERR_INVALID_PARAMETER;
2227 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2228 break;
2229 }
2230
2231 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2232 {
2233 if (pReqHdr || idCpu != 0)
2234 return VERR_INVALID_PARAMETER;
2235 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2236 break;
2237 }
2238
2239 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2240 {
2241 if (pReqHdr || idCpu != 0)
2242 return VERR_INVALID_PARAMETER;
2243 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2244 break;
2245 }
2246
2247 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2248 {
2249 if (pReqHdr || idCpu != 0)
2250 return VERR_INVALID_PARAMETER;
2251 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2252 if (RT_SUCCESS(rc))
2253 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2254 break;
2255 }
2256
2257 /*
2258 * DBGF requests.
2259 */
2260#ifdef VBOX_WITH_DBGF_TRACING
2261 case VMMR0_DO_DBGF_TRACER_CREATE:
2262 {
2263 if (!pReqHdr || u64Arg || idCpu != 0)
2264 return VERR_INVALID_PARAMETER;
2265 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2266 break;
2267 }
2268
2269 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2270 {
2271 if (!pReqHdr || u64Arg)
2272 return VERR_INVALID_PARAMETER;
2273# if 0 /** @todo */
2274 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2275# else
2276 rc = VERR_NOT_IMPLEMENTED;
2277# endif
2278 break;
2279 }
2280#endif
2281
2282 case VMMR0_DO_DBGF_BP_INIT:
2283 {
2284 if (!pReqHdr || u64Arg || idCpu != 0)
2285 return VERR_INVALID_PARAMETER;
2286 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2287 break;
2288 }
2289
2290 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2291 {
2292 if (!pReqHdr || u64Arg || idCpu != 0)
2293 return VERR_INVALID_PARAMETER;
2294 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2295 break;
2296 }
2297
2298 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2299 {
2300 if (!pReqHdr || u64Arg || idCpu != 0)
2301 return VERR_INVALID_PARAMETER;
2302 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2303 break;
2304 }
2305
2306 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2307 {
2308 if (!pReqHdr || u64Arg || idCpu != 0)
2309 return VERR_INVALID_PARAMETER;
2310 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2311 break;
2312 }
2313
2314 case VMMR0_DO_DBGF_BP_PORTIO_INIT:
2315 {
2316 if (!pReqHdr || u64Arg || idCpu != 0)
2317 return VERR_INVALID_PARAMETER;
2318 rc = DBGFR0BpPortIoInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2319 break;
2320 }
2321
2322
2323 /*
2324 * TM requests.
2325 */
2326 case VMMR0_DO_TM_GROW_TIMER_QUEUE:
2327 {
2328 if (pReqHdr || idCpu == NIL_VMCPUID)
2329 return VERR_INVALID_PARAMETER;
2330 rc = TMR0TimerQueueGrow(pGVM, RT_HI_U32(u64Arg), RT_LO_U32(u64Arg));
2331 break;
2332 }
2333
2334 /*
2335 * For profiling.
2336 */
2337 case VMMR0_DO_NOP:
2338 case VMMR0_DO_SLOW_NOP:
2339 return VINF_SUCCESS;
2340
2341 /*
2342 * For testing Ring-0 APIs invoked in this environment.
2343 */
2344 case VMMR0_DO_TESTS:
2345 /** @todo make new test */
2346 return VINF_SUCCESS;
2347
2348 default:
2349 /*
2350 * We're returning VERR_NOT_SUPPORTED here so we've got something other
2351 * than -1, which the interrupt gate glue code might return.
2352 */
2353 Log(("operation %#x is not supported\n", enmOperation));
2354 return VERR_NOT_SUPPORTED;
2355 }
2356 return rc;
2357}
2358
2359
2360/**
2361 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2362 *
2363 * @returns VBox status code.
2364 * @param pvArgs The argument package
2365 */
2366static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2367{
2368 PGVMCPU pGVCpu = (PGVMCPU)pvArgs;
2369 return vmmR0EntryExWorker(pGVCpu->vmmr0.s.pGVM,
2370 pGVCpu->vmmr0.s.idCpu,
2371 pGVCpu->vmmr0.s.enmOperation,
2372 pGVCpu->vmmr0.s.pReq,
2373 pGVCpu->vmmr0.s.u64Arg,
2374 pGVCpu->vmmr0.s.pSession);
2375}
2376
2377
2378/**
2379 * The Ring 0 entry point, called by the support library (SUP).
2380 *
2381 * @returns VBox status code.
2382 * @param pGVM The global (ring-0) VM structure.
2383 * @param pVM The cross context VM structure.
2384 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2385 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2386 * @param enmOperation Which operation to execute.
2387 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2388 * @param u64Arg Some simple constant argument.
2389 * @param pSession The session of the caller.
2390 * @remarks Assume called with interrupts _enabled_.
2391 */
2392VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2393 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2394{
2395 /*
2396 * Requests that should only happen on the EMT thread will be
2397 * wrapped in a setjmp so we can assert without causing trouble.
2398 */
2399 if ( pVM != NULL
2400 && pGVM != NULL
2401 && pVM == pGVM /** @todo drop pVM or pGVM */
2402 && idCpu < pGVM->cCpus
2403 && pGVM->pSession == pSession
2404 && pGVM->pSelf == pVM)
2405 {
2406 switch (enmOperation)
2407 {
2408 /* These might/will be called before VMMR3Init. */
2409 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2410 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2411 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2412 case VMMR0_DO_GMM_FREE_PAGES:
2413 case VMMR0_DO_GMM_BALLOONED_PAGES:
2414 /* On the mac we might not have a valid jmp buf, so check these as well. */
2415 case VMMR0_DO_VMMR0_INIT:
2416 case VMMR0_DO_VMMR0_TERM:
2417
2418 case VMMR0_DO_PDM_DEVICE_CREATE:
2419 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2420 case VMMR0_DO_IOM_GROW_IO_PORTS:
2421 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2422 case VMMR0_DO_DBGF_BP_INIT:
2423 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2424 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2425 {
2426 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2427 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2428 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2429 && pGVCpu->hNativeThreadR0 == hNativeThread))
2430 {
2431 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2432 break;
2433
2434 pGVCpu->vmmr0.s.pGVM = pGVM;
2435 pGVCpu->vmmr0.s.idCpu = idCpu;
2436 pGVCpu->vmmr0.s.enmOperation = enmOperation;
2437 pGVCpu->vmmr0.s.pReq = pReq;
2438 pGVCpu->vmmr0.s.u64Arg = u64Arg;
2439 pGVCpu->vmmr0.s.pSession = pSession;
2440 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, pGVCpu,
2441 ((uintptr_t)u64Arg << 16) | (uintptr_t)enmOperation);
2442 }
2443 return VERR_VM_THREAD_NOT_EMT;
2444 }
2445
2446 default:
2447 case VMMR0_DO_PGM_POOL_GROW:
2448 break;
2449 }
2450 }
2451 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2452}
2453
2454
2455/*********************************************************************************************************************************
2456* EMT Blocking *
2457*********************************************************************************************************************************/
2458
2459/**
2460 * Checks whether we've armed the ring-0 long jump machinery.
2461 *
2462 * @returns @c true / @c false
2463 * @param pVCpu The cross context virtual CPU structure.
2464 * @thread EMT
2465 * @sa VMMIsLongJumpArmed
2466 */
2467VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2468{
2469#ifdef RT_ARCH_X86
2470 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2471 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2472#else
2473 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2474 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2475#endif
2476}
2477
2478
2479/**
2480 * Checks whether we've done a ring-3 long jump.
2481 *
2482 * @returns @c true / @c false
2483 * @param pVCpu The cross context virtual CPU structure.
2484 * @thread EMT
2485 */
2486VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2487{
2488 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2489}
2490
2491
2492/**
2493 * Locking helper that deals with HM context and checks if the thread can block.
2494 *
2495 * @returns VINF_SUCCESS if we can block. Returns @a rcBusy or
2496 * VERR_VMM_CANNOT_BLOCK if not able to block.
2497 * @param pVCpu The cross context virtual CPU structure of the calling
2498 * thread.
2499 * @param rcBusy What to return in case of a blocking problem. If this is
2500 * VINF_SUCCESS and we cannot block, VERR_VMM_CANNOT_BLOCK is returned instead.
2501 * @param pszCaller The caller (for logging problems).
2502 * @param pvLock The lock address (for logging problems).
2503 * @param pCtx Where to return context info for the resume call.
2504 * @thread EMT(pVCpu)
2505 */
2506VMMR0_INT_DECL(int) VMMR0EmtPrepareToBlock(PVMCPUCC pVCpu, int rcBusy, const char *pszCaller, void *pvLock,
2507 PVMMR0EMTBLOCKCTX pCtx)
2508{
2509 const char *pszMsg;
2510
2511 /*
2512 * Check that we are allowed to block.
2513 */
2514 if (RT_LIKELY(VMMRZCallRing3IsEnabled(pVCpu)))
2515 {
2516 /*
2517 * Are we in HM context and without a context hook? If so, work the context hook.
2518 */
2519 if (pVCpu->idHostCpu != NIL_RTCPUID)
2520 {
2521 Assert(pVCpu->iHostCpuSet != UINT32_MAX);
2522
2523 if (pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK)
2524 {
2525 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_OUT, pVCpu);
2526 if (pVCpu->vmmr0.s.pPreemptState)
2527 RTThreadPreemptRestore(pVCpu->vmmr0.s.pPreemptState);
2528
2529 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2530 pCtx->fWasInHmContext = true;
2531 return VINF_SUCCESS;
2532 }
2533 }
2534
2535 if (RT_LIKELY(!pVCpu->vmmr0.s.pPreemptState))
2536 {
2537 /*
2538 * Not in HM context or we've got hooks, so just check that preemption
2539 * is enabled.
2540 */
2541 if (RT_LIKELY(RTThreadPreemptIsEnabled(NIL_RTTHREAD)))
2542 {
2543 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2544 pCtx->fWasInHmContext = false;
2545 return VINF_SUCCESS;
2546 }
2547 pszMsg = "Preemption is disabled!";
2548 }
2549 else
2550 pszMsg = "Preemption state w/o HM state!";
2551 }
2552 else
2553 pszMsg = "Ring-3 calls are disabled!";
2554
2555 static uint32_t volatile s_cWarnings = 0;
2556 if (++s_cWarnings < 50)
2557 SUPR0Printf("VMMR0EmtPrepareToBlock: %s pvLock=%p pszCaller=%s rcBusy=%p\n", pszMsg, pvLock, pszCaller, rcBusy);
2558 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2559 pCtx->fWasInHmContext = false;
2560 return rcBusy != VINF_SUCCESS ? rcBusy : VERR_VMM_CANNOT_BLOCK;
2561}
2562
2563
2564/**
2565 * Counterpart to VMMR0EmtPrepareToBlock.
2566 *
2567 * @param pVCpu The cross context virtual CPU structure of the calling
2568 * thread.
2569 * @param pCtx The context structure used with VMMR0EmtPrepareToBlock.
2570 * @thread EMT(pVCpu)
2571 */
2572VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx)
2573{
2574 AssertReturnVoid(pCtx->uMagic == VMMR0EMTBLOCKCTX_MAGIC);
2575 if (pCtx->fWasInHmContext)
2576 {
2577 if (pVCpu->vmmr0.s.pPreemptState)
2578 RTThreadPreemptDisable(pVCpu->vmmr0.s.pPreemptState);
2579
2580 pCtx->fWasInHmContext = false;
2581 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_IN, pVCpu);
2582 }
2583 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2584}
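
/*
 * Minimal usage sketch for the prepare/resume pair, modelled on
 * vmmR0LoggerFlushInner further down.  pVCpu, pvSomeLock, hSomeEvent and the
 * VERR_SEM_BUSY busy status are illustrative placeholders, not fixed API
 * requirements:
 *
 *      VMMR0EMTBLOCKCTX Ctx;
 *      int rc = VMMR0EmtPrepareToBlock(pVCpu, VERR_SEM_BUSY, "SomeCaller", pvSomeLock, &Ctx);
 *      if (RT_SUCCESS(rc))
 *      {
 *          rc = RTSemEventWait(hSomeEvent, RT_INDEFINITE_WAIT);
 *          VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
 *      }
 *
 * On failure rc is the busy status (or VERR_VMM_CANNOT_BLOCK) and the caller
 * must not block.
 */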
2585
2586/** @name VMMR0EMTWAIT_F_XXX - flags for VMMR0EmtWaitEventInner and friends.
2587 * @{ */
2588/** Try suppress VERR_INTERRUPTED for a little while (~10 sec). */
2589#define VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED RT_BIT_32(0)
2590/** @} */
2591
2592/**
2593 * Helper for waiting on an RTSEMEVENT; the caller has already done VMMR0EmtPrepareToBlock.
2594 *
2595 * @returns VBox status code.
2596 * @retval VERR_THREAD_IS_TERMINATING if the calling thread is terminating.
2597 * @retval VERR_TIMEOUT if we ended up waiting too long, either according to
2598 * @a cMsTimeout or to maximum wait values.
2599 *
2600 * @param pGVCpu The ring-0 virtual CPU structure.
2601 * @param fFlags VMMR0EMTWAIT_F_XXX.
2602 * @param hEvent The event to wait on.
2603 * @param cMsTimeout The timeout or RT_INDEFINITE_WAIT.
2604 */
2605VMMR0DECL(int) VMMR0EmtWaitEventInner(PGVMCPU pGVCpu, uint32_t fFlags, RTSEMEVENT hEvent, RTMSINTERVAL cMsTimeout)
2606{
2607 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_VM_THREAD_NOT_EMT);
2608
2609 /*
2610 * Note! Similar code is found in the PDM critical sections too.
2611 */
2612 uint64_t const nsStart = RTTimeNanoTS();
2613 uint64_t cNsMaxTotal = cMsTimeout == RT_INDEFINITE_WAIT
2614 ? RT_NS_5MIN : RT_MIN(RT_NS_5MIN, RT_NS_1MS_64 * cMsTimeout);
2615 uint32_t cMsMaxOne = RT_MS_5SEC;
2616 bool fNonInterruptible = false;
2617 for (;;)
2618 {
2619 /* Wait. */
2620 int rcWait = !fNonInterruptible
2621 ? RTSemEventWaitNoResume(hEvent, cMsMaxOne)
2622 : RTSemEventWait(hEvent, cMsMaxOne);
2623 if (RT_SUCCESS(rcWait))
2624 return rcWait;
2625
2626 if (rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED)
2627 {
2628 uint64_t const cNsElapsed = RTTimeNanoTS() - nsStart;
2629
2630 /*
2631 * Check the thread termination status.
2632 */
2633 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
2634 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
2635 ("rcTerm=%Rrc\n", rcTerm));
2636 if ( rcTerm == VERR_NOT_SUPPORTED
2637 && !fNonInterruptible
2638 && cNsMaxTotal > RT_NS_1MIN)
2639 cNsMaxTotal = RT_NS_1MIN;
2640
2641 /* We return immediately if it looks like the thread is terminating. */
2642 if (rcTerm == VINF_THREAD_IS_TERMINATING)
2643 return VERR_THREAD_IS_TERMINATING;
2644
2645 /* We may suppress VERR_INTERRUPTED if VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED was
2646 specified, otherwise we'll just return it. */
2647 if (rcWait == VERR_INTERRUPTED)
2648 {
2649 if (!(fFlags & VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED))
2650 return VERR_INTERRUPTED;
2651 if (!fNonInterruptible)
2652 {
2653 /* First time: Adjust down the wait parameters and make sure we get at least
2654 one non-interruptible wait before timing out. */
2655 fNonInterruptible = true;
2656 cMsMaxOne = 32;
2657 uint64_t const cNsLeft = cNsMaxTotal - cNsElapsed;
2658 if (cNsLeft > RT_NS_10SEC)
2659 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
2660 continue;
2661 }
2662 }
2663
2664 /* Check for timeout. */
2665 if (cNsElapsed > cNsMaxTotal)
2666 return VERR_TIMEOUT;
2667 }
2668 else
2669 return rcWait;
2670 }
2671 /* not reached */
2672}
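
/*
 * Illustrative only: this is how the log flusher path further down waits for a
 * buffer, suppressing spurious VERR_INTERRUPTED while still being bounded by
 * the ~5 minute total cap enforced above (hSomeEvent is a placeholder):
 *
 *      rc = VMMR0EmtWaitEventInner(pGVCpu, VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED,
 *                                  hSomeEvent, RT_INDEFINITE_WAIT);
 */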
2673
2674
2675/*********************************************************************************************************************************
2676* Logging. *
2677*********************************************************************************************************************************/
2678
2679/**
2680 * VMMR0_DO_VMMR0_UPDATE_LOGGERS: Updates the EMT loggers for the VM.
2681 *
2682 * @returns VBox status code.
2683 * @param pGVM The global (ring-0) VM structure.
2684 * @param idCpu The ID of the calling EMT.
2685 * @param pReq The request data.
2686 * @param idxLogger Which logger set to update.
2687 * @thread EMT(idCpu)
2688 */
2689static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, size_t idxLogger)
2690{
2691 /*
2692 * Check sanity. First we require EMT to be calling us.
2693 */
2694 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2695 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2696
2697 AssertReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[0]), VERR_INVALID_PARAMETER);
2698 AssertReturn(pReq->cGroups < _8K, VERR_INVALID_PARAMETER);
2699 AssertReturn(pReq->Hdr.cbReq == RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[pReq->cGroups]), VERR_INVALID_PARAMETER);
2700
2701 AssertReturn(idxLogger < VMMLOGGER_IDX_MAX, VERR_OUT_OF_RANGE);
2702
2703 /*
2704 * Adjust flags.
2705 */
2706 /* Always buffered: */
2707 pReq->fFlags |= RTLOGFLAGS_BUFFERED;
2708 /* These don't make sense at present: */
2709 pReq->fFlags &= ~(RTLOGFLAGS_FLUSH | RTLOGFLAGS_WRITE_THROUGH);
2710 /* We've traditionally skipped the group restrictions. */
2711 pReq->fFlags &= ~RTLOGFLAGS_RESTRICT_GROUPS;
2712
2713 /*
2714 * Do the updating.
2715 */
2716 int rc = VINF_SUCCESS;
2717 for (idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
2718 {
2719 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2720 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.aLoggers[idxLogger].pLogger;
2721 if (pLogger)
2722 {
2723 RTLogSetR0ProgramStart(pLogger, pGVM->vmm.s.nsProgramStart);
2724 rc = RTLogBulkUpdate(pLogger, pReq->fFlags, pReq->uGroupCrc32, pReq->cGroups, pReq->afGroups);
2725 }
2726 }
2727
2728 return rc;
2729}
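
/*
 * Sketch of how a ring-3 caller could lay out the request, derived from the
 * sanity checks above.  The actual ring-3 logger code lives elsewhere, and the
 * SUPVMMR0REQHDR_MAGIC constant plus the local variables here are assumptions
 * for illustration only:
 *
 *      uint32_t const cbReq = RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[cGroups]);
 *      PVMMR0UPDATELOGGERSREQ pReq = (PVMMR0UPDATELOGGERSREQ)RTMemAllocZ(cbReq);
 *      pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *      pReq->Hdr.cbReq    = cbReq;
 *      pReq->fFlags       = fFlags;
 *      pReq->uGroupCrc32  = uGroupCrc32;
 *      pReq->cGroups      = cGroups;
 *      memcpy(pReq->afGroups, pafGroups, sizeof(pReq->afGroups[0]) * cGroups);
 *
 * The request is then dispatched to ring-0 as VMMR0_DO_VMMR0_UPDATE_LOGGERS
 * with u64Arg set to the logger index.
 */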
2730
2731
2732/**
2733 * VMMR0_DO_VMMR0_LOG_FLUSHER: Get the next log flushing job.
2734 *
2735 * The job info is copied into VMM::LogFlusherItem.
2736 *
2737 * @returns VBox status code.
2738 * @retval VERR_OBJECT_DESTROYED if we're shutting down.
2739 * @retval VERR_NOT_OWNER if the calling thread is not the flusher thread.
2740 * @param pGVM The global (ring-0) VM structure.
2741 * @thread The log flusher thread (first caller automatically becomes the log
2742 * flusher).
2743 */
2744static int vmmR0LogFlusher(PGVM pGVM)
2745{
2746 /*
2747 * Check that this really is the flusher thread.
2748 */
2749 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
2750 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR_3);
2751 if (RT_LIKELY(pGVM->vmmr0.s.LogFlusher.hThread == hNativeSelf))
2752 { /* likely */ }
2753 else
2754 {
2755 /* The first caller becomes the flusher thread. */
2756 bool fOk;
2757 ASMAtomicCmpXchgHandle(&pGVM->vmmr0.s.LogFlusher.hThread, hNativeSelf, NIL_RTNATIVETHREAD, fOk);
2758 if (!fOk)
2759 return VERR_NOT_OWNER;
2760 pGVM->vmmr0.s.LogFlusher.fThreadRunning = true;
2761 }
2762
2763 /*
2764 * Acknowledge flush, waking up waiting EMT.
2765 */
2766 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2767
2768 uint32_t idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2769 uint32_t idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2770 if ( idxTail != idxHead
2771 && pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing)
2772 {
2773 /* Pop the head off the ring buffer. */
2774 uint32_t const idCpu = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idCpu;
2775 uint32_t const idxLogger = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxLogger;
2776 uint32_t const idxBuffer = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxBuffer;
2777
2778 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32 = UINT32_MAX >> 1; /* invalidate the entry */
2779 pGVM->vmmr0.s.LogFlusher.idxRingHead = (idxHead + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2780
2781 /* Validate content. */
2782 if ( idCpu < pGVM->cCpus
2783 && idxLogger < VMMLOGGER_IDX_MAX
2784 && idxBuffer < VMMLOGGER_BUFFER_COUNT)
2785 {
2786 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2787 PVMMR0PERVCPULOGGER pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2788 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
2789
2790 /*
2791 * Accounting.
2792 */
2793 uint32_t cFlushing = pR0Log->cFlushing - 1;
2794 if (RT_LIKELY(cFlushing < VMMLOGGER_BUFFER_COUNT))
2795 { /*likely*/ }
2796 else
2797 cFlushing = 0;
2798 pR0Log->cFlushing = cFlushing;
2799 ASMAtomicWriteU32(&pShared->cFlushing, cFlushing);
2800
2801 /*
2802 * Wake up the EMT if it's waiting.
2803 */
2804 if (!pR0Log->fEmtWaiting)
2805 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2806 else
2807 {
2808 pR0Log->fEmtWaiting = false;
2809 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2810
2811 int rc = RTSemEventSignal(pR0Log->hEventFlushWait);
2812 if (RT_FAILURE(rc))
2813 LogRelMax(64, ("vmmR0LogFlusher: RTSemEventSignal failed ACKing entry #%u (%u/%u/%u): %Rrc!\n",
2814 idxHead, idCpu, idxLogger, idxBuffer, rc));
2815 }
2816 }
2817 else
2818 {
2819 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2820 LogRelMax(64, ("vmmR0LogFlusher: Bad ACK entry #%u: %u/%u/%u!\n", idxHead, idCpu, idxLogger, idxBuffer));
2821 }
2822
2823 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2824 }
2825
2826 /*
2827 * The wait loop.
2828 */
2829 int rc;
2830 for (;;)
2831 {
2832 /*
2833 * Work pending?
2834 */
2835 idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2836 idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2837 if (idxTail != idxHead)
2838 {
2839 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing = true;
2840 pGVM->vmm.s.LogFlusherItem.u32 = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32;
2841
2842 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2843 return VINF_SUCCESS;
2844 }
2845
2846 /*
2847 * Nothing to do, so, check for termination and go to sleep.
2848 */
2849 if (!pGVM->vmmr0.s.LogFlusher.fThreadShutdown)
2850 { /* likely */ }
2851 else
2852 {
2853 rc = VERR_OBJECT_DESTROYED;
2854 break;
2855 }
2856
2857 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = true;
2858 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2859
2860 rc = RTSemEventWaitNoResume(pGVM->vmmr0.s.LogFlusher.hEvent, RT_MS_5MIN);
2861
2862 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2863 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false;
2864
2865 if (RT_SUCCESS(rc) || rc == VERR_TIMEOUT)
2866 { /* likely */ }
2867 else if (rc == VERR_INTERRUPTED)
2868 {
2869 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2870 return rc;
2871 }
2872 else if (rc == VERR_SEM_DESTROYED || rc == VERR_INVALID_HANDLE)
2873 break;
2874 else
2875 {
2876 LogRel(("vmmR0LogFlusher: RTSemEventWaitNoResume returned unexpected status %Rrc\n", rc));
2877 break;
2878 }
2879 }
2880
2881 /*
2882 * Terminating - prevent further calls and indicate to the EMTs that we're no longer around.
2883 */
2884 pGVM->vmmr0.s.LogFlusher.hThread = ~pGVM->vmmr0.s.LogFlusher.hThread; /* (should be reasonably safe) */
2885 pGVM->vmmr0.s.LogFlusher.fThreadRunning = false;
2886
2887 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2888 return rc;
2889}
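
/*
 * Ring-3 side, sketched from the protocol above rather than from the actual
 * ring-3 implementation: the flusher thread loops on VMMR0_DO_VMMR0_LOG_FLUSHER;
 * each call first ACKs the previously returned item (waking any waiting EMT)
 * and then either returns the next job in VMM::LogFlusherItem or blocks:
 *
 *      for (;;)
 *      {
 *          int rc = <ring-0 call with VMMR0_DO_VMMR0_LOG_FLUSHER>;
 *          if (RT_FAILURE(rc))
 *              break;  // VERR_OBJECT_DESTROYED on VM teardown.
 *          <write out the buffer identified by pVM->vmm.s.LogFlusherItem>;
 *      }
 */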
2890
2891
2892/**
2893 * VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED: Waits for the flusher thread to finish all
2894 * buffers for logger @a idxLogger.
2895 *
2896 * @returns VBox status code.
2897 * @param pGVM The global (ring-0) VM structure.
2898 * @param idCpu The ID of the calling EMT.
2899 * @param idxLogger Which logger to wait on.
2900 * @thread EMT(idCpu)
2901 */
2902static int vmmR0LogWaitFlushed(PGVM pGVM, VMCPUID idCpu, size_t idxLogger)
2903{
2904 /*
2905 * Check sanity. First we require EMT to be calling us.
2906 */
2907 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2908 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2909 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2910 AssertReturn(idxLogger < VMMLOGGER_IDX_MAX, VERR_OUT_OF_RANGE);
2911 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2912
2913 /*
2914 * Do the waiting.
2915 */
2916 int rc = VINF_SUCCESS;
2917 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2918 uint32_t cFlushing = pR0Log->cFlushing;
2919 while (cFlushing > 0)
2920 {
2921 pR0Log->fEmtWaiting = true;
2922 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2923
2924 rc = RTSemEventWaitNoResume(pR0Log->hEventFlushWait, RT_MS_5MIN);
2925
2926 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2927 pR0Log->fEmtWaiting = false;
2928 if (RT_SUCCESS(rc))
2929 {
2930 /* Read the new count and make sure it decreased before looping. That
2931 way we can guarantee that we won't wait longer than 5 min * number of buffers in total. */
2932 uint32_t const cPrevFlushing = cFlushing;
2933 cFlushing = pR0Log->cFlushing;
2934 if (cFlushing < cPrevFlushing)
2935 continue;
2936 rc = VERR_INTERNAL_ERROR_3;
2937 }
2938 break;
2939 }
2940 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2941 return rc;
2942}
2943
2944
2945/**
2946 * Inner worker for vmmR0LoggerFlushCommon.
2947 */
2948static bool vmmR0LoggerFlushInner(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush)
2949{
2950 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2951 PVMMR3CPULOGGER const pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
2952
2953 /*
2954 * Figure out what we need to do and whether we can.
2955 */
2956 enum { kJustSignal, kPrepAndSignal, kPrepSignalAndWait } enmAction;
2957#if VMMLOGGER_BUFFER_COUNT >= 2
2958 if (pR0Log->cFlushing < VMMLOGGER_BUFFER_COUNT - 1)
2959 {
2960 if (RTSemEventIsSignalSafe())
2961 enmAction = kJustSignal;
2962 else if (VMMRZCallRing3IsEnabled(pGVCpu))
2963 enmAction = kPrepAndSignal;
2964 else
2965 {
2966 /** @todo This is a bit simplistic. We could introduce a FF to signal the
2967 * thread or similar. */
2968 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
2969# if defined(RT_OS_LINUX)
2970 SUP_DPRINTF(("vmmR0LoggerFlush: Signalling not safe and EMT blocking disabled! (%u bytes)\n", cbToFlush));
2971# endif
2972 pShared->cbDropped += cbToFlush;
2973 return true;
2974 }
2975 }
2976 else
2977#endif
2978 if (VMMRZCallRing3IsEnabled(pGVCpu))
2979 enmAction = kPrepSignalAndWait;
2980 else
2981 {
2982 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
2983# if defined(RT_OS_LINUX)
2984 SUP_DPRINTF(("vmmR0LoggerFlush: EMT blocking disabled! (%u bytes)\n", cbToFlush));
2985# endif
2986 pShared->cbDropped += cbToFlush;
2987 return true;
2988 }
2989
2990 /*
2991 * Prepare for blocking if necessary.
2992 */
2993 VMMR0EMTBLOCKCTX Ctx;
2994 if (enmAction != kJustSignal)
2995 {
2996 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, "vmmR0LoggerFlushInner", pR0Log->hEventFlushWait, &Ctx);
2997 if (RT_SUCCESS(rc))
2998 { /* likely */ }
2999 else
3000 {
3001 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3002 SUP_DPRINTF(("vmmR0LoggerFlush: VMMR0EmtPrepareToBlock failed! rc=%d\n", rc));
3003 return false;
3004 }
3005 }
3006
3007 /*
3008 * Queue the flush job.
3009 */
3010 bool fFlushedBuffer;
3011 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3012 if (pGVM->vmmr0.s.LogFlusher.fThreadRunning)
3013 {
3014 uint32_t const idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3015 uint32_t const idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3016 uint32_t const idxNewTail = (idxTail + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3017 if (idxNewTail != idxHead)
3018 {
3019 /* Queue it. */
3020 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idCpu = pGVCpu->idCpu;
3021 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxLogger = idxLogger;
3022 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxBuffer = (uint32_t)idxBuffer;
3023 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.fProcessing = 0;
3024 pGVM->vmmr0.s.LogFlusher.idxRingTail = idxNewTail;
3025
3026 /* Update the number of buffers currently being flushed. */
3027 uint32_t cFlushing = pR0Log->cFlushing;
3028 cFlushing = RT_MIN(cFlushing + 1, VMMLOGGER_BUFFER_COUNT);
3029 pShared->cFlushing = pR0Log->cFlushing = cFlushing;
3030
3031 /* We must wait if all buffers are currently being flushed. */
3032 bool const fEmtWaiting = cFlushing >= VMMLOGGER_BUFFER_COUNT && enmAction != kJustSignal /* paranoia */;
3033 pR0Log->fEmtWaiting = fEmtWaiting;
3034
3035 /* Stats. */
3036 STAM_REL_COUNTER_INC(&pShared->StatFlushes);
3037 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherFlushes);
3038
3039 /* Signal the worker thread. */
3040 if (pGVM->vmmr0.s.LogFlusher.fThreadWaiting)
3041 {
3042 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3043 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent);
3044 }
3045 else
3046 {
3047 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherNoWakeUp);
3048 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3049 }
3050
3051 /*
3052 * Wait for a buffer to finish flushing.
3053 *
3054 * Note! Lazy bird is ignoring the status code here. The result is
3055 * that we might end up with an extra event signalling, so the
3056 * next time we need to wait we won't, and end up with some log
3057 * corruption. However, it's too much hassle right now for
3058 * a scenario which would most likely end the process rather
3059 * than cause log corruption.
3060 */
3061 if (fEmtWaiting)
3062 {
3063 STAM_REL_PROFILE_START(&pShared->StatWait, a);
3064 VMMR0EmtWaitEventInner(pGVCpu, VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED,
3065 pR0Log->hEventFlushWait, RT_INDEFINITE_WAIT);
3066 STAM_REL_PROFILE_STOP(&pShared->StatWait, a);
3067 }
3068
3069 /*
3070 * We always switch buffer if we have more than one.
3071 */
3072#if VMMLOGGER_BUFFER_COUNT == 1
3073 fFlushedBuffer = true;
3074#else
3075 AssertCompile(VMMLOGGER_BUFFER_COUNT >= 1);
3076 pShared->idxBuf = (idxBuffer + 1) % VMMLOGGER_BUFFER_COUNT;
3077 fFlushedBuffer = false;
3078#endif
3079 }
3080 else
3081 {
3082 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3083 SUP_DPRINTF(("vmmR0LoggerFlush: ring buffer is full!\n"));
3084 fFlushedBuffer = true;
3085 }
3086 }
3087 else
3088 {
3089 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3090 SUP_DPRINTF(("vmmR0LoggerFlush: flusher not active - dropping %u bytes\n", cbToFlush));
3091 fFlushedBuffer = true;
3092 }
3093
3094 /*
3095 * Restore the HM context.
3096 */
3097 if (enmAction != kJustSignal)
3098 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
3099
3100 return fFlushedBuffer;
3101}
3102
3103
3104/**
3105 * Common worker for vmmR0LogFlush and vmmR0LogRelFlush.
3106 */
3107static bool vmmR0LoggerFlushCommon(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc, uint32_t idxLogger)
3108{
3109 /*
3110 * Convert pLogger into a GVMCPU handle and 'call' back to Ring-3.
3111 * (This code is deliberately a bit paranoid.)
3112 */
3113 if (RT_VALID_PTR(pLogger))
3114 {
3115 if ( pLogger->u32Magic == RTLOGGER_MAGIC
3116 && (pLogger->u32UserValue1 & VMMR0_LOGGER_FLAGS_MAGIC_MASK) == VMMR0_LOGGER_FLAGS_MAGIC_VALUE
3117 && pLogger->u64UserValue2 == pLogger->u64UserValue3)
3118 {
3119 PGVMCPU const pGVCpu = (PGVMCPU)(uintptr_t)pLogger->u64UserValue2;
3120 if ( RT_VALID_PTR(pGVCpu)
3121 && ((uintptr_t)pGVCpu & PAGE_OFFSET_MASK) == 0)
3122 {
3123 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
3124 PGVM const pGVM = pGVCpu->pGVM;
3125 if ( hNativeSelf == pGVCpu->hEMT
3126 && RT_VALID_PTR(pGVM))
3127 {
3128 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3129 size_t const idxBuffer = pBufDesc - &pR0Log->aBufDescs[0];
3130 if (idxBuffer < VMMLOGGER_BUFFER_COUNT)
3131 {
3132 /*
3133 * Make sure we don't recurse forever here should something in the
3134 * following code trigger logging or an assertion. Do the rest in
3135 * an inner worker to avoid hitting the right margin too hard.
3136 */
3137 if (!pR0Log->fFlushing)
3138 {
3139 pR0Log->fFlushing = true;
3140 bool fFlushed = vmmR0LoggerFlushInner(pGVM, pGVCpu, idxLogger, idxBuffer, pBufDesc->offBuf);
3141 pR0Log->fFlushing = false;
3142 return fFlushed;
3143 }
3144
3145 SUP_DPRINTF(("vmmR0LoggerFlush: Recursive flushing!\n"));
3146 }
3147 else
3148 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p: idxBuffer=%#zx\n", pLogger, pGVCpu, idxBuffer));
3149 }
3150 else
3151 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p hEMT=%p hNativeSelf=%p!\n",
3152 pLogger, pGVCpu, pGVCpu->hEMT, hNativeSelf));
3153 }
3154 else
3155 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p!\n", pLogger, pGVCpu));
3156 }
3157 else
3158 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p u32Magic=%#x u32UserValue1=%#x u64UserValue2=%#RX64 u64UserValue3=%#RX64!\n",
3159 pLogger, pLogger->u32Magic, pLogger->u32UserValue1, pLogger->u64UserValue2, pLogger->u64UserValue3));
3160 }
3161 else
3162 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p!\n", pLogger));
3163 return true;
3164}
3165
3166
3167/**
3168 * @callback_method_impl{FNRTLOGFLUSH, Release logger buffer flush callback.}
3169 */
3170static DECLCALLBACK(bool) vmmR0LogRelFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3171{
3172 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, VMMLOGGER_IDX_RELEASE);
3173}
3174
3175
3176/**
3177 * @callback_method_impl{FNRTLOGFLUSH, Logger (debug) buffer flush callback.}
3178 */
3179static DECLCALLBACK(bool) vmmR0LogFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3180{
3181#ifdef LOG_ENABLED
3182 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, VMMLOGGER_IDX_REGULAR);
3183#else
3184 RT_NOREF(pLogger, pBufDesc);
3185 return true;
3186#endif
3187}
3188
3189
3190/*
3191 * Override RTLogDefaultInstanceEx so we can do logging from EMTs in ring-0.
3192 */
3193DECLEXPORT(PRTLOGGER) RTLogDefaultInstanceEx(uint32_t fFlagsAndGroup)
3194{
3195#ifdef LOG_ENABLED
3196 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3197 if (pGVCpu)
3198 {
3199 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.s.Logger.pLogger;
3200 if (RT_VALID_PTR(pLogger))
3201 {
3202 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3203 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3204 {
3205 if (!pGVCpu->vmmr0.s.u.s.Logger.fFlushing)
3206 {
3207 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3208 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3209 return NULL;
3210 }
3211
3212 /*
3213 * When we're flushing we _must_ return NULL here to suppress any
3214 * attempts at using the logger while in vmmR0LoggerFlushCommon.
3215 * The VMMR0EmtPrepareToBlock code may trigger logging in HM,
3216 * which will reset the buffer content before we even get to queue
3217 * the flush request. (Only an issue when VBOX_WITH_R0_LOGGING
3218 * is enabled.)
3219 */
3220 return NULL;
3221 }
3222 }
3223 }
3224#endif
3225 return SUPR0DefaultLogInstanceEx(fFlagsAndGroup);
3226}
3227
3228
3229/*
3230 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
3231 */
3232DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
3233{
3234 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3235 if (pGVCpu)
3236 {
3237 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.s.RelLogger.pLogger;
3238 if (RT_VALID_PTR(pLogger))
3239 {
3240 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3241 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3242 {
3243 if (!pGVCpu->vmmr0.s.u.s.RelLogger.fFlushing)
3244 {
3245 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3246 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3247 return NULL;
3248 }
3249 }
3250 }
3251 }
3252 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
3253}
3254
3255
3256/**
3257 * Helper for vmmR0InitLoggerSet
3258 */
3259static int vmmR0InitLoggerOne(PGVMCPU pGVCpu, bool fRelease, PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared,
3260 uint32_t cbBuf, char *pchBuf, RTR3PTR pchBufR3)
3261{
3262 /*
3263 * Create and configure the logger.
3264 */
3265 for (size_t i = 0; i < VMMLOGGER_BUFFER_COUNT; i++)
3266 {
3267 pR0Log->aBufDescs[i].u32Magic = RTLOGBUFFERDESC_MAGIC;
3268 pR0Log->aBufDescs[i].uReserved = 0;
3269 pR0Log->aBufDescs[i].cbBuf = cbBuf;
3270 pR0Log->aBufDescs[i].offBuf = 0;
3271 pR0Log->aBufDescs[i].pchBuf = pchBuf + i * cbBuf;
3272 pR0Log->aBufDescs[i].pAux = &pShared->aBufs[i].AuxDesc;
3273
3274 pShared->aBufs[i].AuxDesc.fFlushedIndicator = false;
3275 pShared->aBufs[i].AuxDesc.afPadding[0] = 0;
3276 pShared->aBufs[i].AuxDesc.afPadding[1] = 0;
3277 pShared->aBufs[i].AuxDesc.afPadding[2] = 0;
3278 pShared->aBufs[i].AuxDesc.offBuf = 0;
3279 pShared->aBufs[i].pchBufR3 = pchBufR3 + i * cbBuf;
3280 }
3281 pShared->cbBuf = cbBuf;
3282
3283 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
3284 int rc = RTLogCreateEx(&pR0Log->pLogger, fRelease ? "VBOX_RELEASE_LOG" : "VBOX_LOG", RTLOG_F_NO_LOCKING | RTLOGFLAGS_BUFFERED,
3285 "all", RT_ELEMENTS(s_apszGroups), s_apszGroups, UINT32_MAX,
3286 VMMLOGGER_BUFFER_COUNT, pR0Log->aBufDescs, RTLOGDEST_DUMMY,
3287 NULL /*pfnPhase*/, 0 /*cHistory*/, 0 /*cbHistoryFileMax*/, 0 /*cSecsHistoryTimeSlot*/,
3288 NULL /*pErrInfo*/, NULL /*pszFilenameFmt*/);
3289 if (RT_SUCCESS(rc))
3290 {
3291 PRTLOGGER pLogger = pR0Log->pLogger;
3292 pLogger->u32UserValue1 = VMMR0_LOGGER_FLAGS_MAGIC_VALUE;
3293 pLogger->u64UserValue2 = (uintptr_t)pGVCpu;
3294 pLogger->u64UserValue3 = (uintptr_t)pGVCpu;
3295
3296 rc = RTLogSetFlushCallback(pLogger, fRelease ? vmmR0LogRelFlush : vmmR0LogFlush);
3297 if (RT_SUCCESS(rc))
3298 {
3299 RTLogSetR0ThreadNameF(pLogger, "EMT-%u-R0", pGVCpu->idCpu);
3300
3301 /*
3302 * Create the event sem the EMT waits on while flushing is happening.
3303 */
3304 rc = RTSemEventCreate(&pR0Log->hEventFlushWait);
3305 if (RT_SUCCESS(rc))
3306 return VINF_SUCCESS;
3307 pR0Log->hEventFlushWait = NIL_RTSEMEVENT;
3308 }
3309 RTLogDestroy(pLogger);
3310 }
3311 pR0Log->pLogger = NULL;
3312 return rc;
3313}
3314
3315
3316/**
3317 * Worker for VMMR0CleanupVM and vmmR0InitLoggerSet that destroys one logger.
3318 */
3319static void vmmR0TermLoggerOne(PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared)
3320{
3321 RTLogDestroy(pR0Log->pLogger);
3322 pR0Log->pLogger = NULL;
3323
3324 for (size_t i = 0; i < VMMLOGGER_BUFFER_COUNT; i++)
3325 pShared->aBufs[i].pchBufR3 = NIL_RTR3PTR;
3326
3327 RTSemEventDestroy(pR0Log->hEventFlushWait);
3328 pR0Log->hEventFlushWait = NIL_RTSEMEVENT;
3329}
3330
3331
3332/**
3333 * Initializes one type of logger for each EMT.
3334 */
3335static int vmmR0InitLoggerSet(PGVM pGVM, uint8_t idxLogger, uint32_t cbBuf, PRTR0MEMOBJ phMemObj, PRTR0MEMOBJ phMapObj)
3336{
3337 /* Allocate buffers first. */
3338 int rc = RTR0MemObjAllocPage(phMemObj, cbBuf * pGVM->cCpus * VMMLOGGER_BUFFER_COUNT, false /*fExecutable*/);
3339 if (RT_SUCCESS(rc))
3340 {
3341 rc = RTR0MemObjMapUser(phMapObj, *phMemObj, (RTR3PTR)-1, 0 /*uAlignment*/, RTMEM_PROT_READ, NIL_RTR0PROCESS);
3342 if (RT_SUCCESS(rc))
3343 {
3344 char * const pchBuf = (char *)RTR0MemObjAddress(*phMemObj);
3345 AssertPtrReturn(pchBuf, VERR_INTERNAL_ERROR_2);
3346
3347 RTR3PTR const pchBufR3 = RTR0MemObjAddressR3(*phMapObj);
3348 AssertReturn(pchBufR3 != NIL_RTR3PTR, VERR_INTERNAL_ERROR_3);
3349
3350 /* Initialize the per-CPU loggers. */
3351 for (uint32_t i = 0; i < pGVM->cCpus; i++)
3352 {
3353 PGVMCPU pGVCpu = &pGVM->aCpus[i];
3354 PVMMR0PERVCPULOGGER pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3355 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
3356 rc = vmmR0InitLoggerOne(pGVCpu, idxLogger == VMMLOGGER_IDX_RELEASE, pR0Log, pShared, cbBuf,
3357 pchBuf + i * cbBuf * VMMLOGGER_BUFFER_COUNT,
3358 pchBufR3 + i * cbBuf * VMMLOGGER_BUFFER_COUNT);
3359 if (RT_FAILURE(rc))
3360 {
3361 vmmR0TermLoggerOne(pR0Log, pShared);
3362 while (i-- > 0)
3363 {
3364 pGVCpu = &pGVM->aCpus[i];
3365 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.u.aLoggers[idxLogger], &pGVCpu->vmm.s.u.aLoggers[idxLogger]);
3366 }
3367 break;
3368 }
3369 }
3370 if (RT_SUCCESS(rc))
3371 return VINF_SUCCESS;
3372
3373 /* Bail out. */
3374 RTR0MemObjFree(*phMapObj, false /*fFreeMappings*/);
3375 *phMapObj = NIL_RTR0MEMOBJ;
3376 }
3377 RTR0MemObjFree(*phMemObj, true /*fFreeMappings*/);
3378 *phMemObj = NIL_RTR0MEMOBJ;
3379 }
3380 return rc;
3381}
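
/*
 * Layout recap, derived from the allocation and pointer arithmetic above: one
 * contiguous allocation of cbBuf * cCpus * VMMLOGGER_BUFFER_COUNT bytes holds
 * all buffers, e.g. with two CPUs and two buffers each:
 *
 *      [cpu0/buf0][cpu0/buf1][cpu1/buf0][cpu1/buf1]
 *
 * so a given buffer starts at pchBuf + iCpu * cbBuf * VMMLOGGER_BUFFER_COUNT + iBuf * cbBuf.
 */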
3382
3383
3384/**
3385 * Worker for VMMR0InitPerVMData that initializes all the logging related stuff.
3386 *
3387 * @returns VBox status code.
3388 * @param pGVM The global (ring-0) VM structure.
3389 */
3390static int vmmR0InitLoggers(PGVM pGVM)
3391{
3392 /*
3393 * Invalidate the ring buffer (not really necessary).
3394 */
3395 for (size_t idx = 0; idx < RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); idx++)
3396 pGVM->vmmr0.s.LogFlusher.aRing[idx].u32 = UINT32_MAX >> 1; /* (all bits except fProcessing set) */
3397
3398 /*
3399 * Create the spinlock and flusher event semaphore.
3400 */
3401 int rc = RTSpinlockCreate(&pGVM->vmmr0.s.LogFlusher.hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VM-Log-Flusher");
3402 if (RT_SUCCESS(rc))
3403 {
3404 rc = RTSemEventCreate(&pGVM->vmmr0.s.LogFlusher.hEvent);
3405 if (RT_SUCCESS(rc))
3406 {
3407 /*
3408 * Create the ring-0 release loggers.
3409 */
3410 rc = vmmR0InitLoggerSet(pGVM, VMMLOGGER_IDX_RELEASE, _4K,
3411 &pGVM->vmmr0.s.hMemObjReleaseLogger, &pGVM->vmmr0.s.hMapObjReleaseLogger);
3412#ifdef LOG_ENABLED
3413 if (RT_SUCCESS(rc))
3414 {
3415 /*
3416 * Create debug loggers.
3417 */
3418 rc = vmmR0InitLoggerSet(pGVM, VMMLOGGER_IDX_REGULAR, _64K,
3419 &pGVM->vmmr0.s.hMemObjLogger, &pGVM->vmmr0.s.hMapObjLogger);
3420 }
3421#endif
3422 }
3423 }
3424 return rc;
3425}
3426
3427
3428/**
3429 * Counterpart to vmmR0InitLoggers that cleans up all the logging related stuff.
3430 *
3431 * @param pGVM The global (ring-0) VM structure.
3432 */
3433static void vmmR0CleanupLoggers(PGVM pGVM)
3434{
3435 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
3436 {
3437 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
3438 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
3439 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.u.aLoggers[iLogger], &pGVCpu->vmm.s.u.aLoggers[iLogger]);
3440 }
3441
3442 /*
3443 * Free logger buffer memory.
3444 */
3445 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjReleaseLogger, false /*fFreeMappings*/);
3446 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
3447 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjReleaseLogger, true /*fFreeMappings*/);
3448 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
3449
3450 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjLogger, false /*fFreeMappings*/);
3451 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
3452 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjLogger, true /*fFreeMappings*/);
3453 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
3454
3455 /*
3456 * Free log flusher related stuff.
3457 */
3458 RTSpinlockDestroy(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3459 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK;
3460 RTSemEventDestroy(pGVM->vmmr0.s.LogFlusher.hEvent);
3461 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT;
3462}
3463
3464
3465/*********************************************************************************************************************************
3466* Assertions *
3467*********************************************************************************************************************************/
3468
3469/**
3470 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
3471 *
3472 * @returns true if the breakpoint should be hit, false if it should be ignored.
3473 */
3474DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
3475{
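    /* Flip the #if below to 1 to make every ring-0 assertion hit the breakpoint
       (useful when debugging). */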
3476#if 0
3477 return true;
3478#else
3479 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3480 if (pVM)
3481 {
3482 PVMCPUCC pVCpu = VMMGetCpu(pVM);
3483
3484 if (pVCpu)
3485 {
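            /* A non-zero instruction pointer in the jump buffer means the ring-3
               longjmp is armed; only use it when not already in a ring-3 call. */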
3486# ifdef RT_ARCH_X86
3487 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
3488 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
3489# else
3490 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
3491 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
3492# endif
3493 {
3494 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
3495 return RT_FAILURE_NP(rc);
3496 }
3497 }
3498 }
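    /* Not an EMT with an armed longjmp: hit the breakpoint on Linux hosts,
       ignore it everywhere else. */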
3499# ifdef RT_OS_LINUX
3500 return true;
3501# else
3502 return false;
3503# endif
3504#endif
3505}
3506
3507
3508/*
3509 * Override this so we can push it up to ring-3.
3510 */
3511DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
3512{
3513 /*
3514 * To host kernel log/whatever.
3515 */
3516 SUPR0Printf("!!R0-Assertion Failed!!\n"
3517 "Expression: %s\n"
3518 "Location : %s(%d) %s\n",
3519 pszExpr, pszFile, uLine, pszFunction);
3520
3521 /*
3522 * To the log.
3523 */
3524 LogAlways(("\n!!R0-Assertion Failed!!\n"
3525 "Expression: %s\n"
3526 "Location : %s(%d) %s\n",
3527 pszExpr, pszFile, uLine, pszFunction));
3528
3529 /*
3530 * To the global VMM buffer.
3531 */
3532 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3533 if (pVM)
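        /* Limit the expression to roughly 3/4 of the buffer so the location line still fits. */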
3534 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
3535 "\n!!R0-Assertion Failed!!\n"
3536 "Expression: %.*s\n"
3537 "Location : %s(%d) %s\n",
3538 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
3539 pszFile, uLine, pszFunction);
3540
3541 /*
3542 * Continue the normal way.
3543 */
3544 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
3545}
3546
3547
3548/**
3549 * Callback for RTLogFormatV which writes to the ring-3 log port.
3550 * See PFNLOGOUTPUT() for details.
3551 */
3552static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
3553{
3554 for (size_t i = 0; i < cbChars; i++)
3555 {
3556 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
3557 }
3558
3559 NOREF(pv);
3560 return cbChars;
3561}
3562
3563
3564/*
3565 * Override this so we can push it up to ring-3.
3566 */
3567DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
3568{
3569 va_list vaCopy;
3570
3571 /*
3572 * Push the message to the loggers.
3573 */
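    /* Each logger formats from its own va_copy so the original list stays valid
       for RTAssertMsg2V at the bottom. */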
3574 PRTLOGGER pLog = RTLogRelGetDefaultInstance();
3575 if (pLog)
3576 {
3577 va_copy(vaCopy, va);
3578 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3579 va_end(vaCopy);
3580 }
3581 pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
3582 if (pLog)
3583 {
3584 va_copy(vaCopy, va);
3585 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3586 va_end(vaCopy);
3587 }
3588
3589 /*
3590 * Push it to the global VMM buffer.
3591 */
3592 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3593 if (pVM)
3594 {
3595 va_copy(vaCopy, va);
3596 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
3597 va_end(vaCopy);
3598 }
3599
3600 /*
3601 * Continue the normal way.
3602 */
3603 RTAssertMsg2V(pszFormat, va);
3604}
3605