VirtualBox

source: vbox/trunk/src/VBox/VMM/include/CPUMInternal.h

Last change on this file was 109309, checked in by vboxsync, 5 days ago

VMM/CPUM: Mostly reworked the collection of ARM host features. jiraref:VBP-1653

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 26.3 KB
Line 
1/* $Id: CPUMInternal.h 109309 2025-05-16 21:41:53Z vboxsync $ */
2/** @file
3 * CPUM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.215389.xyz.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_CPUMInternal_h
29#define VMM_INCLUDED_SRC_include_CPUMInternal_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#ifndef VBOX_FOR_DTRACE_LIB
35# include <VBox/cdefs.h>
36# include <VBox/types.h>
37# include <VBox/vmm/stam.h>
38# include <iprt/x86.h>
39# include <VBox/vmm/pgm.h>
40#else
41# pragma D depends_on library x86.d
42# pragma D depends_on library cpumctx.d
43# pragma D depends_on library cpum.d
44
45/* Some fudging. */
46typedef uint64_t STAMCOUNTER;
47#endif
48
49
50
51
52/** @defgroup grp_cpum_int Internals
53 * @ingroup grp_cpum
54 * @internal
55 * @{
56 */
57
58/** Use flags (CPUM::fUseFlags).
59 * (Don't forget to sync this with CPUMInternal.mac !)
60 * @note Was part of saved state (6.1 and earlier).
61 * @{ */
62#if defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
63
64/** Indicates that we've saved the host FPU, SSE, whatever state and that it
65 * needs to be restored. */
66#define CPUM_USED_FPU_HOST RT_BIT(0)
67/** Indicates that we've loaded the guest FPU, SSE, whatever state and that it
68 * needs to be saved.
69 * @note Mirrored in CPUMCTX::fUsedFpuGuest for the HM switcher code. */
70#define CPUM_USED_FPU_GUEST RT_BIT(10)
71/** Used the guest FPU, SSE or such stuff since last we were in REM.
72 * REM syncing is clearing this, lazy FPU is setting it. */
73#define CPUM_USED_FPU_SINCE_REM RT_BIT(1)
74/** The XMM state was manually restored. (AMD only) */
75#define CPUM_USED_MANUAL_XMM_RESTORE RT_BIT(2)
76
77/** Host OS is using SYSENTER and we must NULL the CS. */
78#define CPUM_USE_SYSENTER RT_BIT(3)
79/** Host OS is using SYSCALL and we must NULL the CS. */
80#define CPUM_USE_SYSCALL RT_BIT(4)
81
82/** Debug registers are used by host and that DR7 and DR6 must be saved and
83 * disabled when switching to raw-mode. */
84#define CPUM_USE_DEBUG_REGS_HOST RT_BIT(5)
85/** Records that we've saved the host DRx registers.
86 * In ring-0 this means all (DR0-7), while in raw-mode context this means DR0-3
87 * since DR6 and DR7 are covered by CPUM_USE_DEBUG_REGS_HOST. */
88#define CPUM_USED_DEBUG_REGS_HOST RT_BIT(6)
89/** Set to indicate that we should save host DR0-7 and load the hypervisor debug
90 * registers in the raw-mode world switchers. (See CPUMRecalcHyperDRx.) */
91#define CPUM_USE_DEBUG_REGS_HYPER RT_BIT(7)
92/** Used in ring-0 to indicate that we have loaded the hypervisor debug
93 * registers. */
94#define CPUM_USED_DEBUG_REGS_HYPER RT_BIT(8)
95/** Used in ring-0 to indicate that we have loaded the guest debug
96 * registers (DR0-3 and maybe DR6) for direct use by the guest.
97 * DR7 (and AMD-V DR6) are handled via the VMCB. */
98#define CPUM_USED_DEBUG_REGS_GUEST RT_BIT(9)
99
100/** Host CPU requires fxsave/fxrstor leaky bit handling. */
101#define CPUM_USE_FFXSR_LEAKY RT_BIT(19)
102/** Set if the VM supports long-mode. */
103#define CPUM_USE_SUPPORTS_LONGMODE RT_BIT(20)
104
105#endif
106/** @} */
107
108
109/** @name CPUM Saved State Version.
110 * @{ */
111
112/** The current saved state version.
113 * @todo AMD64:When bumping to next version, add CPUMCTX::enmHwVirt and
114 * uMicrocodeRevision to the saved state. */
115#if defined(VBOX_VMM_TARGET_X86)
116# define CPUM_SAVED_STATE_VERSION CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_4
117#elif defined(VBOX_VMM_TARGET_ARMV8)
118# define CPUM_SAVED_STATE_VERSION CPUM_SAVED_STATE_VERSION_ARMV8_V2
119#endif
120
121#if defined(VBOX_VMM_TARGET_X86)
122/** The saved state version with u32RestoreProcCtls2 for Nested Microsoft
123 * Hyper-V. */
124# define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_4 23
125/** The saved state version with more virtual VMCS fields (HLAT prefix size,
126 * PCONFIG-exiting bitmap, HLAT ptr, VM-exit ctls2) and a CPUMCTX field (VM-exit
127 * ctls2 MSR). */
128# define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_3 22
129/** The saved state version with PAE PDPEs added. */
130# define CPUM_SAVED_STATE_VERSION_PAE_PDPES 21
131/** The saved state version with more virtual VMCS fields and CPUMCTX VMX fields. */
132# define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2 20
133/** The saved state version including VMX hardware virtualization state. */
134# define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX 19
135/** The saved state version including SVM hardware virtualization state. */
136# define CPUM_SAVED_STATE_VERSION_HWVIRT_SVM 18
137/** The saved state version including XSAVE state. */
138# define CPUM_SAVED_STATE_VERSION_XSAVE 17
139/** The saved state version with good CPUID leaf count. */
140# define CPUM_SAVED_STATE_VERSION_GOOD_CPUID_COUNT 16
141/** CPUID changes with explode forgetting to update the leaf count on
142 * restore, resulting in garbage being saved (restoring+saving old states). */
143# define CPUM_SAVED_STATE_VERSION_BAD_CPUID_COUNT 15
144/** The saved state version before the CPUIDs changes. */
145# define CPUM_SAVED_STATE_VERSION_PUT_STRUCT 14
146/** The saved state version before using SSMR3PutStruct. */
147# define CPUM_SAVED_STATE_VERSION_MEM 13
148/** The saved state version before introducing the MSR size field. */
149# define CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE 12
150/** The saved state version of 3.2, 3.1 and 3.3 trunk before the hidden
151 * selector register change (CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID). */
152# define CPUM_SAVED_STATE_VERSION_VER3_2 11
153/** The saved state version of 3.0 and 3.1 trunk before the teleportation
154 * changes. */
155# define CPUM_SAVED_STATE_VERSION_VER3_0 10
156/** The saved state version for the 2.1 trunk before the MSR changes. */
157# define CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR 9
158/** The saved state version of 2.0, used for backwards compatibility. */
159# define CPUM_SAVED_STATE_VERSION_VER2_0 8
160/** The saved state version of 1.6, used for backwards compatibility. */
161# define CPUM_SAVED_STATE_VERSION_VER1_6 6
162#endif
163
164#if defined(VBOX_VMM_TARGET_ARMV8)
165/** Adds ACTLR_EL1 to the ARMv8 saved state. */
166# define CPUM_SAVED_STATE_VERSION_ARMV8_V2 2
167/** The initial ARMv8 saved state. */
168# define CPUM_SAVED_STATE_VERSION_ARMV8_V1 1
169#endif
170/** @} */
171
172
173#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86)
174/** @name AMD64: XSAVE limits.
175 * @{ */
176/** Max size we accept for the XSAVE area.
177 * @see CPUMCTX::abXSave */
178#define CPUM_MAX_XSAVE_AREA_SIZE (0x4000 - 0x300)
179/** Min size we accept for the XSAVE area. */
180#define CPUM_MIN_XSAVE_AREA_SIZE 0x240
181/** @} */
182#endif
183
184/**
185 * CPU info
186 */
187typedef struct CPUMINFO
188{
189#if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
190 /** The number of MSR ranges (CPUMMSRRANGE) in the array pointed to below. */
191 uint32_t cMsrRanges;
192 /** Mask applied to ECX before looking up the MSR for a RDMSR/WRMSR
193 * instruction. Older hardware has been observed to ignore higher bits. */
194 uint32_t fMsrMask;
195
196 /** MXCSR mask. */
197 uint32_t fMxCsrMask;
198
199 /** The number of CPUID leaves (CPUMCPUIDLEAF) in the array pointed to below. */
200 uint32_t cCpuIdLeaves;
201 /** The index of the first extended CPUID leaf in the array.
202 * Set to cCpuIdLeaves if none present. */
203 uint32_t iFirstExtCpuIdLeaf;
204 /** How to handle unknown CPUID leaves. */
205 CPUMUNKNOWNCPUID enmUnknownCpuIdMethod;
206 /** For use with CPUMUNKNOWNCPUID_DEFAULTS (DB & VM),
207 * CPUMUNKNOWNCPUID_LAST_STD_LEAF (VM) and CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX (VM). */
208 CPUMCPUID DefCpuId;
209
210 /** Scalable bus frequency used for reporting other frequencies. */
211 uint64_t uScalableBusFreq;
212
213 /** The microcode revision.
214 * UINT32_MAX if the one from the CPU database entry is to be used.
215 * @see /CPUM/GuestMicrocodeRevision in CFGM. */
216 uint32_t uMicrocodeRevision;
217 uint32_t uPadding;
218
219 /** Pointer to the MSR ranges (for compatibility with old hyper heap code). */
220 R3PTRTYPE(PCPUMMSRRANGE) paMsrRangesR3;
221 /** Pointer to the CPUID leaves (for compatibility with old hyper heap code). */
222 R3PTRTYPE(PCPUMCPUIDLEAF) paCpuIdLeavesR3;
223
224 /** CPUID leaves. */
225 CPUMCPUIDLEAF aCpuIdLeaves[256];
226 /** MSR ranges.
227 * @todo This is insane, so might want to move this into a separate
228 * allocation. The insanity is mainly for more recent AMD CPUs. */
229 CPUMMSRRANGE aMsrRanges[8192];
230
231#elif defined(VBOX_VMM_TARGET_ARMV8)
232 /** The number of system register ranges (CPUMSSREGRANGE) in the array pointed to below. */
233 uint32_t cSysRegRanges;
234 uint32_t uPadding;
235
236 /** Pointer to the sysrem register ranges. */
237 R3PTRTYPE(PCPUMSYSREGRANGE) paSysRegRangesR3;
238
239 /** System register ranges. */
240 CPUMSYSREGRANGE aSysRegRanges[128];
241#else
242# error "port me"
243#endif
244} CPUMINFO;
245/** Pointer to a CPU info structure. */
246typedef CPUMINFO *PCPUMINFO;
247/** Pointer to a const CPU info structure. */
248typedef CPUMINFO const *CPCPUMINFO;
249
250
251#ifdef RT_ARCH_AMD64
252/**
253 * The saved host CPU state.
254 */
255typedef struct CPUMHOSTCTX
256{
257 /** The extended state (FPU/SSE/AVX/AVX-2/XXXX). Must be aligned on 64 bytes. */
258 union /* no tag */
259 {
260 X86XSAVEAREA XState;
261 /** Byte view for simple indexing and space allocation.
262 * @note Must match or exceed the size of CPUMCTX::abXState. */
263 uint8_t abXState[0x4000 - 0x300];
264 } CPUM_UNION_NM(u);
265
266 /** General purpose register, selectors, flags and more
267 * @{ */
268 /** General purpose register ++
269 * { */
270 /*uint64_t rax; - scratch*/
271 uint64_t rbx;
272 /*uint64_t rcx; - scratch*/
273 /*uint64_t rdx; - scratch*/
274 uint64_t rdi;
275 uint64_t rsi;
276 uint64_t rbp;
277 uint64_t rsp;
278 /*uint64_t r8; - scratch*/
279 /*uint64_t r9; - scratch*/
280 uint64_t r10;
281 uint64_t r11;
282 uint64_t r12;
283 uint64_t r13;
284 uint64_t r14;
285 uint64_t r15;
286 /*uint64_t rip; - scratch*/
287 uint64_t rflags;
288 /** @} */
289
290 /** Selector registers
291 * @{ */
292 RTSEL ss;
293 RTSEL ssPadding;
294 RTSEL gs;
295 RTSEL gsPadding;
296 RTSEL fs;
297 RTSEL fsPadding;
298 RTSEL es;
299 RTSEL esPadding;
300 RTSEL ds;
301 RTSEL dsPadding;
302 RTSEL cs;
303 RTSEL csPadding;
304 /** @} */
305
306 /** Control registers.
307 * @{ */
308 /** The CR0 FPU state in HM mode. */
309 uint64_t cr0;
310 /*uint64_t cr2; - scratch*/
311 uint64_t cr3;
312 uint64_t cr4;
313 uint64_t cr8;
314 /** @} */
315
316 /** Debug registers.
317 * @{ */
318 uint64_t dr0;
319 uint64_t dr1;
320 uint64_t dr2;
321 uint64_t dr3;
322 uint64_t dr6;
323 uint64_t dr7;
324 /** @} */
325
326 /** Global Descriptor Table register. */
327 X86XDTR64 gdtr;
328 uint16_t gdtrPadding;
329 /** Interrupt Descriptor Table register. */
330 X86XDTR64 idtr;
331 uint16_t idtrPadding;
332 /** The local descriptor table register. */
333 RTSEL ldtr;
334 RTSEL ldtrPadding;
335 /** The task register. */
336 RTSEL tr;
337 RTSEL trPadding;
338
339 /** MSRs
340 * @{ */
341 CPUMSYSENTER SysEnter;
342 uint64_t FSbase;
343 uint64_t GSbase;
344 uint64_t efer;
345 /** @} */
346
347 /** The XCR0 register. */
348 uint64_t xcr0;
349 /** The mask to pass to XSAVE/XRSTOR in EDX:EAX. If zero we use
350 * FXSAVE/FXRSTOR (since bit 0 will always be set, we only need to test it). */
351 uint64_t fXStateMask;
352
353 /* Padding to get a 64-byte aligned size. */
354 uint8_t auPadding[24];
355# if HC_ARCH_BITS != 64
356# error HC_ARCH_BITS not defined or unsupported
357# endif
358} CPUMHOSTCTX;
359# ifndef VBOX_FOR_DTRACE_LIB
360AssertCompileSizeAlignment(CPUMHOSTCTX, 64);
361# endif
362/** Pointer to the saved host CPU state. */
363typedef CPUMHOSTCTX *PCPUMHOSTCTX;
364#endif /* RT_ARCH_AMD64 */
365
366
367#if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
368/**
369 * The hypervisor context CPU state (just DRx left now).
370 */
371typedef struct CPUMHYPERCTX
372{
373 /** Debug registers.
374 * @remarks DR4 and DR5 should not be used since they are aliases for
375 * DR6 and DR7 respectively on both AMD and Intel CPUs.
376 * @remarks DR8-15 are currently not supported by AMD or Intel, so
377 * neither do we.
378 */
379 uint64_t dr[8];
380 /** @todo eliminate the rest. */
381 uint64_t cr3;
382 uint64_t au64Padding[7];
383} CPUMHYPERCTX;
384# ifndef VBOX_FOR_DTRACE_LIB
385AssertCompileSizeAlignment(CPUMHYPERCTX, 64);
386# endif
387/** Pointer to the hypervisor context CPU state. */
388typedef CPUMHYPERCTX *PCPUMHYPERCTX;
389#endif
390
391
392/**
393 * CPUM Data (part of VM)
394 */
395typedef struct CPUM
396{
397 /** Guest CPU feature information.
398 * Externally visible via the VM structure, aligned with HostFeatures. */
399 CPUMFEATURES GuestFeatures;
400 /** Host CPU feature information.
401 * Externally visible via the VM structure, aligned on a 64-byte boundary. */
402 CPUHOSTFEATURES HostFeatures;
403
404 /** The (more) portable CPUID level. */
405 uint8_t u8PortableCpuIdLevel;
406 /** Indicates that a state restore is pending.
407 * This is used to verify load order dependencies (PGM). */
408 bool fPendingRestore;
409
410#if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
411 /** Whether MTRR reads report valid memory types for memory regions. */
412 bool fMtrrRead;
413 /** Whether the guest's writes to MTRRs are implemented. */
414 bool fMtrrWrite;
415 /** Use flags.
416 * These flags indicate which CPU features the host uses.
417 */
418 uint32_t fHostUseFlags;
419
420 /** XSAVE/XRSTOR components we can expose to the guest mask. */
421 uint64_t fXStateGuestMask;
422 /** XSAVE/XRSTOR host mask. Only state components in this mask can be exposed
423 * to the guest. This is 0 if no XSAVE/XRSTOR bits can be exposed. */
424 uint64_t fXStateHostMask;
425
426 /** Random value we store in the reserved RFLAGS bits we don't use ourselves so
427 * we can detect corruption. */
428 uint64_t fReservedRFlagsCookie;
429
430 /** Guest CPU info. */
431 CPUMINFO GuestInfo;
432
433 /** The standard set of CpuId leaves. */
434 CPUMCPUID aGuestCpuIdPatmStd[6];
435 /** The extended set of CpuId leaves. */
436 CPUMCPUID aGuestCpuIdPatmExt[10];
437 /** The centaur set of CpuId leaves. */
438 CPUMCPUID aGuestCpuIdPatmCentaur[4];
439
440 /** @name MSR statistics.
441 * @{ */
442 STAMCOUNTER cMsrWrites;
443 STAMCOUNTER cMsrWritesToIgnoredBits;
444 STAMCOUNTER cMsrWritesRaiseGp;
445 STAMCOUNTER cMsrWritesUnknown;
446 STAMCOUNTER cMsrReads;
447 STAMCOUNTER cMsrReadsRaiseGp;
448 STAMCOUNTER cMsrReadsUnknown;
449 /** @} */
450
451#elif defined(VBOX_VMM_TARGET_ARMV8)
452 /** The initial exception level (EL) to start the CPU after a reset,
453 * should be either ARMV8_AARCH64_EL_1 or ARMV8_AARCH64_EL_2 for nested virtualization. */
454 uint8_t bResetEl;
455 uint8_t abPadding0[5];
456
457 /** The reset value of the program counter. */
458 uint64_t u64ResetPc;
459
460 /** Guest CPU info. */
461 CPUMINFO GuestInfo;
462 /** Guest CPU ID registers. */
463 CPUMARMV8IDREGS GuestIdRegs;
464
465 /** @name System register statistics.
466 * @{ */
467 STAMCOUNTER cSysRegWrites;
468 STAMCOUNTER cSysRegWritesToIgnoredBits;
469 STAMCOUNTER cSysRegWritesRaiseExcp;
470 STAMCOUNTER cSysRegWritesUnknown;
471 STAMCOUNTER cSysRegReads;
472 STAMCOUNTER cSysRegReadsRaiseExcp;
473 STAMCOUNTER cSysRegReadsUnknown;
474 /** @} */
475#endif
476
477#ifdef RT_ARCH_ARM64
478 /** Host system registers used for identification. */
479 PSUPARMSYSREGVAL paHostIdRegs;
480 /** Number of registers in paHostIdRegs. */
481 uint32_t cHostIdRegs;
482
483 /** Host CPU ID registers. */
484 CPUMARMV8IDREGS HostIdRegs;
485
486#elif defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
487 /** The host MXCSR mask (determined at init). */
488 uint32_t fHostMxCsrMask;
489#endif
490} CPUM;
491#ifndef VBOX_FOR_DTRACE_LIB
492AssertCompileMemberOffset(CPUM, GuestFeatures, 0);
493AssertCompileMemberOffset(CPUM, HostFeatures, 64);
494AssertCompileMemberOffset(CPUM, u8PortableCpuIdLevel, 128);
495#endif
496/** Pointer to the CPUM instance data residing in the shared VM structure. */
497typedef CPUM *PCPUM;
498
499/**
500 * CPUM Data (part of VMCPU)
501 */
502typedef struct CPUMCPU
503{
504 /** Guest context.
505 * Aligned on a 64-byte boundary. */
506 CPUMCTX Guest;
507#if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
508 /** Guest context - misc MSRs
509 * Aligned on a 64-byte boundary. */
510 CPUMCTXMSRS GuestMsrs;
511#endif
512#ifdef RT_ARCH_AMD64
513 /** Saved host context. Only valid while inside RC or HM contexts.
514 * Must be aligned on a 64-byte boundary. */
515 CPUMHOSTCTX Host;
516#endif
517
518 /** Use flags.
519 * These flags indicate both what is to be used and what has been used. */
520 uint32_t fUseFlags;
521
522 /** Changed flags.
523 * These flags indicate to REM (and others) which important guest
524 * registers have been changed since the last time the flags were cleared.
525 * See the CPUM_CHANGED_* defines for what we keep track of.
526 *
527 * @todo Obsolete, but will probably be refactored so keep it for reference. */
528 uint32_t fChanged;
529
530#if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
531 /** Nested VMX: VMX-preemption timer. */
532 TMTIMERHANDLE hNestedVmxPreemptTimer;
533 /** Whether the X86_CPUID_FEATURE_EDX_APIC and X86_CPUID_AMD_FEATURE_EDX_APIC
534 * (?) bits are visible or not. (The APIC is responsible for setting this
535 * when loading state, so we won't save it.) */
536 bool fCpuIdApicFeatureVisible;
537 uint8_t abPadding[7+8];
538
539 /** Old hypervisor context, only used for combined DRx values now.
540 * Must be aligned on a 64-byte boundary. */
541 CPUMHYPERCTX Hyper;
542#endif
543
544#ifdef VBOX_WITH_CRASHDUMP_MAGIC
545 uint8_t aMagic[56];
546 uint64_t uMagic;
547#endif
548} CPUMCPU;
549#ifndef VBOX_FOR_DTRACE_LIB
550# ifdef RT_ARCH_AMD64
551AssertCompileMemberAlignment(CPUMCPU, Host, 64);
552# endif
553# if defined(VBOX_VMM_TARGET_X86)
554AssertCompileAdjacentMembers(CPUMCPU, Guest, GuestMsrs); /* HACK ALERT! HMR0A.asm makes this ASSUMPTION in the SVM RUN code! */
555# endif
556#endif
557/** Pointer to the CPUMCPU instance data residing in the shared VMCPU structure. */
558typedef CPUMCPU *PCPUMCPU;
559
560#ifndef VBOX_FOR_DTRACE_LIB
561RT_C_DECLS_BEGIN
562
563# if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
564PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf);
565PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit);
566# endif
567# if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86)
568PCPUMCPUIDLEAF cpumCpuIdGetLeafInt(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf);
569PCPUMCPUIDLEAF cpumCpuIdEnsureSpace(PVM pVM, PCPUMCPUIDLEAF *ppaLeaves, uint32_t cLeaves);
570# ifdef VBOX_STRICT
571void cpumCpuIdAssertOrder(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves);
572# endif
573int cpumCpuIdExplodeFeaturesX86(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, CPUMFEATURESX86 *pFeatures);
574void cpumCpuIdExplodeFeaturesX86SetSummaryBits(CPUMFEATURESX86 *pFeatures);
575DECLHIDDEN(void) cpumCpuIdExplodeFeaturesX86Vmx(struct VMXMSRS const *pVmxMsrs, CPUMFEATURESX86 *pFeatures);
576DECLHIDDEN(void) cpumCpuIdExplodeFeaturesX86VmxFromSupMsrs(PCSUPHWVIRTMSRS pMsrs, CPUMFEATURESX86 *pFeatures);
577void cpumCpuIdExplodeArchCapabilities(CPUMFEATURESX86 *pFeatures, bool fHasArchCap, uint64_t fArchVal);
578# endif /* defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) */
579# if defined(RT_ARCH_ARM64) || defined(VBOX_VMM_TARGET_ARMV8)
580DECLHIDDEN(int) cpumCpuIdExplodeFeaturesArmV8FromIdRegs(PCCPUMARMV8IDREGS pIdRegs, CPUMFEATURESARMV8 *pFeatures);
581DECLHIDDEN(int) cpumCpuIdExplodeFeaturesArmV8FromSysRegs(PCSUPARMSYSREGVAL paSysRegs, uint32_t cSysRegs,
582 CPUMFEATURESARMV8 *pFeatures);
583# endif
584
585
586# ifdef IN_RING3
587
588/** @name Per-target functions (lives in target specific source files).
589 * @{ */
590/**
591 * Called by CPUMR3Init to do target specific initializations.
592 */
593# if defined(VBOX_VMM_TARGET_X86)
594DECLHIDDEN(int) cpumR3InitTargetX86(PVM pVM, PCSUPHWVIRTMSRS pHostMsrs);
595# else
596DECLHIDDEN(int) cpumR3InitTarget(PVM pVM);
597# endif
598/**
599 * Called by CPUMR3InitCompleted when ring-3 init is completed.
600 */
601DECLHIDDEN(int) cpumR3InitCompletedRing3Target(PVM pVM);
602
603/**
604 * Performs target specific termination cleanups at the end of CPUMR3Term().
605 */
606DECLHIDDEN(int) cpumR3TermTarget(PVM pVM);
607
608/**
609 * @callback_method_impl{FNSSMINTLIVEEXEC,
610 * Implemented directly in target specific source file -
611 * returns VINF_SSM_DONT_CALL_AGAIN.}
612 */
613DECLCALLBACK(int) cpumR3LiveExecTarget(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
614
615/**
616 * @callback_method_impl{FNSSMINTSAVEEXEC,
617 * Implemented directly in target specific source file.}
618 */
619DECLCALLBACK(int) cpumR3SaveExecTarget(PVM pVM, PSSMHANDLE pSSM);
620
621/**
622 * @callback_method_impl{FNSSMINTLOADEXEC}
623 */
624DECLCALLBACK(int) cpumR3LoadExecTarget(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
625
626/**
627 * Target specific done-loading function, called by cpumR3LoadDone.
628 */
629DECLHIDDEN(int) cpumR3LoadDoneTarget(PVM pVM, PSSMHANDLE pSSM);
630
631/** What kind of cpu info dump to perform.*/
632typedef enum CPUMDUMPTYPE { CPUMDUMPTYPE_TERSE = 1, CPUMDUMPTYPE_DEFAULT, CPUMDUMPTYPE_VERBOSE } CPUMDUMPTYPE;
633/**
634 * Dump guest registers (target specific).
635 */
636DECLHIDDEN(void) cpumR3InfoOneTarget(PVM pVM, PCVMCPU pVCpu, PCDBGFINFOHLP pHlp, CPUMDUMPTYPE enmType);
637
638/**
639 * Called by CPUMR3LogCpuIdAndMsrFeatures to do target specific work.
640 *
641 * @param pVM The cross context VM structure.
642 */
643DECLHIDDEN(void) cpumR3LogCpuIdAndMsrFeaturesTarget(PVM pVM);
644
645/**
646 * Initializes the debugger related sides of the CPUM component.
647 *
648 * Called by CPUMR3Init.
649 *
650 * @returns VBox status code.
651 * @param pVM The cross context VM structure.
652 */
653DECLHIDDEN(int) cpumR3DbgInitTarget(PVM pVM);
654/** @} */
655
656
657# if defined(VBOX_VMM_TARGET_X86)
658DECLCALLBACK(void) cpumR3InfoGuestHwvirt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
659DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
660# endif
661# if defined(VBOX_VMM_TARGET_ARMV8)
662DECLHIDDEN(int) cpumR3SysRegStrictInitChecks(void);
663# elif defined(VBOX_VMM_TARGET_X86)
664int cpumR3InitCpuIdAndMsrs(PVM pVM, PCSUPHWVIRTMSRS pHostMsrs);
665DECLHIDDEN(void) cpumR3InitVmxGuestFeaturesAndMsrs(PVM pVM, PCFGMNODE pCpumCfg, PCSUPHWVIRTMSRS pHostMsrs,
666 PVMXMSRS pGuestVmxMsrs);
667void cpumR3CpuIdRing3InitDone(PVM pVM);
668# endif
669void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM);
670# ifdef VBOX_VMM_TARGET_X86
671int cpumR3LoadCpuIdX86(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pGuestMsrs);
672int cpumR3LoadCpuIdPre32(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion);
673# elif defined(VBOX_VMM_TARGET_ARMV8)
674int cpumR3LoadCpuIdArmV8(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion);
675DECLCALLBACK(void) cpumR3CpuFeatInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
676# endif
677DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
678DECLHIDDEN(void) cpumR3InfoParseArg(const char *pszArgs, CPUMDUMPTYPE *penmType, const char **ppszComment);
679
680int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo);
681# ifdef VBOX_VMM_TARGET_X86
682DECLHIDDEN(void) cpumR3InfoFormatFlagsX86(char *pszEFlags, uint32_t efl);
683int cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange);
684DECLHIDDEN(int) cpumR3MsrReconcileWithCpuId(PVM pVM, bool fForceFlushCmd, bool fForceSpecCtrl);
685int cpumR3MsrApplyFudge(PVM pVM);
686int cpumR3MsrRegStats(PVM pVM);
687int cpumR3MsrStrictInitChecks(void);
688PCPUMMSRRANGE cpumLookupMsrRange(PVM pVM, uint32_t idMsr);
689# endif
690# endif /* IN_RING3 */
691
692# ifdef IN_RING0
693# if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
694DECLASM(int) cpumR0SaveHostRestoreGuestFPUState(PCPUMCPU pCPUM);
695DECLASM(void) cpumR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUM);
696# if ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
697DECLASM(void) cpumR0RestoreHostFPUState(PCPUMCPU pCPUM);
698# endif
699# endif
700# endif
701
702# if defined(IN_RC) || defined(IN_RING0)
703# if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
704DECLASM(int) cpumRZSaveHostFPUState(PCPUMCPU pCPUM);
705DECLASM(void) cpumRZSaveGuestFpuState(PCPUMCPU pCPUM, bool fLeaveFpuAccessible);
706DECLASM(void) cpumRZSaveGuestSseRegisters(PCPUMCPU pCPUM);
707DECLASM(void) cpumRZSaveGuestAvxRegisters(PCPUMCPU pCPUM);
708# endif
709# endif
710
711RT_C_DECLS_END
712#endif /* !VBOX_FOR_DTRACE_LIB */
713
714/** @} */
715
716#endif /* !VMM_INCLUDED_SRC_include_CPUMInternal_h */
717
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette