VirtualBox

source: vbox/trunk/include/VBox/vmm/hmvmxinline.h@80145

Last change on this file since 80145 was 80145, checked in by vboxsync, 6 years ago

VMM/HMVMXR0: Nested VMX: bugref:9180 Naming nit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 39.6 KB
1/** @file
2 * HM - VMX Structures and Definitions. (VMM)
3 */
4
5/*
6 * Copyright (C) 2006-2019 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.215389.xyz. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef VBOX_INCLUDED_vmm_hmvmxinline_h
27#define VBOX_INCLUDED_vmm_hmvmxinline_h
28#ifndef RT_WITHOUT_PRAGMA_ONCE
29# pragma once
30#endif
31
32#include <VBox/vmm/hm_vmx.h>
33#include <VBox/err.h>
34
35/* In Visual C++ versions prior to 2012, the vmx intrinsics are only available
36 when targeting AMD64. */
37#if RT_INLINE_ASM_USES_INTRIN >= 16 && defined(RT_ARCH_AMD64)
38# pragma warning(push)
39# pragma warning(disable:4668) /* Several incorrect __cplusplus uses. */
40# pragma warning(disable:4255) /* Incorrect __slwpcb prototype. */
41# include <intrin.h>
42# pragma warning(pop)
43/* We always want these as intrinsics, not function calls. */
44# pragma intrinsic(__vmx_on)
45# pragma intrinsic(__vmx_off)
46# pragma intrinsic(__vmx_vmclear)
47# pragma intrinsic(__vmx_vmptrld)
48# pragma intrinsic(__vmx_vmread)
49# pragma intrinsic(__vmx_vmwrite)
50# define VMX_USE_MSC_INTRINSICS 1
51#else
52# define VMX_USE_MSC_INTRINSICS 0
53#endif
54
55/* Skip checking VMREAD/VMWRITE failures on non-strict builds. */
56#ifndef VBOX_STRICT
57# define VBOX_WITH_VMREAD_VMWRITE_NOCHECK
58#endif
59
60
61/** @defgroup grp_hm_vmx_inline VMX Inline Helpers
62 * @ingroup grp_hm_vmx
63 * @{
64 */
65/**
66 * Gets the effective width of a VMCS field given its encoding, adjusted for
67 * HIGH/FULL access for 64-bit fields.
68 *
69 * @returns The effective VMCS field width.
70 * @param uFieldEnc The VMCS field encoding.
71 *
72 * @remarks Warning! This function does not verify that the encoding is for a valid and
73 * supported VMCS field.
74 */
75DECLINLINE(uint8_t) VMXGetVmcsFieldWidthEff(uint32_t uFieldEnc)
76{
77 /* Only the "HIGH" parts of all 64-bit fields have bit 0 set. */
78 if (uFieldEnc & RT_BIT(0))
79 return VMXVMCSFIELDWIDTH_32BIT;
80
81 /* Bits 14:13 contain the width of the VMCS field; see VMXVMCSFIELDWIDTH_XXX. */
82 return (uFieldEnc >> 13) & 0x3;
83}
84
85/**
86 * Returns whether the given VMCS field is a read-only VMCS field or not.
87 *
88 * @returns @c true if it's a read-only field, @c false otherwise.
89 * @param uFieldEnc The VMCS field encoding.
90 *
91 * @remarks Warning! This function does not verify that the encoding is for a valid
92 * and/or supported VMCS field.
93 */
94DECLINLINE(bool) VMXIsVmcsFieldReadOnly(uint32_t uFieldEnc)
95{
96 /* See Intel spec. B.4.2 "Natural-Width Read-Only Data Fields". */
97 return (RT_BF_GET(uFieldEnc, VMX_BF_VMCSFIELD_TYPE) == VMXVMCSFIELDTYPE_VMEXIT_INFO);
98}
99
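/*
 * Example usage (illustrative sketch): inspecting a VMCS field encoding with the
 * two helpers above before accessing the field.  VMX_VMCS32_RO_EXIT_REASON is
 * assumed here to be the VM-exit reason field encoding from <VBox/vmm/hm_vmx.h>.
 *
 *     uint32_t const uFieldEnc = VMX_VMCS32_RO_EXIT_REASON;
 *     uint8_t  const uWidth    = VMXGetVmcsFieldWidthEff(uFieldEnc);  -> VMXVMCSFIELDWIDTH_32BIT
 *     bool     const fReadOnly = VMXIsVmcsFieldReadOnly(uFieldEnc);   -> true (VM-exit information field)
 */
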
100/**
101 * Returns whether the given VM-entry interruption-information type is valid or not.
102 *
103 * @returns @c true if it's a valid type, @c false otherwise.
104 * @param fSupportsMTF Whether the Monitor-Trap Flag CPU feature is supported.
105 * @param uType The VM-entry interruption-information type.
106 */
107DECLINLINE(bool) VMXIsEntryIntInfoTypeValid(bool fSupportsMTF, uint8_t uType)
108{
109 /* See Intel spec. 26.2.1.3 "VM-Entry Control Fields". */
110 switch (uType)
111 {
112 case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
113 case VMX_ENTRY_INT_INFO_TYPE_NMI:
114 case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
115 case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
116 case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
117 case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT: return true;
118 case VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT: return fSupportsMTF;
119 default:
120 return false;
121 }
122}
123
124/**
125 * Returns whether the given VM-entry interruption-information vector and type
126 * combination is valid or not.
127 *
128 * @returns @c true if it's a valid vector/type combination, @c false otherwise.
129 * @param uVector The VM-entry interruption-information vector.
130 * @param uType The VM-entry interruption-information type.
131 *
132 * @remarks Warning! This function does not validate the type field individually.
133 * Use it after verifying that the type is valid using VMXIsEntryIntInfoTypeValid.
134 */
135DECLINLINE(bool) VMXIsEntryIntInfoVectorValid(uint8_t uVector, uint8_t uType)
136{
137 /* See Intel spec. 26.2.1.3 "VM-Entry Control Fields". */
138 if ( uType == VMX_ENTRY_INT_INFO_TYPE_NMI
139 && uVector != X86_XCPT_NMI)
140 return false;
141 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
142 && uVector > X86_XCPT_LAST)
143 return false;
144 if ( uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
145 && uVector != VMX_ENTRY_INT_INFO_VECTOR_MTF)
146 return false;
147 return true;
148}
149
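/*
 * Example usage (illustrative sketch): validating a pending VM-entry event before
 * injecting it.  uEntryIntInfo is a hypothetical VM-entry interruption-information
 * value, fSupportsMTF a hypothetical flag for monitor-trap flag support, and
 * VMX_ENTRY_INT_INFO_VECTOR is assumed to be the vector extraction macro from
 * <VBox/vmm/hm_vmx.h>.
 *
 *     uint8_t const uType   = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
 *     uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo);
 *     if (   VMXIsEntryIntInfoTypeValid(fSupportsMTF, uType)
 *         && VMXIsEntryIntInfoVectorValid(uVector, uType))
 *     {
 *         ... the event may be injected ...
 *     }
 */
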
150/**
151 * Returns whether the given VM-exit is trap-like or fault-like.
152 *
153 * @returns @c true if it's a trap-like VM-exit, @c false otherwise.
154 * @param uExitReason The VM-exit reason.
155 *
156 * @remarks Warning! This does not validate the VM-exit reason.
157 */
158DECLINLINE(bool) VMXIsVmexitTrapLike(uint32_t uExitReason)
159{
160 /*
161 * Trap-like VM-exits - The instruction causing the VM-exit completes before the
162 * VM-exit occurs.
163 *
164 * Fault-like VM-exits - The instruction causing the VM-exit is not completed before
165 * the VM-exit occurs.
166 *
167 * See Intel spec. 25.5.2 "Monitor Trap Flag".
168 * See Intel spec. 29.1.4 "EOI Virtualization".
169 * See Intel spec. 29.4.3.3 "APIC-Write VM Exits".
170 * See Intel spec. 29.1.2 "TPR Virtualization".
171 */
172 /** @todo NSTVMX: r=ramshankar: What about VM-exits due to debug traps (single-step,
173 * I/O breakpoints, data breakpoints), debug exceptions (data breakpoint)
174 * delayed by MovSS blocking, machine-check exceptions. */
175 switch (uExitReason)
176 {
177 case VMX_EXIT_MTF:
178 case VMX_EXIT_VIRTUALIZED_EOI:
179 case VMX_EXIT_APIC_WRITE:
180 case VMX_EXIT_TPR_BELOW_THRESHOLD:
181 return true;
182 }
183 return false;
184}
185
186/**
187 * Returns whether the VM-entry is vectoring or not given the VM-entry interruption
188 * information field.
189 *
190 * @returns @c true if the VM-entry is vectoring, @c false otherwise.
191 * @param uEntryIntInfo The VM-entry interruption information field.
192 * @param pEntryIntInfoType The VM-entry interruption information type field.
193 * Optional, can be NULL. Only updated when this
194 * function returns @c true.
195 */
196DECLINLINE(bool) VMXIsVmentryVectoring(uint32_t uEntryIntInfo, uint8_t *pEntryIntInfoType)
197{
198 /*
199 * The definition of what is a vectoring VM-entry is taken
200 * from Intel spec. 26.6 "Special Features of VM Entry".
201 */
202 if (!VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
203 return false;
204
205 /* Scope and keep variable definitions at the top to satisfy archaic C89 nonsense. */
206 {
207 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
208 switch (uType)
209 {
210 case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
211 case VMX_ENTRY_INT_INFO_TYPE_NMI:
212 case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
213 case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
214 case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
215 case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:
216 {
217 if (pEntryIntInfoType)
218 *pEntryIntInfoType = uType;
219 return true;
220 }
221 }
222 }
223 return false;
224}
225/** @} */
226
227
228/** @defgroup grp_hm_vmx_asm VMX Assembly Helpers
229 * @{
230 */
231#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
232
233/**
234 * Restores some host-state fields that need not be done on every VM-exit.
235 *
236 * @returns VBox status code.
237 * @param fRestoreHostFlags Flags indicating which host registers need to be
238 * restored.
239 * @param pRestoreHost Pointer to the host-restore structure.
240 */
241DECLASM(int) VMXRestoreHostState(uint32_t fRestoreHostFlags, PVMXRESTOREHOST pRestoreHost);
242
243
244/**
245 * Dispatches an NMI to the host.
246 */
247DECLASM(int) VMXDispatchHostNmi(void);
248
249
250/**
251 * Executes VMXON.
252 *
253 * @returns VBox status code.
254 * @param HCPhysVmxOn Physical address of VMXON structure.
255 */
256#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
257DECLASM(int) VMXEnable(RTHCPHYS HCPhysVmxOn);
258#else
259DECLINLINE(int) VMXEnable(RTHCPHYS HCPhysVmxOn)
260{
261# if VMX_USE_MSC_INTRINSICS
262 unsigned char rcMsc = __vmx_on(&HCPhysVmxOn);
263 if (RT_LIKELY(rcMsc == 0))
264 return VINF_SUCCESS;
265 return rcMsc == 2 ? VERR_VMX_INVALID_VMXON_PTR : VERR_VMX_VMXON_FAILED;
266
267# elif RT_INLINE_ASM_GNU_STYLE
268# ifdef RT_ARCH_AMD64
269 int rc;
270 __asm__ __volatile__ (
271 "pushq %2 \n\t"
272 ".byte 0xf3, 0x0f, 0xc7, 0x34, 0x24 # VMXON [esp] \n\t"
273 "ja 2f \n\t"
274 "je 1f \n\t"
275 "movl $" RT_XSTR(VERR_VMX_INVALID_VMXON_PTR)", %0 \n\t"
276 "jmp 2f \n\t"
277 "1: \n\t"
278 "movl $" RT_XSTR(VERR_VMX_VMXON_FAILED)", %0 \n\t"
279 "2: \n\t"
280 "add $8, %%rsp \n\t"
281 :"=rm"(rc)
282 :"0"(VINF_SUCCESS),
283 "ir"(HCPhysVmxOn) /* don't allow direct memory reference here, */
284 /* this would not work with -fomit-frame-pointer */
285 :"memory"
286 );
287 return rc;
288# else
289 int rc;
290 __asm__ __volatile__ (
291 "push %3 \n\t"
292 "push %2 \n\t"
293 ".byte 0xf3, 0x0f, 0xc7, 0x34, 0x24 # VMXON [esp] \n\t"
294 "ja 2f \n\t"
295 "je 1f \n\t"
296 "movl $" RT_XSTR(VERR_VMX_INVALID_VMXON_PTR)", %0 \n\t"
297 "jmp 2f \n\t"
298 "1: \n\t"
299 "movl $" RT_XSTR(VERR_VMX_VMXON_FAILED)", %0 \n\t"
300 "2: \n\t"
301 "add $8, %%esp \n\t"
302 :"=rm"(rc)
303 :"0"(VINF_SUCCESS),
304 "ir"((uint32_t)HCPhysVmxOn), /* don't allow direct memory reference here, */
305 "ir"((uint32_t)(HCPhysVmxOn >> 32)) /* this would not work with -fomit-frame-pointer */
306 :"memory"
307 );
308 return rc;
309# endif
310
311# elif defined(RT_ARCH_X86)
312 int rc = VINF_SUCCESS;
313 __asm
314 {
315 push dword ptr [HCPhysVmxOn + 4]
316 push dword ptr [HCPhysVmxOn]
317 _emit 0xf3
318 _emit 0x0f
319 _emit 0xc7
320 _emit 0x34
321 _emit 0x24 /* VMXON [esp] */
322 jnc vmxon_good
323 mov dword ptr [rc], VERR_VMX_INVALID_VMXON_PTR
324 jmp the_end
325
326vmxon_good:
327 jnz the_end
328 mov dword ptr [rc], VERR_VMX_VMXON_FAILED
329the_end:
330 add esp, 8
331 }
332 return rc;
333
334# else
335# error "Shouldn't be here..."
336# endif
337}
338#endif
339
340
341#if 0
342#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
343DECLASM(int) VMXEnable(RTHCPHYS HCPhysVmxOn);
344#else
345DECLINLINE(int) VMXEnable(RTHCPHYS HCPhysVmxOn)
346{
347# if RT_INLINE_ASM_GNU_STYLE
348 int rc = VINF_SUCCESS;
349 __asm__ __volatile__ (
350 "push %3 \n\t"
351 "push %2 \n\t"
352 ".byte 0xf3, 0x0f, 0xc7, 0x34, 0x24 # VMXON [esp] \n\t"
353 "ja 2f \n\t"
354 "je 1f \n\t"
355 "movl $" RT_XSTR(VERR_VMX_INVALID_VMXON_PTR)", %0 \n\t"
356 "jmp 2f \n\t"
357 "1: \n\t"
358 "movl $" RT_XSTR(VERR_VMX_VMXON_FAILED)", %0 \n\t"
359 "2: \n\t"
360 "add $8, %%esp \n\t"
361 :"=rm"(rc)
362 :"0"(VINF_SUCCESS),
363 "ir"((uint32_t)HCPhysVmxOn), /* don't allow direct memory reference here, */
364 "ir"((uint32_t)(HCPhysVmxOn >> 32)) /* this would not work with -fomit-frame-pointer */
365 :"memory"
366 );
367 return rc;
368
369# elif VMX_USE_MSC_INTRINSICS
370 unsigned char rcMsc = __vmx_on(&HCPhysVmxOn);
371 if (RT_LIKELY(rcMsc == 0))
372 return VINF_SUCCESS;
373 return rcMsc == 2 ? VERR_VMX_INVALID_VMXON_PTR : VERR_VMX_VMXON_FAILED;
374
375# else
376 int rc = VINF_SUCCESS;
377 __asm
378 {
379 push dword ptr [HCPhysVmxOn + 4]
380 push dword ptr [HCPhysVmxOn]
381 _emit 0xf3
382 _emit 0x0f
383 _emit 0xc7
384 _emit 0x34
385 _emit 0x24 /* VMXON [esp] */
386 jnc vmxon_good
387 mov dword ptr [rc], VERR_VMX_INVALID_VMXON_PTR
388 jmp the_end
389
390vmxon_good:
391 jnz the_end
392 mov dword ptr [rc], VERR_VMX_VMXON_FAILED
393the_end:
394 add esp, 8
395 }
396 return rc;
397# endif
398}
399#endif
400#endif
401
402
403/**
404 * Executes VMXOFF.
405 */
406#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
407DECLASM(void) VMXDisable(void);
408#else
409DECLINLINE(void) VMXDisable(void)
410{
411# if VMX_USE_MSC_INTRINSICS
412 __vmx_off();
413
414# elif RT_INLINE_ASM_GNU_STYLE
415 __asm__ __volatile__ (
416 ".byte 0x0f, 0x01, 0xc4 # VMXOFF \n\t"
417 );
418
419# elif defined(RT_ARCH_X86)
420 __asm
421 {
422 _emit 0x0f
423 _emit 0x01
424 _emit 0xc4 /* VMXOFF */
425 }
426
427# else
428# error "Shouldn't be here..."
429# endif
430}
431#endif
432
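/*
 * Example usage (illustrative sketch): entering and leaving VMX operation on the
 * current CPU.  HCPhysVmxOnRegion is a hypothetical host-physical address of a
 * page-aligned VMXON region whose first 32 bits hold the VMCS revision identifier
 * from the IA32_VMX_BASIC MSR; CR4.VMXE is assumed to have been set beforehand.
 *
 *     int rc = VMXEnable(HCPhysVmxOnRegion);
 *     if (RT_SUCCESS(rc))
 *     {
 *         ... load a VMCS and run the guest ...
 *         VMXDisable();
 *     }
 */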
433
434#if 0
435#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
436DECLASM(void) VMXDisable(void);
437#else
438DECLINLINE(void) VMXDisable(void)
439{
440# if RT_INLINE_ASM_GNU_STYLE
441 __asm__ __volatile__ (
442 ".byte 0x0f, 0x01, 0xc4 # VMXOFF \n\t"
443 );
444
445# elif VMX_USE_MSC_INTRINSICS
446 __vmx_off();
447
448# else
449 __asm
450 {
451 _emit 0x0f
452 _emit 0x01
453 _emit 0xc4 /* VMXOFF */
454 }
455# endif
456}
457#endif
458#endif
459
460
461/**
462 * Executes VMCLEAR.
463 *
464 * @returns VBox status code.
465 * @param HCPhysVmcs Physical address of VM control structure.
466 */
467#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
468DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
469#else
470DECLINLINE(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs)
471{
472# if VMX_USE_MSC_INTRINSICS
473 unsigned char rcMsc = __vmx_vmclear(&HCPhysVmcs);
474 if (RT_LIKELY(rcMsc == 0))
475 return VINF_SUCCESS;
476 return VERR_VMX_INVALID_VMCS_PTR;
477
478# elif RT_INLINE_ASM_GNU_STYLE
479# ifdef RT_ARCH_AMD64
480 int rc;
481 __asm__ __volatile__ (
482 "pushq %2 \n\t"
483 ".byte 0x66, 0x0f, 0xc7, 0x34, 0x24 # VMCLEAR [esp] \n\t"
484 "jnc 1f \n\t"
485 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
486 "1: \n\t"
487 "add $8, %%rsp \n\t"
488 :"=rm"(rc)
489 :"0"(VINF_SUCCESS),
490 "ir"(HCPhysVmcs) /* don't allow direct memory reference here, */
491 /* this would not work with -fomit-frame-pointer */
492 :"memory"
493 );
494 return rc;
495# else
496 int rc;
497 __asm__ __volatile__ (
498 "push %3 \n\t"
499 "push %2 \n\t"
500 ".byte 0x66, 0x0f, 0xc7, 0x34, 0x24 # VMCLEAR [esp] \n\t"
501 "jnc 1f \n\t"
502 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
503 "1: \n\t"
504 "add $8, %%esp \n\t"
505 :"=rm"(rc)
506 :"0"(VINF_SUCCESS),
507 "ir"((uint32_t)HCPhysVmcs), /* don't allow direct memory reference here, */
508 "ir"((uint32_t)(HCPhysVmcs >> 32)) /* this would not work with -fomit-frame-pointer */
509 :"memory"
510 );
511 return rc;
512# endif
513
514# elif defined(RT_ARCH_X86)
515 int rc = VINF_SUCCESS;
516 __asm
517 {
518 push dword ptr [HCPhysVmcs + 4]
519 push dword ptr [HCPhysVmcs]
520 _emit 0x66
521 _emit 0x0f
522 _emit 0xc7
523 _emit 0x34
524 _emit 0x24 /* VMCLEAR [esp] */
525 jnc success
526 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
527success:
528 add esp, 8
529 }
530 return rc;
531
532# else
533# error "Shouldn't be here..."
534# endif
535}
536#endif
537
538
539#if 0
540#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
541DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
542#else
543DECLINLINE(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs)
544{
545# if RT_INLINE_ASM_GNU_STYLE
546 int rc = VINF_SUCCESS;
547 __asm__ __volatile__ (
548 "push %3 \n\t"
549 "push %2 \n\t"
550 ".byte 0x66, 0x0f, 0xc7, 0x34, 0x24 # VMCLEAR [esp] \n\t"
551 "jnc 1f \n\t"
552 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
553 "1: \n\t"
554 "add $8, %%esp \n\t"
555 :"=rm"(rc)
556 :"0"(VINF_SUCCESS),
557 "ir"((uint32_t)HCPhysVmcs), /* don't allow direct memory reference here, */
558 "ir"((uint32_t)(HCPhysVmcs >> 32)) /* this would not work with -fomit-frame-pointer */
559 :"memory"
560 );
561 return rc;
562
563# elif VMX_USE_MSC_INTRINSICS
564 unsigned char rcMsc = __vmx_vmclear(&HCPhysVmcs);
565 if (RT_LIKELY(rcMsc == 0))
566 return VINF_SUCCESS;
567 return VERR_VMX_INVALID_VMCS_PTR;
568
569# else
570 int rc = VINF_SUCCESS;
571 __asm
572 {
573 push dword ptr [HCPhysVmcs + 4]
574 push dword ptr [HCPhysVmcs]
575 _emit 0x66
576 _emit 0x0f
577 _emit 0xc7
578 _emit 0x34
579 _emit 0x24 /* VMCLEAR [esp] */
580 jnc success
581 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
582success:
583 add esp, 8
584 }
585 return rc;
586# endif
587}
588#endif
589#endif
590
591
592/**
593 * Executes VMPTRLD.
594 *
595 * @returns VBox status code.
596 * @param HCPhysVmcs Physical address of VMCS structure.
597 */
598#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
599DECLASM(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs);
600#else
601DECLINLINE(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs)
602{
603# if VMX_USE_MSC_INTRINSICS
604 unsigned char rcMsc = __vmx_vmptrld(&HCPhysVmcs);
605 if (RT_LIKELY(rcMsc == 0))
606 return VINF_SUCCESS;
607 return VERR_VMX_INVALID_VMCS_PTR;
608
609# elif RT_INLINE_ASM_GNU_STYLE
610# ifdef RT_ARCH_AMD64
611 int rc;
612 __asm__ __volatile__ (
613 "pushq %2 \n\t"
614 ".byte 0x0f, 0xc7, 0x34, 0x24 # VMPTRLD [esp] \n\t"
615 "jnc 1f \n\t"
616 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
617 "1: \n\t"
618 "add $8, %%rsp \n\t"
619 :"=rm"(rc)
620 :"0"(VINF_SUCCESS),
621 "ir"(HCPhysVmcs) /* don't allow direct memory reference here, */
622 /* this will not work with -fomit-frame-pointer */
623 :"memory"
624 );
625 return rc;
626# else
627 int rc;
628 __asm__ __volatile__ (
629 "push %3 \n\t"
630 "push %2 \n\t"
631 ".byte 0x0f, 0xc7, 0x34, 0x24 # VMPTRLD [esp] \n\t"
632 "jnc 1f \n\t"
633 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
634 "1: \n\t"
635 "add $8, %%esp \n\t"
636 :"=rm"(rc)
637 :"0"(VINF_SUCCESS),
638 "ir"((uint32_t)HCPhysVmcs), /* don't allow direct memory reference here, */
639 "ir"((uint32_t)(HCPhysVmcs >> 32)) /* this will not work with -fomit-frame-pointer */
640 :"memory"
641 );
642 return rc;
643# endif
644
645# elif defined(RT_ARCH_X86)
646 int rc = VINF_SUCCESS;
647 __asm
648 {
649 push dword ptr [HCPhysVmcs + 4]
650 push dword ptr [HCPhysVmcs]
651 _emit 0x0f
652 _emit 0xc7
653 _emit 0x34
654 _emit 0x24 /* VMPTRLD [esp] */
655 jnc success
656 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
657success:
658 add esp, 8
659 }
660 return rc;
661
662# else
663# error "Shouldn't be here..."
664# endif
665}
666#endif
667
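/*
 * Example usage (illustrative sketch): making a VMCS active and current on this
 * CPU.  HCPhysVmcs is a hypothetical host-physical address of a page-aligned,
 * properly initialized VMCS region.
 *
 *     // VMCLEAR ensures the VMCS is inactive/clear and its cached data is flushed to memory.
 *     int rc = VMXClearVmcs(HCPhysVmcs);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // VMPTRLD makes it the current VMCS for subsequent VMREAD/VMWRITE.
 *         rc = VMXLoadVmcs(HCPhysVmcs);
 *     }
 */
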
668#if 0
669#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
670DECLASM(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs);
671#else
672DECLINLINE(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs)
673{
674# if RT_INLINE_ASM_GNU_STYLE
675 int rc = VINF_SUCCESS;
676 __asm__ __volatile__ (
677 "push %3 \n\t"
678 "push %2 \n\t"
679 ".byte 0x0f, 0xc7, 0x34, 0x24 # VMPTRLD [esp] \n\t"
680 "jnc 1f \n\t"
681 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
682 "1: \n\t"
683 "add $8, %%esp \n\t"
684 :"=rm"(rc)
685 :"0"(VINF_SUCCESS),
686 "ir"((uint32_t)HCPhysVmcs), /* don't allow direct memory reference here, */
687 "ir"((uint32_t)(HCPhysVmcs >> 32)) /* this will not work with -fomit-frame-pointer */
688 );
689 return rc;
690
691# elif VMX_USE_MSC_INTRINSICS
692 unsigned char rcMsc = __vmx_vmptrld(&HCPhysVmcs);
693 if (RT_LIKELY(rcMsc == 0))
694 return VINF_SUCCESS;
695 return VERR_VMX_INVALID_VMCS_PTR;
696
697# else
698 int rc = VINF_SUCCESS;
699 __asm
700 {
701 push dword ptr [HCPhysVmcs + 4]
702 push dword ptr [HCPhysVmcs]
703 _emit 0x0f
704 _emit 0xc7
705 _emit 0x34
706 _emit 0x24 /* VMPTRLD [esp] */
707 jnc success
708 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
709
710success:
711 add esp, 8
712 }
713 return rc;
714# endif
715}
716#endif
717#endif
718
719
720/**
721 * Executes VMPTRST.
722 *
723 * @returns VBox status code.
724 * @param pHCPhysVmcs Where to store the physical address of the current
725 * VMCS.
726 */
727DECLASM(int) VMXGetCurrentVmcs(RTHCPHYS *pHCPhysVmcs);
728
729
730/**
731 * Executes VMWRITE for a 32-bit field.
732 *
733 * @returns VBox status code.
734 * @retval VINF_SUCCESS.
735 * @retval VERR_VMX_INVALID_VMCS_PTR.
736 * @retval VERR_VMX_INVALID_VMCS_FIELD.
737 *
738 * @param uFieldEnc VMCS field encoding.
739 * @param u32Val The 32-bit value to set.
740 *
741 * @remarks The values of the two status codes can be OR'ed together; the result
742 * will be VERR_VMX_INVALID_VMCS_PTR.
743 */
744#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
745DECLASM(int) VMXWriteVmcs32(uint32_t uFieldEnc, uint32_t u32Val);
746#else
747DECLINLINE(int) VMXWriteVmcs32(uint32_t uFieldEnc, uint32_t u32Val)
748{
749# if VMX_USE_MSC_INTRINSICS
750# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
751 __vmx_vmwrite(uFieldEnc, u32Val);
752 return VINF_SUCCESS;
753# else
754 unsigned char rcMsc = __vmx_vmwrite(uFieldEnc, u32Val);
755 if (RT_LIKELY(rcMsc == 0))
756 return VINF_SUCCESS;
757 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
758# endif
759
760# elif RT_INLINE_ASM_GNU_STYLE
761# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
762 __asm__ __volatile__ (
763 ".byte 0x0f, 0x79, 0xc2 # VMWRITE eax, edx \n\t"
764 :
765 :"a"(uFieldEnc),
766 "d"(u32Val)
767 );
768 return VINF_SUCCESS;
769# else
770 int rc;
771 __asm__ __volatile__ (
772 ".byte 0x0f, 0x79, 0xc2 # VMWRITE eax, edx \n\t"
773 "ja 2f \n\t"
774 "je 1f \n\t"
775 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
776 "jmp 2f \n\t"
777 "1: \n\t"
778 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
779 "2: \n\t"
780 :"=rm"(rc)
781 :"0"(VINF_SUCCESS),
782 "a"(uFieldEnc),
783 "d"(u32Val)
784 );
785 return rc;
786# endif
787
788# elif defined(RT_ARCH_X86)
789 int rc = VINF_SUCCESS;
790 __asm
791 {
792 push dword ptr [u32Val]
793 mov eax, [uFieldEnc]
794 _emit 0x0f
795 _emit 0x79
796 _emit 0x04
797 _emit 0x24 /* VMWRITE eax, [esp] */
798 jnc valid_vmcs
799 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
800 jmp the_end
801valid_vmcs:
802 jnz the_end
803 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
804the_end:
805 add esp, 4
806 }
807 return rc;
808
809# else
810# error "Shouldn't be here..."
811# endif
812}
813#endif
814
815
816#if 0
817#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
818DECLASM(int) VMXWriteVmcs32(uint32_t uFieldEnc, uint32_t u32Val);
819#else
820DECLINLINE(int) VMXWriteVmcs32(uint32_t uFieldEnc, uint32_t u32Val)
821{
822# if RT_INLINE_ASM_GNU_STYLE
823 int rc = VINF_SUCCESS;
824 __asm__ __volatile__ (
825 ".byte 0x0f, 0x79, 0xc2 # VMWRITE eax, edx \n\t"
826 "ja 2f \n\t"
827 "je 1f \n\t"
828 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
829 "jmp 2f \n\t"
830 "1: \n\t"
831 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
832 "2: \n\t"
833 :"=rm"(rc)
834 :"0"(VINF_SUCCESS),
835 "a"(uFieldEnc),
836 "d"(u32Val)
837 );
838 return rc;
839
840# elif VMX_USE_MSC_INTRINSICS
841 unsigned char rcMsc = __vmx_vmwrite(uFieldEnc, u32Val);
842 if (RT_LIKELY(rcMsc == 0))
843 return VINF_SUCCESS;
844 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
845
846#else
847 int rc = VINF_SUCCESS;
848 __asm
849 {
850 push dword ptr [u32Val]
851 mov eax, [uFieldEnc]
852 _emit 0x0f
853 _emit 0x79
854 _emit 0x04
855 _emit 0x24 /* VMWRITE eax, [esp] */
856 jnc valid_vmcs
857 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
858 jmp the_end
859
860valid_vmcs:
861 jnz the_end
862 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
863the_end:
864 add esp, 4
865 }
866 return rc;
867# endif
868}
869#endif
870#endif
871
872
873/**
874 * Executes VMWRITE for a 64-bit field.
875 *
876 * @returns VBox status code.
877 * @retval VINF_SUCCESS.
878 * @retval VERR_VMX_INVALID_VMCS_PTR.
879 * @retval VERR_VMX_INVALID_VMCS_FIELD.
880 *
881 * @param uFieldEnc The VMCS field encoding.
882 * @param u64Val The 16, 32 or 64-bit value to set.
883 *
884 * @remarks The values of the two status codes can be OR'ed together; the result
885 * will be VERR_VMX_INVALID_VMCS_PTR.
886 */
887#if defined(RT_ARCH_X86) || (RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS)
888DECLASM(int) VMXWriteVmcs64(uint32_t uFieldEnc, uint64_t u64Val);
889#else
890DECLINLINE(int) VMXWriteVmcs64(uint32_t uFieldEnc, uint64_t u64Val)
891{
892# if VMX_USE_MSC_INTRINSICS
893# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
894 __vmx_vmwrite(uFieldEnc, u64Val);
895 return VINF_SUCCESS;
896# else
897 unsigned char rcMsc = __vmx_vmwrite(uFieldEnc, u64Val);
898 if (RT_LIKELY(rcMsc == 0))
899 return VINF_SUCCESS;
900 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
901# endif
902
903# elif RT_INLINE_ASM_GNU_STYLE
904# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
905 __asm__ __volatile__ (
906 ".byte 0x0f, 0x79, 0xc2 # VMWRITE eax, edx \n\t"
907 :
908 :"a"(uFieldEnc),
909 "d"(u64Val)
910 );
911 return VINF_SUCCESS;
912# else
913 int rc;
914 __asm__ __volatile__ (
915 ".byte 0x0f, 0x79, 0xc2 # VMWRITE eax, edx \n\t"
916 "ja 2f \n\t"
917 "je 1f \n\t"
918 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
919 "jmp 2f \n\t"
920 "1: \n\t"
921 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
922 "2: \n\t"
923 :"=rm"(rc)
924 :"0"(VINF_SUCCESS),
925 "a"(uFieldEnc),
926 "d"(u64Val)
927 );
928 return rc;
929# endif
930
931# else
932# error "Shouldn't be here..."
933# endif
934}
935#endif
936
937
938#if 0
939#if (defined(RT_ARCH_AMD64) && VMX_USE_MSC_INTRINSICS)
940DECLINLINE(int) VMXWriteVmcs64(uint32_t uFieldEnc, uint64_t u64Val)
941{
942 unsigned char rcMsc = __vmx_vmwrite(uFieldEnc, u64Val);
943 if (RT_LIKELY(rcMsc == 0))
944 return VINF_SUCCESS;
945 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
946}
947#else
948DECLASM(int) VMXWriteVmcs64(uint32_t uFieldEnc, uint64_t u64Val);
949#endif
950#endif
951
952
953/**
954 * Executes VMWRITE for a 16-bit VMCS field.
955 *
956 * @returns VBox status code.
957 * @retval VINF_SUCCESS.
958 * @retval VERR_VMX_INVALID_VMCS_PTR.
959 * @retval VERR_VMX_INVALID_VMCS_FIELD.
960 *
961 * @param uVmcsField The VMCS field.
962 * @param u16Val The 16-bit value to set.
963 *
964 * @remarks The values of the two status codes can be OR'ed together; the result
965 * will be VERR_VMX_INVALID_VMCS_PTR.
966 */
967DECLINLINE(int) VMXWriteVmcs16(uint32_t uVmcsField, uint16_t u16Val)
968{
969 AssertMsg(RT_BF_GET(uVmcsField, VMX_BF_VMCSFIELD_WIDTH) == VMX_VMCSFIELD_WIDTH_16BIT, ("%#RX32\n", uVmcsField));
970 return VMXWriteVmcs32(uVmcsField, u16Val);
971}
972
973
974/**
975 * Executes VMWRITE for a natural-width VMCS field.
976 */
977#ifdef RT_ARCH_AMD64
978# define VMXWriteVmcsNw VMXWriteVmcs64
979#else
980# define VMXWriteVmcsNw VMXWriteVmcs32
981#endif
982
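/*
 * Example usage (illustrative sketch): writing guest-state fields of different
 * widths to the current VMCS.  The field encodings (VMX_VMCS16_GUEST_CS_SEL,
 * VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_RIP) are assumed to come from
 * <VBox/vmm/hm_vmx.h>; the guest values are hypothetical locals.
 *
 *     int rc = VMXWriteVmcs16(VMX_VMCS16_GUEST_CS_SEL,   uGuestCsSel);
 *     rc    |= VMXWriteVmcs32(VMX_VMCS32_GUEST_CS_LIMIT, cbGuestCsLimit);
 *     rc    |= VMXWriteVmcsNw(VMX_VMCS_GUEST_RIP,        uGuestRip);
 *     AssertRC(rc);   // The status codes may be OR'ed together as noted in the remarks above.
 */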
983
984/**
985 * Invalidates a page using INVEPT.
986 *
987 * @returns VBox status code.
988 * @param enmFlush Type of flush.
989 * @param pDescriptor Pointer to the descriptor.
990 */
991DECLASM(int) VMXR0InvEPT(VMXTLBFLUSHEPT enmFlush, uint64_t *pDescriptor);
992
993
994/**
995 * Invalidates a page using INVVPID.
996 *
997 * @returns VBox status code.
998 * @param enmFlush Type of flush.
999 * @param pDescriptor Pointer to the descriptor.
1000 */
1001DECLASM(int) VMXR0InvVPID(VMXTLBFLUSHVPID enmFlush, uint64_t *pDescriptor);
1002
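/*
 * Example usage (illustrative sketch): flushing all EPT-derived translations on
 * this CPU with a global INVEPT.  VMXTLBFLUSHEPT_ALL_CONTEXTS is assumed to be the
 * all-contexts flush type of the VMXTLBFLUSHEPT enum; per the Intel SDM the
 * 128-bit INVEPT descriptor holds the EPT pointer in its first 64 bits (ignored
 * for an all-contexts flush) and its second 64 bits must be zero.
 *
 *     uint64_t au64Desc[2] = { 0, 0 };
 *     int rc = VMXR0InvEPT(VMXTLBFLUSHEPT_ALL_CONTEXTS, &au64Desc[0]);
 *     AssertRC(rc);
 */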
1003
1004/**
1005 * Executes VMREAD for a 32-bit field.
1006 *
1007 * @returns VBox status code.
1008 * @retval VINF_SUCCESS.
1009 * @retval VERR_VMX_INVALID_VMCS_PTR.
1010 * @retval VERR_VMX_INVALID_VMCS_FIELD.
1011 *
1012 * @param uFieldEnc The VMCS field encoding.
1013 * @param pData Where to store VMCS field value.
1014 *
1015 * @remarks The values of the two status codes can be OR'ed together; the result
1016 * will be VERR_VMX_INVALID_VMCS_PTR.
1017 */
1018#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
1019DECLASM(int) VMXReadVmcs32(uint32_t uFieldEnc, uint32_t *pData);
1020#else
1021DECLINLINE(int) VMXReadVmcs32(uint32_t uFieldEnc, uint32_t *pData)
1022{
1023# if VMX_USE_MSC_INTRINSICS
1024# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
1025 uint64_t u64Tmp = 0;
1026 __vmx_vmread(uFieldEnc, &u64Tmp);
1027 *pData = (uint32_t)u64Tmp;
1028 return VINF_SUCCESS;
1029# else
1030 unsigned char rcMsc;
1031 uint64_t u64Tmp;
1032 rcMsc = __vmx_vmread(uFieldEnc, &u64Tmp);
1033 *pData = (uint32_t)u64Tmp;
1034 if (RT_LIKELY(rcMsc == 0))
1035 return VINF_SUCCESS;
1036 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
1037# endif
1038
1039# elif RT_INLINE_ASM_GNU_STYLE
1040# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
1041 __asm__ __volatile__ (
1042 ".byte 0x0f, 0x78, 0xc2 # VMREAD eax, edx \n\t"
1043 :"=d"(*pData)
1044 :"a"(uFieldEnc),
1045 "d"(0)
1046 );
1047 return VINF_SUCCESS;
1048# else
1049 int rc;
1050 __asm__ __volatile__ (
1051 "movl $" RT_XSTR(VINF_SUCCESS)", %0 \n\t"
1052 ".byte 0x0f, 0x78, 0xc2 # VMREAD eax, edx \n\t"
1053 "ja 2f \n\t"
1054 "je 1f \n\t"
1055 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
1056 "jmp 2f \n\t"
1057 "1: \n\t"
1058 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
1059 "2: \n\t"
1060 :"=&r"(rc),
1061 "=d"(*pData)
1062 :"a"(uFieldEnc),
1063 "d"(0)
1064 );
1065 return rc;
1066# endif
1067
1068# elif defined(RT_ARCH_X86)
1069 int rc = VINF_SUCCESS;
1070 __asm
1071 {
1072 sub esp, 4
1073 mov dword ptr [esp], 0
1074 mov eax, [uFieldEnc]
1075 _emit 0x0f
1076 _emit 0x78
1077 _emit 0x04
1078 _emit 0x24 /* VMREAD eax, [esp] */
1079 mov edx, pData
1080 pop dword ptr [edx]
1081 jnc valid_vmcs
1082 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
1083 jmp the_end
1084valid_vmcs:
1085 jnz the_end
1086 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
1087the_end:
1088 }
1089 return rc;
1090
1091# else
1092# error "Shouldn't be here..."
1093# endif
1094}
1095#endif
1096
1097#if 0
1098#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
1099DECLASM(int) VMXReadVmcs32(uint32_t uFieldEnc, uint32_t *pData);
1100#else
1101DECLINLINE(int) VMXReadVmcs32(uint32_t uFieldEnc, uint32_t *pData)
1102{
1103# if RT_INLINE_ASM_GNU_STYLE
1104 int rc = VINF_SUCCESS;
1105 __asm__ __volatile__ (
1106 "movl $" RT_XSTR(VINF_SUCCESS)", %0 \n\t"
1107 ".byte 0x0f, 0x78, 0xc2 # VMREAD eax, edx \n\t"
1108 "ja 2f \n\t"
1109 "je 1f \n\t"
1110 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
1111 "jmp 2f \n\t"
1112 "1: \n\t"
1113 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
1114 "2: \n\t"
1115 :"=&r"(rc),
1116 "=d"(*pData)
1117 :"a"(uFieldEnc),
1118 "d"(0)
1119 );
1120 return rc;
1121
1122# elif VMX_USE_MSC_INTRINSICS
1123 unsigned char rcMsc;
1124# ifdef RT_ARCH_X86
1125 rcMsc = __vmx_vmread(uFieldEnc, pData);
1126# else
1127 uint64_t u64Tmp;
1128 rcMsc = __vmx_vmread(uFieldEnc, &u64Tmp);
1129 *pData = (uint32_t)u64Tmp;
1130# endif
1131 if (RT_LIKELY(rcMsc == 0))
1132 return VINF_SUCCESS;
1133 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
1134
1135#else
1136 int rc = VINF_SUCCESS;
1137 __asm
1138 {
1139 sub esp, 4
1140 mov dword ptr [esp], 0
1141 mov eax, [uFieldEnc]
1142 _emit 0x0f
1143 _emit 0x78
1144 _emit 0x04
1145 _emit 0x24 /* VMREAD eax, [esp] */
1146 mov edx, pData
1147 pop dword ptr [edx]
1148 jnc valid_vmcs
1149 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
1150 jmp the_end
1151
1152valid_vmcs:
1153 jnz the_end
1154 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
1155the_end:
1156 }
1157 return rc;
1158# endif
1159}
1160#endif
1161#endif
1162
1163
1164/**
1165 * Executes VMREAD for a 64-bit field.
1166 *
1167 * @returns VBox status code.
1168 * @retval VINF_SUCCESS.
1169 * @retval VERR_VMX_INVALID_VMCS_PTR.
1170 * @retval VERR_VMX_INVALID_VMCS_FIELD.
1171 *
1172 * @param uFieldEnc The VMCS field encoding.
1173 * @param pData Where to store VMCS field value.
1174 *
1175 * @remarks The values of the two status codes can be OR'ed together; the result
1176 * will be VERR_VMX_INVALID_VMCS_PTR.
1177 */
1178#if defined(RT_ARCH_X86) || (RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS)
1179DECLASM(int) VMXReadVmcs64(uint32_t uFieldEnc, uint64_t *pData);
1180#else
1181DECLINLINE(int) VMXReadVmcs64(uint32_t uFieldEnc, uint64_t *pData)
1182{
1183# if VMX_USE_MSC_INTRINSICS
1184# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
1185 __vmx_vmread(uFieldEnc, pData);
1186 return VINF_SUCCESS;
1187# else
1188 unsigned char rcMsc;
1189 rcMsc = __vmx_vmread(uFieldEnc, pData);
1190 if (RT_LIKELY(rcMsc == 0))
1191 return VINF_SUCCESS;
1192 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
1193# endif
1194
1195# elif RT_INLINE_ASM_GNU_STYLE
1196# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
1197 __asm__ __volatile__ (
1198 ".byte 0x0f, 0x78, 0xc2 # VMREAD eax, edx \n\t"
1199 :"=d"(*pData)
1200 :"a"(uFieldEnc),
1201 "d"(0)
1202 );
1203 return VINF_SUCCESS;
1204# else
1205 int rc;
1206 __asm__ __volatile__ (
1207 "movl $" RT_XSTR(VINF_SUCCESS)", %0 \n\t"
1208 ".byte 0x0f, 0x78, 0xc2 # VMREAD eax, edx \n\t"
1209 "ja 2f \n\t"
1210 "je 1f \n\t"
1211 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
1212 "jmp 2f \n\t"
1213 "1: \n\t"
1214 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
1215 "2: \n\t"
1216 :"=&r"(rc),
1217 "=d"(*pData)
1218 :"a"(uFieldEnc),
1219 "d"(0)
1220 );
1221 return rc;
1222# endif
1223# else
1224# error "Shouldn't be here..."
1225# endif
1226}
1227#endif
1228
1229
1230#if 0
1231#if (!defined(RT_ARCH_X86) && !VMX_USE_MSC_INTRINSICS)
1232DECLASM(int) VMXReadVmcs64(uint32_t uFieldEnc, uint64_t *pData);
1233#else
1234DECLINLINE(int) VMXReadVmcs64(uint32_t uFieldEnc, uint64_t *pData)
1235{
1236# if VMX_USE_MSC_INTRINSICS
1237 unsigned char rcMsc;
1238# ifdef RT_ARCH_X86
1239 size_t uLow;
1240 size_t uHigh;
1241 rcMsc = __vmx_vmread(uFieldEnc, &uLow);
1242 rcMsc |= __vmx_vmread(uFieldEnc + 1, &uHigh);
1243 *pData = RT_MAKE_U64(uLow, uHigh);
1244# else
1245 rcMsc = __vmx_vmread(uFieldEnc, pData);
1246# endif
1247 if (RT_LIKELY(rcMsc == 0))
1248 return VINF_SUCCESS;
1249 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
1250
1251# elif defined(RT_ARCH_X86)
1252 int rc;
1253 uint32_t val_hi, val;
1254 rc = VMXReadVmcs32(uFieldEnc, &val);
1255 rc |= VMXReadVmcs32(uFieldEnc + 1, &val_hi);
1256 AssertRC(rc);
1257 *pData = RT_MAKE_U64(val, val_hi);
1258 return rc;
1259
1260# else
1261# error "Shouldn't be here..."
1262# endif
1263}
1264#endif
1265#endif
1266
1267
1268/**
1269 * Executes VMREAD for a 16-bit field.
1270 *
1271 * @returns VBox status code.
1272 * @retval VINF_SUCCESS.
1273 * @retval VERR_VMX_INVALID_VMCS_PTR.
1274 * @retval VERR_VMX_INVALID_VMCS_FIELD.
1275 *
1276 * @param uVmcsField The VMCS field.
1277 * @param pData Where to store VMCS field value.
1278 *
1279 * @remarks The values of the two status codes can be OR'ed together; the result
1280 * will be VERR_VMX_INVALID_VMCS_PTR.
1281 */
1282DECLINLINE(int) VMXReadVmcs16(uint32_t uVmcsField, uint16_t *pData)
1283{
1284 uint32_t u32Tmp;
1285 int rc;
1286 AssertMsg(RT_BF_GET(uVmcsField, VMX_BF_VMCSFIELD_WIDTH) == VMX_VMCSFIELD_WIDTH_16BIT, ("%#RX32\n", uVmcsField));
1287 rc = VMXReadVmcs32(uVmcsField, &u32Tmp);
1288 *pData = (uint16_t)u32Tmp;
1289 return rc;
1290}
1291
1292
1293/**
1294 * Executes VMREAD for a natural-width VMCS field.
1295 */
1296#ifdef RT_ARCH_AMD64
1297# define VMXReadVmcsNw VMXReadVmcs64
1298#else
1299# define VMXReadVmcsNw VMXReadVmcs32
1300#endif
1301
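/*
 * Example usage (illustrative sketch): reading VM-exit information from the
 * current VMCS on a 64-bit host.  The field encodings (VMX_VMCS32_RO_EXIT_REASON,
 * VMX_VMCS_RO_EXIT_QUALIFICATION) are assumed to come from <VBox/vmm/hm_vmx.h>.
 *
 *     uint32_t uExitReason = 0;
 *     uint64_t uExitQual   = 0;
 *     int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
 *     rc    |= VMXReadVmcsNw(VMX_VMCS_RO_EXIT_QUALIFICATION, &uExitQual);
 *     AssertRC(rc);
 */
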
1302#endif /* RT_ARCH_AMD64 || RT_ARCH_X86 */
1303
1304/** @} */
1305
1306#endif /* !VBOX_INCLUDED_vmm_hmvmxinline_h */
1307