VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 17352

Last change on this file since 17352 was 17352, checked in by vboxsync, 16 years ago

REM_NEW: Retired remR3HCVirt2GCPhys.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 158.2 KB
Line 
1/* $Id: VBoxRecompiler.c 17352 2009-03-04 16:32:32Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
79/** Copy 80-bit fpu register at pSrc to pDst.
80 * This is probably faster than *calling* memcpy.
81 */
82#define REM_COPY_FPU_REG(pDst, pSrc) \
83 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static void remR3StateUpdate(PVM pVM);
92
93static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
94static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
96static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
97static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99
100static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
101static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
103static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
104static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106
107
108/*******************************************************************************
109* Global Variables *
110*******************************************************************************/
111
112/** @todo Move stats to REM::s some rainy day we have nothing do to. */
113#ifdef VBOX_WITH_STATISTICS
114static STAMPROFILEADV gStatExecuteSingleInstr;
115static STAMPROFILEADV gStatCompilationQEmu;
116static STAMPROFILEADV gStatRunCodeQEmu;
117static STAMPROFILEADV gStatTotalTimeQEmu;
118static STAMPROFILEADV gStatTimers;
119static STAMPROFILEADV gStatTBLookup;
120static STAMPROFILEADV gStatIRQ;
121static STAMPROFILEADV gStatRawCheck;
122static STAMPROFILEADV gStatMemRead;
123static STAMPROFILEADV gStatMemWrite;
124static STAMPROFILE gStatGCPhys2HCVirt;
125static STAMPROFILE gStatHCVirt2GCPhys;
126static STAMCOUNTER gStatCpuGetTSC;
127static STAMCOUNTER gStatRefuseTFInhibit;
128static STAMCOUNTER gStatRefuseVM86;
129static STAMCOUNTER gStatRefusePaging;
130static STAMCOUNTER gStatRefusePAE;
131static STAMCOUNTER gStatRefuseIOPLNot0;
132static STAMCOUNTER gStatRefuseIF0;
133static STAMCOUNTER gStatRefuseCode16;
134static STAMCOUNTER gStatRefuseWP0;
135static STAMCOUNTER gStatRefuseRing1or2;
136static STAMCOUNTER gStatRefuseCanExecute;
137static STAMCOUNTER gStatREMGDTChange;
138static STAMCOUNTER gStatREMIDTChange;
139static STAMCOUNTER gStatREMLDTRChange;
140static STAMCOUNTER gStatREMTRChange;
141static STAMCOUNTER gStatSelOutOfSync[6];
142static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
143static STAMCOUNTER gStatFlushTBs;
144#endif
145
146/*
147 * Global stuff.
148 */
149
150/** MMIO read callbacks. */
151CPUReadMemoryFunc *g_apfnMMIORead[3] =
152{
153 remR3MMIOReadU8,
154 remR3MMIOReadU16,
155 remR3MMIOReadU32
156};
157
158/** MMIO write callbacks. */
159CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
160{
161 remR3MMIOWriteU8,
162 remR3MMIOWriteU16,
163 remR3MMIOWriteU32
164};
165
166/** Handler read callbacks. */
167CPUReadMemoryFunc *g_apfnHandlerRead[3] =
168{
169 remR3HandlerReadU8,
170 remR3HandlerReadU16,
171 remR3HandlerReadU32
172};
173
174/** Handler write callbacks. */
175CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
176{
177 remR3HandlerWriteU8,
178 remR3HandlerWriteU16,
179 remR3HandlerWriteU32
180};
181
182
183#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
184/*
185 * Debugger commands.
186 */
187static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
188
189/** '.remstep' arguments. */
190static const DBGCVARDESC g_aArgRemStep[] =
191{
192 /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
193 { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
194};
195
196/** Command descriptors. */
197static const DBGCCMD g_aCmds[] =
198{
199 {
200 .pszCmd ="remstep",
201 .cArgsMin = 0,
202 .cArgsMax = 1,
203 .paArgDescs = &g_aArgRemStep[0],
204 .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
205 .pResultDesc = NULL,
206 .fFlags = 0,
207 .pfnHandler = remR3CmdDisasEnableStepping,
208 .pszSyntax = "[on/off]",
209 .pszDescription = "Enable or disable the single stepping with logged disassembly. "
210 "If no arguments show the current state."
211 }
212};
213#endif
214
215
216/*******************************************************************************
217* Internal Functions *
218*******************************************************************************/
219void remAbort(int rc, const char *pszTip);
220extern int testmath(void);
221
222/* Put them here to avoid unused variable warning. */
223AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
224#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
225//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
226/* Why did this have to be identical?? */
227AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
228#else
229AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
230#endif
231
232
233/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
234uint8_t *code_gen_prologue;
235
236/**
237 * Initializes the REM.
238 *
239 * @returns VBox status code.
240 * @param pVM The VM to operate on.
241 */
242REMR3DECL(int) REMR3Init(PVM pVM)
243{
244 uint32_t u32Dummy;
245 int rc;
246
247#ifdef VBOX_ENABLE_VBOXREM64
248 LogRel(("Using 64-bit aware REM\n"));
249#endif
250
251 /*
252 * Assert sanity.
253 */
254 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
255 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
256 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
257#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
258 Assert(!testmath());
259#endif
260 /*
261 * Init some internal data members.
262 */
263 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
264 pVM->rem.s.Env.pVM = pVM;
265#ifdef CPU_RAW_MODE_INIT
266 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
267#endif
268
269 /* ctx. */
270 pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
271 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));
272
273 /* ignore all notifications */
274 pVM->rem.s.fIgnoreAll = true;
275
276 code_gen_prologue = RTMemExecAlloc(_1K);
277 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
278
279 cpu_exec_init_all(0);
280
281 /*
282 * Init the recompiler.
283 */
284 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
285 {
286 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
287 return VERR_GENERAL_FAILURE;
288 }
289 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
290 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
291
292 /* allocate code buffer for single instruction emulation. */
293 pVM->rem.s.Env.cbCodeBuffer = 4096;
294 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
295 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
296
297 /* finally, set the cpu_single_env global. */
298 cpu_single_env = &pVM->rem.s.Env;
299
300 /* Nothing is pending by default */
301 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
302
303 /*
304 * Register ram types.
305 */
306 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
307 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
308 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
309 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
310 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
311
312 /* stop ignoring. */
313 pVM->rem.s.fIgnoreAll = false;
314
315 /*
316 * Register the saved state data unit.
317 */
318 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
319 NULL, remR3Save, NULL,
320 NULL, remR3Load, NULL);
321 if (RT_FAILURE(rc))
322 return rc;
323
324#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
325 /*
326 * Debugger commands.
327 */
328 static bool fRegisteredCmds = false;
329 if (!fRegisteredCmds)
330 {
331 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
332 if (RT_SUCCESS(rc))
333 fRegisteredCmds = true;
334 }
335#endif
336
337#ifdef VBOX_WITH_STATISTICS
338 /*
339 * Statistics.
340 */
341 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
342 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
343 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
344 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
345 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
346 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
347 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
348 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
349 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
350 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
351 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
352 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
353
354 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
355
356 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
357 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
358 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
359 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
360 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
361 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
362 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
363 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
364 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
365 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
366 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
367
368 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
369 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
370 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
371 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
372
373 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
374 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
375 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
376 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
377 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
378 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
379
380 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
381 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
382 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
383 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
384 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
385 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
386
387 /** @todo missing /REM/Tb*Count stats */
388
389#endif
390
391#ifdef DEBUG_ALL_LOGGING
392 loglevel = ~0;
393# ifdef DEBUG_TMP_LOGGING
394 logfile = fopen("/tmp/vbox-qemu.log", "w");
395# endif
396#endif
397
398 return rc;
399}
400
401
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 *
 * @remark  Currently a no-op: the exec-memory blocks allocated in REMR3Init
 *          (code_gen_prologue, the single-instruction code buffer) are not
 *          freed here — presumably reclaimed on process teardown; TODO confirm.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
    return VINF_SUCCESS;
}
415
416
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.
     *
     * fIgnoreAll must bracket cpu_reset(): it suppresses the notification
     * callbacks that the reset would otherwise trigger.  Do not reorder.
     */
    pVM->rem.s.fIgnoreAll = true;
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;       /* drop any queued page invalidations */
    pVM->rem.s.fIgnoreAll = false;

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
441
442
/**
 * Execute state save operation.
 *
 * The write order here defines the on-disk layout consumed by remR3Load;
 * keep the two functions in sync.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    PREM pRem = &pVM->rem.s;
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);                  /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);

    /* Intermediate Put statuses are ignored; returning the final Put's status
       is taken as representative — NOTE(review): assumes SSM latches earlier
       write errors into subsequent calls, verify against SSM docs. */
    return SSMR3PutU32(pSSM, ~0);           /* terminator */
}
468
469
/**
 * Execute state load operation.
 *
 * Reads the stream written by remR3Save (plus extra fields present in the
 * obsolete 1.6 layout); the read order must mirror the save order exactly.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    pVM->rem.s.fIgnoreAll = true;

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        unsigned i;

        /*
         * Load the REM stuff.
         * (Only present in the 1.6 format; newer saves don't store these.)
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     * (Re-queried from CPUM rather than from the saved state.)
     */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
    return VINF_SUCCESS;
}
591
592
593
594#undef LOG_GROUP
595#define LOG_GROUP LOG_GROUP_REM_RUN
596
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     *
     * The original interrupt_request is saved and restored at the bottom,
     * so the temporary clearing here is invisible to callers.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * (cpu_breakpoint_remove returns 0 on success, hence the negation to get "was set".)
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Resume/pause pairs nudge the clocks forward without leaving them running. */
        TMCpuTickResume(pVM);
        TMCpuTickPause(pVM);
        TMVirtualResume(pVM);
        TMVirtualPause(pVM);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* An EM status was stashed in rem.s.rc by the code raising EXCP_RC. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
677
678
679/**
680 * Set a breakpoint using the REM facilities.
681 *
682 * @returns VBox status code.
683 * @param pVM The VM handle.
684 * @param Address The breakpoint address.
685 * @thread The emulation thread.
686 */
687REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
688{
689 VM_ASSERT_EMT(pVM);
690 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
691 {
692 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
693 return VINF_SUCCESS;
694 }
695 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
696 return VERR_REM_NO_MORE_BP_SLOTS;
697}
698
699
700/**
701 * Clears a breakpoint set by REMR3BreakpointSet().
702 *
703 * @returns VBox status code.
704 * @param pVM The VM handle.
705 * @param Address The breakpoint address.
706 * @thread The emulation thread.
707 */
708REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
709{
710 VM_ASSERT_EMT(pVM);
711 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
712 {
713 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
714 return VINF_SUCCESS;
715 }
716 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
717 return VERR_REM_BP_NOT_FOUND;
718}
719
720
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     * (fFlushTBs is restored right after so the skip only covers this sync.)
     */
    rc = REMR3State(pVM);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVM);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVM);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we have no way to tell the two apart
             * reliably, so scan the breakpoint table to decide.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;    /* consume the stashed status */
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         * (Restore interrupt_request before syncing back so pending flags survive.)
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
868
869
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * The returned status tells EM what to do next: continue (VINF_SUCCESS),
 * halt, reschedule to raw / hardware-accelerated execution, report a debug
 * event, or propagate an EM status code raised from inside the recompiler.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);

    /* Bracket the recompiled execution for TM's time accounting. */
    TMNotifyStartOfExecution(pVM);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVM);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* Breakpoint or single step? If the current (linear) PC matches a
               registered breakpoint report a breakpoint hit, otherwise assume
               we got here because of single stepping. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /** @todo missing VBOX_WITH_VMI/EXCP_PARAV_CALL */
        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            /* Consume the pending rc so it cannot accidentally be returned twice. */
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
997
998
/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns boolean
 * @param   env         The CPU env struct.
 * @param   eip         The EIP to check this for (might differ from env->eip).
 * @param   fFlags      hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is
 *                      supported in this context. Only written when true is
 *                      returned.
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Only the fields HWACCM inspects are filled in; the remainder of
         * Ctx is deliberately left untouched. QEmu keeps descriptor
         * attributes shifted up by 8, hence the >> 8 conversions below.
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr = env->tr.selector;
        Ctx.trHid.u64Base = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = env->idt.base;

        Ctx.gdtr.cbGdt = env->gdt.limit;
        Ctx.gdtr.pGdt = env->gdt.base;

        Ctx.rsp = env->regs[R_ESP];
        Ctx.rip = env->eip;

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u64Base = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es = env->segs[R_ES].selector;
        Ctx.esHid.u64Base = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires paged protected mode. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    /* PAE is only acceptable if the guest actually reports the feature. */
    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring-0 (supervisor) code. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* Patch code is always executed in raw mode. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(PGMPhysIsA20Enabled(env->pVM));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1233
1234
1235/**
1236 * Fetches a code byte.
1237 *
1238 * @returns Success indicator (bool) for ease of use.
1239 * @param env The CPU environment structure.
1240 * @param GCPtrInstr Where to fetch code.
1241 * @param pu8Byte Where to store the byte on success
1242 */
1243bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1244{
1245 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1246 if (RT_SUCCESS(rc))
1247 return true;
1248 return false;
1249}
1250
1251
/**
 * Flush (or invalidate if you like) page table/dir entry.
 *
 * (invlpg instruction; tlb_flush_page)
 *
 * @param   env     Pointer to cpu environment.
 * @param   GCPtr   The virtual address which page table/dir entry should be invalidated.
 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     * PGM may need current CR0/CR3/CR4 to locate the paging structures.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* NOTE(review): a CR4.VME toggle apparently requires a TSS resync
       (same pattern in remR3FlushTLB/remR3ChangeCpuMode) - confirm with SELM. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    rc = PGMInvalidatePage(pVM, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* On failure, fall back to a full CR3 resync on the next PGM pass. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1298
1299
1300#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address into an R3 pointer for the QEMU TLB,
 * encoding the access status in the two low bits of the returned pointer
 * (possible because physAddr is asserted to be 4-byte aligned):
 *   - bit 0 set: page is inaccessible through this path (sentinel 1),
 *   - bit 1 set: writes must be caught (VINF_PGM_PHYS_TLB_CATCH_WRITE).
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is ignored; a writable mapping is
       always requested and write monitoring is signalled via the tag bit
       below instead - verify this is intentional. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1321#endif /* REM_PHYS_ADDR_IN_TLB */
1322
1323
1324/**
1325 * Called from tlb_protect_code in order to write monitor a code page.
1326 *
1327 * @param env Pointer to the CPU environment.
1328 * @param GCPtr Code page to monitor
1329 */
1330void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1331{
1332#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1333 Assert(env->pVM->rem.s.fInREM);
1334 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1335 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1336 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1337 && !(env->eflags & VM_MASK) /* no V86 mode */
1338 && !HWACCMIsEnabled(env->pVM))
1339 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1340#endif
1341}
1342
1343
1344/**
1345 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1346 *
1347 * @param env Pointer to the CPU environment.
1348 * @param GCPtr Code page to monitor
1349 */
1350void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1351{
1352 Assert(env->pVM->rem.s.fInREM);
1353#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1354 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1355 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1356 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1357 && !(env->eflags & VM_MASK) /* no V86 mode */
1358 && !HWACCMIsEnabled(env->pVM))
1359 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1360#endif
1361}
1362
1363
/**
 * Called when the CPU is initialized, any of the CRx registers are changed or
 * when the A20 line is modified.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   fGlobal Set if the flush is global.
 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * (Without CR4.PGE there are no global pages, so a non-global flush
     * flushes everything anyway.)
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* NOTE(review): a CR4.VME toggle apparently requires a TSS resync
       (same pattern in remR3FlushPage/remR3ChangeCpuMode) - confirm with SELM. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    PGMFlushTLB(pVM, env->cr[3], fGlobal);
}
1406
1407
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * Propagates the new mode-controlling register values to the CPUM context
 * and notifies PGM so it can switch paging mode. A failure from
 * PGMChangeMode is fatal (cpu_abort).
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUState *env)
{
    int rc;
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* NOTE(review): a CR4.VME toggle apparently requires a TSS resync
       (same pattern in remR3FlushPage/remR3FlushTLB) - confirm with SELM. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], env->efer);
    if (rc != VINF_SUCCESS)
        cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], env->efer, rc);
#else
    /* 32-bit target: no EFER, pass 0. */
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], 0);
    if (rc != VINF_SUCCESS)
        cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], 0LL, rc);
#endif
}
1448
1449
/**
 * Called from compiled code to run dma.
 *
 * Suspends the emulated-code profiling bracket while PDM services the
 * pending DMA work, then resumes it.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3DmaRun(CPUState *env)
{
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1461
1462
/**
 * Called from compiled code to schedule pending timers in VMM
 *
 * Swaps the emulated-code profiling bracket for the timer bracket around
 * the TM timer-queue processing.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3TimersRun(CPUState *env)
{
    LogFlow(("remR3TimersRun:\n"));
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1477
1478
1479/**
1480 * Record trap occurance
1481 *
1482 * @returns VBox status code
1483 * @param env Pointer to the CPU environment.
1484 * @param uTrap Trap nr
1485 * @param uErrorCode Error code
1486 * @param pvNextEIP Next EIP
1487 */
1488int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1489{
1490 PVM pVM = env->pVM;
1491#ifdef VBOX_WITH_STATISTICS
1492 static STAMCOUNTER s_aStatTrap[255];
1493 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1494#endif
1495
1496#ifdef VBOX_WITH_STATISTICS
1497 if (uTrap < 255)
1498 {
1499 if (!s_aRegisters[uTrap])
1500 {
1501 char szStatName[64];
1502 s_aRegisters[uTrap] = true;
1503 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1504 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1505 }
1506 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1507 }
1508#endif
1509 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1510 if( uTrap < 0x20
1511 && (env->cr[0] & X86_CR0_PE)
1512 && !(env->eflags & X86_EFL_VM))
1513 {
1514#ifdef DEBUG
1515 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1516#endif
1517 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1518 {
1519 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1520 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1521 return VERR_REM_TOO_MANY_TRAPS;
1522 }
1523 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1524 pVM->rem.s.cPendingExceptions = 1;
1525 pVM->rem.s.uPendingException = uTrap;
1526 pVM->rem.s.uPendingExcptEIP = env->eip;
1527 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1528 }
1529 else
1530 {
1531 pVM->rem.s.cPendingExceptions = 0;
1532 pVM->rem.s.uPendingException = uTrap;
1533 pVM->rem.s.uPendingExcptEIP = env->eip;
1534 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1535 }
1536 return VINF_SUCCESS;
1537}
1538
1539
1540/*
1541 * Clear current active trap
1542 *
1543 * @param pVM VM Handle.
1544 */
1545void remR3TrapClear(PVM pVM)
1546{
1547 pVM->rem.s.cPendingExceptions = 0;
1548 pVM->rem.s.uPendingException = 0;
1549 pVM->rem.s.uPendingExcptEIP = 0;
1550 pVM->rem.s.uPendingExcptCR2 = 0;
1551}
1552
1553
1554/*
1555 * Record previous call instruction addresses
1556 *
1557 * @param env Pointer to the CPU environment.
1558 */
1559void remR3RecordCall(CPUState *env)
1560{
1561 CSAMR3RecordCallAddress(env->pVM, env->eip);
1562}
1563
1564
1565/**
1566 * Syncs the internal REM state with the VM.
1567 *
1568 * This must be called before REMR3Run() is invoked whenever when the REM
1569 * state is not up to date. Calling it several times in a row is not
1570 * permitted.
1571 *
1572 * @returns VBox status code.
1573 *
1574 * @param pVM VM Handle.
1575 * @param fFlushTBs Flush all translation blocks before executing code
1576 *
1577 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1578 * no do this since the majority of the callers don't want any unnecessary of events
1579 * pending that would immediatly interrupt execution.
1580 */
1581REMR3DECL(int) REMR3State(PVM pVM)
1582{
1583 register const CPUMCTX *pCtx;
1584 register unsigned fFlags;
1585 bool fHiddenSelRegsValid;
1586 unsigned i;
1587 TRPMEVENT enmType;
1588 uint8_t u8TrapNo;
1589 int rc;
1590
1591 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1592 Log2(("REMR3State:\n"));
1593
1594 pCtx = pVM->rem.s.pCtx;
1595 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1596
1597 Assert(!pVM->rem.s.fInREM);
1598 pVM->rem.s.fInStateSync = true;
1599
1600 /*
1601 * If we have to flush TBs, do that immediately.
1602 */
1603 if (pVM->rem.s.fFlushTBs)
1604 {
1605 STAM_COUNTER_INC(&gStatFlushTBs);
1606 tb_flush(&pVM->rem.s.Env);
1607 pVM->rem.s.fFlushTBs = false;
1608 }
1609
1610 /*
1611 * Copy the registers which require no special handling.
1612 */
1613#ifdef TARGET_X86_64
1614 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1615 Assert(R_EAX == 0);
1616 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1617 Assert(R_ECX == 1);
1618 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1619 Assert(R_EDX == 2);
1620 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1621 Assert(R_EBX == 3);
1622 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1623 Assert(R_ESP == 4);
1624 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1625 Assert(R_EBP == 5);
1626 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1627 Assert(R_ESI == 6);
1628 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1629 Assert(R_EDI == 7);
1630 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1631 pVM->rem.s.Env.regs[8] = pCtx->r8;
1632 pVM->rem.s.Env.regs[9] = pCtx->r9;
1633 pVM->rem.s.Env.regs[10] = pCtx->r10;
1634 pVM->rem.s.Env.regs[11] = pCtx->r11;
1635 pVM->rem.s.Env.regs[12] = pCtx->r12;
1636 pVM->rem.s.Env.regs[13] = pCtx->r13;
1637 pVM->rem.s.Env.regs[14] = pCtx->r14;
1638 pVM->rem.s.Env.regs[15] = pCtx->r15;
1639
1640 pVM->rem.s.Env.eip = pCtx->rip;
1641
1642 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1643#else
1644 Assert(R_EAX == 0);
1645 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1646 Assert(R_ECX == 1);
1647 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1648 Assert(R_EDX == 2);
1649 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1650 Assert(R_EBX == 3);
1651 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1652 Assert(R_ESP == 4);
1653 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1654 Assert(R_EBP == 5);
1655 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1656 Assert(R_ESI == 6);
1657 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1658 Assert(R_EDI == 7);
1659 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1660 pVM->rem.s.Env.eip = pCtx->eip;
1661
1662 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1663#endif
1664
1665 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1666
1667 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1668 for (i=0;i<8;i++)
1669 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1670
1671 /*
1672 * Clear the halted hidden flag (the interrupt waking up the CPU can
1673 * have been dispatched in raw mode).
1674 */
1675 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1676
1677 /*
1678 * Replay invlpg?
1679 */
1680 if (pVM->rem.s.cInvalidatedPages)
1681 {
1682 RTUINT i;
1683
1684 pVM->rem.s.fIgnoreInvlPg = true;
1685 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1686 {
1687 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1688 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1689 }
1690 pVM->rem.s.fIgnoreInvlPg = false;
1691 pVM->rem.s.cInvalidatedPages = 0;
1692 }
1693
1694 /* Replay notification changes? */
1695 if (pVM->rem.s.cHandlerNotifications)
1696 REMR3ReplayHandlerNotifications(pVM);
1697
1698 /* Update MSRs; before CRx registers! */
1699 pVM->rem.s.Env.efer = pCtx->msrEFER;
1700 pVM->rem.s.Env.star = pCtx->msrSTAR;
1701 pVM->rem.s.Env.pat = pCtx->msrPAT;
1702#ifdef TARGET_X86_64
1703 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1704 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1705 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1706 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1707
1708 /* Update the internal long mode activate flag according to the new EFER value. */
1709 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1710 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1711 else
1712 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1713#endif
1714
1715
1716 /*
1717 * Registers which are rarely changed and require special handling / order when changed.
1718 */
1719 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1720 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1721 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1722 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1723 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1724 {
1725 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1726 {
1727 pVM->rem.s.fIgnoreCR3Load = true;
1728 tlb_flush(&pVM->rem.s.Env, true);
1729 pVM->rem.s.fIgnoreCR3Load = false;
1730 }
1731
1732 /* CR4 before CR0! */
1733 if (fFlags & CPUM_CHANGED_CR4)
1734 {
1735 pVM->rem.s.fIgnoreCR3Load = true;
1736 pVM->rem.s.fIgnoreCpuMode = true;
1737 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1738 pVM->rem.s.fIgnoreCpuMode = false;
1739 pVM->rem.s.fIgnoreCR3Load = false;
1740 }
1741
1742 if (fFlags & CPUM_CHANGED_CR0)
1743 {
1744 pVM->rem.s.fIgnoreCR3Load = true;
1745 pVM->rem.s.fIgnoreCpuMode = true;
1746 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1747 pVM->rem.s.fIgnoreCpuMode = false;
1748 pVM->rem.s.fIgnoreCR3Load = false;
1749 }
1750
1751 if (fFlags & CPUM_CHANGED_CR3)
1752 {
1753 pVM->rem.s.fIgnoreCR3Load = true;
1754 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1755 pVM->rem.s.fIgnoreCR3Load = false;
1756 }
1757
1758 if (fFlags & CPUM_CHANGED_GDTR)
1759 {
1760 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1761 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1762 }
1763
1764 if (fFlags & CPUM_CHANGED_IDTR)
1765 {
1766 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1767 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1768 }
1769
1770 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1771 {
1772 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1773 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1774 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1775 }
1776
1777 if (fFlags & CPUM_CHANGED_LDTR)
1778 {
1779 if (fHiddenSelRegsValid)
1780 {
1781 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1782 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1783 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1784 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1785 }
1786 else
1787 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1788 }
1789
1790 if (fFlags & CPUM_CHANGED_CPUID)
1791 {
1792 uint32_t u32Dummy;
1793
1794 /*
1795 * Get the CPUID features.
1796 */
1797 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1798 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1799 }
1800
1801 /* Sync FPU state after CR4, CPUID and EFER (!). */
1802 if (fFlags & CPUM_CHANGED_FPU_REM)
1803 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1804 }
1805
1806 /*
1807 * Sync TR unconditionally to make life simpler.
1808 */
1809 pVM->rem.s.Env.tr.selector = pCtx->tr;
1810 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1811 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1812 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
1813 /* Note! do_interrupt will fault if the busy flag is still set... */
1814 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1815
1816 /*
1817 * Update selector registers.
1818 * This must be done *after* we've synced gdt, ldt and crX registers
1819 * since we're reading the GDT/LDT om sync_seg. This will happen with
1820 * saved state which takes a quick dip into rawmode for instance.
1821 */
1822 /*
1823 * Stack; Note first check this one as the CPL might have changed. The
1824 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1825 */
1826
1827 if (fHiddenSelRegsValid)
1828 {
1829 /* The hidden selector registers are valid in the CPU context. */
1830 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1831
1832 /* Set current CPL */
1833 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1834
1835 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
1836 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
1837 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
1838 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
1839 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
1840 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
1841 }
1842 else
1843 {
1844 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
1845 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
1846 {
1847 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
1848
1849 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1850 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
1851#ifdef VBOX_WITH_STATISTICS
1852 if (pVM->rem.s.Env.segs[R_SS].newselector)
1853 {
1854 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
1855 }
1856#endif
1857 }
1858 else
1859 pVM->rem.s.Env.segs[R_SS].newselector = 0;
1860
1861 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
1862 {
1863 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
1864 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
1865#ifdef VBOX_WITH_STATISTICS
1866 if (pVM->rem.s.Env.segs[R_ES].newselector)
1867 {
1868 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
1869 }
1870#endif
1871 }
1872 else
1873 pVM->rem.s.Env.segs[R_ES].newselector = 0;
1874
1875 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
1876 {
1877 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
1878 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
1879#ifdef VBOX_WITH_STATISTICS
1880 if (pVM->rem.s.Env.segs[R_CS].newselector)
1881 {
1882 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
1883 }
1884#endif
1885 }
1886 else
1887 pVM->rem.s.Env.segs[R_CS].newselector = 0;
1888
1889 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
1890 {
1891 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
1892 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
1893#ifdef VBOX_WITH_STATISTICS
1894 if (pVM->rem.s.Env.segs[R_DS].newselector)
1895 {
1896 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
1897 }
1898#endif
1899 }
1900 else
1901 pVM->rem.s.Env.segs[R_DS].newselector = 0;
1902
1903 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
1904 * be the same but not the base/limit. */
1905 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
1906 {
1907 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
1908 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
1909#ifdef VBOX_WITH_STATISTICS
1910 if (pVM->rem.s.Env.segs[R_FS].newselector)
1911 {
1912 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
1913 }
1914#endif
1915 }
1916 else
1917 pVM->rem.s.Env.segs[R_FS].newselector = 0;
1918
1919 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
1920 {
1921 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
1922 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
1923#ifdef VBOX_WITH_STATISTICS
1924 if (pVM->rem.s.Env.segs[R_GS].newselector)
1925 {
1926 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
1927 }
1928#endif
1929 }
1930 else
1931 pVM->rem.s.Env.segs[R_GS].newselector = 0;
1932 }
1933
1934 /*
1935 * Check for traps.
1936 */
1937 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
1938 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
1939 if (RT_SUCCESS(rc))
1940 {
1941#ifdef DEBUG
1942 if (u8TrapNo == 0x80)
1943 {
1944 remR3DumpLnxSyscall(pVM);
1945 remR3DumpOBsdSyscall(pVM);
1946 }
1947#endif
1948
1949 pVM->rem.s.Env.exception_index = u8TrapNo;
1950 if (enmType != TRPM_SOFTWARE_INT)
1951 {
1952 pVM->rem.s.Env.exception_is_int = 0;
1953 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
1954 }
1955 else
1956 {
1957 /*
1958 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
1959 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
1960 * for int03 and into.
1961 */
1962 pVM->rem.s.Env.exception_is_int = 1;
1963 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
1964 /* int 3 may be generated by one-byte 0xcc */
1965 if (u8TrapNo == 3)
1966 {
1967 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
1968 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
1969 }
1970 /* int 4 may be generated by one-byte 0xce */
1971 else if (u8TrapNo == 4)
1972 {
1973 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
1974 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
1975 }
1976 }
1977
1978 /* get error code and cr2 if needed. */
1979 switch (u8TrapNo)
1980 {
1981 case 0x0e:
1982 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
1983 /* fallthru */
1984 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
1985 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
1986 break;
1987
1988 case 0x11: case 0x08:
1989 default:
1990 pVM->rem.s.Env.error_code = 0;
1991 break;
1992 }
1993
1994 /*
1995 * We can now reset the active trap since the recompiler is gonna have a go at it.
1996 */
1997 rc = TRPMResetTrap(pVM);
1998 AssertRC(rc);
1999 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2000 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2001 }
2002
2003 /*
2004 * Clear old interrupt request flags; Check for pending hardware interrupts.
2005 * (See @remark for why we don't check for other FFs.)
2006 */
2007 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2008 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2009 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2010 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2011
2012 /*
2013 * We're now in REM mode.
2014 */
2015 pVM->rem.s.fInREM = true;
2016 pVM->rem.s.fInStateSync = false;
2017 pVM->rem.s.cCanExecuteRaw = 0;
2018 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2019 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2020 return VINF_SUCCESS;
2021}
2022
2023
2024/**
2025 * Syncs back changes in the REM state to the the VM state.
2026 *
2027 * This must be called after invoking REMR3Run().
2028 * Calling it several times in a row is not permitted.
2029 *
2030 * @returns VBox status code.
2031 *
2032 * @param pVM VM Handle.
2033 */
2034REMR3DECL(int) REMR3StateBack(PVM pVM)
2035{
2036 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2037 unsigned i;
2038
2039 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2040 Log2(("REMR3StateBack:\n"));
2041 Assert(pVM->rem.s.fInREM);
2042
2043 /*
2044 * Copy back the registers.
2045 * This is done in the order they are declared in the CPUMCTX structure.
2046 */
2047
2048 /** @todo FOP */
2049 /** @todo FPUIP */
2050 /** @todo CS */
2051 /** @todo FPUDP */
2052 /** @todo DS */
2053
2054 /** @todo check if FPU/XMM was actually used in the recompiler */
2055 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2056//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2057
2058#ifdef TARGET_X86_64
2059 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2060 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2061 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2062 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2063 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2064 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2065 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2066 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2067 pCtx->r8 = pVM->rem.s.Env.regs[8];
2068 pCtx->r9 = pVM->rem.s.Env.regs[9];
2069 pCtx->r10 = pVM->rem.s.Env.regs[10];
2070 pCtx->r11 = pVM->rem.s.Env.regs[11];
2071 pCtx->r12 = pVM->rem.s.Env.regs[12];
2072 pCtx->r13 = pVM->rem.s.Env.regs[13];
2073 pCtx->r14 = pVM->rem.s.Env.regs[14];
2074 pCtx->r15 = pVM->rem.s.Env.regs[15];
2075
2076 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2077
2078#else
2079 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2080 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2081 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2082 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2083 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2084 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2085 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2086
2087 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2088#endif
2089
2090 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2091
2092#ifdef VBOX_WITH_STATISTICS
2093 if (pVM->rem.s.Env.segs[R_SS].newselector)
2094 {
2095 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
2096 }
2097 if (pVM->rem.s.Env.segs[R_GS].newselector)
2098 {
2099 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
2100 }
2101 if (pVM->rem.s.Env.segs[R_FS].newselector)
2102 {
2103 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
2104 }
2105 if (pVM->rem.s.Env.segs[R_ES].newselector)
2106 {
2107 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
2108 }
2109 if (pVM->rem.s.Env.segs[R_DS].newselector)
2110 {
2111 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
2112 }
2113 if (pVM->rem.s.Env.segs[R_CS].newselector)
2114 {
2115 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
2116 }
2117#endif
2118 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2119 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2120 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2121 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2122 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2123
2124#ifdef TARGET_X86_64
2125 pCtx->rip = pVM->rem.s.Env.eip;
2126 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2127#else
2128 pCtx->eip = pVM->rem.s.Env.eip;
2129 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2130#endif
2131
2132 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2133 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2134 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2135 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2136 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2137 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2138
2139 for (i = 0; i < 8; i++)
2140 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2141
2142 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2143 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2144 {
2145 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2146 STAM_COUNTER_INC(&gStatREMGDTChange);
2147 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2148 }
2149
2150 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2151 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2152 {
2153 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2154 STAM_COUNTER_INC(&gStatREMIDTChange);
2155 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2156 }
2157
2158 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2159 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2160 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2161 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2162 {
2163 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2164 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2165 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2166 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2167 STAM_COUNTER_INC(&gStatREMLDTRChange);
2168 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2169 }
2170
2171 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2172 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2173 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2174 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2175 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2176 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2177 : 0) )
2178 {
2179 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2180 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2181 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2182 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2183 pCtx->tr = pVM->rem.s.Env.tr.selector;
2184 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2185 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2186 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2187 if (pCtx->trHid.Attr.u)
2188 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2189 STAM_COUNTER_INC(&gStatREMTRChange);
2190 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2191 }
2192
2193 /** @todo These values could still be out of sync! */
2194 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2195 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2196 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2197 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;
2198
2199 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2200 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2201 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;
2202
2203 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2204 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2205 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;
2206
2207 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2208 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2209 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;
2210
2211 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2212 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2213 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;
2214
2215 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2216 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2217 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;
2218
2219 /* Sysenter MSR */
2220 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2221 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2222 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2223
2224 /* System MSRs. */
2225 pCtx->msrEFER = pVM->rem.s.Env.efer;
2226 pCtx->msrSTAR = pVM->rem.s.Env.star;
2227 pCtx->msrPAT = pVM->rem.s.Env.pat;
2228#ifdef TARGET_X86_64
2229 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2230 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2231 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2232 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2233#endif
2234
2235 remR3TrapClear(pVM);
2236
2237 /*
2238 * Check for traps.
2239 */
2240 if ( pVM->rem.s.Env.exception_index >= 0
2241 && pVM->rem.s.Env.exception_index < 256)
2242 {
2243 int rc;
2244
2245 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2246 rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2247 AssertRC(rc);
2248 switch (pVM->rem.s.Env.exception_index)
2249 {
2250 case 0x0e:
2251 TRPMSetFaultAddress(pVM, pCtx->cr2);
2252 /* fallthru */
2253 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2254 case 0x11: case 0x08: /* 0 */
2255 TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
2256 break;
2257 }
2258
2259 }
2260
2261 /*
2262 * We're not longer in REM mode.
2263 */
2264 pVM->rem.s.fInREM = false;
2265 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2266 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2267 return VINF_SUCCESS;
2268}
2269
2270
2271/**
2272 * This is called by the disassembler when it wants to update the cpu state
2273 * before for instance doing a register dump.
2274 */
2275static void remR3StateUpdate(PVM pVM)
2276{
2277 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2278 unsigned i;
2279
2280 Assert(pVM->rem.s.fInREM);
2281
2282 /*
2283 * Copy back the registers.
2284 * This is done in the order they are declared in the CPUMCTX structure.
2285 */
2286
2287 /** @todo FOP */
2288 /** @todo FPUIP */
2289 /** @todo CS */
2290 /** @todo FPUDP */
2291 /** @todo DS */
2292 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2293 pCtx->fpu.MXCSR = 0;
2294 pCtx->fpu.MXCSR_MASK = 0;
2295
2296 /** @todo check if FPU/XMM was actually used in the recompiler */
2297 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2298//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2299
2300#ifdef TARGET_X86_64
2301 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2302 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2303 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2304 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2305 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2306 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2307 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2308 pCtx->r8 = pVM->rem.s.Env.regs[8];
2309 pCtx->r9 = pVM->rem.s.Env.regs[9];
2310 pCtx->r10 = pVM->rem.s.Env.regs[10];
2311 pCtx->r11 = pVM->rem.s.Env.regs[11];
2312 pCtx->r12 = pVM->rem.s.Env.regs[12];
2313 pCtx->r13 = pVM->rem.s.Env.regs[13];
2314 pCtx->r14 = pVM->rem.s.Env.regs[14];
2315 pCtx->r15 = pVM->rem.s.Env.regs[15];
2316
2317 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2318#else
2319 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2320 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2321 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2322 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2323 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2324 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2325 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2326
2327 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2328#endif
2329
2330 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2331
2332 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2333 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2334 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2335 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2336 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2337
2338#ifdef TARGET_X86_64
2339 pCtx->rip = pVM->rem.s.Env.eip;
2340 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2341#else
2342 pCtx->eip = pVM->rem.s.Env.eip;
2343 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2344#endif
2345
2346 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2347 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2348 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2349 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2350 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2351 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2352
2353 for (i = 0; i < 8; i++)
2354 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2355
2356 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2357 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2358 {
2359 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2360 STAM_COUNTER_INC(&gStatREMGDTChange);
2361 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2362 }
2363
2364 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2365 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2366 {
2367 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2368 STAM_COUNTER_INC(&gStatREMIDTChange);
2369 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2370 }
2371
2372 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2373 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2374 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2375 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2376 {
2377 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2378 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2379 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2380 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2381 STAM_COUNTER_INC(&gStatREMLDTRChange);
2382 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2383 }
2384
2385 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2386 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2387 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2388 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2389 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2390 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2391 : 0) )
2392 {
2393 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2394 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2395 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2396 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2397 pCtx->tr = pVM->rem.s.Env.tr.selector;
2398 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2399 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2400 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2401 if (pCtx->trHid.Attr.u)
2402 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2403 STAM_COUNTER_INC(&gStatREMTRChange);
2404 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2405 }
2406
2407 /** @todo These values could still be out of sync! */
2408 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2409 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2410 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2411 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2412
2413 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2414 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2415 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2416
2417 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2418 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2419 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2420
2421 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2422 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2423 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2424
2425 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2426 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2427 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2428
2429 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2430 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2431 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2432
2433 /* Sysenter MSR */
2434 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2435 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2436 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2437
2438 /* System MSRs. */
2439 pCtx->msrEFER = pVM->rem.s.Env.efer;
2440 pCtx->msrSTAR = pVM->rem.s.Env.star;
2441 pCtx->msrPAT = pVM->rem.s.Env.pat;
2442#ifdef TARGET_X86_64
2443 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2444 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2445 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2446 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2447#endif
2448
2449}
2450
2451
2452/**
2453 * Update the VMM state information if we're currently in REM.
2454 *
2455 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2456 * we're currently executing in REM and the VMM state is invalid. This method will of
2457 * course check that we're executing in REM before syncing any data over to the VMM.
2458 *
2459 * @param pVM The VM handle.
2460 */
2461REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2462{
2463 if (pVM->rem.s.fInREM)
2464 remR3StateUpdate(pVM);
2465}
2466
2467
2468#undef LOG_GROUP
2469#define LOG_GROUP LOG_GROUP_REM
2470
2471
2472/**
2473 * Notify the recompiler about Address Gate 20 state change.
2474 *
2475 * This notification is required since A20 gate changes are
2476 * initialized from a device driver and the VM might just as
2477 * well be in REM mode as in RAW mode.
2478 *
2479 * @param pVM VM handle.
2480 * @param fEnable True if the gate should be enabled.
2481 * False if the gate should be disabled.
2482 */
2483REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2484{
2485 bool fSaved;
2486
2487 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2488 VM_ASSERT_EMT(pVM);
2489
2490 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2491 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2492
2493 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2494
2495 pVM->rem.s.fIgnoreAll = fSaved;
2496}
2497
2498
2499/**
2500 * Replays the invalidated recorded pages.
2501 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2502 *
2503 * @param pVM VM handle.
2504 */
2505REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2506{
2507 RTUINT i;
2508
2509 VM_ASSERT_EMT(pVM);
2510
2511 /*
2512 * Sync the required registers.
2513 */
2514 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2515 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2516 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2517 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2518
2519 /*
2520 * Replay the flushes.
2521 */
2522 pVM->rem.s.fIgnoreInvlPg = true;
2523 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2524 {
2525 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2526 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2527 }
2528 pVM->rem.s.fIgnoreInvlPg = false;
2529 pVM->rem.s.cInvalidatedPages = 0;
2530}
2531
2532
2533/**
2534 * Replays the handler notification changes
2535 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2536 *
2537 * @param pVM VM handle.
2538 */
2539REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2540{
2541 /*
2542 * Replay the flushes.
2543 */
2544 RTUINT i;
2545 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2546
2547 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2548 VM_ASSERT_EMT(pVM);
2549
2550 pVM->rem.s.cHandlerNotifications = 0;
2551 for (i = 0; i < c; i++)
2552 {
2553 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2554 switch (pRec->enmKind)
2555 {
2556 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2557 REMR3NotifyHandlerPhysicalRegister(pVM,
2558 pRec->u.PhysicalRegister.enmType,
2559 pRec->u.PhysicalRegister.GCPhys,
2560 pRec->u.PhysicalRegister.cb,
2561 pRec->u.PhysicalRegister.fHasHCHandler);
2562 break;
2563
2564 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2565 REMR3NotifyHandlerPhysicalDeregister(pVM,
2566 pRec->u.PhysicalDeregister.enmType,
2567 pRec->u.PhysicalDeregister.GCPhys,
2568 pRec->u.PhysicalDeregister.cb,
2569 pRec->u.PhysicalDeregister.fHasHCHandler,
2570 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2571 break;
2572
2573 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2574 REMR3NotifyHandlerPhysicalModify(pVM,
2575 pRec->u.PhysicalModify.enmType,
2576 pRec->u.PhysicalModify.GCPhysOld,
2577 pRec->u.PhysicalModify.GCPhysNew,
2578 pRec->u.PhysicalModify.cb,
2579 pRec->u.PhysicalModify.fHasHCHandler,
2580 pRec->u.PhysicalModify.fRestoreAsRAM);
2581 break;
2582
2583 default:
2584 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2585 break;
2586 }
2587 }
2588 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2589}
2590
2591
2592/**
2593 * Notify REM about changed code page.
2594 *
2595 * @returns VBox status code.
2596 * @param pVM VM handle.
2597 * @param pvCodePage Code page address
2598 */
2599REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
2600{
2601#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2602 int rc;
2603 RTGCPHYS PhysGC;
2604 uint64_t flags;
2605
2606 VM_ASSERT_EMT(pVM);
2607
2608 /*
2609 * Get the physical page address.
2610 */
2611 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2612 if (rc == VINF_SUCCESS)
2613 {
2614 /*
2615 * Sync the required registers and flush the whole page.
2616 * (Easier to do the whole page than notifying it about each physical
2617 * byte that was changed.
2618 */
2619 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2620 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2621 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2622 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2623
2624 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2625 }
2626#endif
2627 return VINF_SUCCESS;
2628}
2629
2630
2631/**
2632 * Notification about a successful MMR3PhysRegister() call.
2633 *
2634 * @param pVM VM handle.
2635 * @param GCPhys The physical address the RAM.
2636 * @param cb Size of the memory.
2637 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2638 */
2639REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, unsigned fFlags)
2640{
2641 uint32_t cbBitmap;
2642 int rc;
2643 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%d fFlags=%d\n", GCPhys, cb, fFlags));
2644 VM_ASSERT_EMT(pVM);
2645
2646 /*
2647 * Validate input - we trust the caller.
2648 */
2649 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2650 Assert(cb);
2651 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2652
2653 /*
2654 * Base ram?
2655 */
2656 if (!GCPhys)
2657 {
2658 phys_ram_size = cb;
2659 phys_ram_dirty_size = cb >> PAGE_SHIFT;
2660#ifndef VBOX_STRICT
2661 phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
2662 AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", phys_ram_dirty_size));
2663#else /* VBOX_STRICT: allocate a full map and make the out of bounds pages invalid. */
2664 phys_ram_dirty = RTMemPageAlloc(_4G >> PAGE_SHIFT);
2665 AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", _4G >> PAGE_SHIFT));
2666 cbBitmap = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
2667 rc = RTMemProtect(phys_ram_dirty + cbBitmap, (_4G >> PAGE_SHIFT) - cbBitmap, RTMEM_PROT_NONE);
2668 AssertRC(rc);
2669 phys_ram_dirty += cbBitmap - phys_ram_dirty_size;
2670#endif
2671 memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
2672 }
2673
2674 /*
2675 * Register the ram.
2676 */
2677 Assert(!pVM->rem.s.fIgnoreAll);
2678 pVM->rem.s.fIgnoreAll = true;
2679
2680#ifdef VBOX_WITH_NEW_PHYS_CODE
2681 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2682#else
2683 if (!GCPhys)
2684 cpu_register_physical_memory(GCPhys, cb, GCPhys | IO_MEM_RAM_MISSING);
2685 else
2686 {
2687 if (fFlags & MM_RAM_FLAGS_RESERVED)
2688 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2689 else
2690 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2691 }
2692#endif
2693 Assert(pVM->rem.s.fIgnoreAll);
2694 pVM->rem.s.fIgnoreAll = false;
2695}
2696
2697#ifndef VBOX_WITH_NEW_PHYS_CODE
2698
2699/**
2700 * Notification about a successful PGMR3PhysRegisterChunk() call.
2701 *
2702 * @param pVM VM handle.
2703 * @param GCPhys The physical address the RAM.
2704 * @param cb Size of the memory.
2705 * @param pvRam The HC address of the RAM.
2706 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2707 */
2708REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
2709{
2710 Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%RGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
2711 VM_ASSERT_EMT(pVM);
2712
2713 /*
2714 * Validate input - we trust the caller.
2715 */
2716 Assert(pvRam);
2717 Assert(RT_ALIGN(pvRam, PAGE_SIZE) == pvRam);
2718 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2719 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
2720 Assert(fFlags == 0 /* normal RAM */);
2721 Assert(!pVM->rem.s.fIgnoreAll);
2722 pVM->rem.s.fIgnoreAll = true;
2723 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2724 Assert(pVM->rem.s.fIgnoreAll);
2725 pVM->rem.s.fIgnoreAll = false;
2726}
2727
2728
2729/**
2730 * Grows dynamically allocated guest RAM.
2731 * Will raise a fatal error if the operation fails.
2732 *
2733 * @param physaddr The physical address.
2734 */
2735void remR3GrowDynRange(unsigned long physaddr) /** @todo Needs fixing for MSC... */
2736{
2737 int rc;
2738 PVM pVM = cpu_single_env->pVM;
2739 const RTGCPHYS GCPhys = physaddr;
2740
2741 LogFlow(("remR3GrowDynRange %RGp\n", (RTGCPTR)physaddr));
2742 rc = PGM3PhysGrowRange(pVM, &GCPhys);
2743 if (RT_SUCCESS(rc))
2744 return;
2745
2746 LogRel(("\nUnable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr));
2747 cpu_abort(cpu_single_env, "Unable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr);
2748 AssertFatalFailed();
2749}
2750
2751#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2752
2753/**
2754 * Notification about a successful MMR3PhysRomRegister() call.
2755 *
2756 * @param pVM VM handle.
2757 * @param GCPhys The physical address of the ROM.
2758 * @param cb The size of the ROM.
2759 * @param pvCopy Pointer to the ROM copy.
2760 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2761 * This function will be called when ever the protection of the
2762 * shadow ROM changes (at reset and end of POST).
2763 */
2764REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2765{
2766 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d pvCopy=%p fShadow=%RTbool\n", GCPhys, cb, pvCopy, fShadow));
2767 VM_ASSERT_EMT(pVM);
2768
2769 /*
2770 * Validate input - we trust the caller.
2771 */
2772 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2773 Assert(cb);
2774 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2775 Assert(pvCopy);
2776 Assert(RT_ALIGN_P(pvCopy, PAGE_SIZE) == pvCopy);
2777
2778 /*
2779 * Register the rom.
2780 */
2781 Assert(!pVM->rem.s.fIgnoreAll);
2782 pVM->rem.s.fIgnoreAll = true;
2783
2784 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2785
2786 Log2(("%.64Rhxd\n", (char *)pvCopy + cb - 64));
2787
2788 Assert(pVM->rem.s.fIgnoreAll);
2789 pVM->rem.s.fIgnoreAll = false;
2790}
2791
2792
2793/**
2794 * Notification about a successful memory deregistration or reservation.
2795 *
2796 * @param pVM VM Handle.
2797 * @param GCPhys Start physical address.
2798 * @param cb The size of the range.
2799 */
2800REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2801{
2802 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2803 VM_ASSERT_EMT(pVM);
2804
2805 /*
2806 * Validate input - we trust the caller.
2807 */
2808 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2809 Assert(cb);
2810 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2811
2812 /*
2813 * Unassigning the memory.
2814 */
2815 Assert(!pVM->rem.s.fIgnoreAll);
2816 pVM->rem.s.fIgnoreAll = true;
2817
2818 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2819
2820 Assert(pVM->rem.s.fIgnoreAll);
2821 pVM->rem.s.fIgnoreAll = false;
2822}
2823
2824
2825/**
2826 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2827 *
2828 * @param pVM VM Handle.
2829 * @param enmType Handler type.
2830 * @param GCPhys Handler range address.
2831 * @param cb Size of the handler range.
2832 * @param fHasHCHandler Set if the handler has a HC callback function.
2833 *
2834 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2835 * Handler memory type to memory which has no HC handler.
2836 */
2837REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2838{
2839 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
2840 enmType, GCPhys, cb, fHasHCHandler));
2841 VM_ASSERT_EMT(pVM);
2842 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2843 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2844
2845 if (pVM->rem.s.cHandlerNotifications)
2846 REMR3ReplayHandlerNotifications(pVM);
2847
2848 Assert(!pVM->rem.s.fIgnoreAll);
2849 pVM->rem.s.fIgnoreAll = true;
2850
2851 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2852 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
2853 else if (fHasHCHandler)
2854 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
2855
2856 Assert(pVM->rem.s.fIgnoreAll);
2857 pVM->rem.s.fIgnoreAll = false;
2858}
2859
2860
2861/**
2862 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
2863 *
2864 * @param pVM VM Handle.
2865 * @param enmType Handler type.
2866 * @param GCPhys Handler range address.
2867 * @param cb Size of the handler range.
2868 * @param fHasHCHandler Set if the handler has a HC callback function.
2869 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2870 */
2871REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2872{
2873 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
2874 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
2875 VM_ASSERT_EMT(pVM);
2876
2877 if (pVM->rem.s.cHandlerNotifications)
2878 REMR3ReplayHandlerNotifications(pVM);
2879
2880 Assert(!pVM->rem.s.fIgnoreAll);
2881 pVM->rem.s.fIgnoreAll = true;
2882
2883/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
2884 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2885 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2886 else if (fHasHCHandler)
2887 {
2888 if (!fRestoreAsRAM)
2889 {
2890 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
2891 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2892 }
2893 else
2894 {
2895 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2896 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2897 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2898 }
2899 }
2900
2901 Assert(pVM->rem.s.fIgnoreAll);
2902 pVM->rem.s.fIgnoreAll = false;
2903}
2904
2905
2906/**
2907 * Notification about a successful PGMR3HandlerPhysicalModify() call.
2908 *
2909 * @param pVM VM Handle.
2910 * @param enmType Handler type.
2911 * @param GCPhysOld Old handler range address.
2912 * @param GCPhysNew New handler range address.
2913 * @param cb Size of the handler range.
2914 * @param fHasHCHandler Set if the handler has a HC callback function.
2915 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2916 */
2917REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2918{
2919 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
2920 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
2921 VM_ASSERT_EMT(pVM);
2922 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
2923
2924 if (pVM->rem.s.cHandlerNotifications)
2925 REMR3ReplayHandlerNotifications(pVM);
2926
2927 if (fHasHCHandler)
2928 {
2929 Assert(!pVM->rem.s.fIgnoreAll);
2930 pVM->rem.s.fIgnoreAll = true;
2931
2932 /*
2933 * Reset the old page.
2934 */
2935 if (!fRestoreAsRAM)
2936 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
2937 else
2938 {
2939 /* This is not perfect, but it'll do for PD monitoring... */
2940 Assert(cb == PAGE_SIZE);
2941 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
2942 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
2943 }
2944
2945 /*
2946 * Update the new page.
2947 */
2948 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
2949 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2950 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
2951
2952 Assert(pVM->rem.s.fIgnoreAll);
2953 pVM->rem.s.fIgnoreAll = false;
2954 }
2955}
2956
2957
2958/**
2959 * Checks if we're handling access to this page or not.
2960 *
2961 * @returns true if we're trapping access.
2962 * @returns false if we aren't.
2963 * @param pVM The VM handle.
2964 * @param GCPhys The physical address.
2965 *
2966 * @remark This function will only work correctly in VBOX_STRICT builds!
2967 */
2968REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
2969{
2970#ifdef VBOX_STRICT
2971 unsigned long off;
2972 if (pVM->rem.s.cHandlerNotifications)
2973 REMR3ReplayHandlerNotifications(pVM);
2974
2975 off = get_phys_page_offset(GCPhys);
2976 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
2977 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
2978 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
2979#else
2980 return false;
2981#endif
2982}
2983
2984
2985/**
2986 * Deals with a rare case in get_phys_addr_code where the code
2987 * is being monitored.
2988 *
2989 * It could also be an MMIO page, in which case we will raise a fatal error.
2990 *
2991 * @returns The physical address corresponding to addr.
2992 * @param env The cpu environment.
2993 * @param addr The virtual address.
2994 * @param pTLBEntry The TLB entry.
2995 */
2996target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
2997 target_ulong addr,
2998 CPUTLBEntry* pTLBEntry,
2999 target_phys_addr_t ioTLBEntry)
3000{
3001 PVM pVM = env->pVM;
3002
3003 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3004 {
3005 /* If code memory is being monitored, appropriate IOTLB entry will have
3006 handler IO type, and addend will provide real physical address, no
3007 matter if we store VA in TLB or not, as handlers are always passed PA */
3008 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3009 return ret;
3010 }
3011 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3012 "*** handlers\n",
3013 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3014 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3015 LogRel(("*** mmio\n"));
3016 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3017 LogRel(("*** phys\n"));
3018 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3019 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3020 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3021 AssertFatalFailed();
3022}
3023
3024/**
3025 * Read guest RAM and ROM.
3026 *
3027 * @param SrcGCPhys The source address (guest physical).
3028 * @param pvDst The destination address.
3029 * @param cb Number of bytes
3030 */
3031void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3032{
3033 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3034 VBOX_CHECK_ADDR(SrcGCPhys);
3035 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3036#ifdef VBOX_DEBUG_PHYS
3037 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3038#endif
3039 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3040}
3041
3042
3043/**
3044 * Read guest RAM and ROM, unsigned 8-bit.
3045 *
3046 * @param SrcGCPhys The source address (guest physical).
3047 */
3048RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3049{
3050 uint8_t val;
3051 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3052 VBOX_CHECK_ADDR(SrcGCPhys);
3053 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3054 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3055#ifdef VBOX_DEBUG_PHYS
3056 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3057#endif
3058 return val;
3059}
3060
3061
3062/**
3063 * Read guest RAM and ROM, signed 8-bit.
3064 *
3065 * @param SrcGCPhys The source address (guest physical).
3066 */
3067RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3068{
3069 int8_t val;
3070 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3071 VBOX_CHECK_ADDR(SrcGCPhys);
3072 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3073 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3074#ifdef VBOX_DEBUG_PHYS
3075 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3076#endif
3077 return val;
3078}
3079
3080
3081/**
3082 * Read guest RAM and ROM, unsigned 16-bit.
3083 *
3084 * @param SrcGCPhys The source address (guest physical).
3085 */
3086RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3087{
3088 uint16_t val;
3089 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3090 VBOX_CHECK_ADDR(SrcGCPhys);
3091 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3092 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3093#ifdef VBOX_DEBUG_PHYS
3094 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3095#endif
3096 return val;
3097}
3098
3099
3100/**
3101 * Read guest RAM and ROM, signed 16-bit.
3102 *
3103 * @param SrcGCPhys The source address (guest physical).
3104 */
3105RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3106{
3107 int16_t val;
3108 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3109 VBOX_CHECK_ADDR(SrcGCPhys);
3110 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3111 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3112#ifdef VBOX_DEBUG_PHYS
3113 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3114#endif
3115 return val;
3116}
3117
3118
3119/**
3120 * Read guest RAM and ROM, unsigned 32-bit.
3121 *
3122 * @param SrcGCPhys The source address (guest physical).
3123 */
3124RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3125{
3126 uint32_t val;
3127 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3128 VBOX_CHECK_ADDR(SrcGCPhys);
3129 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3130 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3131#ifdef VBOX_DEBUG_PHYS
3132 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3133#endif
3134 return val;
3135}
3136
3137
3138/**
3139 * Read guest RAM and ROM, signed 32-bit.
3140 *
3141 * @param SrcGCPhys The source address (guest physical).
3142 */
3143RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3144{
3145 int32_t val;
3146 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3147 VBOX_CHECK_ADDR(SrcGCPhys);
3148 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3149 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3150#ifdef VBOX_DEBUG_PHYS
3151 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3152#endif
3153 return val;
3154}
3155
3156
3157/**
3158 * Read guest RAM and ROM, unsigned 64-bit.
3159 *
3160 * @param SrcGCPhys The source address (guest physical).
3161 */
3162uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3163{
3164 uint64_t val;
3165 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3166 VBOX_CHECK_ADDR(SrcGCPhys);
3167 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3168 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3169#ifdef VBOX_DEBUG_PHYS
3170 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3171#endif
3172 return val;
3173}
3174
3175
3176/**
3177 * Read guest RAM and ROM, signed 64-bit.
3178 *
3179 * @param SrcGCPhys The source address (guest physical).
3180 */
3181int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3182{
3183 int64_t val;
3184 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3185 VBOX_CHECK_ADDR(SrcGCPhys);
3186 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3187 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3188#ifdef VBOX_DEBUG_PHYS
3189 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3190#endif
3191 return val;
3192}
3193
3194
3195/**
3196 * Write guest RAM.
3197 *
3198 * @param DstGCPhys The destination address (guest physical).
3199 * @param pvSrc The source address.
3200 * @param cb Number of bytes to write
3201 */
3202void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3203{
3204 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3205 VBOX_CHECK_ADDR(DstGCPhys);
3206 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3207 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3208#ifdef VBOX_DEBUG_PHYS
3209 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3210#endif
3211}
3212
3213
3214/**
3215 * Write guest RAM, unsigned 8-bit.
3216 *
3217 * @param DstGCPhys The destination address (guest physical).
3218 * @param val Value
3219 */
3220void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3221{
3222 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3223 VBOX_CHECK_ADDR(DstGCPhys);
3224 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3225 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3226#ifdef VBOX_DEBUG_PHYS
3227 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3228#endif
3229}
3230
3231
3232/**
3233 * Write guest RAM, unsigned 8-bit.
3234 *
3235 * @param DstGCPhys The destination address (guest physical).
3236 * @param val Value
3237 */
3238void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3239{
3240 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3241 VBOX_CHECK_ADDR(DstGCPhys);
3242 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3243 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3244#ifdef VBOX_DEBUG_PHYS
3245 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3246#endif
3247}
3248
3249
3250/**
3251 * Write guest RAM, unsigned 32-bit.
3252 *
3253 * @param DstGCPhys The destination address (guest physical).
3254 * @param val Value
3255 */
3256void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3257{
3258 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3259 VBOX_CHECK_ADDR(DstGCPhys);
3260 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3261 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3262#ifdef VBOX_DEBUG_PHYS
3263 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3264#endif
3265}
3266
3267
3268/**
3269 * Write guest RAM, unsigned 64-bit.
3270 *
3271 * @param DstGCPhys The destination address (guest physical).
3272 * @param val Value
3273 */
3274void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3275{
3276 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3277 VBOX_CHECK_ADDR(DstGCPhys);
3278 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3279 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3280#ifdef VBOX_DEBUG_PHYS
3281 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3282#endif
3283}
3284
3285#undef LOG_GROUP
3286#define LOG_GROUP LOG_GROUP_REM_MMIO
3287
/** Read MMIO memory, 8-bit access; dispatches to the IOM MMIO handler.
 *  Asserted-successful; returns 0-initialized value if the read fails. */
static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
    return u32;
}
3297
/** Read MMIO memory, 16-bit access; dispatches to the IOM MMIO handler.
 *  Asserted-successful; returns 0-initialized value if the read fails. */
static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
    return u32;
}
3307
/** Read MMIO memory, 32-bit access; dispatches to the IOM MMIO handler.
 *  Asserted-successful; returns 0-initialized value if the read fails. */
static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
    return u32;
}
3317
/** Write to MMIO memory, 8-bit access; dispatches to the IOM MMIO handler. */
static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3326
/** Write to MMIO memory, 16-bit access; dispatches to the IOM MMIO handler. */
static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3335
/** Write to MMIO memory, 32-bit access; dispatches to the IOM MMIO handler. */
static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3344
3345
3346#undef LOG_GROUP
3347#define LOG_GROUP LOG_GROUP_REM_HANDLER
3348
3349/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3350
/** Handler-memory read, 8-bit: goes through PGMPhysRead so the registered
 *  access handler fires. Return value of PGMPhysRead is not checked here. */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint8_t u8;
    Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
    return u8;
}
3358
/** Handler-memory read, 16-bit: goes through PGMPhysRead so the registered
 *  access handler fires. */
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint16_t u16;
    Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
    return u16;
}
3366
/** Handler-memory read, 32-bit: goes through PGMPhysRead so the registered
 *  access handler fires. */
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32;
    Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
    return u32;
}
3374
/** Handler-memory write, 8-bit: goes through PGMPhysWrite so the registered
 *  access handler fires.
 *  NOTE(review): passes &u32 with sizeof(uint8_t) — this takes the low byte
 *  only on little-endian hosts; presumably big-endian hosts are out of scope. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3380
/** Handler-memory write, 16-bit: goes through PGMPhysWrite so the registered
 *  access handler fires. Same little-endian &u32 truncation trick as the
 *  8-bit variant. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3386
/** Handler-memory write, 32-bit: goes through PGMPhysWrite so the registered
 *  access handler fires. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3392
3393/* -+- disassembly -+- */
3394
3395#undef LOG_GROUP
3396#define LOG_GROUP LOG_GROUP_REM_DISAS
3397
3398
3399/**
3400 * Enables or disables singled stepped disassembly.
3401 *
3402 * @returns VBox status code.
3403 * @param pVM VM handle.
3404 * @param fEnable To enable set this flag, to disable clear it.
3405 */
3406static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3407{
3408 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3409 VM_ASSERT_EMT(pVM);
3410
3411 if (fEnable)
3412 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3413 else
3414 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3415 return VINF_SUCCESS;
3416}
3417
3418
3419/**
3420 * Enables or disables singled stepped disassembly.
3421 *
3422 * @returns VBox status code.
3423 * @param pVM VM handle.
3424 * @param fEnable To enable set this flag, to disable clear it.
3425 */
3426REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3427{
3428 PVMREQ pReq;
3429 int rc;
3430
3431 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3432 if (VM_IS_EMT(pVM))
3433 return remR3DisasEnableStepping(pVM, fEnable);
3434
3435 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3436 AssertRC(rc);
3437 if (RT_SUCCESS(rc))
3438 rc = pReq->iStatus;
3439 VMR3ReqFree(pReq);
3440 return rc;
3441}
3442
3443
3444#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3445/**
3446 * External Debugger Command: .remstep [on|off|1|0]
3447 */
3448static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3449{
3450 bool fEnable;
3451 int rc;
3452
3453 /* print status */
3454 if (cArgs == 0)
3455 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
3456 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3457
3458 /* convert the argument and change the mode. */
3459 rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3460 if (RT_FAILURE(rc))
3461 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
3462 rc = REMR3DisasEnableStepping(pVM, fEnable);
3463 if (RT_FAILURE(rc))
3464 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
3465 return rc;
3466}
3467#endif
3468
3469
3470/**
3471 * Disassembles n instructions and prints them to the log.
3472 *
3473 * @returns Success indicator.
3474 * @param env Pointer to the recompiler CPU structure.
3475 * @param f32BitCode Indicates that whether or not the code should
3476 * be disassembled as 16 or 32 bit. If -1 the CS
3477 * selector will be inspected.
3478 * @param nrInstructions Nr of instructions to disassemble
3479 * @param pszPrefix
3480 * @remark not currently used for anything but ad-hoc debugging.
3481 */
3482bool remR3DisasBlock(CPUState *env, int f32BitCode, int nrInstructions, char *pszPrefix)
3483{
3484 int i, rc;
3485 RTGCPTR GCPtrPC;
3486 uint8_t *pvPC;
3487 RTINTPTR off;
3488 DISCPUSTATE Cpu;
3489
3490 /*
3491 * Determin 16/32 bit mode.
3492 */
3493 if (f32BitCode == -1)
3494 f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */
3495
3496 /*
3497 * Convert cs:eip to host context address.
3498 * We don't care to much about cross page correctness presently.
3499 */
3500 GCPtrPC = env->segs[R_CS].base + env->eip;
3501 if (f32BitCode && (env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
3502 {
3503 Assert(PGMGetGuestMode(env->pVM) < PGMMODE_AMD64);
3504
3505 /* convert eip to physical address. */
3506 rc = PGMPhysGCPtr2R3PtrByGstCR3(env->pVM,
3507 GCPtrPC,
3508 env->cr[3],
3509 env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE), /** @todo add longmode flag */
3510 (void**)&pvPC);
3511 if (RT_FAILURE(rc))
3512 {
3513 if (!PATMIsPatchGCAddr(env->pVM, GCPtrPC))
3514 return false;
3515 pvPC = (uint8_t *)PATMR3QueryPatchMemHC(env->pVM, NULL)
3516 + (GCPtrPC - PATMR3QueryPatchMemGC(env->pVM, NULL));
3517 }
3518 }
3519 else
3520 {
3521 /* physical address */
3522 rc = PGMPhysGCPhys2R3Ptr(env->pVM, (RTGCPHYS)GCPtrPC, nrInstructions * 16,
3523 (void**)&pvPC);
3524 if (RT_FAILURE(rc))
3525 return false;
3526 }
3527
3528 /*
3529 * Disassemble.
3530 */
3531 off = env->eip - (RTGCUINTPTR)(uintptr_t)pvPC;
3532 Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
3533 Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
3534 //Cpu.dwUserData[0] = (uintptr_t)pVM;
3535 //Cpu.dwUserData[1] = (uintptr_t)pvPC;
3536 //Cpu.dwUserData[2] = GCPtrPC;
3537
3538 for (i=0;i<nrInstructions;i++)
3539 {
3540 char szOutput[256];
3541 uint32_t cbOp;
3542 if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
3543 return false;
3544 if (pszPrefix)
3545 Log(("%s: %s", pszPrefix, szOutput));
3546 else
3547 Log(("%s", szOutput));
3548
3549 pvPC += cbOp;
3550 }
3551 return true;
3552}
3553
3554
3555/** @todo need to test the new code, using the old code in the mean while. */
3556#define USE_OLD_DUMP_AND_DISASSEMBLY
3557
3558/**
3559 * Disassembles one instruction and prints it to the log.
3560 *
3561 * @returns Success indicator.
3562 * @param env Pointer to the recompiler CPU structure.
3563 * @param f32BitCode Indicates that whether or not the code should
3564 * be disassembled as 16 or 32 bit. If -1 the CS
3565 * selector will be inspected.
3566 * @param pszPrefix
3567 */
3568bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3569{
3570#ifdef USE_OLD_DUMP_AND_DISASSEMBLY
3571 PVM pVM = env->pVM;
3572 RTGCPTR GCPtrPC;
3573 uint8_t *pvPC;
3574 char szOutput[256];
3575 uint32_t cbOp;
3576 RTINTPTR off;
3577 DISCPUSTATE Cpu;
3578
3579
3580 /* Doesn't work in long mode. */
3581 if (env->hflags & HF_LMA_MASK)
3582 return false;
3583
3584 /*
3585 * Determin 16/32 bit mode.
3586 */
3587 if (f32BitCode == -1)
3588 f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */
3589
3590 /*
3591 * Log registers
3592 */
3593 if (LogIs2Enabled())
3594 {
3595 remR3StateUpdate(pVM);
3596 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3597 }
3598
3599 /*
3600 * Convert cs:eip to host context address.
3601 * We don't care to much about cross page correctness presently.
3602 */
3603 GCPtrPC = env->segs[R_CS].base + env->eip;
3604 if ((env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
3605 {
3606 /* convert eip to physical address. */
3607 int rc = PGMPhysGCPtr2R3PtrByGstCR3(pVM,
3608 GCPtrPC,
3609 env->cr[3],
3610 env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE),
3611 (void**)&pvPC);
3612 if (RT_FAILURE(rc))
3613 {
3614 if (!PATMIsPatchGCAddr(pVM, GCPtrPC))
3615 return false;
3616 pvPC = (uint8_t *)PATMR3QueryPatchMemHC(pVM, NULL)
3617 + (GCPtrPC - PATMR3QueryPatchMemGC(pVM, NULL));
3618 }
3619 }
3620 else
3621 {
3622
3623 /* physical address */
3624 int rc = PGMPhysGCPhys2R3Ptr(pVM, (RTGCPHYS)GCPtrPC, 16, (void**)&pvPC);
3625 if (RT_FAILURE(rc))
3626 return false;
3627 }
3628
3629 /*
3630 * Disassemble.
3631 */
3632 off = env->eip - (RTGCUINTPTR)(uintptr_t)pvPC;
3633 Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
3634 Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
3635 //Cpu.dwUserData[0] = (uintptr_t)pVM;
3636 //Cpu.dwUserData[1] = (uintptr_t)pvPC;
3637 //Cpu.dwUserData[2] = GCPtrPC;
3638 if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
3639 return false;
3640
3641 if (!f32BitCode)
3642 {
3643 if (pszPrefix)
3644 Log(("%s: %04X:%s", pszPrefix, env->segs[R_CS].selector, szOutput));
3645 else
3646 Log(("%04X:%s", env->segs[R_CS].selector, szOutput));
3647 }
3648 else
3649 {
3650 if (pszPrefix)
3651 Log(("%s: %s", pszPrefix, szOutput));
3652 else
3653 Log(("%s", szOutput));
3654 }
3655 return true;
3656
3657#else /* !USE_OLD_DUMP_AND_DISASSEMBLY */
3658 PVM pVM = env->pVM;
3659 const bool fLog = LogIsEnabled();
3660 const bool fLog2 = LogIs2Enabled();
3661 int rc = VINF_SUCCESS;
3662
3663 /*
3664 * Don't bother if there ain't any log output to do.
3665 */
3666 if (!fLog && !fLog2)
3667 return true;
3668
3669 /*
3670 * Update the state so DBGF reads the correct register values.
3671 */
3672 remR3StateUpdate(pVM);
3673
3674 /*
3675 * Log registers if requested.
3676 */
3677 if (!fLog2)
3678 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3679
3680 /*
3681 * Disassemble to log.
3682 */
3683 if (fLog)
3684 rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);
3685
3686 return RT_SUCCESS(rc);
3687#endif
3688}
3689
3690
3691/**
3692 * Disassemble recompiled code.
3693 *
3694 * @param phFileIgnored Ignored, logfile usually.
3695 * @param pvCode Pointer to the code block.
3696 * @param cb Size of the code block.
3697 */
3698void disas(FILE *phFile, void *pvCode, unsigned long cb)
3699{
3700#ifdef DEBUG_TMP_LOGGING
3701# define DISAS_PRINTF(x...) fprintf(phFile, x)
3702#else
3703# define DISAS_PRINTF(x...) RTLogPrintf(x)
3704 if (LogIs2Enabled())
3705#endif
3706 {
3707 unsigned off = 0;
3708 char szOutput[256];
3709 DISCPUSTATE Cpu;
3710
3711 memset(&Cpu, 0, sizeof(Cpu));
3712#ifdef RT_ARCH_X86
3713 Cpu.mode = CPUMODE_32BIT;
3714#else
3715 Cpu.mode = CPUMODE_64BIT;
3716#endif
3717
3718 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3719 while (off < cb)
3720 {
3721 uint32_t cbInstr;
3722 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3723 DISAS_PRINTF("%s", szOutput);
3724 else
3725 {
3726 DISAS_PRINTF("disas error\n");
3727 cbInstr = 1;
3728#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3729 break;
3730#endif
3731 }
3732 off += cbInstr;
3733 }
3734 }
3735
3736#undef DISAS_PRINTF
3737}
3738
3739
3740/**
3741 * Disassemble guest code.
3742 *
3743 * @param phFileIgnored Ignored, logfile usually.
3744 * @param uCode The guest address of the code to disassemble. (flat?)
3745 * @param cb Number of bytes to disassemble.
3746 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3747 */
3748void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3749{
3750#ifdef DEBUG_TMP_LOGGING
3751# define DISAS_PRINTF(x...) fprintf(phFile, x)
3752#else
3753# define DISAS_PRINTF(x...) RTLogPrintf(x)
3754 if (LogIs2Enabled())
3755#endif
3756 {
3757 PVM pVM = cpu_single_env->pVM;
3758 RTSEL cs;
3759 RTGCUINTPTR eip;
3760
3761 /*
3762 * Update the state so DBGF reads the correct register values (flags).
3763 */
3764 remR3StateUpdate(pVM);
3765
3766 /*
3767 * Do the disassembling.
3768 */
3769 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3770 cs = cpu_single_env->segs[R_CS].selector;
3771 eip = uCode - cpu_single_env->segs[R_CS].base;
3772 for (;;)
3773 {
3774 char szBuf[256];
3775 uint32_t cbInstr;
3776 int rc = DBGFR3DisasInstrEx(pVM,
3777 cs,
3778 eip,
3779 0,
3780 szBuf, sizeof(szBuf),
3781 &cbInstr);
3782 if (RT_SUCCESS(rc))
3783 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3784 else
3785 {
3786 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3787 cbInstr = 1;
3788 }
3789
3790 /* next */
3791 if (cb <= cbInstr)
3792 break;
3793 cb -= cbInstr;
3794 uCode += cbInstr;
3795 eip += cbInstr;
3796 }
3797 }
3798#undef DISAS_PRINTF
3799}
3800
3801
3802/**
3803 * Looks up a guest symbol.
3804 *
3805 * @returns Pointer to symbol name. This is a static buffer.
3806 * @param orig_addr The address in question.
3807 */
3808const char *lookup_symbol(target_ulong orig_addr)
3809{
3810 RTGCINTPTR off = 0;
3811 DBGFSYMBOL Sym;
3812 PVM pVM = cpu_single_env->pVM;
3813 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3814 if (RT_SUCCESS(rc))
3815 {
3816 static char szSym[sizeof(Sym.szName) + 48];
3817 if (!off)
3818 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3819 else if (off > 0)
3820 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3821 else
3822 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3823 return szSym;
3824 }
3825 return "<N/A>";
3826}
3827
3828
3829#undef LOG_GROUP
3830#define LOG_GROUP LOG_GROUP_REM
3831
3832
3833/* -+- FF notifications -+- */
3834
3835
3836/**
3837 * Notification about a pending interrupt.
3838 *
3839 * @param pVM VM Handle.
3840 * @param u8Interrupt Interrupt
3841 * @thread The emulation thread.
3842 */
3843REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
3844{
3845 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3846 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3847}
3848
3849/**
3850 * Notification about a pending interrupt.
3851 *
3852 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3853 * @param pVM VM Handle.
3854 * @thread The emulation thread.
3855 */
3856REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
3857{
3858 return pVM->rem.s.u32PendingInterrupt;
3859}
3860
3861/**
3862 * Notification about the interrupt FF being set.
3863 *
3864 * @param pVM VM Handle.
3865 * @thread The emulation thread.
3866 */
3867REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
3868{
3869 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3870 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3871 if (pVM->rem.s.fInREM)
3872 {
3873 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3874 CPU_INTERRUPT_EXTERNAL_HARD);
3875 }
3876}
3877
3878
3879/**
3880 * Notification about the interrupt FF being set.
3881 *
3882 * @param pVM VM Handle.
3883 * @thread Any.
3884 */
3885REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
3886{
3887 LogFlow(("REMR3NotifyInterruptClear:\n"));
3888 if (pVM->rem.s.fInREM)
3889 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3890}
3891
3892
3893/**
3894 * Notification about pending timer(s).
3895 *
3896 * @param pVM VM Handle.
3897 * @thread Any.
3898 */
3899REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
3900{
3901#ifndef DEBUG_bird
3902 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3903#endif
3904 if (pVM->rem.s.fInREM)
3905 {
3906 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3907 CPU_INTERRUPT_EXTERNAL_TIMER);
3908 }
3909}
3910
3911
3912/**
3913 * Notification about pending DMA transfers.
3914 *
3915 * @param pVM VM Handle.
3916 * @thread Any.
3917 */
3918REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3919{
3920 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3921 if (pVM->rem.s.fInREM)
3922 {
3923 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3924 CPU_INTERRUPT_EXTERNAL_DMA);
3925 }
3926}
3927
3928
3929/**
3930 * Notification about pending timer(s).
3931 *
3932 * @param pVM VM Handle.
3933 * @thread Any.
3934 */
3935REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3936{
3937 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3938 if (pVM->rem.s.fInREM)
3939 {
3940 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3941 CPU_INTERRUPT_EXTERNAL_EXIT);
3942 }
3943}
3944
3945
3946/**
3947 * Notification about pending FF set by an external thread.
3948 *
3949 * @param pVM VM handle.
3950 * @thread Any.
3951 */
3952REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3953{
3954 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3955 if (pVM->rem.s.fInREM)
3956 {
3957 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3958 CPU_INTERRUPT_EXTERNAL_EXIT);
3959 }
3960}
3961
3962
3963#ifdef VBOX_WITH_STATISTICS
3964void remR3ProfileStart(int statcode)
3965{
3966 STAMPROFILEADV *pStat;
3967 switch(statcode)
3968 {
3969 case STATS_EMULATE_SINGLE_INSTR:
3970 pStat = &gStatExecuteSingleInstr;
3971 break;
3972 case STATS_QEMU_COMPILATION:
3973 pStat = &gStatCompilationQEmu;
3974 break;
3975 case STATS_QEMU_RUN_EMULATED_CODE:
3976 pStat = &gStatRunCodeQEmu;
3977 break;
3978 case STATS_QEMU_TOTAL:
3979 pStat = &gStatTotalTimeQEmu;
3980 break;
3981 case STATS_QEMU_RUN_TIMERS:
3982 pStat = &gStatTimers;
3983 break;
3984 case STATS_TLB_LOOKUP:
3985 pStat= &gStatTBLookup;
3986 break;
3987 case STATS_IRQ_HANDLING:
3988 pStat= &gStatIRQ;
3989 break;
3990 case STATS_RAW_CHECK:
3991 pStat = &gStatRawCheck;
3992 break;
3993
3994 default:
3995 AssertMsgFailed(("unknown stat %d\n", statcode));
3996 return;
3997 }
3998 STAM_PROFILE_ADV_START(pStat, a);
3999}
4000
4001
4002void remR3ProfileStop(int statcode)
4003{
4004 STAMPROFILEADV *pStat;
4005 switch(statcode)
4006 {
4007 case STATS_EMULATE_SINGLE_INSTR:
4008 pStat = &gStatExecuteSingleInstr;
4009 break;
4010 case STATS_QEMU_COMPILATION:
4011 pStat = &gStatCompilationQEmu;
4012 break;
4013 case STATS_QEMU_RUN_EMULATED_CODE:
4014 pStat = &gStatRunCodeQEmu;
4015 break;
4016 case STATS_QEMU_TOTAL:
4017 pStat = &gStatTotalTimeQEmu;
4018 break;
4019 case STATS_QEMU_RUN_TIMERS:
4020 pStat = &gStatTimers;
4021 break;
4022 case STATS_TLB_LOOKUP:
4023 pStat= &gStatTBLookup;
4024 break;
4025 case STATS_IRQ_HANDLING:
4026 pStat= &gStatIRQ;
4027 break;
4028 case STATS_RAW_CHECK:
4029 pStat = &gStatRawCheck;
4030 break;
4031 default:
4032 AssertMsgFailed(("unknown stat %d\n", statcode));
4033 return;
4034 }
4035 STAM_PROFILE_ADV_STOP(pStat, a);
4036}
4037#endif
4038
4039/**
4040 * Raise an RC, force rem exit.
4041 *
4042 * @param pVM VM handle.
4043 * @param rc The rc.
4044 */
4045void remR3RaiseRC(PVM pVM, int rc)
4046{
4047 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4048 Assert(pVM->rem.s.fInREM);
4049 VM_ASSERT_EMT(pVM);
4050 pVM->rem.s.rc = rc;
4051 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4052}
4053
4054
4055/* -+- timers -+- */
4056
/**
 * Reads the guest TSC for the recompiler (qemu callback).
 *
 * @returns The current virtual CPU tick count from TM.
 */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVM);
}
4062
4063
4064/* -+- interrupts -+- */
4065
/**
 * Asserts the FPU error line (qemu callback) by raising ISA IRQ 13.
 */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4071
/**
 * Fetches the next pending external interrupt vector (qemu callback).
 *
 * Prefers the vector previously stashed by REMR3NotifyPendingInterrupt()
 * and only falls back to querying PDM when none is stashed.
 *
 * @returns The interrupt vector, or -1 if none is pending.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     *  if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     *  remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        rc = VINF_SUCCESS;
        /* Stashed value must fit in a byte; consume it so it is delivered only once. */
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVM, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    if (RT_SUCCESS(rc))
    {
        /* Keep the hard-interrupt request up while more interrupts are pending. */
        if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4104
4105
4106/* -+- local apic -+- */
4107
/**
 * Sets the APIC base MSR via PDM (qemu callback).
 */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4113
4114uint64_t cpu_get_apic_base(CPUX86State *env)
4115{
4116 uint64_t u64;
4117 int rc = PDMApicGetBase(env->pVM, &u64);
4118 if (RT_SUCCESS(rc))
4119 {
4120 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4121 return u64;
4122 }
4123 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4124 return 0;
4125}
4126
/**
 * Sets the task-priority register of the APIC via PDM (qemu callback).
 */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVM, val);
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4132
4133uint8_t cpu_get_apic_tpr(CPUX86State *env)
4134{
4135 uint8_t u8;
4136 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4137 if (RT_SUCCESS(rc))
4138 {
4139 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4140 return u8;
4141 }
4142 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4143 return 0;
4144}
4145
4146
4147uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4148{
4149 uint64_t value;
4150 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4151 if (RT_SUCCESS(rc))
4152 {
4153 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4154 return value;
4155 }
4156 /** @todo: exception ? */
4157 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4158 return value;
4159}
4160
/**
 * Writes an APIC MSR via PDM (qemu callback).
 */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4167
/**
 * Reads a guest MSR via CPUM (qemu callback).
 */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    return CPUMGetGuestMsr(env->pVM, msr);
}
4172
/**
 * Writes a guest MSR via CPUM (qemu callback).
 */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    CPUMSetGuestMsr(env->pVM, msr, val);
}
4177
4178/* -+- I/O Ports -+- */
4179
4180#undef LOG_GROUP
4181#define LOG_GROUP LOG_GROUP_REM_IOPORT
4182
4183void cpu_outb(CPUState *env, int addr, int val)
4184{
4185 int rc;
4186
4187 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4188 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4189
4190 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4191 if (RT_LIKELY(rc == VINF_SUCCESS))
4192 return;
4193 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4194 {
4195 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4196 remR3RaiseRC(env->pVM, rc);
4197 return;
4198 }
4199 remAbort(rc, __FUNCTION__);
4200}
4201
4202void cpu_outw(CPUState *env, int addr, int val)
4203{
4204 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4205 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4206 if (RT_LIKELY(rc == VINF_SUCCESS))
4207 return;
4208 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4209 {
4210 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4211 remR3RaiseRC(env->pVM, rc);
4212 return;
4213 }
4214 remAbort(rc, __FUNCTION__);
4215}
4216
4217void cpu_outl(CPUState *env, int addr, int val)
4218{
4219 int rc;
4220 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4221 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4222 if (RT_LIKELY(rc == VINF_SUCCESS))
4223 return;
4224 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4225 {
4226 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4227 remR3RaiseRC(env->pVM, rc);
4228 return;
4229 }
4230 remAbort(rc, __FUNCTION__);
4231}
4232
4233int cpu_inb(CPUState *env, int addr)
4234{
4235 uint32_t u32 = 0;
4236 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4237 if (RT_LIKELY(rc == VINF_SUCCESS))
4238 {
4239 if (/*addr != 0x61 && */addr != 0x71)
4240 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4241 return (int)u32;
4242 }
4243 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4244 {
4245 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4246 remR3RaiseRC(env->pVM, rc);
4247 return (int)u32;
4248 }
4249 remAbort(rc, __FUNCTION__);
4250 return 0xff;
4251}
4252
4253int cpu_inw(CPUState *env, int addr)
4254{
4255 uint32_t u32 = 0;
4256 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4257 if (RT_LIKELY(rc == VINF_SUCCESS))
4258 {
4259 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4260 return (int)u32;
4261 }
4262 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4263 {
4264 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4265 remR3RaiseRC(env->pVM, rc);
4266 return (int)u32;
4267 }
4268 remAbort(rc, __FUNCTION__);
4269 return 0xffff;
4270}
4271
4272int cpu_inl(CPUState *env, int addr)
4273{
4274 uint32_t u32 = 0;
4275 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4276 if (RT_LIKELY(rc == VINF_SUCCESS))
4277 {
4278//if (addr==0x01f0 && u32 == 0x6b6d)
4279// loglevel = ~0;
4280 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4281 return (int)u32;
4282 }
4283 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4284 {
4285 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4286 remR3RaiseRC(env->pVM, rc);
4287 return (int)u32;
4288 }
4289 remAbort(rc, __FUNCTION__);
4290 return 0xffffffff;
4291}
4292
4293#undef LOG_GROUP
4294#define LOG_GROUP LOG_GROUP_REM
4295
4296
4297/* -+- helpers and misc other interfaces -+- */
4298
4299/**
4300 * Perform the CPUID instruction.
4301 *
4302 * ASMCpuId cannot be invoked from some source files where this is used because of global
4303 * register allocations.
4304 *
4305 * @param env Pointer to the recompiler CPU structure.
4306 * @param uOperator CPUID operation (eax).
4307 * @param pvEAX Where to store eax.
4308 * @param pvEBX Where to store ebx.
4309 * @param pvECX Where to store ecx.
4310 * @param pvEDX Where to store edx.
4311 */
4312void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4313{
4314 CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4315}
4316
4317
4318#if 0 /* not used */
4319/**
4320 * Interface for qemu hardware to report back fatal errors.
4321 */
4322void hw_error(const char *pszFormat, ...)
4323{
4324 /*
4325 * Bitch about it.
4326 */
4327 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4328 * this in my Odin32 tree at home! */
4329 va_list args;
4330 va_start(args, pszFormat);
4331 RTLogPrintf("fatal error in virtual hardware:");
4332 RTLogPrintfV(pszFormat, args);
4333 va_end(args);
4334 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4335
4336 /*
4337 * If we're in REM context we'll sync back the state before 'jumping' to
4338 * the EMs failure handling.
4339 */
4340 PVM pVM = cpu_single_env->pVM;
4341 if (pVM->rem.s.fInREM)
4342 REMR3StateBack(pVM);
4343 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4344 AssertMsgFailed(("EMR3FatalError returned!\n"));
4345}
4346#endif
4347
4348/**
4349 * Interface for the qemu cpu to report unhandled situation
4350 * raising a fatal VM error.
4351 */
4352void cpu_abort(CPUState *env, const char *pszFormat, ...)
4353{
4354 va_list args;
4355 PVM pVM;
4356
4357 /*
4358 * Bitch about it.
4359 */
4360#ifndef _MSC_VER
4361 /** @todo: MSVC is right - it's not valid C */
4362 RTLogFlags(NULL, "nodisabled nobuffered");
4363#endif
4364 va_start(args, pszFormat);
4365 RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
4366 va_end(args);
4367 va_start(args, pszFormat);
4368 AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
4369 va_end(args);
4370
4371 /*
4372 * If we're in REM context we'll sync back the state before 'jumping' to
4373 * the EMs failure handling.
4374 */
4375 pVM = cpu_single_env->pVM;
4376 if (pVM->rem.s.fInREM)
4377 REMR3StateBack(pVM);
4378 EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
4379 AssertMsgFailed(("EMR3FatalError returned!\n"));
4380}
4381
4382
4383/**
4384 * Aborts the VM.
4385 *
4386 * @param rc VBox error code.
4387 * @param pszTip Hint about why/when this happend.
4388 */
4389void remAbort(int rc, const char *pszTip)
4390{
4391 PVM pVM;
4392
4393 /*
4394 * Bitch about it.
4395 */
4396 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4397 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4398
4399 /*
4400 * Jump back to where we entered the recompiler.
4401 */
4402 pVM = cpu_single_env->pVM;
4403 if (pVM->rem.s.fInREM)
4404 REMR3StateBack(pVM);
4405 EMR3FatalError(pVM, rc);
4406 AssertMsgFailed(("EMR3FatalError returned!\n"));
4407}
4408
4409
4410/**
4411 * Dumps a linux system call.
4412 * @param pVM VM handle.
4413 */
4414void remR3DumpLnxSyscall(PVM pVM)
4415{
4416 static const char *apsz[] =
4417 {
4418 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4419 "sys_exit",
4420 "sys_fork",
4421 "sys_read",
4422 "sys_write",
4423 "sys_open", /* 5 */
4424 "sys_close",
4425 "sys_waitpid",
4426 "sys_creat",
4427 "sys_link",
4428 "sys_unlink", /* 10 */
4429 "sys_execve",
4430 "sys_chdir",
4431 "sys_time",
4432 "sys_mknod",
4433 "sys_chmod", /* 15 */
4434 "sys_lchown16",
4435 "sys_ni_syscall", /* old break syscall holder */
4436 "sys_stat",
4437 "sys_lseek",
4438 "sys_getpid", /* 20 */
4439 "sys_mount",
4440 "sys_oldumount",
4441 "sys_setuid16",
4442 "sys_getuid16",
4443 "sys_stime", /* 25 */
4444 "sys_ptrace",
4445 "sys_alarm",
4446 "sys_fstat",
4447 "sys_pause",
4448 "sys_utime", /* 30 */
4449 "sys_ni_syscall", /* old stty syscall holder */
4450 "sys_ni_syscall", /* old gtty syscall holder */
4451 "sys_access",
4452 "sys_nice",
4453 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4454 "sys_sync",
4455 "sys_kill",
4456 "sys_rename",
4457 "sys_mkdir",
4458 "sys_rmdir", /* 40 */
4459 "sys_dup",
4460 "sys_pipe",
4461 "sys_times",
4462 "sys_ni_syscall", /* old prof syscall holder */
4463 "sys_brk", /* 45 */
4464 "sys_setgid16",
4465 "sys_getgid16",
4466 "sys_signal",
4467 "sys_geteuid16",
4468 "sys_getegid16", /* 50 */
4469 "sys_acct",
4470 "sys_umount", /* recycled never used phys() */
4471 "sys_ni_syscall", /* old lock syscall holder */
4472 "sys_ioctl",
4473 "sys_fcntl", /* 55 */
4474 "sys_ni_syscall", /* old mpx syscall holder */
4475 "sys_setpgid",
4476 "sys_ni_syscall", /* old ulimit syscall holder */
4477 "sys_olduname",
4478 "sys_umask", /* 60 */
4479 "sys_chroot",
4480 "sys_ustat",
4481 "sys_dup2",
4482 "sys_getppid",
4483 "sys_getpgrp", /* 65 */
4484 "sys_setsid",
4485 "sys_sigaction",
4486 "sys_sgetmask",
4487 "sys_ssetmask",
4488 "sys_setreuid16", /* 70 */
4489 "sys_setregid16",
4490 "sys_sigsuspend",
4491 "sys_sigpending",
4492 "sys_sethostname",
4493 "sys_setrlimit", /* 75 */
4494 "sys_old_getrlimit",
4495 "sys_getrusage",
4496 "sys_gettimeofday",
4497 "sys_settimeofday",
4498 "sys_getgroups16", /* 80 */
4499 "sys_setgroups16",
4500 "old_select",
4501 "sys_symlink",
4502 "sys_lstat",
4503 "sys_readlink", /* 85 */
4504 "sys_uselib",
4505 "sys_swapon",
4506 "sys_reboot",
4507 "old_readdir",
4508 "old_mmap", /* 90 */
4509 "sys_munmap",
4510 "sys_truncate",
4511 "sys_ftruncate",
4512 "sys_fchmod",
4513 "sys_fchown16", /* 95 */
4514 "sys_getpriority",
4515 "sys_setpriority",
4516 "sys_ni_syscall", /* old profil syscall holder */
4517 "sys_statfs",
4518 "sys_fstatfs", /* 100 */
4519 "sys_ioperm",
4520 "sys_socketcall",
4521 "sys_syslog",
4522 "sys_setitimer",
4523 "sys_getitimer", /* 105 */
4524 "sys_newstat",
4525 "sys_newlstat",
4526 "sys_newfstat",
4527 "sys_uname",
4528 "sys_iopl", /* 110 */
4529 "sys_vhangup",
4530 "sys_ni_syscall", /* old "idle" system call */
4531 "sys_vm86old",
4532 "sys_wait4",
4533 "sys_swapoff", /* 115 */
4534 "sys_sysinfo",
4535 "sys_ipc",
4536 "sys_fsync",
4537 "sys_sigreturn",
4538 "sys_clone", /* 120 */
4539 "sys_setdomainname",
4540 "sys_newuname",
4541 "sys_modify_ldt",
4542 "sys_adjtimex",
4543 "sys_mprotect", /* 125 */
4544 "sys_sigprocmask",
4545 "sys_ni_syscall", /* old "create_module" */
4546 "sys_init_module",
4547 "sys_delete_module",
4548 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4549 "sys_quotactl",
4550 "sys_getpgid",
4551 "sys_fchdir",
4552 "sys_bdflush",
4553 "sys_sysfs", /* 135 */
4554 "sys_personality",
4555 "sys_ni_syscall", /* reserved for afs_syscall */
4556 "sys_setfsuid16",
4557 "sys_setfsgid16",
4558 "sys_llseek", /* 140 */
4559 "sys_getdents",
4560 "sys_select",
4561 "sys_flock",
4562 "sys_msync",
4563 "sys_readv", /* 145 */
4564 "sys_writev",
4565 "sys_getsid",
4566 "sys_fdatasync",
4567 "sys_sysctl",
4568 "sys_mlock", /* 150 */
4569 "sys_munlock",
4570 "sys_mlockall",
4571 "sys_munlockall",
4572 "sys_sched_setparam",
4573 "sys_sched_getparam", /* 155 */
4574 "sys_sched_setscheduler",
4575 "sys_sched_getscheduler",
4576 "sys_sched_yield",
4577 "sys_sched_get_priority_max",
4578 "sys_sched_get_priority_min", /* 160 */
4579 "sys_sched_rr_get_interval",
4580 "sys_nanosleep",
4581 "sys_mremap",
4582 "sys_setresuid16",
4583 "sys_getresuid16", /* 165 */
4584 "sys_vm86",
4585 "sys_ni_syscall", /* Old sys_query_module */
4586 "sys_poll",
4587 "sys_nfsservctl",
4588 "sys_setresgid16", /* 170 */
4589 "sys_getresgid16",
4590 "sys_prctl",
4591 "sys_rt_sigreturn",
4592 "sys_rt_sigaction",
4593 "sys_rt_sigprocmask", /* 175 */
4594 "sys_rt_sigpending",
4595 "sys_rt_sigtimedwait",
4596 "sys_rt_sigqueueinfo",
4597 "sys_rt_sigsuspend",
4598 "sys_pread64", /* 180 */
4599 "sys_pwrite64",
4600 "sys_chown16",
4601 "sys_getcwd",
4602 "sys_capget",
4603 "sys_capset", /* 185 */
4604 "sys_sigaltstack",
4605 "sys_sendfile",
4606 "sys_ni_syscall", /* reserved for streams1 */
4607 "sys_ni_syscall", /* reserved for streams2 */
4608 "sys_vfork", /* 190 */
4609 "sys_getrlimit",
4610 "sys_mmap2",
4611 "sys_truncate64",
4612 "sys_ftruncate64",
4613 "sys_stat64", /* 195 */
4614 "sys_lstat64",
4615 "sys_fstat64",
4616 "sys_lchown",
4617 "sys_getuid",
4618 "sys_getgid", /* 200 */
4619 "sys_geteuid",
4620 "sys_getegid",
4621 "sys_setreuid",
4622 "sys_setregid",
4623 "sys_getgroups", /* 205 */
4624 "sys_setgroups",
4625 "sys_fchown",
4626 "sys_setresuid",
4627 "sys_getresuid",
4628 "sys_setresgid", /* 210 */
4629 "sys_getresgid",
4630 "sys_chown",
4631 "sys_setuid",
4632 "sys_setgid",
4633 "sys_setfsuid", /* 215 */
4634 "sys_setfsgid",
4635 "sys_pivot_root",
4636 "sys_mincore",
4637 "sys_madvise",
4638 "sys_getdents64", /* 220 */
4639 "sys_fcntl64",
4640 "sys_ni_syscall", /* reserved for TUX */
4641 "sys_ni_syscall",
4642 "sys_gettid",
4643 "sys_readahead", /* 225 */
4644 "sys_setxattr",
4645 "sys_lsetxattr",
4646 "sys_fsetxattr",
4647 "sys_getxattr",
4648 "sys_lgetxattr", /* 230 */
4649 "sys_fgetxattr",
4650 "sys_listxattr",
4651 "sys_llistxattr",
4652 "sys_flistxattr",
4653 "sys_removexattr", /* 235 */
4654 "sys_lremovexattr",
4655 "sys_fremovexattr",
4656 "sys_tkill",
4657 "sys_sendfile64",
4658 "sys_futex", /* 240 */
4659 "sys_sched_setaffinity",
4660 "sys_sched_getaffinity",
4661 "sys_set_thread_area",
4662 "sys_get_thread_area",
4663 "sys_io_setup", /* 245 */
4664 "sys_io_destroy",
4665 "sys_io_getevents",
4666 "sys_io_submit",
4667 "sys_io_cancel",
4668 "sys_fadvise64", /* 250 */
4669 "sys_ni_syscall",
4670 "sys_exit_group",
4671 "sys_lookup_dcookie",
4672 "sys_epoll_create",
4673 "sys_epoll_ctl", /* 255 */
4674 "sys_epoll_wait",
4675 "sys_remap_file_pages",
4676 "sys_set_tid_address",
4677 "sys_timer_create",
4678 "sys_timer_settime", /* 260 */
4679 "sys_timer_gettime",
4680 "sys_timer_getoverrun",
4681 "sys_timer_delete",
4682 "sys_clock_settime",
4683 "sys_clock_gettime", /* 265 */
4684 "sys_clock_getres",
4685 "sys_clock_nanosleep",
4686 "sys_statfs64",
4687 "sys_fstatfs64",
4688 "sys_tgkill", /* 270 */
4689 "sys_utimes",
4690 "sys_fadvise64_64",
4691 "sys_ni_syscall" /* sys_vserver */
4692 };
4693
4694 uint32_t uEAX = CPUMGetGuestEAX(pVM);
4695 switch (uEAX)
4696 {
4697 default:
4698 if (uEAX < RT_ELEMENTS(apsz))
4699 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4700 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
4701 CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
4702 else
4703 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
4704 break;
4705
4706 }
4707}
4708
4709
4710/**
4711 * Dumps an OpenBSD system call.
4712 * @param pVM VM handle.
4713 */
4714void remR3DumpOBsdSyscall(PVM pVM)
4715{
4716 static const char *apsz[] =
4717 {
4718 "SYS_syscall", //0
4719 "SYS_exit", //1
4720 "SYS_fork", //2
4721 "SYS_read", //3
4722 "SYS_write", //4
4723 "SYS_open", //5
4724 "SYS_close", //6
4725 "SYS_wait4", //7
4726 "SYS_8",
4727 "SYS_link", //9
4728 "SYS_unlink", //10
4729 "SYS_11",
4730 "SYS_chdir", //12
4731 "SYS_fchdir", //13
4732 "SYS_mknod", //14
4733 "SYS_chmod", //15
4734 "SYS_chown", //16
4735 "SYS_break", //17
4736 "SYS_18",
4737 "SYS_19",
4738 "SYS_getpid", //20
4739 "SYS_mount", //21
4740 "SYS_unmount", //22
4741 "SYS_setuid", //23
4742 "SYS_getuid", //24
4743 "SYS_geteuid", //25
4744 "SYS_ptrace", //26
4745 "SYS_recvmsg", //27
4746 "SYS_sendmsg", //28
4747 "SYS_recvfrom", //29
4748 "SYS_accept", //30
4749 "SYS_getpeername", //31
4750 "SYS_getsockname", //32
4751 "SYS_access", //33
4752 "SYS_chflags", //34
4753 "SYS_fchflags", //35
4754 "SYS_sync", //36
4755 "SYS_kill", //37
4756 "SYS_38",
4757 "SYS_getppid", //39
4758 "SYS_40",
4759 "SYS_dup", //41
4760 "SYS_opipe", //42
4761 "SYS_getegid", //43
4762 "SYS_profil", //44
4763 "SYS_ktrace", //45
4764 "SYS_sigaction", //46
4765 "SYS_getgid", //47
4766 "SYS_sigprocmask", //48
4767 "SYS_getlogin", //49
4768 "SYS_setlogin", //50
4769 "SYS_acct", //51
4770 "SYS_sigpending", //52
4771 "SYS_osigaltstack", //53
4772 "SYS_ioctl", //54
4773 "SYS_reboot", //55
4774 "SYS_revoke", //56
4775 "SYS_symlink", //57
4776 "SYS_readlink", //58
4777 "SYS_execve", //59
4778 "SYS_umask", //60
4779 "SYS_chroot", //61
4780 "SYS_62",
4781 "SYS_63",
4782 "SYS_64",
4783 "SYS_65",
4784 "SYS_vfork", //66
4785 "SYS_67",
4786 "SYS_68",
4787 "SYS_sbrk", //69
4788 "SYS_sstk", //70
4789 "SYS_61",
4790 "SYS_vadvise", //72
4791 "SYS_munmap", //73
4792 "SYS_mprotect", //74
4793 "SYS_madvise", //75
4794 "SYS_76",
4795 "SYS_77",
4796 "SYS_mincore", //78
4797 "SYS_getgroups", //79
4798 "SYS_setgroups", //80
4799 "SYS_getpgrp", //81
4800 "SYS_setpgid", //82
4801 "SYS_setitimer", //83
4802 "SYS_84",
4803 "SYS_85",
4804 "SYS_getitimer", //86
4805 "SYS_87",
4806 "SYS_88",
4807 "SYS_89",
4808 "SYS_dup2", //90
4809 "SYS_91",
4810 "SYS_fcntl", //92
4811 "SYS_select", //93
4812 "SYS_94",
4813 "SYS_fsync", //95
4814 "SYS_setpriority", //96
4815 "SYS_socket", //97
4816 "SYS_connect", //98
4817 "SYS_99",
4818 "SYS_getpriority", //100
4819 "SYS_101",
4820 "SYS_102",
4821 "SYS_sigreturn", //103
4822 "SYS_bind", //104
4823 "SYS_setsockopt", //105
4824 "SYS_listen", //106
4825 "SYS_107",
4826 "SYS_108",
4827 "SYS_109",
4828 "SYS_110",
4829 "SYS_sigsuspend", //111
4830 "SYS_112",
4831 "SYS_113",
4832 "SYS_114",
4833 "SYS_115",
4834 "SYS_gettimeofday", //116
4835 "SYS_getrusage", //117
4836 "SYS_getsockopt", //118
4837 "SYS_119",
4838 "SYS_readv", //120
4839 "SYS_writev", //121
4840 "SYS_settimeofday", //122
4841 "SYS_fchown", //123
4842 "SYS_fchmod", //124
4843 "SYS_125",
4844 "SYS_setreuid", //126
4845 "SYS_setregid", //127
4846 "SYS_rename", //128
4847 "SYS_129",
4848 "SYS_130",
4849 "SYS_flock", //131
4850 "SYS_mkfifo", //132
4851 "SYS_sendto", //133
4852 "SYS_shutdown", //134
4853 "SYS_socketpair", //135
4854 "SYS_mkdir", //136
4855 "SYS_rmdir", //137
4856 "SYS_utimes", //138
4857 "SYS_139",
4858 "SYS_adjtime", //140
4859 "SYS_141",
4860 "SYS_142",
4861 "SYS_143",
4862 "SYS_144",
4863 "SYS_145",
4864 "SYS_146",
4865 "SYS_setsid", //147
4866 "SYS_quotactl", //148
4867 "SYS_149",
4868 "SYS_150",
4869 "SYS_151",
4870 "SYS_152",
4871 "SYS_153",
4872 "SYS_154",
4873 "SYS_nfssvc", //155
4874 "SYS_156",
4875 "SYS_157",
4876 "SYS_158",
4877 "SYS_159",
4878 "SYS_160",
4879 "SYS_getfh", //161
4880 "SYS_162",
4881 "SYS_163",
4882 "SYS_164",
4883 "SYS_sysarch", //165
4884 "SYS_166",
4885 "SYS_167",
4886 "SYS_168",
4887 "SYS_169",
4888 "SYS_170",
4889 "SYS_171",
4890 "SYS_172",
4891 "SYS_pread", //173
4892 "SYS_pwrite", //174
4893 "SYS_175",
4894 "SYS_176",
4895 "SYS_177",
4896 "SYS_178",
4897 "SYS_179",
4898 "SYS_180",
4899 "SYS_setgid", //181
4900 "SYS_setegid", //182
4901 "SYS_seteuid", //183
4902 "SYS_lfs_bmapv", //184
4903 "SYS_lfs_markv", //185
4904 "SYS_lfs_segclean", //186
4905 "SYS_lfs_segwait", //187
4906 "SYS_188",
4907 "SYS_189",
4908 "SYS_190",
4909 "SYS_pathconf", //191
4910 "SYS_fpathconf", //192
4911 "SYS_swapctl", //193
4912 "SYS_getrlimit", //194
4913 "SYS_setrlimit", //195
4914 "SYS_getdirentries", //196
4915 "SYS_mmap", //197
4916 "SYS___syscall", //198
4917 "SYS_lseek", //199
4918 "SYS_truncate", //200
4919 "SYS_ftruncate", //201
4920 "SYS___sysctl", //202
4921 "SYS_mlock", //203
4922 "SYS_munlock", //204
4923 "SYS_205",
4924 "SYS_futimes", //206
4925 "SYS_getpgid", //207
4926 "SYS_xfspioctl", //208
4927 "SYS_209",
4928 "SYS_210",
4929 "SYS_211",
4930 "SYS_212",
4931 "SYS_213",
4932 "SYS_214",
4933 "SYS_215",
4934 "SYS_216",
4935 "SYS_217",
4936 "SYS_218",
4937 "SYS_219",
4938 "SYS_220",
4939 "SYS_semget", //221
4940 "SYS_222",
4941 "SYS_223",
4942 "SYS_224",
4943 "SYS_msgget", //225
4944 "SYS_msgsnd", //226
4945 "SYS_msgrcv", //227
4946 "SYS_shmat", //228
4947 "SYS_229",
4948 "SYS_shmdt", //230
4949 "SYS_231",
4950 "SYS_clock_gettime", //232
4951 "SYS_clock_settime", //233
4952 "SYS_clock_getres", //234
4953 "SYS_235",
4954 "SYS_236",
4955 "SYS_237",
4956 "SYS_238",
4957 "SYS_239",
4958 "SYS_nanosleep", //240
4959 "SYS_241",
4960 "SYS_242",
4961 "SYS_243",
4962 "SYS_244",
4963 "SYS_245",
4964 "SYS_246",
4965 "SYS_247",
4966 "SYS_248",
4967 "SYS_249",
4968 "SYS_minherit", //250
4969 "SYS_rfork", //251
4970 "SYS_poll", //252
4971 "SYS_issetugid", //253
4972 "SYS_lchown", //254
4973 "SYS_getsid", //255
4974 "SYS_msync", //256
4975 "SYS_257",
4976 "SYS_258",
4977 "SYS_259",
4978 "SYS_getfsstat", //260
4979 "SYS_statfs", //261
4980 "SYS_fstatfs", //262
4981 "SYS_pipe", //263
4982 "SYS_fhopen", //264
4983 "SYS_265",
4984 "SYS_fhstatfs", //266
4985 "SYS_preadv", //267
4986 "SYS_pwritev", //268
4987 "SYS_kqueue", //269
4988 "SYS_kevent", //270
4989 "SYS_mlockall", //271
4990 "SYS_munlockall", //272
4991 "SYS_getpeereid", //273
4992 "SYS_274",
4993 "SYS_275",
4994 "SYS_276",
4995 "SYS_277",
4996 "SYS_278",
4997 "SYS_279",
4998 "SYS_280",
4999 "SYS_getresuid", //281
5000 "SYS_setresuid", //282
5001 "SYS_getresgid", //283
5002 "SYS_setresgid", //284
5003 "SYS_285",
5004 "SYS_mquery", //286
5005 "SYS_closefrom", //287
5006 "SYS_sigaltstack", //288
5007 "SYS_shmget", //289
5008 "SYS_semop", //290
5009 "SYS_stat", //291
5010 "SYS_fstat", //292
5011 "SYS_lstat", //293
5012 "SYS_fhstat", //294
5013 "SYS___semctl", //295
5014 "SYS_shmctl", //296
5015 "SYS_msgctl", //297
5016 "SYS_MAXSYSCALL", //298
5017 //299
5018 //300
5019 };
5020 uint32_t uEAX;
5021 if (!LogIsEnabled())
5022 return;
5023 uEAX = CPUMGetGuestEAX(pVM);
5024 switch (uEAX)
5025 {
5026 default:
5027 if (uEAX < RT_ELEMENTS(apsz))
5028 {
5029 uint32_t au32Args[8] = {0};
5030 PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
5031 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5032 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5033 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5034 }
5035 else
5036 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
5037 break;
5038 }
5039}
5040
5041
5042#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5043/**
5044 * The Dll main entry point (stub).
5045 */
5046bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5047{
5048 return true;
5049}
5050
/**
 * Minimal byte-wise memcpy replacement for no-CRT Windows x86 builds.
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer; must not overlap dst (standard memcpy contract).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* was a plain uint8_t *, discarding the const qualifier */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5058
5059#endif
5060
/**
 * SMM state-change callback from qemu; deliberately left empty as this
 * recompiler wrapper takes no action on SMM transitions.
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette