VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 18597

Last change on this file since 18597 was 18597, checked in by vboxsync, 16 years ago

REM: Added a l0 map for PageDesc, this should fix the performance issue if our hunch is right about the cause. Profile tb_flush.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 155.7 KB
Line 
1/* $Id: VBoxRecompiler.c 18597 2009-04-01 13:34:19Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
79/** Copy 80-bit fpu register at pSrc to pDst.
80 * This is probably faster than *calling* memcpy.
81 */
82#define REM_COPY_FPU_REG(pDst, pSrc) \
83 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static void remR3StateUpdate(PVM pVM);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108
109/*******************************************************************************
110* Global Variables *
111*******************************************************************************/
112
/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Advanced profiles (start/stop pairs, ticks per call) for the major REM paths. */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
/* Simple profiles for the two address conversion directions. */
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMPROFILE    gStatHCVirt2GCPhys;
static STAMCOUNTER    gStatCpuGetTSC;
/* Counters for each reason remR3CanExecuteRaw refuses raw-mode execution. */
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
/* Counters for descriptor-table / task-register change notifications. */
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
/* Indexed by segment register (ES=0, CS, SS, DS, FS, GS) — see STAM_REG calls in REMR3Init. */
static STAMCOUNTER    gStatSelOutOfSync[6];
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER    gStatFlushTBs;
#endif
146/* in exec.c */
147extern uint32_t tlb_flush_count;
148extern uint32_t tb_flush_count;
149extern uint32_t tb_phys_invalidate_count;
150
151/*
152 * Global stuff.
153 */
154
/** MMIO read callbacks.
 * Indexed by access size: [0]=byte, [1]=word, [2]=dword (QEMU io-memory convention). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks.
 * Same size indexing as g_apfnMMIORead. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (PGM access handler backed memory). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks (PGM access handler backed memory). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
186
187
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments.
 * A single optional boolean-ish number selecting the new stepping state. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 * Registered once from REMR3Init via DBGCRegisterCommands.
 * NOTE(review): uses designated initializers (C99/GNU extension) unlike most
 * tables in this file — presumably fine for the compilers this builds with. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd = "remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
219
/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * Allocated (1K of executable memory) in REMR3Init via RTMemExecAlloc. */
uint8_t *code_gen_prologue;
222
223
224/*******************************************************************************
225* Internal Functions *
226*******************************************************************************/
227void remAbort(int rc, const char *pszTip);
228extern int testmath(void);
229
/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
/* NOTE(review): both branches of this #if assert the same thing; the
 * conditional is currently redundant and could be collapsed. */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif
239
240
/**
 * Initializes the REM.
 *
 * Sets up the recompiler CPU environment (cpu_x86_init), allocates the
 * prologue and single-instruction code buffers, registers the MMIO and
 * handler io-memory types, the saved state unit, the debugger command
 * and (optionally) statistics.  Must run before any RAM is registered —
 * see the MMR3PhysGetRamSize assertion below.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    uint32_t u32Dummy;
    int rc;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /* ctx. */
    pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications — muted until the io-memory types below are registered */
    pVM->rem.s.fIgnoreAll = true;

    /* Executable memory for the translation-block prologue; must be below 4G
     * (see the comment on code_gen_prologue). */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);   /* 0 = default translation buffer size */

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest CPUID feature bits into the recompiler environment. */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        /* NOTE(review): this inner rc intentionally shadows the outer one —
         * a command registration failure is non-fatal and must not change
         * the function's return value. */
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr", STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",     STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",     STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",     STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",      STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",    STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",         STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",    STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",     STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys,      STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",        STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit",  STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",      STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",    STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",       STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0",  STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",       STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",    STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",       STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2",  STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",          STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",   STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",      STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",   STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",        STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    /* These exec.c counters are registered in all builds (release statistics). */
    STAM_REL_REG(pVM, &tb_flush_count,            STAMTYPE_U32_RESET, "/REM/TbFlushCount",     STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,  STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,           STAMTYPE_U32_RESET, "/REM/TlbFlushCount",    STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    /* rc is the SSMR3RegisterInternal status (success if we got here). */
    return rc;
}
410
411
412/**
413 * Finalizes the REM initialization.
414 *
415 * This is called after all components, devices and drivers has
416 * been initialized. Its main purpose it to finish the RAM related
417 * initialization.
418 *
419 * @returns VBox status code.
420 *
421 * @param pVM The VM handle.
422 */
423REMR3DECL(int) REMR3InitFinalize(PVM pVM)
424{
425 int rc;
426
427 /*
428 * Ram size & dirty bit map.
429 */
430 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
431 pVM->rem.s.fGCPhysLastRamFixed = true;
432#ifdef RT_STRICT
433 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
434#else
435 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
436#endif
437 return rc;
438}
439
440
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * The dirty map holds one byte per guest page.  In the guarded variant the
 * map is page-allocated with inaccessible pages placed right after it so
 * that overruns fault immediately.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* Overflow check: GCPhysLastRam + 1 must not wrap around. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    /* One dirty byte per page; assert the size didn't lose bits in the shift. */
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        /* Plain heap allocation, no guard. */
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        /* cbBitmapAligned: map size rounded up to a whole page.
         * cbBitmapFull: rounded further up to the map size of a full 4GB of RAM
         * (_4G >> PAGE_SHIFT bytes), then padded so at least the difference to
         * cbBitmapAligned — the part we protect below — is never zero/too small. */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make everything beyond the aligned map inaccessible (the guard). */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the base so the logical map ends exactly at the guard pages. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it — all pages start out dirty (0xff = all dirty flags set). */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
495
496
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * Currently a no-op: nothing is freed here.
 * NOTE(review): code_gen_prologue and Env.pvCodeBuffer allocated in
 * REMR3Init are apparently never released — presumably reclaimed at
 * process exit; confirm this is intentional.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
    return VINF_SUCCESS;
}
510
511
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.
     */
    /* Notifications are muted while cpu_reset pokes at the CPU state,
     * then re-enabled — keep this ordering. */
    pVM->rem.s.fIgnoreAll = true;
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    pVM->rem.s.fIgnoreAll = false;

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
536
537
/**
 * Execute state save operation.
 *
 * Layout (must stay in sync with remR3Load): hflags, ~0 separator,
 * raw-ring-0 flag, pending interrupt, ~0 terminator.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);      /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);

    /* Intermediate put errors are sticky on pSSM, so returning only the
     * terminator's status is sufficient. */
    return SSMR3PutU32(pSSM, ~0);       /* terminator */
}
564
565
/**
 * Execute state load operation.
 *
 * Accepts the current format (REM_SAVED_STATE_VERSION) and the 1.6 format,
 * which additionally carried a redundant CPU state blob and the invalidated
 * page list.  Must mirror the field order written by remR3Save.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    /* NOTE(review): the early-error returns below leave fIgnoreAll set to
     * true; presumably acceptable because a failed load aborts the VM —
     * confirm. */
    pVM->rem.s.fIgnoreAll = true;

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);    /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        unsigned i;

        /*
         * Load the REM stuff.
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        /* Reject counts larger than the fixed-size array. */
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);   /* 1 = flush global entries too */

    /*
     * Stop ignoring ignornable notifications.
     */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
    return VINF_SUCCESS;
}
687
688
689
690#undef LOG_GROUP
691#define LOG_GROUP LOG_GROUP_REM_RUN
692
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    /* Save the interrupt_request flags; they're restored at the bottom. */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     */
    /* Linear PC = EIP + CS base; fBp records whether a bp was actually removed. */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* The resume/pause pairs nudge the clocks forward one tick. */
        TMCpuTickResume(pVM);
        TMCpuTickPause(pVM);
        TMVirtualResume(pVM);
        TMVirtualPause(pVM);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the remaining QEMU exception codes to VBox status codes. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status code was stashed in rem.s.rc; consume it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Re-insert the breakpoint we temporarily removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
778
779
780/**
781 * Set a breakpoint using the REM facilities.
782 *
783 * @returns VBox status code.
784 * @param pVM The VM handle.
785 * @param Address The breakpoint address.
786 * @thread The emulation thread.
787 */
788REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
789{
790 VM_ASSERT_EMT(pVM);
791 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
792 {
793 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
794 return VINF_SUCCESS;
795 }
796 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
797 return VERR_REM_NO_MORE_BP_SLOTS;
798}
799
800
801/**
802 * Clears a breakpoint set by REMR3BreakpointSet().
803 *
804 * @returns VBox status code.
805 * @param pVM The VM handle.
806 * @param Address The breakpoint address.
807 * @thread The emulation thread.
808 */
809REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
810{
811 VM_ASSERT_EMT(pVM);
812 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
813 {
814 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
815 return VINF_SUCCESS;
816 }
817 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
818 return VERR_REM_BP_NOT_FOUND;
819}
820
821
822/**
823 * Emulate an instruction.
824 *
825 * This function executes one instruction without letting anyone
826 * interrupt it. This is intended for being called while being in
827 * raw mode and thus will take care of all the state syncing between
828 * REM and the rest.
829 *
830 * @returns VBox status code.
831 * @param pVM VM handle.
832 */
833REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
834{
835 bool fFlushTBs;
836
837 int rc, rc2;
838 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
839
840 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
841 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
842 */
843 if (HWACCMIsEnabled(pVM))
844 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
845
846 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
847 fFlushTBs = pVM->rem.s.fFlushTBs;
848 pVM->rem.s.fFlushTBs = false;
849
850 /*
851 * Sync the state and enable single instruction / single stepping.
852 */
853 rc = REMR3State(pVM);
854 pVM->rem.s.fFlushTBs = fFlushTBs;
855 if (RT_SUCCESS(rc))
856 {
857 int interrupt_request = pVM->rem.s.Env.interrupt_request;
858 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
859 Assert(!pVM->rem.s.Env.singlestep_enabled);
860 /*
861 * Now we set the execute single instruction flag and enter the cpu_exec loop.
862 */
863 TMNotifyStartOfExecution(pVM);
864 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
865 rc = cpu_exec(&pVM->rem.s.Env);
866 TMNotifyEndOfExecution(pVM);
867 switch (rc)
868 {
869 /*
870 * Executed without anything out of the way happening.
871 */
872 case EXCP_SINGLE_INSTR:
873 rc = VINF_EM_RESCHEDULE;
874 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
875 break;
876
877 /*
878 * If we take a trap or start servicing a pending interrupt, we might end up here.
879 * (Timer thread or some other thread wishing EMT's attention.)
880 */
881 case EXCP_INTERRUPT:
882 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
883 rc = VINF_EM_RESCHEDULE;
884 break;
885
886 /*
887 * Single step, we assume!
888 * If there was a breakpoint there we're fucked now.
889 */
890 case EXCP_DEBUG:
891 {
892 /* breakpoint or single step? */
893 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
894 int iBP;
895 rc = VINF_EM_DBG_STEPPED;
896 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
897 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
898 {
899 rc = VINF_EM_DBG_BREAKPOINT;
900 break;
901 }
902 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
903 break;
904 }
905
906 /*
907 * hlt instruction.
908 */
909 case EXCP_HLT:
910 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
911 rc = VINF_EM_HALT;
912 break;
913
914 /*
915 * The VM has halted.
916 */
917 case EXCP_HALTED:
918 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
919 rc = VINF_EM_HALT;
920 break;
921
922 /*
923 * Switch to RAW-mode.
924 */
925 case EXCP_EXECUTE_RAW:
926 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
927 rc = VINF_EM_RESCHEDULE_RAW;
928 break;
929
930 /*
931 * Switch to hardware accelerated RAW-mode.
932 */
933 case EXCP_EXECUTE_HWACC:
934 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
935 rc = VINF_EM_RESCHEDULE_HWACC;
936 break;
937
938 /*
939 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
940 */
941 case EXCP_RC:
942 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
943 rc = pVM->rem.s.rc;
944 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
945 break;
946
947 /*
948 * Figure out the rest when they arrive....
949 */
950 default:
951 AssertMsgFailed(("rc=%d\n", rc));
952 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
953 rc = VINF_EM_RESCHEDULE;
954 break;
955 }
956
957 /*
958 * Switch back the state.
959 */
960 pVM->rem.s.Env.interrupt_request = interrupt_request;
961 rc2 = REMR3StateBack(pVM);
962 AssertRC(rc2);
963 }
964
965 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
966 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
967 return rc;
968}
969
970
971/**
972 * Runs code in recompiled mode.
973 *
974 * Before calling this function the REM state needs to be in sync with
975 * the VM. Call REMR3State() to perform the sync. It's only necessary
976 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
977 * and after calling REMR3StateBack().
978 *
979 * @returns VBox status code.
980 *
981 * @param pVM VM Handle.
982 */
983REMR3DECL(int) REMR3Run(PVM pVM)
984{
985 int rc;
986 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
987 Assert(pVM->rem.s.fInREM);
988
989 TMNotifyStartOfExecution(pVM);
990 rc = cpu_exec(&pVM->rem.s.Env);
991 TMNotifyEndOfExecution(pVM);
992 switch (rc)
993 {
994 /*
995 * This happens when the execution was interrupted
996 * by an external event, like pending timers.
997 */
998 case EXCP_INTERRUPT:
999 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1000 rc = VINF_SUCCESS;
1001 break;
1002
1003 /*
1004 * hlt instruction.
1005 */
1006 case EXCP_HLT:
1007 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1008 rc = VINF_EM_HALT;
1009 break;
1010
1011 /*
1012 * The VM has halted.
1013 */
1014 case EXCP_HALTED:
1015 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1016 rc = VINF_EM_HALT;
1017 break;
1018
1019 /*
1020 * Breakpoint/single step.
1021 */
1022 case EXCP_DEBUG:
1023 {
1024#if 0//def DEBUG_bird
1025 static int iBP = 0;
1026 printf("howdy, breakpoint! iBP=%d\n", iBP);
1027 switch (iBP)
1028 {
1029 case 0:
1030 cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
1031 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
1032 //pVM->rem.s.Env.interrupt_request = 0;
1033 //pVM->rem.s.Env.exception_index = -1;
1034 //g_fInterruptDisabled = 1;
1035 rc = VINF_SUCCESS;
1036 asm("int3");
1037 break;
1038 default:
1039 asm("int3");
1040 break;
1041 }
1042 iBP++;
1043#else
1044 /* breakpoint or single step? */
1045 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1046 int iBP;
1047 rc = VINF_EM_DBG_STEPPED;
1048 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1049 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1050 {
1051 rc = VINF_EM_DBG_BREAKPOINT;
1052 break;
1053 }
1054 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
1055#endif
1056 break;
1057 }
1058
1059 /*
1060 * Switch to RAW-mode.
1061 */
1062 case EXCP_EXECUTE_RAW:
1063 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1064 rc = VINF_EM_RESCHEDULE_RAW;
1065 break;
1066
1067 /*
1068 * Switch to hardware accelerated RAW-mode.
1069 */
1070 case EXCP_EXECUTE_HWACC:
1071 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1072 rc = VINF_EM_RESCHEDULE_HWACC;
1073 break;
1074
1075 /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
1076 /*
1077 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1078 */
1079 case EXCP_RC:
1080 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1081 rc = pVM->rem.s.rc;
1082 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1083 break;
1084
1085 /*
1086 * Figure out the rest when they arrive....
1087 */
1088 default:
1089 AssertMsgFailed(("rc=%d\n", rc));
1090 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1091 rc = VINF_SUCCESS;
1092 break;
1093 }
1094
1095 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1096 return rc;
1097}
1098
1099
1100/**
1101 * Check if the cpu state is suitable for Raw execution.
1102 *
1103 * @returns boolean
1104 * @param env The CPU env struct.
1105 * @param eip The EIP to check this for (might differ from env->eip).
1106 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1107 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1108 *
1109 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1110 */
1111bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1112{
1113 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1114 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1115 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1116 uint32_t u32CR0;
1117
1118 /* Update counter. */
1119 env->pVM->rem.s.cCanExecuteRaw++;
1120
1121 if (HWACCMIsEnabled(env->pVM))
1122 {
1123 CPUMCTX Ctx;
1124
1125 env->state |= CPU_RAW_HWACC;
1126
1127 /*
1128 * Create partial context for HWACCMR3CanExecuteGuest
1129 */
1130 Ctx.cr0 = env->cr[0];
1131 Ctx.cr3 = env->cr[3];
1132 Ctx.cr4 = env->cr[4];
1133
1134 Ctx.tr = env->tr.selector;
1135 Ctx.trHid.u64Base = env->tr.base;
1136 Ctx.trHid.u32Limit = env->tr.limit;
1137 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1138
1139 Ctx.idtr.cbIdt = env->idt.limit;
1140 Ctx.idtr.pIdt = env->idt.base;
1141
1142 Ctx.gdtr.cbGdt = env->gdt.limit;
1143 Ctx.gdtr.pGdt = env->gdt.base;
1144
1145 Ctx.rsp = env->regs[R_ESP];
1146 Ctx.rip = env->eip;
1147
1148 Ctx.eflags.u32 = env->eflags;
1149
1150 Ctx.cs = env->segs[R_CS].selector;
1151 Ctx.csHid.u64Base = env->segs[R_CS].base;
1152 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1153 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1154
1155 Ctx.ds = env->segs[R_DS].selector;
1156 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1157 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1158 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1159
1160 Ctx.es = env->segs[R_ES].selector;
1161 Ctx.esHid.u64Base = env->segs[R_ES].base;
1162 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1163 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1164
1165 Ctx.fs = env->segs[R_FS].selector;
1166 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1167 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1168 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1169
1170 Ctx.gs = env->segs[R_GS].selector;
1171 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1172 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1173 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1174
1175 Ctx.ss = env->segs[R_SS].selector;
1176 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1177 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1178 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1179
1180 Ctx.msrEFER = env->efer;
1181
1182 /* Hardware accelerated raw-mode:
1183 *
1184 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1185 */
1186 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1187 {
1188 *piException = EXCP_EXECUTE_HWACC;
1189 return true;
1190 }
1191 return false;
1192 }
1193
1194 /*
1195 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1196 * or 32 bits protected mode ring 0 code
1197 *
1198 * The tests are ordered by the likelyhood of being true during normal execution.
1199 */
1200 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1201 {
1202 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1203 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1204 return false;
1205 }
1206
1207#ifndef VBOX_RAW_V86
1208 if (fFlags & VM_MASK) {
1209 STAM_COUNTER_INC(&gStatRefuseVM86);
1210 Log2(("raw mode refused: VM_MASK\n"));
1211 return false;
1212 }
1213#endif
1214
1215 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1216 {
1217#ifndef DEBUG_bird
1218 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1219#endif
1220 return false;
1221 }
1222
1223 if (env->singlestep_enabled)
1224 {
1225 //Log2(("raw mode refused: Single step\n"));
1226 return false;
1227 }
1228
1229 if (env->nb_breakpoints > 0)
1230 {
1231 //Log2(("raw mode refused: Breakpoints\n"));
1232 return false;
1233 }
1234
1235 u32CR0 = env->cr[0];
1236 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1237 {
1238 STAM_COUNTER_INC(&gStatRefusePaging);
1239 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1240 return false;
1241 }
1242
1243 if (env->cr[4] & CR4_PAE_MASK)
1244 {
1245 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1246 {
1247 STAM_COUNTER_INC(&gStatRefusePAE);
1248 return false;
1249 }
1250 }
1251
1252 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1253 {
1254 if (!EMIsRawRing3Enabled(env->pVM))
1255 return false;
1256
1257 if (!(env->eflags & IF_MASK))
1258 {
1259 STAM_COUNTER_INC(&gStatRefuseIF0);
1260 Log2(("raw mode refused: IF (RawR3)\n"));
1261 return false;
1262 }
1263
1264 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1265 {
1266 STAM_COUNTER_INC(&gStatRefuseWP0);
1267 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1268 return false;
1269 }
1270 }
1271 else
1272 {
1273 if (!EMIsRawRing0Enabled(env->pVM))
1274 return false;
1275
1276 // Let's start with pure 32 bits ring 0 code first
1277 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1278 {
1279 STAM_COUNTER_INC(&gStatRefuseCode16);
1280 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1281 return false;
1282 }
1283
1284 // Only R0
1285 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1286 {
1287 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1288 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1289 return false;
1290 }
1291
1292 if (!(u32CR0 & CR0_WP_MASK))
1293 {
1294 STAM_COUNTER_INC(&gStatRefuseWP0);
1295 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1296 return false;
1297 }
1298
1299 if (PATMIsPatchGCAddr(env->pVM, eip))
1300 {
1301 Log2(("raw r0 mode forced: patch code\n"));
1302 *piException = EXCP_EXECUTE_RAW;
1303 return true;
1304 }
1305
1306#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1307 if (!(env->eflags & IF_MASK))
1308 {
1309 STAM_COUNTER_INC(&gStatRefuseIF0);
1310 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1311 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1312 return false;
1313 }
1314#endif
1315
1316 env->state |= CPU_RAW_RING0;
1317 }
1318
1319 /*
1320 * Don't reschedule the first time we're called, because there might be
1321 * special reasons why we're here that is not covered by the above checks.
1322 */
1323 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1324 {
1325 Log2(("raw mode refused: first scheduling\n"));
1326 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1327 return false;
1328 }
1329
1330 Assert(PGMPhysIsA20Enabled(env->pVM));
1331 *piException = EXCP_EXECUTE_RAW;
1332 return true;
1333}
1334
1335
1336/**
1337 * Fetches a code byte.
1338 *
1339 * @returns Success indicator (bool) for ease of use.
1340 * @param env The CPU environment structure.
1341 * @param GCPtrInstr Where to fetch code.
1342 * @param pu8Byte Where to store the byte on success
1343 */
1344bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1345{
1346 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1347 if (RT_SUCCESS(rc))
1348 return true;
1349 return false;
1350}
1351
1352
1353/**
1354 * Flush (or invalidate if you like) page table/dir entry.
1355 *
1356 * (invlpg instruction; tlb_flush_page)
1357 *
1358 * @param env Pointer to cpu environment.
1359 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1360 */
1361void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1362{
1363 PVM pVM = env->pVM;
1364 PCPUMCTX pCtx;
1365 int rc;
1366
1367 /*
1368 * When we're replaying invlpg instructions or restoring a saved
1369 * state we disable this path.
1370 */
1371 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
1372 return;
1373 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1374 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1375
1376 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1377
1378 /*
1379 * Update the control registers before calling PGMFlushPage.
1380 */
1381 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1382 pCtx->cr0 = env->cr[0];
1383 pCtx->cr3 = env->cr[3];
1384 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1385 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
1386 pCtx->cr4 = env->cr[4];
1387
1388 /*
1389 * Let PGM do the rest.
1390 */
1391 rc = PGMInvalidatePage(pVM, GCPtr);
1392 if (RT_FAILURE(rc))
1393 {
1394 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1395 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1396 }
1397 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1398}
1399
1400
#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Encodes the outcome in the low bits of the returned pointer (the address
 * must be at least 4-byte aligned, see the Assert below):
 *  - bit 0 set (value 1): translation failed entirely;
 *  - bit 1 set: writes must be caught (VINF_PGM_PHYS_TLB_CATCH_WRITE).
 *
 * NOTE(review): the fWritable parameter is currently ignored — the call below
 * always passes true. Presumably intentional since write protection is
 * handled via the catch-write bit; confirm before relying on fWritable.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert( rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
#endif /* REM_PHYS_ADDR_IN_TLB */
1423
1424
1425/**
1426 * Called from tlb_protect_code in order to write monitor a code page.
1427 *
1428 * @param env Pointer to the CPU environment.
1429 * @param GCPtr Code page to monitor
1430 */
1431void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1432{
1433#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1434 Assert(env->pVM->rem.s.fInREM);
1435 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1436 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1437 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1438 && !(env->eflags & VM_MASK) /* no V86 mode */
1439 && !HWACCMIsEnabled(env->pVM))
1440 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1441#endif
1442}
1443
1444
1445/**
1446 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1447 *
1448 * @param env Pointer to the CPU environment.
1449 * @param GCPtr Code page to monitor
1450 */
1451void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1452{
1453 Assert(env->pVM->rem.s.fInREM);
1454#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1455 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1456 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1457 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1458 && !(env->eflags & VM_MASK) /* no V86 mode */
1459 && !HWACCMIsEnabled(env->pVM))
1460 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1461#endif
1462}
1463
1464
1465/**
1466 * Called when the CPU is initialized, any of the CRx registers are changed or
1467 * when the A20 line is modified.
1468 *
1469 * @param env Pointer to the CPU environment.
1470 * @param fGlobal Set if the flush is global.
1471 */
1472void remR3FlushTLB(CPUState *env, bool fGlobal)
1473{
1474 PVM pVM = env->pVM;
1475 PCPUMCTX pCtx;
1476
1477 /*
1478 * When we're replaying invlpg instructions or restoring a saved
1479 * state we disable this path.
1480 */
1481 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
1482 return;
1483 Assert(pVM->rem.s.fInREM);
1484
1485 /*
1486 * The caller doesn't check cr4, so we have to do that for ourselves.
1487 */
1488 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1489 fGlobal = true;
1490 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1491
1492 /*
1493 * Update the control registers before calling PGMR3FlushTLB.
1494 */
1495 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1496 pCtx->cr0 = env->cr[0];
1497 pCtx->cr3 = env->cr[3];
1498 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1499 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
1500 pCtx->cr4 = env->cr[4];
1501
1502 /*
1503 * Let PGM do the rest.
1504 */
1505 PGMFlushTLB(pVM, env->cr[3], fGlobal);
1506}
1507
1508
1509/**
1510 * Called when any of the cr0, cr4 or efer registers is updated.
1511 *
1512 * @param env Pointer to the CPU environment.
1513 */
1514void remR3ChangeCpuMode(CPUState *env)
1515{
1516 int rc;
1517 PVM pVM = env->pVM;
1518 PCPUMCTX pCtx;
1519
1520 /*
1521 * When we're replaying loads or restoring a saved
1522 * state this path is disabled.
1523 */
1524 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
1525 return;
1526 Assert(pVM->rem.s.fInREM);
1527
1528 /*
1529 * Update the control registers before calling PGMChangeMode()
1530 * as it may need to map whatever cr3 is pointing to.
1531 */
1532 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1533 pCtx->cr0 = env->cr[0];
1534 pCtx->cr3 = env->cr[3];
1535 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1536 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
1537 pCtx->cr4 = env->cr[4];
1538
1539#ifdef TARGET_X86_64
1540 rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], env->efer);
1541 if (rc != VINF_SUCCESS)
1542 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], env->efer, rc);
1543#else
1544 rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], 0);
1545 if (rc != VINF_SUCCESS)
1546 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], 0LL, rc);
1547#endif
1548}
1549
1550
1551/**
1552 * Called from compiled code to run dma.
1553 *
1554 * @param env Pointer to the CPU environment.
1555 */
1556void remR3DmaRun(CPUState *env)
1557{
1558 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1559 PDMR3DmaRun(env->pVM);
1560 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1561}
1562
1563
1564/**
1565 * Called from compiled code to schedule pending timers in VMM
1566 *
1567 * @param env Pointer to the CPU environment.
1568 */
1569void remR3TimersRun(CPUState *env)
1570{
1571 LogFlow(("remR3TimersRun:\n"));
1572 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1573 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1574 TMR3TimerQueuesDo(env->pVM);
1575 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1576 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1577}
1578
1579
1580/**
1581 * Record trap occurance
1582 *
1583 * @returns VBox status code
1584 * @param env Pointer to the CPU environment.
1585 * @param uTrap Trap nr
1586 * @param uErrorCode Error code
1587 * @param pvNextEIP Next EIP
1588 */
1589int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1590{
1591 PVM pVM = env->pVM;
1592#ifdef VBOX_WITH_STATISTICS
1593 static STAMCOUNTER s_aStatTrap[255];
1594 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1595#endif
1596
1597#ifdef VBOX_WITH_STATISTICS
1598 if (uTrap < 255)
1599 {
1600 if (!s_aRegisters[uTrap])
1601 {
1602 char szStatName[64];
1603 s_aRegisters[uTrap] = true;
1604 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1605 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1606 }
1607 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1608 }
1609#endif
1610 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1611 if( uTrap < 0x20
1612 && (env->cr[0] & X86_CR0_PE)
1613 && !(env->eflags & X86_EFL_VM))
1614 {
1615#ifdef DEBUG
1616 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1617#endif
1618 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1619 {
1620 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1621 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1622 return VERR_REM_TOO_MANY_TRAPS;
1623 }
1624 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1625 pVM->rem.s.cPendingExceptions = 1;
1626 pVM->rem.s.uPendingException = uTrap;
1627 pVM->rem.s.uPendingExcptEIP = env->eip;
1628 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1629 }
1630 else
1631 {
1632 pVM->rem.s.cPendingExceptions = 0;
1633 pVM->rem.s.uPendingException = uTrap;
1634 pVM->rem.s.uPendingExcptEIP = env->eip;
1635 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1636 }
1637 return VINF_SUCCESS;
1638}
1639
1640
1641/*
1642 * Clear current active trap
1643 *
1644 * @param pVM VM Handle.
1645 */
1646void remR3TrapClear(PVM pVM)
1647{
1648 pVM->rem.s.cPendingExceptions = 0;
1649 pVM->rem.s.uPendingException = 0;
1650 pVM->rem.s.uPendingExcptEIP = 0;
1651 pVM->rem.s.uPendingExcptCR2 = 0;
1652}
1653
1654
1655/*
1656 * Record previous call instruction addresses
1657 *
1658 * @param env Pointer to the CPU environment.
1659 */
1660void remR3RecordCall(CPUState *env)
1661{
1662 CSAMR3RecordCallAddress(env->pVM, env->eip);
1663}
1664
1665
1666/**
1667 * Syncs the internal REM state with the VM.
1668 *
1669 * This must be called before REMR3Run() is invoked whenever when the REM
1670 * state is not up to date. Calling it several times in a row is not
1671 * permitted.
1672 *
1673 * @returns VBox status code.
1674 *
1675 * @param pVM VM Handle.
1676 *
1677 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1678 * no do this since the majority of the callers don't want any unnecessary of events
1679 * pending that would immediatly interrupt execution.
1680 */
1681REMR3DECL(int) REMR3State(PVM pVM)
1682{
1683 register const CPUMCTX *pCtx;
1684 register unsigned fFlags;
1685 bool fHiddenSelRegsValid;
1686 unsigned i;
1687 TRPMEVENT enmType;
1688 uint8_t u8TrapNo;
1689 int rc;
1690
1691 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1692 Log2(("REMR3State:\n"));
1693
1694 pCtx = pVM->rem.s.pCtx;
1695 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1696
1697 Assert(!pVM->rem.s.fInREM);
1698 pVM->rem.s.fInStateSync = true;
1699
1700 /*
1701 * If we have to flush TBs, do that immediately.
1702 */
1703 if (pVM->rem.s.fFlushTBs)
1704 {
1705 STAM_COUNTER_INC(&gStatFlushTBs);
1706 tb_flush(&pVM->rem.s.Env);
1707 pVM->rem.s.fFlushTBs = false;
1708 }
1709
1710 /*
1711 * Copy the registers which require no special handling.
1712 */
1713#ifdef TARGET_X86_64
1714 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1715 Assert(R_EAX == 0);
1716 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1717 Assert(R_ECX == 1);
1718 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1719 Assert(R_EDX == 2);
1720 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1721 Assert(R_EBX == 3);
1722 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1723 Assert(R_ESP == 4);
1724 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1725 Assert(R_EBP == 5);
1726 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1727 Assert(R_ESI == 6);
1728 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1729 Assert(R_EDI == 7);
1730 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1731 pVM->rem.s.Env.regs[8] = pCtx->r8;
1732 pVM->rem.s.Env.regs[9] = pCtx->r9;
1733 pVM->rem.s.Env.regs[10] = pCtx->r10;
1734 pVM->rem.s.Env.regs[11] = pCtx->r11;
1735 pVM->rem.s.Env.regs[12] = pCtx->r12;
1736 pVM->rem.s.Env.regs[13] = pCtx->r13;
1737 pVM->rem.s.Env.regs[14] = pCtx->r14;
1738 pVM->rem.s.Env.regs[15] = pCtx->r15;
1739
1740 pVM->rem.s.Env.eip = pCtx->rip;
1741
1742 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1743#else
1744 Assert(R_EAX == 0);
1745 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1746 Assert(R_ECX == 1);
1747 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1748 Assert(R_EDX == 2);
1749 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1750 Assert(R_EBX == 3);
1751 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1752 Assert(R_ESP == 4);
1753 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1754 Assert(R_EBP == 5);
1755 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1756 Assert(R_ESI == 6);
1757 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1758 Assert(R_EDI == 7);
1759 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1760 pVM->rem.s.Env.eip = pCtx->eip;
1761
1762 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1763#endif
1764
1765 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1766
1767 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1768 for (i=0;i<8;i++)
1769 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1770
1771 /*
1772 * Clear the halted hidden flag (the interrupt waking up the CPU can
1773 * have been dispatched in raw mode).
1774 */
1775 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1776
1777 /*
1778 * Replay invlpg?
1779 */
1780 if (pVM->rem.s.cInvalidatedPages)
1781 {
1782 RTUINT i;
1783
1784 pVM->rem.s.fIgnoreInvlPg = true;
1785 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1786 {
1787 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1788 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1789 }
1790 pVM->rem.s.fIgnoreInvlPg = false;
1791 pVM->rem.s.cInvalidatedPages = 0;
1792 }
1793
1794 /* Replay notification changes? */
1795 if (pVM->rem.s.cHandlerNotifications)
1796 REMR3ReplayHandlerNotifications(pVM);
1797
1798 /* Update MSRs; before CRx registers! */
1799 pVM->rem.s.Env.efer = pCtx->msrEFER;
1800 pVM->rem.s.Env.star = pCtx->msrSTAR;
1801 pVM->rem.s.Env.pat = pCtx->msrPAT;
1802#ifdef TARGET_X86_64
1803 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1804 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1805 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1806 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1807
1808 /* Update the internal long mode activate flag according to the new EFER value. */
1809 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1810 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1811 else
1812 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1813#endif
1814
1815 /*
1816 * Registers which are rarely changed and require special handling / order when changed.
1817 */
1818 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1819 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1820 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1821 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1822 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1823 {
1824 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1825 {
1826 pVM->rem.s.fIgnoreCR3Load = true;
1827 tlb_flush(&pVM->rem.s.Env, true);
1828 pVM->rem.s.fIgnoreCR3Load = false;
1829 }
1830
1831 /* CR4 before CR0! */
1832 if (fFlags & CPUM_CHANGED_CR4)
1833 {
1834 pVM->rem.s.fIgnoreCR3Load = true;
1835 pVM->rem.s.fIgnoreCpuMode = true;
1836 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1837 pVM->rem.s.fIgnoreCpuMode = false;
1838 pVM->rem.s.fIgnoreCR3Load = false;
1839 }
1840
1841 if (fFlags & CPUM_CHANGED_CR0)
1842 {
1843 pVM->rem.s.fIgnoreCR3Load = true;
1844 pVM->rem.s.fIgnoreCpuMode = true;
1845 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1846 pVM->rem.s.fIgnoreCpuMode = false;
1847 pVM->rem.s.fIgnoreCR3Load = false;
1848 }
1849
1850 if (fFlags & CPUM_CHANGED_CR3)
1851 {
1852 pVM->rem.s.fIgnoreCR3Load = true;
1853 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1854 pVM->rem.s.fIgnoreCR3Load = false;
1855 }
1856
1857 if (fFlags & CPUM_CHANGED_GDTR)
1858 {
1859 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1860 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1861 }
1862
1863 if (fFlags & CPUM_CHANGED_IDTR)
1864 {
1865 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1866 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1867 }
1868
1869 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1870 {
1871 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1872 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1873 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1874 }
1875
1876 if (fFlags & CPUM_CHANGED_LDTR)
1877 {
1878 if (fHiddenSelRegsValid)
1879 {
1880 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1881 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1882 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1883 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1884 }
1885 else
1886 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1887 }
1888
1889 if (fFlags & CPUM_CHANGED_CPUID)
1890 {
1891 uint32_t u32Dummy;
1892
1893 /*
1894 * Get the CPUID features.
1895 */
1896 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1897 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1898 }
1899
1900 /* Sync FPU state after CR4, CPUID and EFER (!). */
1901 if (fFlags & CPUM_CHANGED_FPU_REM)
1902 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1903 }
1904
1905 /*
1906 * Sync TR unconditionally to make life simpler.
1907 */
1908 pVM->rem.s.Env.tr.selector = pCtx->tr;
1909 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1910 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1911 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
1912 /* Note! do_interrupt will fault if the busy flag is still set... */
1913 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1914
1915 /*
1916 * Update selector registers.
1917 * This must be done *after* we've synced gdt, ldt and crX registers
1918 * since we're reading the GDT/LDT om sync_seg. This will happen with
1919 * saved state which takes a quick dip into rawmode for instance.
1920 */
1921 /*
1922 * Stack; Note first check this one as the CPL might have changed. The
1923 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1924 */
1925
1926 if (fHiddenSelRegsValid)
1927 {
1928 /* The hidden selector registers are valid in the CPU context. */
1929 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1930
1931 /* Set current CPL */
1932 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1933
1934 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
1935 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
1936 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
1937 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
1938 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
1939 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
1940 }
1941 else
1942 {
1943 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
1944 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
1945 {
1946 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
1947
1948 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1949 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
1950#ifdef VBOX_WITH_STATISTICS
1951 if (pVM->rem.s.Env.segs[R_SS].newselector)
1952 {
1953 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
1954 }
1955#endif
1956 }
1957 else
1958 pVM->rem.s.Env.segs[R_SS].newselector = 0;
1959
1960 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
1961 {
1962 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
1963 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
1964#ifdef VBOX_WITH_STATISTICS
1965 if (pVM->rem.s.Env.segs[R_ES].newselector)
1966 {
1967 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
1968 }
1969#endif
1970 }
1971 else
1972 pVM->rem.s.Env.segs[R_ES].newselector = 0;
1973
1974 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
1975 {
1976 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
1977 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
1978#ifdef VBOX_WITH_STATISTICS
1979 if (pVM->rem.s.Env.segs[R_CS].newselector)
1980 {
1981 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
1982 }
1983#endif
1984 }
1985 else
1986 pVM->rem.s.Env.segs[R_CS].newselector = 0;
1987
1988 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
1989 {
1990 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
1991 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
1992#ifdef VBOX_WITH_STATISTICS
1993 if (pVM->rem.s.Env.segs[R_DS].newselector)
1994 {
1995 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
1996 }
1997#endif
1998 }
1999 else
2000 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2001
2002 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2003 * be the same but not the base/limit. */
2004 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2005 {
2006 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2007 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2008#ifdef VBOX_WITH_STATISTICS
2009 if (pVM->rem.s.Env.segs[R_FS].newselector)
2010 {
2011 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2012 }
2013#endif
2014 }
2015 else
2016 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2017
2018 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2019 {
2020 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2021 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2022#ifdef VBOX_WITH_STATISTICS
2023 if (pVM->rem.s.Env.segs[R_GS].newselector)
2024 {
2025 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2026 }
2027#endif
2028 }
2029 else
2030 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2031 }
2032
2033 /*
2034 * Check for traps.
2035 */
2036 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2037 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
2038 if (RT_SUCCESS(rc))
2039 {
2040#ifdef DEBUG
2041 if (u8TrapNo == 0x80)
2042 {
2043 remR3DumpLnxSyscall(pVM);
2044 remR3DumpOBsdSyscall(pVM);
2045 }
2046#endif
2047
2048 pVM->rem.s.Env.exception_index = u8TrapNo;
2049 if (enmType != TRPM_SOFTWARE_INT)
2050 {
2051 pVM->rem.s.Env.exception_is_int = 0;
2052 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2053 }
2054 else
2055 {
2056 /*
2057 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2058 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2059 * for int03 and into.
2060 */
2061 pVM->rem.s.Env.exception_is_int = 1;
2062 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2063 /* int 3 may be generated by one-byte 0xcc */
2064 if (u8TrapNo == 3)
2065 {
2066 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2067 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2068 }
2069 /* int 4 may be generated by one-byte 0xce */
2070 else if (u8TrapNo == 4)
2071 {
2072 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2073 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2074 }
2075 }
2076
2077 /* get error code and cr2 if needed. */
2078 switch (u8TrapNo)
2079 {
2080 case 0x0e:
2081 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
2082 /* fallthru */
2083 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2084 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
2085 break;
2086
2087 case 0x11: case 0x08:
2088 default:
2089 pVM->rem.s.Env.error_code = 0;
2090 break;
2091 }
2092
2093 /*
2094 * We can now reset the active trap since the recompiler is gonna have a go at it.
2095 */
2096 rc = TRPMResetTrap(pVM);
2097 AssertRC(rc);
2098 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2099 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2100 }
2101
2102 /*
2103 * Clear old interrupt request flags; Check for pending hardware interrupts.
2104 * (See @remark for why we don't check for other FFs.)
2105 */
2106 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2107 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2108 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2109 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2110
2111 /*
2112 * We're now in REM mode.
2113 */
2114 pVM->rem.s.fInREM = true;
2115 pVM->rem.s.fInStateSync = false;
2116 pVM->rem.s.cCanExecuteRaw = 0;
2117 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2118 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2119 return VINF_SUCCESS;
2120}
2121
2122
2123/**
 * Syncs back changes in the REM state to the VM state.
2125 *
2126 * This must be called after invoking REMR3Run().
2127 * Calling it several times in a row is not permitted.
2128 *
2129 * @returns VBox status code.
2130 *
2131 * @param pVM VM Handle.
2132 */
REMR3DECL(int) REMR3StateBack(PVM pVM)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////  dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count the selectors QEmu couldn't load in raw/rem and left flagged via 'newselector'. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A CR4.VME change requires a TSS resync, so raise the force flag before updating cr4. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDTR/IDTR: flag a resync only when the base actually moved. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    /* LDTR: 0xF0FF keeps the attribute byte plus the G/D/L/AVL bits and drops
       the limit 19:16 bits QEmu carries in 'flags'. */
    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Re-apply the busy bit that was stripped going into REM (see REMR3State). */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base     = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit    = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u      = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base     = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit    = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u      = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base     = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit    = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u      = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base     = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit    = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u      = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base     = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit    = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u      = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base     = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit    = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u      = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs       = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip      = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp      = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER           = pVM->rem.s.Env.efer;
    pCtx->msrSTAR           = pVM->rem.s.Env.star;
    pCtx->msrPAT            = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR          = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR          = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK         = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE   = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        /* Forward error code / fault address for the exceptions that have them. */
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVM, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    pVM->rem.s.fInREM    = false;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2368
2369
2370/**
2371 * This is called by the disassembler when it wants to update the cpu state
2372 * before for instance doing a register dump.
2373 */
2374static void remR3StateUpdate(PVM pVM)
2375{
2376 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2377 unsigned i;
2378
2379 Assert(pVM->rem.s.fInREM);
2380
2381 /*
2382 * Copy back the registers.
2383 * This is done in the order they are declared in the CPUMCTX structure.
2384 */
2385
2386 /** @todo FOP */
2387 /** @todo FPUIP */
2388 /** @todo CS */
2389 /** @todo FPUDP */
2390 /** @todo DS */
2391 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2392 pCtx->fpu.MXCSR = 0;
2393 pCtx->fpu.MXCSR_MASK = 0;
2394
2395 /** @todo check if FPU/XMM was actually used in the recompiler */
2396 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2397//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2398
2399#ifdef TARGET_X86_64
2400 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2401 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2402 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2403 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2404 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2405 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2406 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2407 pCtx->r8 = pVM->rem.s.Env.regs[8];
2408 pCtx->r9 = pVM->rem.s.Env.regs[9];
2409 pCtx->r10 = pVM->rem.s.Env.regs[10];
2410 pCtx->r11 = pVM->rem.s.Env.regs[11];
2411 pCtx->r12 = pVM->rem.s.Env.regs[12];
2412 pCtx->r13 = pVM->rem.s.Env.regs[13];
2413 pCtx->r14 = pVM->rem.s.Env.regs[14];
2414 pCtx->r15 = pVM->rem.s.Env.regs[15];
2415
2416 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2417#else
2418 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2419 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2420 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2421 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2422 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2423 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2424 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2425
2426 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2427#endif
2428
2429 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2430
2431 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2432 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2433 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2434 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2435 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2436
2437#ifdef TARGET_X86_64
2438 pCtx->rip = pVM->rem.s.Env.eip;
2439 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2440#else
2441 pCtx->eip = pVM->rem.s.Env.eip;
2442 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2443#endif
2444
2445 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2446 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2447 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2448 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2449 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2450 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2451
2452 for (i = 0; i < 8; i++)
2453 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2454
2455 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2456 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2457 {
2458 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2459 STAM_COUNTER_INC(&gStatREMGDTChange);
2460 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2461 }
2462
2463 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2464 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2465 {
2466 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2467 STAM_COUNTER_INC(&gStatREMIDTChange);
2468 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2469 }
2470
2471 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2472 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2473 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2474 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2475 {
2476 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2477 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2478 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2479 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2480 STAM_COUNTER_INC(&gStatREMLDTRChange);
2481 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2482 }
2483
2484 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2485 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2486 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2487 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2488 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2489 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2490 : 0) )
2491 {
2492 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2493 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2494 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2495 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2496 pCtx->tr = pVM->rem.s.Env.tr.selector;
2497 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2498 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2499 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2500 if (pCtx->trHid.Attr.u)
2501 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2502 STAM_COUNTER_INC(&gStatREMTRChange);
2503 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2504 }
2505
2506 /** @todo These values could still be out of sync! */
2507 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2508 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2509 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2510 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2511
2512 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2513 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2514 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2515
2516 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2517 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2518 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2519
2520 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2521 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2522 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2523
2524 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2525 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2526 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2527
2528 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2529 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2530 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2531
2532 /* Sysenter MSR */
2533 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2534 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2535 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2536
2537 /* System MSRs. */
2538 pCtx->msrEFER = pVM->rem.s.Env.efer;
2539 pCtx->msrSTAR = pVM->rem.s.Env.star;
2540 pCtx->msrPAT = pVM->rem.s.Env.pat;
2541#ifdef TARGET_X86_64
2542 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2543 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2544 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2545 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2546#endif
2547
2548}
2549
2550
2551/**
2552 * Update the VMM state information if we're currently in REM.
2553 *
2554 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2555 * we're currently executing in REM and the VMM state is invalid. This method will of
2556 * course check that we're executing in REM before syncing any data over to the VMM.
2557 *
2558 * @param pVM The VM handle.
2559 */
2560REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2561{
2562 if (pVM->rem.s.fInREM)
2563 remR3StateUpdate(pVM);
2564}
2565
2566
2567#undef LOG_GROUP
2568#define LOG_GROUP LOG_GROUP_REM
2569
2570
2571/**
2572 * Notify the recompiler about Address Gate 20 state change.
2573 *
2574 * This notification is required since A20 gate changes are
2575 * initialized from a device driver and the VM might just as
2576 * well be in REM mode as in RAW mode.
2577 *
2578 * @param pVM VM handle.
2579 * @param fEnable True if the gate should be enabled.
2580 * False if the gate should be disabled.
2581 */
2582REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2583{
2584 bool fSaved;
2585
2586 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2587 VM_ASSERT_EMT(pVM);
2588
2589 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2590 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2591
2592 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2593
2594 pVM->rem.s.fIgnoreAll = fSaved;
2595}
2596
2597
2598/**
2599 * Replays the invalidated recorded pages.
2600 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2601 *
2602 * @param pVM VM handle.
2603 */
2604REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2605{
2606 RTUINT i;
2607
2608 VM_ASSERT_EMT(pVM);
2609
2610 /*
2611 * Sync the required registers.
2612 */
2613 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2614 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2615 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2616 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2617
2618 /*
2619 * Replay the flushes.
2620 */
2621 pVM->rem.s.fIgnoreInvlPg = true;
2622 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2623 {
2624 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2625 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2626 }
2627 pVM->rem.s.fIgnoreInvlPg = false;
2628 pVM->rem.s.cInvalidatedPages = 0;
2629}
2630
2631
2632/**
2633 * Replays the handler notification changes
2634 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2635 *
2636 * @param pVM VM handle.
2637 */
2638REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2639{
2640 /*
2641 * Replay the flushes.
2642 */
2643 RTUINT i;
2644 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2645
2646 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2647 VM_ASSERT_EMT(pVM);
2648
2649 pVM->rem.s.cHandlerNotifications = 0;
2650 for (i = 0; i < c; i++)
2651 {
2652 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2653 switch (pRec->enmKind)
2654 {
2655 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2656 REMR3NotifyHandlerPhysicalRegister(pVM,
2657 pRec->u.PhysicalRegister.enmType,
2658 pRec->u.PhysicalRegister.GCPhys,
2659 pRec->u.PhysicalRegister.cb,
2660 pRec->u.PhysicalRegister.fHasHCHandler);
2661 break;
2662
2663 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2664 REMR3NotifyHandlerPhysicalDeregister(pVM,
2665 pRec->u.PhysicalDeregister.enmType,
2666 pRec->u.PhysicalDeregister.GCPhys,
2667 pRec->u.PhysicalDeregister.cb,
2668 pRec->u.PhysicalDeregister.fHasHCHandler,
2669 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2670 break;
2671
2672 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2673 REMR3NotifyHandlerPhysicalModify(pVM,
2674 pRec->u.PhysicalModify.enmType,
2675 pRec->u.PhysicalModify.GCPhysOld,
2676 pRec->u.PhysicalModify.GCPhysNew,
2677 pRec->u.PhysicalModify.cb,
2678 pRec->u.PhysicalModify.fHasHCHandler,
2679 pRec->u.PhysicalModify.fRestoreAsRAM);
2680 break;
2681
2682 default:
2683 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2684 break;
2685 }
2686 }
2687 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2688}
2689
2690
2691/**
2692 * Notify REM about changed code page.
2693 *
2694 * @returns VBox status code.
2695 * @param pVM VM handle.
2696 * @param pvCodePage Code page address
2697 */
2698REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
2699{
2700#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2701 int rc;
2702 RTGCPHYS PhysGC;
2703 uint64_t flags;
2704
2705 VM_ASSERT_EMT(pVM);
2706
2707 /*
2708 * Get the physical page address.
2709 */
2710 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2711 if (rc == VINF_SUCCESS)
2712 {
2713 /*
2714 * Sync the required registers and flush the whole page.
2715 * (Easier to do the whole page than notifying it about each physical
2716 * byte that was changed.
2717 */
2718 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2719 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2720 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2721 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2722
2723 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2724 }
2725#endif
2726 return VINF_SUCCESS;
2727}
2728
2729
2730/**
2731 * Notification about a successful MMR3PhysRegister() call.
2732 *
2733 * @param pVM VM handle.
2734 * @param GCPhys The physical address the RAM.
2735 * @param cb Size of the memory.
2736 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2737 */
2738REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2739{
2740 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2741 VM_ASSERT_EMT(pVM);
2742
2743 /*
2744 * Validate input - we trust the caller.
2745 */
2746 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2747 Assert(cb);
2748 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2749#ifdef VBOX_WITH_NEW_PHYS_CODE
2750 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2751#endif
2752
2753 /*
2754 * Base ram? Update GCPhysLastRam.
2755 */
2756#ifdef VBOX_WITH_NEW_PHYS_CODE
2757 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2758#else
2759 if (!GCPhys)
2760#endif
2761 {
2762 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2763 {
2764 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2765 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2766 }
2767 }
2768
2769 /*
2770 * Register the ram.
2771 */
2772 Assert(!pVM->rem.s.fIgnoreAll);
2773 pVM->rem.s.fIgnoreAll = true;
2774
2775#ifdef VBOX_WITH_NEW_PHYS_CODE
2776 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2777#else
2778 if (!GCPhys)
2779 cpu_register_physical_memory(GCPhys, cb, GCPhys | IO_MEM_RAM_MISSING);
2780 else
2781 {
2782 if (fFlags & MM_RAM_FLAGS_RESERVED)
2783 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2784 else
2785 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2786 }
2787#endif
2788 Assert(pVM->rem.s.fIgnoreAll);
2789 pVM->rem.s.fIgnoreAll = false;
2790}
2791
2792#ifndef VBOX_WITH_NEW_PHYS_CODE
2793
2794/**
2795 * Notification about a successful PGMR3PhysRegisterChunk() call.
2796 *
2797 * @param pVM VM handle.
2798 * @param GCPhys The physical address the RAM.
2799 * @param cb Size of the memory.
2800 * @param pvRam The HC address of the RAM.
2801 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2802 */
2803REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
2804{
2805 Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%RGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
2806 VM_ASSERT_EMT(pVM);
2807
2808 /*
2809 * Validate input - we trust the caller.
2810 */
2811 Assert(pvRam);
2812 Assert(RT_ALIGN(pvRam, PAGE_SIZE) == pvRam);
2813 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2814 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
2815 Assert(fFlags == 0 /* normal RAM */);
2816 Assert(!pVM->rem.s.fIgnoreAll);
2817 pVM->rem.s.fIgnoreAll = true;
2818 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2819 Assert(pVM->rem.s.fIgnoreAll);
2820 pVM->rem.s.fIgnoreAll = false;
2821}
2822
2823
2824/**
2825 * Grows dynamically allocated guest RAM.
2826 * Will raise a fatal error if the operation fails.
2827 *
2828 * @param physaddr The physical address.
2829 */
2830void remR3GrowDynRange(unsigned long physaddr) /** @todo Needs fixing for MSC... */
2831{
2832 int rc;
2833 PVM pVM = cpu_single_env->pVM;
2834 const RTGCPHYS GCPhys = physaddr;
2835
2836 LogFlow(("remR3GrowDynRange %RGp\n", (RTGCPTR)physaddr));
2837 rc = PGM3PhysGrowRange(pVM, &GCPhys);
2838 if (RT_SUCCESS(rc))
2839 return;
2840
2841 LogRel(("\nUnable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr));
2842 cpu_abort(cpu_single_env, "Unable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr);
2843 AssertFatalFailed();
2844}
2845
2846#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2847
2848/**
2849 * Notification about a successful MMR3PhysRomRegister() call.
2850 *
2851 * @param pVM VM handle.
2852 * @param GCPhys The physical address of the ROM.
2853 * @param cb The size of the ROM.
2854 * @param pvCopy Pointer to the ROM copy.
2855 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2856 * This function will be called when ever the protection of the
2857 * shadow ROM changes (at reset and end of POST).
2858 */
2859REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2860{
2861 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2862 VM_ASSERT_EMT(pVM);
2863
2864 /*
2865 * Validate input - we trust the caller.
2866 */
2867 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2868 Assert(cb);
2869 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2870
2871 /*
2872 * Register the rom.
2873 */
2874 Assert(!pVM->rem.s.fIgnoreAll);
2875 pVM->rem.s.fIgnoreAll = true;
2876
2877 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2878
2879 Assert(pVM->rem.s.fIgnoreAll);
2880 pVM->rem.s.fIgnoreAll = false;
2881}
2882
2883
2884/**
2885 * Notification about a successful memory deregistration or reservation.
2886 *
2887 * @param pVM VM Handle.
2888 * @param GCPhys Start physical address.
2889 * @param cb The size of the range.
2890 */
2891REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2892{
2893 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2894 VM_ASSERT_EMT(pVM);
2895
2896 /*
2897 * Validate input - we trust the caller.
2898 */
2899 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2900 Assert(cb);
2901 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2902
2903 /*
2904 * Unassigning the memory.
2905 */
2906 Assert(!pVM->rem.s.fIgnoreAll);
2907 pVM->rem.s.fIgnoreAll = true;
2908
2909 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2910
2911 Assert(pVM->rem.s.fIgnoreAll);
2912 pVM->rem.s.fIgnoreAll = false;
2913}
2914
2915
2916/**
2917 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2918 *
2919 * @param pVM VM Handle.
2920 * @param enmType Handler type.
2921 * @param GCPhys Handler range address.
2922 * @param cb Size of the handler range.
2923 * @param fHasHCHandler Set if the handler has a HC callback function.
2924 *
2925 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2926 * Handler memory type to memory which has no HC handler.
2927 */
2928REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2929{
2930 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
2931 enmType, GCPhys, cb, fHasHCHandler));
2932 VM_ASSERT_EMT(pVM);
2933 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2934 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2935
2936 if (pVM->rem.s.cHandlerNotifications)
2937 REMR3ReplayHandlerNotifications(pVM);
2938
2939 Assert(!pVM->rem.s.fIgnoreAll);
2940 pVM->rem.s.fIgnoreAll = true;
2941
2942 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2943 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
2944 else if (fHasHCHandler)
2945 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
2946
2947 Assert(pVM->rem.s.fIgnoreAll);
2948 pVM->rem.s.fIgnoreAll = false;
2949}
2950
2951
2952/**
2953 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
2954 *
2955 * @param pVM VM Handle.
2956 * @param enmType Handler type.
2957 * @param GCPhys Handler range address.
2958 * @param cb Size of the handler range.
2959 * @param fHasHCHandler Set if the handler has a HC callback function.
2960 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2961 */
2962REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2963{
2964 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
2965 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
2966 VM_ASSERT_EMT(pVM);
2967
2968 if (pVM->rem.s.cHandlerNotifications)
2969 REMR3ReplayHandlerNotifications(pVM);
2970
2971 Assert(!pVM->rem.s.fIgnoreAll);
2972 pVM->rem.s.fIgnoreAll = true;
2973
2974/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
2975 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2976 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2977 else if (fHasHCHandler)
2978 {
2979 if (!fRestoreAsRAM)
2980 {
2981 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
2982 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2983 }
2984 else
2985 {
2986 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2987 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2988 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2989 }
2990 }
2991
2992 Assert(pVM->rem.s.fIgnoreAll);
2993 pVM->rem.s.fIgnoreAll = false;
2994}
2995
2996
2997/**
2998 * Notification about a successful PGMR3HandlerPhysicalModify() call.
2999 *
3000 * @param pVM VM Handle.
3001 * @param enmType Handler type.
3002 * @param GCPhysOld Old handler range address.
3003 * @param GCPhysNew New handler range address.
3004 * @param cb Size of the handler range.
3005 * @param fHasHCHandler Set if the handler has a HC callback function.
3006 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3007 */
3008REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3009{
3010 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3011 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3012 VM_ASSERT_EMT(pVM);
3013 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3014
3015 if (pVM->rem.s.cHandlerNotifications)
3016 REMR3ReplayHandlerNotifications(pVM);
3017
3018 if (fHasHCHandler)
3019 {
3020 Assert(!pVM->rem.s.fIgnoreAll);
3021 pVM->rem.s.fIgnoreAll = true;
3022
3023 /*
3024 * Reset the old page.
3025 */
3026 if (!fRestoreAsRAM)
3027 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3028 else
3029 {
3030 /* This is not perfect, but it'll do for PD monitoring... */
3031 Assert(cb == PAGE_SIZE);
3032 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3033 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3034 }
3035
3036 /*
3037 * Update the new page.
3038 */
3039 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3040 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3041 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3042
3043 Assert(pVM->rem.s.fIgnoreAll);
3044 pVM->rem.s.fIgnoreAll = false;
3045 }
3046}
3047
3048
3049/**
3050 * Checks if we're handling access to this page or not.
3051 *
3052 * @returns true if we're trapping access.
3053 * @returns false if we aren't.
3054 * @param pVM The VM handle.
3055 * @param GCPhys The physical address.
3056 *
3057 * @remark This function will only work correctly in VBOX_STRICT builds!
3058 */
3059REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3060{
3061#ifdef VBOX_STRICT
3062 unsigned long off;
3063 if (pVM->rem.s.cHandlerNotifications)
3064 REMR3ReplayHandlerNotifications(pVM);
3065
3066 off = get_phys_page_offset(GCPhys);
3067 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3068 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3069 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3070#else
3071 return false;
3072#endif
3073}
3074
3075
3076/**
3077 * Deals with a rare case in get_phys_addr_code where the code
3078 * is being monitored.
3079 *
3080 * It could also be an MMIO page, in which case we will raise a fatal error.
3081 *
3082 * @returns The physical address corresponding to addr.
3083 * @param env The cpu environment.
3084 * @param addr The virtual address.
3085 * @param pTLBEntry The TLB entry.
3086 */
3087target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3088 target_ulong addr,
3089 CPUTLBEntry* pTLBEntry,
3090 target_phys_addr_t ioTLBEntry)
3091{
3092 PVM pVM = env->pVM;
3093
3094 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3095 {
3096 /* If code memory is being monitored, appropriate IOTLB entry will have
3097 handler IO type, and addend will provide real physical address, no
3098 matter if we store VA in TLB or not, as handlers are always passed PA */
3099 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3100 return ret;
3101 }
3102 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3103 "*** handlers\n",
3104 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3105 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3106 LogRel(("*** mmio\n"));
3107 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3108 LogRel(("*** phys\n"));
3109 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3110 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3111 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3112 AssertFatalFailed();
3113}
3114
3115/**
3116 * Read guest RAM and ROM.
3117 *
3118 * @param SrcGCPhys The source address (guest physical).
3119 * @param pvDst The destination address.
3120 * @param cb Number of bytes
3121 */
3122void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3123{
3124 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3125 VBOX_CHECK_ADDR(SrcGCPhys);
3126 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3127#ifdef VBOX_DEBUG_PHYS
3128 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3129#endif
3130 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3131}
3132
3133
3134/**
3135 * Read guest RAM and ROM, unsigned 8-bit.
3136 *
3137 * @param SrcGCPhys The source address (guest physical).
3138 */
3139RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3140{
3141 uint8_t val;
3142 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3143 VBOX_CHECK_ADDR(SrcGCPhys);
3144 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3145 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3146#ifdef VBOX_DEBUG_PHYS
3147 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3148#endif
3149 return val;
3150}
3151
3152
3153/**
3154 * Read guest RAM and ROM, signed 8-bit.
3155 *
3156 * @param SrcGCPhys The source address (guest physical).
3157 */
3158RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3159{
3160 int8_t val;
3161 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3162 VBOX_CHECK_ADDR(SrcGCPhys);
3163 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3164 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3165#ifdef VBOX_DEBUG_PHYS
3166 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3167#endif
3168 return val;
3169}
3170
3171
3172/**
3173 * Read guest RAM and ROM, unsigned 16-bit.
3174 *
3175 * @param SrcGCPhys The source address (guest physical).
3176 */
3177RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3178{
3179 uint16_t val;
3180 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3181 VBOX_CHECK_ADDR(SrcGCPhys);
3182 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3183 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3184#ifdef VBOX_DEBUG_PHYS
3185 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3186#endif
3187 return val;
3188}
3189
3190
3191/**
3192 * Read guest RAM and ROM, signed 16-bit.
3193 *
3194 * @param SrcGCPhys The source address (guest physical).
3195 */
3196RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3197{
3198 int16_t val;
3199 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3200 VBOX_CHECK_ADDR(SrcGCPhys);
3201 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3202 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3203#ifdef VBOX_DEBUG_PHYS
3204 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3205#endif
3206 return val;
3207}
3208
3209
3210/**
3211 * Read guest RAM and ROM, unsigned 32-bit.
3212 *
3213 * @param SrcGCPhys The source address (guest physical).
3214 */
3215RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3216{
3217 uint32_t val;
3218 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3219 VBOX_CHECK_ADDR(SrcGCPhys);
3220 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3221 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3222#ifdef VBOX_DEBUG_PHYS
3223 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3224#endif
3225 return val;
3226}
3227
3228
3229/**
3230 * Read guest RAM and ROM, signed 32-bit.
3231 *
3232 * @param SrcGCPhys The source address (guest physical).
3233 */
3234RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3235{
3236 int32_t val;
3237 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3238 VBOX_CHECK_ADDR(SrcGCPhys);
3239 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3240 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3241#ifdef VBOX_DEBUG_PHYS
3242 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3243#endif
3244 return val;
3245}
3246
3247
3248/**
3249 * Read guest RAM and ROM, unsigned 64-bit.
3250 *
3251 * @param SrcGCPhys The source address (guest physical).
3252 */
3253uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3254{
3255 uint64_t val;
3256 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3257 VBOX_CHECK_ADDR(SrcGCPhys);
3258 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3259 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3260#ifdef VBOX_DEBUG_PHYS
3261 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3262#endif
3263 return val;
3264}
3265
3266
3267/**
3268 * Read guest RAM and ROM, signed 64-bit.
3269 *
3270 * @param SrcGCPhys The source address (guest physical).
3271 */
3272int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3273{
3274 int64_t val;
3275 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3276 VBOX_CHECK_ADDR(SrcGCPhys);
3277 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3278 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3279#ifdef VBOX_DEBUG_PHYS
3280 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3281#endif
3282 return val;
3283}
3284
3285
3286/**
3287 * Write guest RAM.
3288 *
3289 * @param DstGCPhys The destination address (guest physical).
3290 * @param pvSrc The source address.
3291 * @param cb Number of bytes to write
3292 */
3293void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3294{
3295 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3296 VBOX_CHECK_ADDR(DstGCPhys);
3297 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3298 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3299#ifdef VBOX_DEBUG_PHYS
3300 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3301#endif
3302}
3303
3304
3305/**
3306 * Write guest RAM, unsigned 8-bit.
3307 *
3308 * @param DstGCPhys The destination address (guest physical).
3309 * @param val Value
3310 */
3311void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3312{
3313 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3314 VBOX_CHECK_ADDR(DstGCPhys);
3315 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3316 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3317#ifdef VBOX_DEBUG_PHYS
3318 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3319#endif
3320}
3321
3322
3323/**
3324 * Write guest RAM, unsigned 8-bit.
3325 *
3326 * @param DstGCPhys The destination address (guest physical).
3327 * @param val Value
3328 */
3329void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3330{
3331 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3332 VBOX_CHECK_ADDR(DstGCPhys);
3333 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3334 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3335#ifdef VBOX_DEBUG_PHYS
3336 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3337#endif
3338}
3339
3340
3341/**
3342 * Write guest RAM, unsigned 32-bit.
3343 *
3344 * @param DstGCPhys The destination address (guest physical).
3345 * @param val Value
3346 */
3347void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3348{
3349 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3350 VBOX_CHECK_ADDR(DstGCPhys);
3351 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3352 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3353#ifdef VBOX_DEBUG_PHYS
3354 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3355#endif
3356}
3357
3358
3359/**
3360 * Write guest RAM, unsigned 64-bit.
3361 *
3362 * @param DstGCPhys The destination address (guest physical).
3363 * @param val Value
3364 */
3365void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3366{
3367 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3368 VBOX_CHECK_ADDR(DstGCPhys);
3369 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3370 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3371#ifdef VBOX_DEBUG_PHYS
3372 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3373#endif
3374}
3375
3376#undef LOG_GROUP
3377#define LOG_GROUP LOG_GROUP_REM_MMIO
3378
/** Read MMIO memory, 8-bit access via IOM.  @returns the byte read (zero on error). */
static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
    return u32;
}
3388
/** Read MMIO memory, 16-bit access via IOM.  @returns the word read (zero on error). */
static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
    return u32;
}
3398
/** Read MMIO memory, 32-bit access via IOM.  @returns the dword read (zero on error). */
static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
    return u32;
}
3408
/** Write to MMIO memory, 8-bit access via IOM (failures only asserted, not propagated). */
static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3417
/** Write to MMIO memory, 16-bit access via IOM (failures only asserted, not propagated). */
static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3426
/** Write to MMIO memory, 32-bit access via IOM (failures only asserted, not propagated). */
static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3435
3436
3437#undef LOG_GROUP
3438#define LOG_GROUP LOG_GROUP_REM_HANDLER
3439
3440/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3441
/** Handler-memory 8-bit read: goes through PGM so registered access handlers fire. */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint8_t u8;
    Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
    return u8;
}
3449
/** Handler-memory 16-bit read: goes through PGM so registered access handlers fire. */
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint16_t u16;
    Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
    return u16;
}
3457
/** Handler-memory 32-bit read: goes through PGM so registered access handlers fire. */
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32;
    Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
    return u32;
}
3465
/** Handler-memory 8-bit write via PGM.  Writes the low byte of u32
    (relies on little-endian layout of the uint32_t holding the value). */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3471
/** Handler-memory 16-bit write via PGM.  Writes the low word of u32
    (relies on little-endian layout of the uint32_t holding the value). */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3477
/** Handler-memory 32-bit write via PGM. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3483
3484/* -+- disassembly -+- */
3485
3486#undef LOG_GROUP
3487#define LOG_GROUP LOG_GROUP_REM_DISAS
3488
3489
3490/**
3491 * Enables or disables singled stepped disassembly.
3492 *
3493 * @returns VBox status code.
3494 * @param pVM VM handle.
3495 * @param fEnable To enable set this flag, to disable clear it.
3496 */
3497static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3498{
3499 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3500 VM_ASSERT_EMT(pVM);
3501
3502 if (fEnable)
3503 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3504 else
3505 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3506 return VINF_SUCCESS;
3507}
3508
3509
3510/**
3511 * Enables or disables singled stepped disassembly.
3512 *
3513 * @returns VBox status code.
3514 * @param pVM VM handle.
3515 * @param fEnable To enable set this flag, to disable clear it.
3516 */
3517REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3518{
3519 PVMREQ pReq;
3520 int rc;
3521
3522 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3523 if (VM_IS_EMT(pVM))
3524 return remR3DisasEnableStepping(pVM, fEnable);
3525
3526 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3527 AssertRC(rc);
3528 if (RT_SUCCESS(rc))
3529 rc = pReq->iStatus;
3530 VMR3ReqFree(pReq);
3531 return rc;
3532}
3533
3534
3535#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3536/**
3537 * External Debugger Command: .remstep [on|off|1|0]
3538 */
3539static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3540{
3541 bool fEnable;
3542 int rc;
3543
3544 /* print status */
3545 if (cArgs == 0)
3546 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
3547 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3548
3549 /* convert the argument and change the mode. */
3550 rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3551 if (RT_FAILURE(rc))
3552 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
3553 rc = REMR3DisasEnableStepping(pVM, fEnable);
3554 if (RT_FAILURE(rc))
3555 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
3556 return rc;
3557}
3558#endif
3559
3560
3561/**
3562 * Disassembles one instruction and prints it to the log.
3563 *
3564 * @returns Success indicator.
3565 * @param env Pointer to the recompiler CPU structure.
3566 * @param f32BitCode Indicates that whether or not the code should
3567 * be disassembled as 16 or 32 bit. If -1 the CS
3568 * selector will be inspected.
3569 * @param pszPrefix
3570 */
3571bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3572{
3573 PVM pVM = env->pVM;
3574 const bool fLog = LogIsEnabled();
3575 const bool fLog2 = LogIs2Enabled();
3576 int rc = VINF_SUCCESS;
3577
3578 /*
3579 * Don't bother if there ain't any log output to do.
3580 */
3581 if (!fLog && !fLog2)
3582 return true;
3583
3584 /*
3585 * Update the state so DBGF reads the correct register values.
3586 */
3587 remR3StateUpdate(pVM);
3588
3589 /*
3590 * Log registers if requested.
3591 */
3592 if (!fLog2)
3593 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3594
3595 /*
3596 * Disassemble to log.
3597 */
3598 if (fLog)
3599 rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);
3600
3601 return RT_SUCCESS(rc);
3602}
3603
3604
3605/**
3606 * Disassemble recompiled code.
3607 *
3608 * @param phFileIgnored Ignored, logfile usually.
3609 * @param pvCode Pointer to the code block.
3610 * @param cb Size of the code block.
3611 */
3612void disas(FILE *phFile, void *pvCode, unsigned long cb)
3613{
3614#ifdef DEBUG_TMP_LOGGING
3615# define DISAS_PRINTF(x...) fprintf(phFile, x)
3616#else
3617# define DISAS_PRINTF(x...) RTLogPrintf(x)
3618 if (LogIs2Enabled())
3619#endif
3620 {
3621 unsigned off = 0;
3622 char szOutput[256];
3623 DISCPUSTATE Cpu;
3624
3625 memset(&Cpu, 0, sizeof(Cpu));
3626#ifdef RT_ARCH_X86
3627 Cpu.mode = CPUMODE_32BIT;
3628#else
3629 Cpu.mode = CPUMODE_64BIT;
3630#endif
3631
3632 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3633 while (off < cb)
3634 {
3635 uint32_t cbInstr;
3636 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3637 DISAS_PRINTF("%s", szOutput);
3638 else
3639 {
3640 DISAS_PRINTF("disas error\n");
3641 cbInstr = 1;
3642#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3643 break;
3644#endif
3645 }
3646 off += cbInstr;
3647 }
3648 }
3649
3650#undef DISAS_PRINTF
3651}
3652
3653
3654/**
3655 * Disassemble guest code.
3656 *
3657 * @param phFileIgnored Ignored, logfile usually.
3658 * @param uCode The guest address of the code to disassemble. (flat?)
3659 * @param cb Number of bytes to disassemble.
3660 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3661 */
3662void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3663{
3664#ifdef DEBUG_TMP_LOGGING
3665# define DISAS_PRINTF(x...) fprintf(phFile, x)
3666#else
3667# define DISAS_PRINTF(x...) RTLogPrintf(x)
3668 if (LogIs2Enabled())
3669#endif
3670 {
3671 PVM pVM = cpu_single_env->pVM;
3672 RTSEL cs;
3673 RTGCUINTPTR eip;
3674
3675 /*
3676 * Update the state so DBGF reads the correct register values (flags).
3677 */
3678 remR3StateUpdate(pVM);
3679
3680 /*
3681 * Do the disassembling.
3682 */
3683 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3684 cs = cpu_single_env->segs[R_CS].selector;
3685 eip = uCode - cpu_single_env->segs[R_CS].base;
3686 for (;;)
3687 {
3688 char szBuf[256];
3689 uint32_t cbInstr;
3690 int rc = DBGFR3DisasInstrEx(pVM,
3691 cs,
3692 eip,
3693 0,
3694 szBuf, sizeof(szBuf),
3695 &cbInstr);
3696 if (RT_SUCCESS(rc))
3697 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3698 else
3699 {
3700 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3701 cbInstr = 1;
3702 }
3703
3704 /* next */
3705 if (cb <= cbInstr)
3706 break;
3707 cb -= cbInstr;
3708 uCode += cbInstr;
3709 eip += cbInstr;
3710 }
3711 }
3712#undef DISAS_PRINTF
3713}
3714
3715
3716/**
3717 * Looks up a guest symbol.
3718 *
3719 * @returns Pointer to symbol name. This is a static buffer.
3720 * @param orig_addr The address in question.
3721 */
3722const char *lookup_symbol(target_ulong orig_addr)
3723{
3724 RTGCINTPTR off = 0;
3725 DBGFSYMBOL Sym;
3726 PVM pVM = cpu_single_env->pVM;
3727 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3728 if (RT_SUCCESS(rc))
3729 {
3730 static char szSym[sizeof(Sym.szName) + 48];
3731 if (!off)
3732 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3733 else if (off > 0)
3734 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3735 else
3736 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3737 return szSym;
3738 }
3739 return "<N/A>";
3740}
3741
3742
3743#undef LOG_GROUP
3744#define LOG_GROUP LOG_GROUP_REM
3745
3746
3747/* -+- FF notifications -+- */
3748
3749
3750/**
3751 * Notification about a pending interrupt.
3752 *
3753 * @param pVM VM Handle.
3754 * @param u8Interrupt Interrupt
3755 * @thread The emulation thread.
3756 */
3757REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
3758{
3759 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3760 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3761}
3762
3763/**
3764 * Notification about a pending interrupt.
3765 *
3766 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3767 * @param pVM VM Handle.
3768 * @thread The emulation thread.
3769 */
3770REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
3771{
3772 return pVM->rem.s.u32PendingInterrupt;
3773}
3774
3775/**
3776 * Notification about the interrupt FF being set.
3777 *
3778 * @param pVM VM Handle.
3779 * @thread The emulation thread.
3780 */
3781REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
3782{
3783 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3784 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3785 if (pVM->rem.s.fInREM)
3786 {
3787 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3788 CPU_INTERRUPT_EXTERNAL_HARD);
3789 }
3790}
3791
3792
3793/**
3794 * Notification about the interrupt FF being set.
3795 *
3796 * @param pVM VM Handle.
3797 * @thread Any.
3798 */
3799REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
3800{
3801 LogFlow(("REMR3NotifyInterruptClear:\n"));
3802 if (pVM->rem.s.fInREM)
3803 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3804}
3805
3806
3807/**
3808 * Notification about pending timer(s).
3809 *
3810 * @param pVM VM Handle.
3811 * @thread Any.
3812 */
3813REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
3814{
3815#ifndef DEBUG_bird
3816 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3817#endif
3818 if (pVM->rem.s.fInREM)
3819 {
3820 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3821 CPU_INTERRUPT_EXTERNAL_TIMER);
3822 }
3823}
3824
3825
3826/**
3827 * Notification about pending DMA transfers.
3828 *
3829 * @param pVM VM Handle.
3830 * @thread Any.
3831 */
3832REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3833{
3834 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3835 if (pVM->rem.s.fInREM)
3836 {
3837 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3838 CPU_INTERRUPT_EXTERNAL_DMA);
3839 }
3840}
3841
3842
3843/**
3844 * Notification about pending timer(s).
3845 *
3846 * @param pVM VM Handle.
3847 * @thread Any.
3848 */
3849REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3850{
3851 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3852 if (pVM->rem.s.fInREM)
3853 {
3854 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3855 CPU_INTERRUPT_EXTERNAL_EXIT);
3856 }
3857}
3858
3859
3860/**
3861 * Notification about pending FF set by an external thread.
3862 *
3863 * @param pVM VM handle.
3864 * @thread Any.
3865 */
3866REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3867{
3868 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3869 if (pVM->rem.s.fInREM)
3870 {
3871 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3872 CPU_INTERRUPT_EXTERNAL_EXIT);
3873 }
3874}
3875
3876
3877#ifdef VBOX_WITH_STATISTICS
3878void remR3ProfileStart(int statcode)
3879{
3880 STAMPROFILEADV *pStat;
3881 switch(statcode)
3882 {
3883 case STATS_EMULATE_SINGLE_INSTR:
3884 pStat = &gStatExecuteSingleInstr;
3885 break;
3886 case STATS_QEMU_COMPILATION:
3887 pStat = &gStatCompilationQEmu;
3888 break;
3889 case STATS_QEMU_RUN_EMULATED_CODE:
3890 pStat = &gStatRunCodeQEmu;
3891 break;
3892 case STATS_QEMU_TOTAL:
3893 pStat = &gStatTotalTimeQEmu;
3894 break;
3895 case STATS_QEMU_RUN_TIMERS:
3896 pStat = &gStatTimers;
3897 break;
3898 case STATS_TLB_LOOKUP:
3899 pStat= &gStatTBLookup;
3900 break;
3901 case STATS_IRQ_HANDLING:
3902 pStat= &gStatIRQ;
3903 break;
3904 case STATS_RAW_CHECK:
3905 pStat = &gStatRawCheck;
3906 break;
3907
3908 default:
3909 AssertMsgFailed(("unknown stat %d\n", statcode));
3910 return;
3911 }
3912 STAM_PROFILE_ADV_START(pStat, a);
3913}
3914
3915
3916void remR3ProfileStop(int statcode)
3917{
3918 STAMPROFILEADV *pStat;
3919 switch(statcode)
3920 {
3921 case STATS_EMULATE_SINGLE_INSTR:
3922 pStat = &gStatExecuteSingleInstr;
3923 break;
3924 case STATS_QEMU_COMPILATION:
3925 pStat = &gStatCompilationQEmu;
3926 break;
3927 case STATS_QEMU_RUN_EMULATED_CODE:
3928 pStat = &gStatRunCodeQEmu;
3929 break;
3930 case STATS_QEMU_TOTAL:
3931 pStat = &gStatTotalTimeQEmu;
3932 break;
3933 case STATS_QEMU_RUN_TIMERS:
3934 pStat = &gStatTimers;
3935 break;
3936 case STATS_TLB_LOOKUP:
3937 pStat= &gStatTBLookup;
3938 break;
3939 case STATS_IRQ_HANDLING:
3940 pStat= &gStatIRQ;
3941 break;
3942 case STATS_RAW_CHECK:
3943 pStat = &gStatRawCheck;
3944 break;
3945 default:
3946 AssertMsgFailed(("unknown stat %d\n", statcode));
3947 return;
3948 }
3949 STAM_PROFILE_ADV_STOP(pStat, a);
3950}
3951#endif
3952
/**
 * Raise an RC, force rem exit.
 *
 * @param pVM VM handle.
 * @param rc The rc.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    /* Stash the status code where the outer loop picks it up, then interrupt
       the recompiler so it exits and returns it. */
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
3967
3968
3969/* -+- timers -+- */
3970
/**
 * Reads the virtual CPU timestamp counter for the recompiler (RDTSC backend).
 *
 * @returns The current TSC value from the VBox time manager.
 * @param env The recompiler CPU state (carries the VM handle).
 */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVM);
}
3976
3977
3978/* -+- interrupts -+- */
3979
/**
 * Asserts the FPU error line by raising ISA IRQ 13.
 *
 * @param env The recompiler CPU state (carries the VM handle).
 */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
3985
/**
 * Fetches the pending interrupt vector for the recompiler.
 *
 * @returns The interrupt vector (0..255) on success, -1 if nothing could be
 *          obtained from the (A)PIC.
 * @param env The recompiler CPU state (carries the VM handle).
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the vector stashed by the raw-mode fallback path. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVM, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    if (RT_SUCCESS(rc))
    {
        /* Keep CPU_INTERRUPT_HARD set while further APIC/PIC interrupts are
           still pending so the recompiler asks again. */
        if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4018
4019
4020/* -+- local apic -+- */
4021
/**
 * Writes the guest's APIC base MSR value via PDM.
 *
 * @param env The recompiler CPU state (carries the VM handle).
 * @param val The new APIC base value.
 */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4027
4028uint64_t cpu_get_apic_base(CPUX86State *env)
4029{
4030 uint64_t u64;
4031 int rc = PDMApicGetBase(env->pVM, &u64);
4032 if (RT_SUCCESS(rc))
4033 {
4034 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4035 return u64;
4036 }
4037 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4038 return 0;
4039}
4040
/**
 * Writes the guest's APIC task priority register via PDM.
 *
 * @param env The recompiler CPU state (carries the VM handle).
 * @param val The new TPR value.
 */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVM, val);
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4046
4047uint8_t cpu_get_apic_tpr(CPUX86State *env)
4048{
4049 uint8_t u8;
4050 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4051 if (RT_SUCCESS(rc))
4052 {
4053 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4054 return u8;
4055 }
4056 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4057 return 0;
4058}
4059
4060
4061uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4062{
4063 uint64_t value;
4064 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4065 if (RT_SUCCESS(rc))
4066 {
4067 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4068 return value;
4069 }
4070 /** @todo: exception ? */
4071 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4072 return value;
4073}
4074
/**
 * Writes an APIC MSR for the guest via PDM.
 *
 * @param env The recompiler CPU state (carries the VM handle).
 * @param reg The MSR to write.
 * @param value The value to write.
 */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4081
/**
 * Reads a guest MSR (non-APIC) via CPUM.
 *
 * @returns The MSR value.
 * @param env The recompiler CPU state (carries the VM handle).
 * @param msr The MSR to read.
 */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    return CPUMGetGuestMsr(env->pVM, msr);
}
4086
/**
 * Writes a guest MSR (non-APIC) via CPUM.
 *
 * @param env The recompiler CPU state (carries the VM handle).
 * @param msr The MSR to write.
 * @param val The value to write.
 */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    CPUMSetGuestMsr(env->pVM, msr, val);
}
4091
4092/* -+- I/O Ports -+- */
4093
4094#undef LOG_GROUP
4095#define LOG_GROUP LOG_GROUP_REM_IOPORT
4096
4097void cpu_outb(CPUState *env, int addr, int val)
4098{
4099 int rc;
4100
4101 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4102 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4103
4104 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4105 if (RT_LIKELY(rc == VINF_SUCCESS))
4106 return;
4107 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4108 {
4109 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4110 remR3RaiseRC(env->pVM, rc);
4111 return;
4112 }
4113 remAbort(rc, __FUNCTION__);
4114}
4115
4116void cpu_outw(CPUState *env, int addr, int val)
4117{
4118 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4119 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4120 if (RT_LIKELY(rc == VINF_SUCCESS))
4121 return;
4122 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4123 {
4124 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4125 remR3RaiseRC(env->pVM, rc);
4126 return;
4127 }
4128 remAbort(rc, __FUNCTION__);
4129}
4130
4131void cpu_outl(CPUState *env, int addr, int val)
4132{
4133 int rc;
4134 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4135 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4136 if (RT_LIKELY(rc == VINF_SUCCESS))
4137 return;
4138 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4139 {
4140 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4141 remR3RaiseRC(env->pVM, rc);
4142 return;
4143 }
4144 remAbort(rc, __FUNCTION__);
4145}
4146
4147int cpu_inb(CPUState *env, int addr)
4148{
4149 uint32_t u32 = 0;
4150 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4151 if (RT_LIKELY(rc == VINF_SUCCESS))
4152 {
4153 if (/*addr != 0x61 && */addr != 0x71)
4154 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4155 return (int)u32;
4156 }
4157 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4158 {
4159 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4160 remR3RaiseRC(env->pVM, rc);
4161 return (int)u32;
4162 }
4163 remAbort(rc, __FUNCTION__);
4164 return 0xff;
4165}
4166
4167int cpu_inw(CPUState *env, int addr)
4168{
4169 uint32_t u32 = 0;
4170 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4171 if (RT_LIKELY(rc == VINF_SUCCESS))
4172 {
4173 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4174 return (int)u32;
4175 }
4176 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4177 {
4178 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4179 remR3RaiseRC(env->pVM, rc);
4180 return (int)u32;
4181 }
4182 remAbort(rc, __FUNCTION__);
4183 return 0xffff;
4184}
4185
4186int cpu_inl(CPUState *env, int addr)
4187{
4188 uint32_t u32 = 0;
4189 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4190 if (RT_LIKELY(rc == VINF_SUCCESS))
4191 {
4192//if (addr==0x01f0 && u32 == 0x6b6d)
4193// loglevel = ~0;
4194 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4195 return (int)u32;
4196 }
4197 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4198 {
4199 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4200 remR3RaiseRC(env->pVM, rc);
4201 return (int)u32;
4202 }
4203 remAbort(rc, __FUNCTION__);
4204 return 0xffffffff;
4205}
4206
4207#undef LOG_GROUP
4208#define LOG_GROUP LOG_GROUP_REM
4209
4210
4211/* -+- helpers and misc other interfaces -+- */
4212
/**
 * Perform the CPUID instruction.
 *
 * ASMCpuId cannot be invoked from some source files where this is used because of global
 * register allocations.
 *
 * @param env Pointer to the recompiler CPU structure.
 * @param uOperator CPUID operation (eax).
 * @param pvEAX Where to store eax.
 * @param pvEBX Where to store ebx.
 * @param pvECX Where to store ecx.
 * @param pvEDX Where to store edx.
 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    /* Delegate to CPUM so the guest sees the configured (possibly masked) CPUID leaves. */
    CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4230
4231
4232#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * NOTE(review): this whole definition is compiled out via the surrounding
 * '#if 0' ("not used"); kept for reference only.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4260#endif
4261
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Does not return: ends up in EMR3FatalError().
 *
 * @param env The recompiler CPU state (unused here; cpu_single_env is used).
 * @param pszFormat Format string for the error message.
 * @param ... Format arguments.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list args;
    PVM pVM;

    /*
     * Bitch about it.
     */
#ifndef _MSC_VER
    /** @todo: MSVC is right - it's not valid C */
    RTLogFlags(NULL, "nodisabled nobuffered");
#endif
    /* The va_list is restarted for the second consumer; %N takes a nested
       format string plus its va_list. */
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
    va_end(args);
    va_start(args, pszFormat);
    AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
    va_end(args);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4295
4296
/**
 * Aborts the VM.
 *
 * Does not return: ends up in EMR3FatalError().
 *
 * @param rc VBox error code.
 * @param pszTip Hint about why/when this happend.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4322
4323
4324/**
4325 * Dumps a linux system call.
4326 * @param pVM VM handle.
4327 */
4328void remR3DumpLnxSyscall(PVM pVM)
4329{
4330 static const char *apsz[] =
4331 {
4332 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4333 "sys_exit",
4334 "sys_fork",
4335 "sys_read",
4336 "sys_write",
4337 "sys_open", /* 5 */
4338 "sys_close",
4339 "sys_waitpid",
4340 "sys_creat",
4341 "sys_link",
4342 "sys_unlink", /* 10 */
4343 "sys_execve",
4344 "sys_chdir",
4345 "sys_time",
4346 "sys_mknod",
4347 "sys_chmod", /* 15 */
4348 "sys_lchown16",
4349 "sys_ni_syscall", /* old break syscall holder */
4350 "sys_stat",
4351 "sys_lseek",
4352 "sys_getpid", /* 20 */
4353 "sys_mount",
4354 "sys_oldumount",
4355 "sys_setuid16",
4356 "sys_getuid16",
4357 "sys_stime", /* 25 */
4358 "sys_ptrace",
4359 "sys_alarm",
4360 "sys_fstat",
4361 "sys_pause",
4362 "sys_utime", /* 30 */
4363 "sys_ni_syscall", /* old stty syscall holder */
4364 "sys_ni_syscall", /* old gtty syscall holder */
4365 "sys_access",
4366 "sys_nice",
4367 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4368 "sys_sync",
4369 "sys_kill",
4370 "sys_rename",
4371 "sys_mkdir",
4372 "sys_rmdir", /* 40 */
4373 "sys_dup",
4374 "sys_pipe",
4375 "sys_times",
4376 "sys_ni_syscall", /* old prof syscall holder */
4377 "sys_brk", /* 45 */
4378 "sys_setgid16",
4379 "sys_getgid16",
4380 "sys_signal",
4381 "sys_geteuid16",
4382 "sys_getegid16", /* 50 */
4383 "sys_acct",
4384 "sys_umount", /* recycled never used phys() */
4385 "sys_ni_syscall", /* old lock syscall holder */
4386 "sys_ioctl",
4387 "sys_fcntl", /* 55 */
4388 "sys_ni_syscall", /* old mpx syscall holder */
4389 "sys_setpgid",
4390 "sys_ni_syscall", /* old ulimit syscall holder */
4391 "sys_olduname",
4392 "sys_umask", /* 60 */
4393 "sys_chroot",
4394 "sys_ustat",
4395 "sys_dup2",
4396 "sys_getppid",
4397 "sys_getpgrp", /* 65 */
4398 "sys_setsid",
4399 "sys_sigaction",
4400 "sys_sgetmask",
4401 "sys_ssetmask",
4402 "sys_setreuid16", /* 70 */
4403 "sys_setregid16",
4404 "sys_sigsuspend",
4405 "sys_sigpending",
4406 "sys_sethostname",
4407 "sys_setrlimit", /* 75 */
4408 "sys_old_getrlimit",
4409 "sys_getrusage",
4410 "sys_gettimeofday",
4411 "sys_settimeofday",
4412 "sys_getgroups16", /* 80 */
4413 "sys_setgroups16",
4414 "old_select",
4415 "sys_symlink",
4416 "sys_lstat",
4417 "sys_readlink", /* 85 */
4418 "sys_uselib",
4419 "sys_swapon",
4420 "sys_reboot",
4421 "old_readdir",
4422 "old_mmap", /* 90 */
4423 "sys_munmap",
4424 "sys_truncate",
4425 "sys_ftruncate",
4426 "sys_fchmod",
4427 "sys_fchown16", /* 95 */
4428 "sys_getpriority",
4429 "sys_setpriority",
4430 "sys_ni_syscall", /* old profil syscall holder */
4431 "sys_statfs",
4432 "sys_fstatfs", /* 100 */
4433 "sys_ioperm",
4434 "sys_socketcall",
4435 "sys_syslog",
4436 "sys_setitimer",
4437 "sys_getitimer", /* 105 */
4438 "sys_newstat",
4439 "sys_newlstat",
4440 "sys_newfstat",
4441 "sys_uname",
4442 "sys_iopl", /* 110 */
4443 "sys_vhangup",
4444 "sys_ni_syscall", /* old "idle" system call */
4445 "sys_vm86old",
4446 "sys_wait4",
4447 "sys_swapoff", /* 115 */
4448 "sys_sysinfo",
4449 "sys_ipc",
4450 "sys_fsync",
4451 "sys_sigreturn",
4452 "sys_clone", /* 120 */
4453 "sys_setdomainname",
4454 "sys_newuname",
4455 "sys_modify_ldt",
4456 "sys_adjtimex",
4457 "sys_mprotect", /* 125 */
4458 "sys_sigprocmask",
4459 "sys_ni_syscall", /* old "create_module" */
4460 "sys_init_module",
4461 "sys_delete_module",
4462 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4463 "sys_quotactl",
4464 "sys_getpgid",
4465 "sys_fchdir",
4466 "sys_bdflush",
4467 "sys_sysfs", /* 135 */
4468 "sys_personality",
4469 "sys_ni_syscall", /* reserved for afs_syscall */
4470 "sys_setfsuid16",
4471 "sys_setfsgid16",
4472 "sys_llseek", /* 140 */
4473 "sys_getdents",
4474 "sys_select",
4475 "sys_flock",
4476 "sys_msync",
4477 "sys_readv", /* 145 */
4478 "sys_writev",
4479 "sys_getsid",
4480 "sys_fdatasync",
4481 "sys_sysctl",
4482 "sys_mlock", /* 150 */
4483 "sys_munlock",
4484 "sys_mlockall",
4485 "sys_munlockall",
4486 "sys_sched_setparam",
4487 "sys_sched_getparam", /* 155 */
4488 "sys_sched_setscheduler",
4489 "sys_sched_getscheduler",
4490 "sys_sched_yield",
4491 "sys_sched_get_priority_max",
4492 "sys_sched_get_priority_min", /* 160 */
4493 "sys_sched_rr_get_interval",
4494 "sys_nanosleep",
4495 "sys_mremap",
4496 "sys_setresuid16",
4497 "sys_getresuid16", /* 165 */
4498 "sys_vm86",
4499 "sys_ni_syscall", /* Old sys_query_module */
4500 "sys_poll",
4501 "sys_nfsservctl",
4502 "sys_setresgid16", /* 170 */
4503 "sys_getresgid16",
4504 "sys_prctl",
4505 "sys_rt_sigreturn",
4506 "sys_rt_sigaction",
4507 "sys_rt_sigprocmask", /* 175 */
4508 "sys_rt_sigpending",
4509 "sys_rt_sigtimedwait",
4510 "sys_rt_sigqueueinfo",
4511 "sys_rt_sigsuspend",
4512 "sys_pread64", /* 180 */
4513 "sys_pwrite64",
4514 "sys_chown16",
4515 "sys_getcwd",
4516 "sys_capget",
4517 "sys_capset", /* 185 */
4518 "sys_sigaltstack",
4519 "sys_sendfile",
4520 "sys_ni_syscall", /* reserved for streams1 */
4521 "sys_ni_syscall", /* reserved for streams2 */
4522 "sys_vfork", /* 190 */
4523 "sys_getrlimit",
4524 "sys_mmap2",
4525 "sys_truncate64",
4526 "sys_ftruncate64",
4527 "sys_stat64", /* 195 */
4528 "sys_lstat64",
4529 "sys_fstat64",
4530 "sys_lchown",
4531 "sys_getuid",
4532 "sys_getgid", /* 200 */
4533 "sys_geteuid",
4534 "sys_getegid",
4535 "sys_setreuid",
4536 "sys_setregid",
4537 "sys_getgroups", /* 205 */
4538 "sys_setgroups",
4539 "sys_fchown",
4540 "sys_setresuid",
4541 "sys_getresuid",
4542 "sys_setresgid", /* 210 */
4543 "sys_getresgid",
4544 "sys_chown",
4545 "sys_setuid",
4546 "sys_setgid",
4547 "sys_setfsuid", /* 215 */
4548 "sys_setfsgid",
4549 "sys_pivot_root",
4550 "sys_mincore",
4551 "sys_madvise",
4552 "sys_getdents64", /* 220 */
4553 "sys_fcntl64",
4554 "sys_ni_syscall", /* reserved for TUX */
4555 "sys_ni_syscall",
4556 "sys_gettid",
4557 "sys_readahead", /* 225 */
4558 "sys_setxattr",
4559 "sys_lsetxattr",
4560 "sys_fsetxattr",
4561 "sys_getxattr",
4562 "sys_lgetxattr", /* 230 */
4563 "sys_fgetxattr",
4564 "sys_listxattr",
4565 "sys_llistxattr",
4566 "sys_flistxattr",
4567 "sys_removexattr", /* 235 */
4568 "sys_lremovexattr",
4569 "sys_fremovexattr",
4570 "sys_tkill",
4571 "sys_sendfile64",
4572 "sys_futex", /* 240 */
4573 "sys_sched_setaffinity",
4574 "sys_sched_getaffinity",
4575 "sys_set_thread_area",
4576 "sys_get_thread_area",
4577 "sys_io_setup", /* 245 */
4578 "sys_io_destroy",
4579 "sys_io_getevents",
4580 "sys_io_submit",
4581 "sys_io_cancel",
4582 "sys_fadvise64", /* 250 */
4583 "sys_ni_syscall",
4584 "sys_exit_group",
4585 "sys_lookup_dcookie",
4586 "sys_epoll_create",
4587 "sys_epoll_ctl", /* 255 */
4588 "sys_epoll_wait",
4589 "sys_remap_file_pages",
4590 "sys_set_tid_address",
4591 "sys_timer_create",
4592 "sys_timer_settime", /* 260 */
4593 "sys_timer_gettime",
4594 "sys_timer_getoverrun",
4595 "sys_timer_delete",
4596 "sys_clock_settime",
4597 "sys_clock_gettime", /* 265 */
4598 "sys_clock_getres",
4599 "sys_clock_nanosleep",
4600 "sys_statfs64",
4601 "sys_fstatfs64",
4602 "sys_tgkill", /* 270 */
4603 "sys_utimes",
4604 "sys_fadvise64_64",
4605 "sys_ni_syscall" /* sys_vserver */
4606 };
4607
4608 uint32_t uEAX = CPUMGetGuestEAX(pVM);
4609 switch (uEAX)
4610 {
4611 default:
4612 if (uEAX < RT_ELEMENTS(apsz))
4613 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4614 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
4615 CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
4616 else
4617 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
4618 break;
4619
4620 }
4621}
4622
4623
/**
 * Dumps an OpenBSD system call.
 * @param pVM VM handle.
 */
void remR3DumpOBsdSyscall(PVM pVM)
{
    /* Syscall number -> name table for OpenBSD guests; "SYS_<n>" entries
       mark unnamed/unused slots. */
    static const char *apsz[] =
    {
        "SYS_syscall",          //0
        "SYS_exit",             //1
        "SYS_fork",             //2
        "SYS_read",             //3
        "SYS_write",            //4
        "SYS_open",             //5
        "SYS_close",            //6
        "SYS_wait4",            //7
        "SYS_8",
        "SYS_link",             //9
        "SYS_unlink",           //10
        "SYS_11",
        "SYS_chdir",            //12
        "SYS_fchdir",           //13
        "SYS_mknod",            //14
        "SYS_chmod",            //15
        "SYS_chown",            //16
        "SYS_break",            //17
        "SYS_18",
        "SYS_19",
        "SYS_getpid",           //20
        "SYS_mount",            //21
        "SYS_unmount",          //22
        "SYS_setuid",           //23
        "SYS_getuid",           //24
        "SYS_geteuid",          //25
        "SYS_ptrace",           //26
        "SYS_recvmsg",          //27
        "SYS_sendmsg",          //28
        "SYS_recvfrom",         //29
        "SYS_accept",           //30
        "SYS_getpeername",      //31
        "SYS_getsockname",      //32
        "SYS_access",           //33
        "SYS_chflags",          //34
        "SYS_fchflags",         //35
        "SYS_sync",             //36
        "SYS_kill",             //37
        "SYS_38",
        "SYS_getppid",          //39
        "SYS_40",
        "SYS_dup",              //41
        "SYS_opipe",            //42
        "SYS_getegid",          //43
        "SYS_profil",           //44
        "SYS_ktrace",           //45
        "SYS_sigaction",        //46
        "SYS_getgid",           //47
        "SYS_sigprocmask",      //48
        "SYS_getlogin",         //49
        "SYS_setlogin",         //50
        "SYS_acct",             //51
        "SYS_sigpending",       //52
        "SYS_osigaltstack",     //53
        "SYS_ioctl",            //54
        "SYS_reboot",           //55
        "SYS_revoke",           //56
        "SYS_symlink",          //57
        "SYS_readlink",         //58
        "SYS_execve",           //59
        "SYS_umask",            //60
        "SYS_chroot",           //61
        "SYS_62",
        "SYS_63",
        "SYS_64",
        "SYS_65",
        "SYS_vfork",            //66
        "SYS_67",
        "SYS_68",
        "SYS_sbrk",             //69
        "SYS_sstk",             //70
        "SYS_61",
        "SYS_vadvise",          //72
        "SYS_munmap",           //73
        "SYS_mprotect",         //74
        "SYS_madvise",          //75
        "SYS_76",
        "SYS_77",
        "SYS_mincore",          //78
        "SYS_getgroups",        //79
        "SYS_setgroups",        //80
        "SYS_getpgrp",          //81
        "SYS_setpgid",          //82
        "SYS_setitimer",        //83
        "SYS_84",
        "SYS_85",
        "SYS_getitimer",        //86
        "SYS_87",
        "SYS_88",
        "SYS_89",
        "SYS_dup2",             //90
        "SYS_91",
        "SYS_fcntl",            //92
        "SYS_select",           //93
        "SYS_94",
        "SYS_fsync",            //95
        "SYS_setpriority",      //96
        "SYS_socket",           //97
        "SYS_connect",          //98
        "SYS_99",
        "SYS_getpriority",      //100
        "SYS_101",
        "SYS_102",
        "SYS_sigreturn",        //103
        "SYS_bind",             //104
        "SYS_setsockopt",       //105
        "SYS_listen",           //106
        "SYS_107",
        "SYS_108",
        "SYS_109",
        "SYS_110",
        "SYS_sigsuspend",       //111
        "SYS_112",
        "SYS_113",
        "SYS_114",
        "SYS_115",
        "SYS_gettimeofday",     //116
        "SYS_getrusage",        //117
        "SYS_getsockopt",       //118
        "SYS_119",
        "SYS_readv",            //120
        "SYS_writev",           //121
        "SYS_settimeofday",     //122
        "SYS_fchown",           //123
        "SYS_fchmod",           //124
        "SYS_125",
        "SYS_setreuid",         //126
        "SYS_setregid",         //127
        "SYS_rename",           //128
        "SYS_129",
        "SYS_130",
        "SYS_flock",            //131
        "SYS_mkfifo",           //132
        "SYS_sendto",           //133
        "SYS_shutdown",         //134
        "SYS_socketpair",       //135
        "SYS_mkdir",            //136
        "SYS_rmdir",            //137
        "SYS_utimes",           //138
        "SYS_139",
        "SYS_adjtime",          //140
        "SYS_141",
        "SYS_142",
        "SYS_143",
        "SYS_144",
        "SYS_145",
        "SYS_146",
        "SYS_setsid",           //147
        "SYS_quotactl",         //148
        "SYS_149",
        "SYS_150",
        "SYS_151",
        "SYS_152",
        "SYS_153",
        "SYS_154",
        "SYS_nfssvc",           //155
        "SYS_156",
        "SYS_157",
        "SYS_158",
        "SYS_159",
        "SYS_160",
        "SYS_getfh",            //161
        "SYS_162",
        "SYS_163",
        "SYS_164",
        "SYS_sysarch",          //165
        "SYS_166",
        "SYS_167",
        "SYS_168",
        "SYS_169",
        "SYS_170",
        "SYS_171",
        "SYS_172",
        "SYS_pread",            //173
        "SYS_pwrite",           //174
        "SYS_175",
        "SYS_176",
        "SYS_177",
        "SYS_178",
        "SYS_179",
        "SYS_180",
        "SYS_setgid",           //181
        "SYS_setegid",          //182
        "SYS_seteuid",          //183
        "SYS_lfs_bmapv",        //184
        "SYS_lfs_markv",        //185
        "SYS_lfs_segclean",     //186
        "SYS_lfs_segwait",      //187
        "SYS_188",
        "SYS_189",
        "SYS_190",
        "SYS_pathconf",         //191
        "SYS_fpathconf",        //192
        "SYS_swapctl",          //193
        "SYS_getrlimit",        //194
        "SYS_setrlimit",        //195
        "SYS_getdirentries",    //196
        "SYS_mmap",             //197
        "SYS___syscall",        //198
        "SYS_lseek",            //199
        "SYS_truncate",         //200
        "SYS_ftruncate",        //201
        "SYS___sysctl",         //202
        "SYS_mlock",            //203
        "SYS_munlock",          //204
        "SYS_205",
        "SYS_futimes",          //206
        "SYS_getpgid",          //207
        "SYS_xfspioctl",        //208
        "SYS_209",
        "SYS_210",
        "SYS_211",
        "SYS_212",
        "SYS_213",
        "SYS_214",
        "SYS_215",
        "SYS_216",
        "SYS_217",
        "SYS_218",
        "SYS_219",
        "SYS_220",
        "SYS_semget",           //221
        "SYS_222",
        "SYS_223",
        "SYS_224",
        "SYS_msgget",           //225
        "SYS_msgsnd",           //226
        "SYS_msgrcv",           //227
        "SYS_shmat",            //228
        "SYS_229",
        "SYS_shmdt",            //230
        "SYS_231",
        "SYS_clock_gettime",    //232
        "SYS_clock_settime",    //233
        "SYS_clock_getres",     //234
        "SYS_235",
        "SYS_236",
        "SYS_237",
        "SYS_238",
        "SYS_239",
        "SYS_nanosleep",        //240
        "SYS_241",
        "SYS_242",
        "SYS_243",
        "SYS_244",
        "SYS_245",
        "SYS_246",
        "SYS_247",
        "SYS_248",
        "SYS_249",
        "SYS_minherit",         //250
        "SYS_rfork",            //251
        "SYS_poll",             //252
        "SYS_issetugid",        //253
        "SYS_lchown",           //254
        "SYS_getsid",           //255
        "SYS_msync",            //256
        "SYS_257",
        "SYS_258",
        "SYS_259",
        "SYS_getfsstat",        //260
        "SYS_statfs",           //261
        "SYS_fstatfs",          //262
        "SYS_pipe",             //263
        "SYS_fhopen",           //264
        "SYS_265",
        "SYS_fhstatfs",         //266
        "SYS_preadv",           //267
        "SYS_pwritev",          //268
        "SYS_kqueue",           //269
        "SYS_kevent",           //270
        "SYS_mlockall",         //271
        "SYS_munlockall",       //272
        "SYS_getpeereid",       //273
        "SYS_274",
        "SYS_275",
        "SYS_276",
        "SYS_277",
        "SYS_278",
        "SYS_279",
        "SYS_280",
        "SYS_getresuid",        //281
        "SYS_setresuid",        //282
        "SYS_getresgid",        //283
        "SYS_setresgid",        //284
        "SYS_285",
        "SYS_mquery",           //286
        "SYS_closefrom",        //287
        "SYS_sigaltstack",      //288
        "SYS_shmget",           //289
        "SYS_semop",            //290
        "SYS_stat",             //291
        "SYS_fstat",            //292
        "SYS_lstat",            //293
        "SYS_fhstat",           //294
        "SYS___semctl",         //295
        "SYS_shmctl",           //296
        "SYS_msgctl",           //297
        "SYS_MAXSYSCALL",       //298
        //299
        //300
    };
    uint32_t uEAX;
    /* Skip the guest register reads entirely when logging is off. */
    if (!LogIsEnabled())
        return;
    uEAX = CPUMGetGuestEAX(pVM);
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
            {
                /* Fetch the first 8 dwords on the guest stack as probable arguments. */
                uint32_t au32Args[8] = {0};
                PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
                RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
                            uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
                            au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
            }
            else
                RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
            break;
    }
}
4954
4955
4956#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 *
 * Only built for the no-CRT x86 Windows configuration; always reports success.
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}
4964
/**
 * Minimal memcpy for the no-CRT build.
 *
 * Regions must not overlap (standard memcpy contract).
 *
 * @returns dst.
 * @param dst Destination buffer.
 * @param src Source buffer (const - the old code dropped the qualifier).
 * @param size Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src;
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
4972
4973#endif
4974
/**
 * SMM state-change callback from the recompiler core.
 *
 * Intentionally empty: nothing to update on the VBox side.
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette