Changeset 17537 in vbox for trunk/src/recompiler/VBoxRecompiler.c
- Timestamp:
- Mar 8, 2009 5:22:28 AM (16 years ago)
- svn:sync-xref-src-repo-rev:
- 43971
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/recompiler/VBoxRecompiler.c
trunk/src/recompiler/VBoxRecompiler.c (r17426 → r17537)

    86    86   static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
    87    87   static void remR3StateUpdate(PVM pVM);
+         88   static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
    88    89   
    89    90   static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
…
   401   402   #endif
   402   403   
+        404       return rc;
+        405   }
+        406   
+        407   
+        408   /**
+        409    * Finalizes the REM initialization.
+        410    *
+        411    * This is called after all components, devices and drivers has
+        412    * been initialized. Its main purpose it to finish the RAM related
+        413    * initialization.
+        414    *
+        415    * @returns VBox status code.
+        416    *
+        417    * @param   pVM         The VM handle.
+        418    */
+        419   REMR3DECL(int) REMR3InitFinalize(PVM pVM)
+        420   {
+        421       int rc;
+        422   
+        423       /*
+        424        * Ram size & dirty bit map.
+        425        */
+        426       Assert(!pVM->rem.s.fGCPhysLastRamFixed);
+        427       pVM->rem.s.fGCPhysLastRamFixed = true;
+        428   #ifdef RT_STRICT
+        429       rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
+        430   #else
+        431       rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
+        432   #endif
+        433       return rc;
+        434   }
+        435   
+        436   
+        437   /**
+        438    * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
+        439    *
+        440    * @returns VBox status code.
+        441    * @param   pVM         The VM handle.
+        442    * @param   fGuarded    Whether to guard the map.
+        443    */
+        444   static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
+        445   {
+        446       int rc = VINF_SUCCESS;
+        447       RTGCPHYS cb;
+        448   
+        449       cb = pVM->rem.s.GCPhysLastRam + 1;
+        450       AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
+        451                             ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
+        452                             VERR_OUT_OF_RANGE);
+        453       phys_ram_size = cb;
+        454       phys_ram_dirty_size = cb >> PAGE_SHIFT;
+        455       AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));
+        456   
+        457       if (!fGuarded)
+        458       {
+        459           phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
+        460           AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
+        461       }
+        462       else
+        463       {
+        464           /*
+        465            * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
+        466            */
+        467           uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
+        468           uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
+        469           if (cbBitmapFull == cbBitmapAligned)
+        470               cbBitmapFull += _4G >> PAGE_SHIFT;
+        471           else if (cbBitmapFull - cbBitmapAligned < _64K)
+        472               cbBitmapFull += _64K;
+        473   
+        474           phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
+        475           AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);
+        476   
+        477           rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
+        478           if (RT_FAILURE(rc))
+        479           {
+        480               RTMemPageFree(phys_ram_dirty);
+        481               AssertLogRelRCReturn(rc, rc);
+        482           }
+        483   
+        484           phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
+        485       }
+        486   
+        487       /* initialize it. */
+        488       memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
   403   489       return rc;
   404   490   }
…
  1419  1505   }
  1420  1506   
+       1507   
  1421  1508   /**
  1422  1509    * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
…
  1863  1950   }
  1864  1951   
- 1865             /* Sync FPU state after CR4 and CPUID. */
+       1952       /* Sync FPU state after CR4, CPUID and EFER (!). */
  1866  1953       if (fFlags & CPUM_CHANGED_FPU_REM)
  1867  1954           save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
…
  2698  2785    * @param   fFlags      Flags of the MM_RAM_FLAGS_* defines.
  2699  2786    */
- 2700         REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, unsigned fFlags)
- 2701         {
- 2702             Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%d fFlags=%d\n", GCPhys, cb, fFlags));
+       2787   REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
+       2788   {
+       2789       Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%d\n", GCPhys, cb, fFlags));
  2703  2790       VM_ASSERT_EMT(pVM);
  2704  2791   
…
  2713  2800        * Base ram?
  2714  2801        */
- 2715             if (!GCPhys)
- 2716             {
- 2717                 phys_ram_size = cb;
- 2718                 phys_ram_dirty_size = cb >> PAGE_SHIFT;
- 2719         #ifndef VBOX_STRICT
- 2720                 phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
- 2721                 AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", phys_ram_dirty_size));
- 2722         #else /* VBOX_STRICT: allocate a full map and make the out of bounds pages invalid. */
- 2723             phys_ram_dirty = RTMemPageAlloc(_4G >> PAGE_SHIFT);
- 2724             AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", _4G >> PAGE_SHIFT));
- 2725             uint32_t cbBitmap = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
- 2726             int rc = RTMemProtect(phys_ram_dirty + cbBitmap, (_4G >> PAGE_SHIFT) - cbBitmap, RTMEM_PROT_NONE);
- 2727             AssertRC(rc);
- 2728             phys_ram_dirty += cbBitmap - phys_ram_dirty_size;
- 2729         #endif
- 2730             memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
+       2802       if (!GCPhys) /** @todo add a flag for identifying MMIO2 memory here (new phys code)*/
+       2803       {
+       2804           if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
+       2805           {
+       2806               AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
+       2807               pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
+       2808           }
  2731  2809       }
  2732  2810   
…
  2738  2816   
  2739  2817   #ifdef VBOX_WITH_NEW_PHYS_CODE
- 2740             if (fFlags & MM_RAM_FLAGS_RESERVED)
- 2741                 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
- 2742             else
- 2743                 cpu_register_physical_memory(GCPhys, cb, GCPhys);
+       2818       cpu_register_physical_memory(GCPhys, cb, GCPhys);
  2744  2819   #else
  2745  2820   if (!GCPhys)
Note:
See TracChangeset
for help on using the changeset viewer.