VirtualBox

Ignore:
Timestamp:
Mar 8, 2009 5:22:28 AM (16 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
43971
Message:

REM: Added REMR3InitFinalize and moved the dirty page bit map allocation over there. REMR3NotifyPhysRamRegister now just tracks the highest ram address. Removed some old obsolete bits in REMInternal.h.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/recompiler/VBoxRecompiler.c

    r17426 r17537  
    8686static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
    8787static void     remR3StateUpdate(PVM pVM);
     88static int      remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
    8889
    8990static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
     
    401402#endif
    402403
     404    return rc;
     405}
     406
     407
     408/**
     409 * Finalizes the REM initialization.
     410 *
     411 * This is called after all components, devices and drivers have
     412 * been initialized. Its main purpose is to finish the RAM related
     413 * initialization.
     414 *
     415 * @returns VBox status code.
     416 *
     417 * @param   pVM         The VM handle.
     418 */
     419REMR3DECL(int) REMR3InitFinalize(PVM pVM)
     420{
     421    int rc;
     422
     423    /*
     424     * Ram size & dirty bit map.
     425     */
            /* Freeze the RAM range first: once fGCPhysLastRamFixed is set,
               REMR3NotifyPhysRamRegister asserts that no later registration
               raises GCPhysLastRam. */
     426    Assert(!pVM->rem.s.fGCPhysLastRamFixed);
     427    pVM->rem.s.fGCPhysLastRamFixed = true;
            /* Strict builds put an inaccessible guard area after the dirty
               page map to catch out-of-bounds writes. */
     428#ifdef RT_STRICT
     429    rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
     430#else
     431    rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
     432#endif
     433    return rc;
     434}
     435
     436
     437/**
     438 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
     439 *
     440 * @returns VBox status code.
     441 * @param   pVM         The VM handle.
     442 * @param   fGuarded    Whether to guard the map with an inaccessible area
     443 */
     444static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
     445{
     446    int      rc = VINF_SUCCESS;
     447    RTGCPHYS cb;
     448
            /* cb = highest RAM address + 1; the assertion catches wrap-around
               to zero when GCPhysLastRam sits at the top of the address space. */
     449    cb = pVM->rem.s.GCPhysLastRam + 1;
     450    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
     451                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
     452                          VERR_OUT_OF_RANGE);
     453    phys_ram_size = cb;
            /* One dirty map byte per page; the assertion checks that cb is
               page aligned (no pages lost to the shift). */
     454    phys_ram_dirty_size = cb >> PAGE_SHIFT;
     455    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));
     456
     457    if (!fGuarded)
     458    {
     459        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
     460        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
     461    }
     462    else
     463    {
     464        /*
     465         * Round the allocation up to the dirty map size of the nearest whole
                 * multiple of 4GB of RAM, ensuring at least 64KB of inaccessible
                 * guard area after the portion actually used.
     466         */
     467        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
     468        uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
     469        if (cbBitmapFull == cbBitmapAligned)
     470            cbBitmapFull += _4G >> PAGE_SHIFT;
     471        else if (cbBitmapFull - cbBitmapAligned < _64K)
     472            cbBitmapFull += _64K;
     473
     474        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
     475        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);
     476
                /* Revoke access to the tail [cbBitmapAligned, cbBitmapFull) so
                   writes past the map fault immediately.  On failure free with
                   the unbiased pointer (the bias below has not been applied yet). */
     477        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
     478        if (RT_FAILURE(rc))
     479        {
     480            RTMemPageFree(phys_ram_dirty);
     481            AssertLogRelRCReturn(rc, rc);
     482        }
     483
                /* Bias the pointer so the in-use phys_ram_dirty_size bytes end
                   exactly at the guard area, catching overruns at once.
                   NOTE(review): presumably the biased pointer is never handed
                   back to RTMemPageFree elsewhere - verify against callers. */
     484        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
     485    }
     486
     487    /* Initialize it to all ones, i.e. every page marked dirty. */
     488    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    403489    return rc;
    404490}
     
    14191505}
    14201506
     1507
    14211508/**
    14221509 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
     
    18631950        }
    18641951
    1865         /* Sync FPU state after CR4 and CPUID. */
     1952        /* Sync FPU state after CR4, CPUID and EFER (!). */
    18661953        if (fFlags & CPUM_CHANGED_FPU_REM)
    18671954            save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
     
    26982785 * @param   fFlags      Flags of the MM_RAM_FLAGS_* defines.
    26992786 */
    2700 REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, unsigned fFlags)
    2701 {
    2702     Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%d fFlags=%d\n", GCPhys, cb, fFlags));
     2787REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
     2788{
     2789    Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%d\n", GCPhys, cb, fFlags));
    27032790    VM_ASSERT_EMT(pVM);
    27042791
     
    27132800     * Base ram?
    27142801     */
    2715     if (!GCPhys)
    2716     {
    2717         phys_ram_size = cb;
    2718         phys_ram_dirty_size = cb >> PAGE_SHIFT;
    2719 #ifndef VBOX_STRICT
    2720         phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
    2721         AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", phys_ram_dirty_size));
    2722 #else /* VBOX_STRICT: allocate a full map and make the out of bounds pages invalid. */
    2723         phys_ram_dirty = RTMemPageAlloc(_4G >> PAGE_SHIFT);
    2724         AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", _4G >> PAGE_SHIFT));
    2725         uint32_t cbBitmap = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
    2726         int rc = RTMemProtect(phys_ram_dirty + cbBitmap, (_4G >> PAGE_SHIFT) - cbBitmap, RTMEM_PROT_NONE);
    2727         AssertRC(rc);
    2728         phys_ram_dirty += cbBitmap - phys_ram_dirty_size;
    2729 #endif
    2730         memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
     2802    if (!GCPhys) /** @todo add a flag for identifying MMIO2 memory here (new phys code)*/
     2803    {
     2804        if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
     2805        {
     2806            AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
     2807            pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
     2808        }
    27312809    }
    27322810
     
    27382816
    27392817#ifdef VBOX_WITH_NEW_PHYS_CODE
    2740     if (fFlags & MM_RAM_FLAGS_RESERVED)
    2741         cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    2742     else
    2743         cpu_register_physical_memory(GCPhys, cb, GCPhys);
     2818    cpu_register_physical_memory(GCPhys, cb, GCPhys);
    27442819#else
    27452820    if (!GCPhys)
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette