VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDRVShared.c@ 4806

Last change on this file since 4806 was 4806, checked in by vboxsync, 18 years ago

SUPLDRLOAD eEPType mess

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 144.2 KB
Line 
1/* $Revision: 4806 $ */
2/** @file
3 * VirtualBox Support Driver - Shared code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#include "SUPDRV.h"
23#ifndef PAGE_SHIFT
24# include <iprt/param.h>
25#endif
26#include <iprt/alloc.h>
27#include <iprt/semaphore.h>
28#include <iprt/spinlock.h>
29#include <iprt/thread.h>
30#include <iprt/process.h>
31#include <iprt/log.h>
32#ifdef VBOX_WITHOUT_IDT_PATCHING
33# include <VBox/vmm.h>
34# include <VBox/err.h>
35#endif
36
37
38/*******************************************************************************
39* Defined Constants And Macros *
40*******************************************************************************/
/* from x86.h - clashes with linux thus this duplication */
/* NOTE(review): local duplicates of selected x86.h constants. They are
   #undef'ed first because Linux kernel headers define clashing names;
   the values must be kept in sync with the canonical x86.h. */
#undef X86_CR0_PG
#define X86_CR0_PG                          BIT(31)     /* CR0: paging enabled */
#undef X86_CR0_PE
#define X86_CR0_PE                          BIT(0)      /* CR0: protected mode enabled */
#undef X86_CPUID_AMD_FEATURE_EDX_NX
#define X86_CPUID_AMD_FEATURE_EDX_NX        BIT(20)     /* CPUID ext. feature: no-execute page support */
#undef MSR_K6_EFER
#define MSR_K6_EFER                         0xc0000080  /* extended feature enable register (MSR number) */
#undef MSR_K6_EFER_NXE
#define MSR_K6_EFER_NXE                     BIT(11)     /* EFER: no-execute enable */
#undef MSR_K6_EFER_LMA
#define MSR_K6_EFER_LMA                     BIT(10)     /* EFER: long mode active */
#undef X86_CR4_PGE
#define X86_CR4_PGE                         BIT(7)      /* CR4: page global enable */
#undef X86_CR4_PAE
#define X86_CR4_PAE                         BIT(5)      /* CR4: physical address extension */
#undef X86_CPUID_AMD_FEATURE_EDX_LONG_MODE
#define X86_CPUID_AMD_FEATURE_EDX_LONG_MODE BIT(29)     /* CPUID ext. feature: long mode */


/** The frequency by which we recalculate the u32UpdateHz and
 * u32UpdateIntervalNS GIP members. The value must be a power of 2. */
#define GIP_UPDATEHZ_RECALC_FREQ            0x800

/**
 * Validates a session pointer.
 *
 * A live session carries the BIRD_INV cookie (assigned in
 * supdrvCreateSession); anything else is treated as invalid.
 *
 * @returns true/false accordingly.
 * @param   pSession    The session.
 */
#define SUP_IS_SESSION_VALID(pSession) \
    (   VALID_PTR(pSession) \
     && pSession->u32Cookie == BIRD_INV)
75
76
77/*******************************************************************************
78* Global Variables *
79*******************************************************************************/
/**
 * Array of the R0 SUP API.
 *
 * Name/address pairs for the ring-0 services the support driver exports.
 * The table is copied out to ring-3 by the SUP_IOCTL_QUERY_FUNCS request
 * handled further down in this file.
 */
static SUPFUNC g_aFunctions[] =
{
    /* name                                     function */
    { "SUPR0ObjRegister",                       (void *)SUPR0ObjRegister },
    { "SUPR0ObjAddRef",                         (void *)SUPR0ObjAddRef },
    { "SUPR0ObjRelease",                        (void *)SUPR0ObjRelease },
    { "SUPR0ObjVerifyAccess",                   (void *)SUPR0ObjVerifyAccess },
    { "SUPR0LockMem",                           (void *)SUPR0LockMem },
    { "SUPR0UnlockMem",                         (void *)SUPR0UnlockMem },
    { "SUPR0ContAlloc",                         (void *)SUPR0ContAlloc },
    { "SUPR0ContFree",                          (void *)SUPR0ContFree },
    { "SUPR0MemAlloc",                          (void *)SUPR0MemAlloc },
    { "SUPR0MemGetPhys",                        (void *)SUPR0MemGetPhys },
    { "SUPR0MemFree",                           (void *)SUPR0MemFree },
#ifdef USE_NEW_OS_INTERFACE_FOR_MM
    { "SUPR0PageAlloc",                         (void *)SUPR0PageAlloc },
    { "SUPR0PageFree",                          (void *)SUPR0PageFree },
#endif
    { "SUPR0Printf",                            (void *)SUPR0Printf },
    { "RTMemAlloc",                             (void *)RTMemAlloc },
    { "RTMemAllocZ",                            (void *)RTMemAllocZ },
    { "RTMemFree",                              (void *)RTMemFree },
/* These doesn't work yet on linux - use fast mutexes!
    { "RTSemMutexCreate",                       (void *)RTSemMutexCreate },
    { "RTSemMutexRequest",                      (void *)RTSemMutexRequest },
    { "RTSemMutexRelease",                      (void *)RTSemMutexRelease },
    { "RTSemMutexDestroy",                      (void *)RTSemMutexDestroy },
*/
    { "RTSemFastMutexCreate",                   (void *)RTSemFastMutexCreate },
    { "RTSemFastMutexDestroy",                  (void *)RTSemFastMutexDestroy },
    { "RTSemFastMutexRequest",                  (void *)RTSemFastMutexRequest },
    { "RTSemFastMutexRelease",                  (void *)RTSemFastMutexRelease },
    { "RTSemEventCreate",                       (void *)RTSemEventCreate },
    { "RTSemEventSignal",                       (void *)RTSemEventSignal },
    { "RTSemEventWait",                         (void *)RTSemEventWait },
    { "RTSemEventDestroy",                      (void *)RTSemEventDestroy },
    { "RTSpinlockCreate",                       (void *)RTSpinlockCreate },
    { "RTSpinlockDestroy",                      (void *)RTSpinlockDestroy },
    { "RTSpinlockAcquire",                      (void *)RTSpinlockAcquire },
    { "RTSpinlockRelease",                      (void *)RTSpinlockRelease },
    { "RTSpinlockAcquireNoInts",                (void *)RTSpinlockAcquireNoInts },
    { "RTSpinlockReleaseNoInts",                (void *)RTSpinlockReleaseNoInts },
    { "RTThreadNativeSelf",                     (void *)RTThreadNativeSelf },
    { "RTThreadSleep",                          (void *)RTThreadSleep },
    { "RTThreadYield",                          (void *)RTThreadYield },
#if 0 /* Thread APIs, Part 2. */
    { "RTThreadSelf",                           (void *)RTThreadSelf },
    { "RTThreadCreate",                         (void *)RTThreadCreate },
    { "RTThreadGetNative",                      (void *)RTThreadGetNative },
    { "RTThreadWait",                           (void *)RTThreadWait },
    { "RTThreadWaitNoResume",                   (void *)RTThreadWaitNoResume },
    { "RTThreadGetName",                        (void *)RTThreadGetName },
    { "RTThreadSelfName",                       (void *)RTThreadSelfName },
    { "RTThreadGetType",                        (void *)RTThreadGetType },
    { "RTThreadUserSignal",                     (void *)RTThreadUserSignal },
    { "RTThreadUserReset",                      (void *)RTThreadUserReset },
    { "RTThreadUserWait",                       (void *)RTThreadUserWait },
    { "RTThreadUserWaitNoResume",               (void *)RTThreadUserWaitNoResume },
#endif
    { "RTLogDefaultInstance",                   (void *)RTLogDefaultInstance },
    { "RTLogRelDefaultInstance",                (void *)RTLogRelDefaultInstance },
    { "RTLogSetDefaultInstanceThread",          (void *)RTLogSetDefaultInstanceThread },
    { "RTLogLogger",                            (void *)RTLogLogger },
    { "RTLogLoggerEx",                          (void *)RTLogLoggerEx },
    { "RTLogLoggerExV",                         (void *)RTLogLoggerExV },
    { "RTLogPrintf",                            (void *)RTLogPrintf },
    { "RTLogPrintfV",                           (void *)RTLogPrintfV },
    { "AssertMsg1",                             (void *)AssertMsg1 },
    { "AssertMsg2",                             (void *)AssertMsg2 },
};
153
154
155/*******************************************************************************
156* Internal Functions *
157*******************************************************************************/
/* Session memory-reference bookkeeping. */
static int      supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
static int      supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
#ifndef VBOX_WITHOUT_IDT_PATCHING
/* Legacy IDT-patching fast-call mechanism. */
static int      supdrvIOCtl_IdtInstall(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPIDTINSTALL pReq);
static PSUPDRVPATCH supdrvIdtPatchOne(PSUPDRVDEVEXT pDevExt, PSUPDRVPATCH pPatch);
static int      supdrvIOCtl_IdtRemoveAll(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession);
static void     supdrvIdtRemoveOne(PSUPDRVDEVEXT pDevExt, PSUPDRVPATCH pPatch);
static void     supdrvIdtWrite(volatile void *pvIdtEntry, const SUPDRVIDTE *pNewIDTEntry);
#endif /* !VBOX_WITHOUT_IDT_PATCHING */
/* Image loader (SUPLDR*) ioctl workers and helpers. */
static int      supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
static int      supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
static int      supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
static int      supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
static int      supdrvLdrSetR0EP(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0Entry);
static void     supdrvLdrUnsetR0EP(PSUPDRVDEVEXT pDevExt);
static void     supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
static void     supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
/* Paging-mode query and GIP (global info page) support. */
static SUPPAGINGMODE supdrvIOCtl_GetPagingMode(void);
static SUPGIPMODE supdrvGipDeterminTscMode(void);
#ifdef RT_OS_WINDOWS
static int      supdrvPageGetPhys(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages);
static bool     supdrvPageWasLockedByPageAlloc(PSUPDRVSESSION pSession, RTR3PTR pvR3);
#endif
#ifdef USE_NEW_OS_INTERFACE_FOR_GIP
static int      supdrvGipCreate(PSUPDRVDEVEXT pDevExt);
static void     supdrvGipDestroy(PSUPDRVDEVEXT pDevExt);
static DECLCALLBACK(void) supdrvGipTimer(PRTTIMER pTimer, void *pvUser);
#endif
186
187
188/**
189 * Initializes the device extentsion structure.
190 *
191 * @returns IPRT status code.
192 * @param pDevExt The device extension to initialize.
193 */
194int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt)
195{
196 /*
197 * Initialize it.
198 */
199 int rc;
200 memset(pDevExt, 0, sizeof(*pDevExt));
201 rc = RTSpinlockCreate(&pDevExt->Spinlock);
202 if (!rc)
203 {
204 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
205 if (!rc)
206 {
207 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
208 if (!rc)
209 {
210#ifdef USE_NEW_OS_INTERFACE_FOR_GIP
211 rc = supdrvGipCreate(pDevExt);
212 if (RT_SUCCESS(rc))
213 {
214 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
215 return VINF_SUCCESS;
216 }
217#else
218 pDevExt->u32Cookie = BIRD;
219 return VINF_SUCCESS;
220#endif
221 }
222 RTSemFastMutexDestroy(pDevExt->mtxLdr);
223 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
224 }
225 RTSpinlockDestroy(pDevExt->Spinlock);
226 pDevExt->Spinlock = NIL_RTSPINLOCK;
227 }
228 return rc;
229}
230
231
232/**
233 * Delete the device extension (e.g. cleanup members).
234 *
235 * @param pDevExt The device extension to delete.
236 */
237void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
238{
239#ifndef VBOX_WITHOUT_IDT_PATCHING
240 PSUPDRVPATCH pPatch;
241#endif
242 PSUPDRVOBJ pObj;
243 PSUPDRVUSAGE pUsage;
244
245 /*
246 * Kill mutexes and spinlocks.
247 */
248 RTSemFastMutexDestroy(pDevExt->mtxGip);
249 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
250 RTSemFastMutexDestroy(pDevExt->mtxLdr);
251 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
252 RTSpinlockDestroy(pDevExt->Spinlock);
253 pDevExt->Spinlock = NIL_RTSPINLOCK;
254
255 /*
256 * Free lists.
257 */
258#ifndef VBOX_WITHOUT_IDT_PATCHING
259 /* patches */
260 /** @todo make sure we don't uninstall patches which has been patched by someone else. */
261 pPatch = pDevExt->pIdtPatchesFree;
262 pDevExt->pIdtPatchesFree = NULL;
263 while (pPatch)
264 {
265 void *pvFree = pPatch;
266 pPatch = pPatch->pNext;
267 RTMemExecFree(pvFree);
268 }
269#endif /* !VBOX_WITHOUT_IDT_PATCHING */
270
271 /* objects. */
272 pObj = pDevExt->pObjs;
273#if !defined(DEBUG_bird) || !defined(RT_OS_LINUX) /* breaks unloading, temporary, remove me! */
274 Assert(!pObj); /* (can trigger on forced unloads) */
275#endif
276 pDevExt->pObjs = NULL;
277 while (pObj)
278 {
279 void *pvFree = pObj;
280 pObj = pObj->pNext;
281 RTMemFree(pvFree);
282 }
283
284 /* usage records. */
285 pUsage = pDevExt->pUsageFree;
286 pDevExt->pUsageFree = NULL;
287 while (pUsage)
288 {
289 void *pvFree = pUsage;
290 pUsage = pUsage->pNext;
291 RTMemFree(pvFree);
292 }
293
294#ifdef USE_NEW_OS_INTERFACE_FOR_GIP
295 /* kill the GIP */
296 supdrvGipDestroy(pDevExt);
297#endif
298}
299
300
301/**
302 * Create session.
303 *
304 * @returns IPRT status code.
305 * @param pDevExt Device extension.
306 * @param ppSession Where to store the pointer to the session data.
307 */
308int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION *ppSession)
309{
310 /*
311 * Allocate memory for the session data.
312 */
313 int rc = VERR_NO_MEMORY;
314 PSUPDRVSESSION pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(sizeof(*pSession));
315 if (pSession)
316 {
317 /* Initialize session data. */
318 rc = RTSpinlockCreate(&pSession->Spinlock);
319 if (!rc)
320 {
321 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
322 pSession->pDevExt = pDevExt;
323 pSession->u32Cookie = BIRD_INV;
324 /*pSession->pLdrUsage = NULL;
325 pSession->pPatchUsage = NULL;
326 pSession->pUsage = NULL;
327 pSession->pGip = NULL;
328 pSession->fGipReferenced = false;
329 pSession->Bundle.cUsed = 0 */
330
331 dprintf(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
332 return VINF_SUCCESS;
333 }
334
335 RTMemFree(pSession);
336 *ppSession = NULL;
337 }
338
339 dprintf(("Failed to create spinlock, rc=%d!\n", rc));
340 return rc;
341}
342
343
344/**
345 * Shared code for cleaning up a session.
346 *
347 * @param pDevExt Device extension.
348 * @param pSession Session data.
349 * This data will be freed by this routine.
350 */
351void VBOXCALL supdrvCloseSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
352{
353 /*
354 * Cleanup the session first.
355 */
356 supdrvCleanupSession(pDevExt, pSession);
357
358 /*
359 * Free the rest of the session stuff.
360 */
361 RTSpinlockDestroy(pSession->Spinlock);
362 pSession->Spinlock = NIL_RTSPINLOCK;
363 pSession->pDevExt = NULL;
364 RTMemFree(pSession);
365 dprintf2(("supdrvCloseSession: returns\n"));
366}
367
368
/**
 * Shared code for cleaning up a session (but not quite freeing it).
 *
 * This is primarily intended for MAC OS X where we have to clean up the memory
 * stuff before the file handle is closed.
 *
 * Performs, in order: logger detach, IDT patch removal (when patching is
 * compiled in), object-reference release, session memory bundle release,
 * loaded-image dereferencing, and GIP unmapping. The session structure and
 * its spinlock are NOT freed here (see supdrvCloseSession).
 *
 * @param   pDevExt     Device extension.
 * @param   pSession    Session data.
 *                      This data will be freed by this routine.
 */
void VBOXCALL supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
{
    PSUPDRVBUNDLE pBundle;
    dprintf(("supdrvCleanupSession: pSession=%p\n", pSession));

    /*
     * Remove logger instances related to this session.
     * (This assumes the dprintf and dprintf2 macros don't use the normal logging,
     * since the instance is being torn away under them.)
     */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);

#ifndef VBOX_WITHOUT_IDT_PATCHING
    /*
     * Uninstall any IDT patches installed for this session.
     */
    supdrvIOCtl_IdtRemoveAll(pDevExt, pSession);
#endif

    /*
     * Release object references made in this session.
     * In theory there should be no one racing us in this session.
     *
     * Note the lock dance: the devext spinlock is dropped around the
     * destructor callback (pfnDestructor may do arbitrary work) and around
     * each RTMemFree, then re-acquired before looking at the next usage
     * record.
     */
    dprintf2(("release objects - start\n"));
    if (pSession->pUsage)
    {
        RTSPINLOCKTMP   SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
        PSUPDRVUSAGE    pUsage;
        RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);

        while ((pUsage = pSession->pUsage) != NULL)
        {
            PSUPDRVOBJ  pObj = pUsage->pObj;
            pSession->pUsage = pUsage->pNext;

            AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
            if (pUsage->cUsage < pObj->cUsage)
            {
                /* Other sessions still reference the object - just subtract
                   this session's count. */
                pObj->cUsage -= pUsage->cUsage;
                RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
            }
            else
            {
                /* Destroy the object and free the record. First unlink it
                   from the devext object list (head or interior node). */
                if (pDevExt->pObjs == pObj)
                    pDevExt->pObjs = pObj->pNext;
                else
                {
                    PSUPDRVOBJ pObjPrev;
                    for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
                        if (pObjPrev->pNext == pObj)
                        {
                            pObjPrev->pNext = pObj->pNext;
                            break;
                        }
                    Assert(pObjPrev);
                }
                RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);

                /* Destructor runs without the spinlock held. */
                pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
                RTMemFree(pObj);
            }

            /* free it and continue. */
            RTMemFree(pUsage);

            RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
        }

        RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
        AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during desturction!\n"));
    }
    dprintf2(("release objects - done\n"));

    /*
     * Release memory allocated in the session.
     *
     * We do not serialize this as we assume that the application will
     * not allocated memory while closing the file handle object.
     */
    dprintf2(("freeing memory:\n"));
    pBundle = &pSession->Bundle;   /* first bundle is embedded in the session */
    while (pBundle)
    {
        PSUPDRVBUNDLE   pToFree;
        unsigned        i;

        /*
         * Check and unlock all entries in the bundle.
         */
        for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
        {
#ifdef USE_NEW_OS_INTERFACE_FOR_MM
            if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
            {
                int rc;
                dprintf2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
                          (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
                /* Free the ring-3 mapping object first, then the backing
                   memory object itself. */
                if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
                {
                    rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
                    AssertRC(rc); /** @todo figure out how to handle this. */
                    pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
                }
                rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, false);
                AssertRC(rc); /** @todo figure out how to handle this. */
                pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
                pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
            }

#else /* !USE_NEW_OS_INTERFACE_FOR_MM */
            if (    pBundle->aMem[i].pvR0
                ||  pBundle->aMem[i].pvR3)
            {
                dprintf2(("eType=%d pvR0=%p pvR3=%p cb=%d\n", pBundle->aMem[i].eType,
                          pBundle->aMem[i].pvR0, pBundle->aMem[i].pvR3, pBundle->aMem[i].cb));
                /* Dispatch to the OS-specific free routine for this kind of
                   memory reference. */
                switch (pBundle->aMem[i].eType)
                {
                    case MEMREF_TYPE_LOCKED:
                        supdrvOSUnlockMemOne(&pBundle->aMem[i]);
                        break;
                    case MEMREF_TYPE_CONT:
                        supdrvOSContFreeOne(&pBundle->aMem[i]);
                        break;
                    case MEMREF_TYPE_LOW:
                        supdrvOSLowFreeOne(&pBundle->aMem[i]);
                        break;
                    case MEMREF_TYPE_MEM:
                        supdrvOSMemFreeOne(&pBundle->aMem[i]);
                        break;
                    default:
                        break;
                }
                pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
            }
#endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
        }

        /*
         * Advance and free previous bundle.
         * (The first bundle lives inside the session structure and must not
         * be passed to RTMemFree.)
         */
        pToFree = pBundle;
        pBundle = pBundle->pNext;

        pToFree->pNext = NULL;
        pToFree->cUsed = 0;
        if (pToFree != &pSession->Bundle)
            RTMemFree(pToFree);
    }
    dprintf2(("freeing memory - done\n"));

    /*
     * Loaded images needs to be dereferenced and possibly freed up.
     * (Done under the loader mutex; supdrvLdrFree is called when this
     * session holds the last reference to an image.)
     */
    RTSemFastMutexRequest(pDevExt->mtxLdr);
    dprintf2(("freeing images:\n"));
    if (pSession->pLdrUsage)
    {
        PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
        pSession->pLdrUsage = NULL;
        while (pUsage)
        {
            void           *pvFree = pUsage;
            PSUPDRVLDRIMAGE pImage = pUsage->pImage;
            if (pImage->cUsage > pUsage->cUsage)
                pImage->cUsage -= pUsage->cUsage;
            else
                supdrvLdrFree(pDevExt, pImage);
            pUsage->pImage = NULL;
            pUsage = pUsage->pNext;
            RTMemFree(pvFree);
        }
    }
    RTSemFastMutexRelease(pDevExt->mtxLdr);
    dprintf2(("freeing images - done\n"));

    /*
     * Unmap the GIP.
     */
    dprintf2(("umapping GIP:\n"));
#ifdef USE_NEW_OS_INTERFACE_FOR_GIP
    if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
#else
    if (pSession->pGip)
#endif
    {
        SUPR0GipUnmap(pSession);
#ifndef USE_NEW_OS_INTERFACE_FOR_GIP
        pSession->pGip = NULL;
#endif
        pSession->fGipReferenced = 0;
    }
    dprintf2(("umapping GIP - done\n"));
}
572
573
#ifdef VBOX_WITHOUT_IDT_PATCHING
/**
 * Fast path I/O Control worker.
 *
 * Dispatches the three fast-path operations straight into VMMR0 via the
 * registered entry point, with interrupts disabled around the call.
 *
 * @returns VBox status code that should be passed down to ring-3 unchanged.
 * @param   uIOCtl      Function number.
 * @param   pDevExt     Device extention.
 * @param   pSession    Session data.
 */
int VBOXCALL supdrvIOCtlFast(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
{
    /*
     * VMMR0Entry() ASSUMES interrupts are disabled, so turn them off up
     * front; the prerequisite checks are done afterwards to give the
     * compiler more room to optimize.
     */
    int               rc          = VERR_INTERNAL_ERROR;
    RTCCUINTREG const fSavedFlags = ASMGetFlags();
    ASMIntDisable();

    if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0Entry))
    {
        if (uIOCtl == SUP_IOCTL_FAST_DO_RAW_RUN)
            rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_RAW_RUN, NULL);
        else if (uIOCtl == SUP_IOCTL_FAST_DO_HWACC_RUN)
            rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_HWACC_RUN, NULL);
        else if (uIOCtl == SUP_IOCTL_FAST_DO_NOP)
            rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_NOP, NULL);
        /* else: unknown fast ioctl - VERR_INTERNAL_ERROR stands. */
    }

    ASMSetFlags(fSavedFlags);
    return rc;
}
#endif /* VBOX_WITHOUT_IDT_PATCHING */
619
620
621/**
622 * I/O Control worker.
623 *
624 * @returns 0 on success.
625 * @returns VERR_INVALID_PARAMETER if the request is invalid.
626 *
627 * @param uIOCtl Function number.
628 * @param pDevExt Device extention.
629 * @param pSession Session data.
630 * @param pReqHdr The request header.
631 */
632int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
633{
634 /*
635 * Validate the request.
636 */
637 /* this first check could probably be omitted as its also done by the OS specific code... */
638 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
639 || pReqHdr->cbIn < sizeof(*pReqHdr)
640 || pReqHdr->cbOut < sizeof(*pReqHdr)))
641 {
642 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
643 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
644 return VERR_INVALID_PARAMETER;
645 }
646 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
647 {
648 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
649 {
650 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
651 return VERR_INVALID_PARAMETER;
652 }
653 }
654 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
655 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
656 {
657 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
658 return VERR_INVALID_PARAMETER;
659 }
660
661/*
662 * Validation macros
663 */
664#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
665 do { \
666 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
667 { \
668 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
669 (long)pReq->Hdr.cbIn, (long)(cbInExpect), (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
670 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
671 } \
672 } while (0)
673
674#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
675
676#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
677 do { \
678 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
679 { \
680 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
681 (long)pReq->Hdr.cbIn, (long)(cbInExpect))); \
682 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
683 } \
684 } while (0)
685
686#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
687 do { \
688 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
689 { \
690 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
691 (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
692 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
693 } \
694 } while (0)
695
696#define REQ_CHECK_EXPR(Name, expr) \
697 do { \
698 if (RT_UNLIKELY(!(expr))) \
699 { \
700 OSDBGPRINT(( #Name ": %s\n", #expr)); \
701 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
702 } \
703 } while (0)
704
705#define REQ_CHECK_EXPR_FMT(expr, fmt) \
706 do { \
707 if (RT_UNLIKELY(!(expr))) \
708 { \
709 OSDBGPRINT( fmt ); \
710 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
711 } \
712 } while (0)
713
714
715 /*
716 * The switch.
717 */
718 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
719 {
720 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
721 {
722 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
723 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
724 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
725 {
726 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
727 pReq->Hdr.rc = VERR_INVALID_MAGIC;
728 return 0;
729 }
730
731#if 0
732 /*
733 * Call out to the OS specific code and let it do permission checks on the
734 * client process.
735 */
736 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
737 {
738 pReq->u.Out.u32Cookie = 0xffffffff;
739 pReq->u.Out.u32SessionCookie = 0xffffffff;
740 pReq->u.Out.u32SessionVersion = 0xffffffff;
741 pReq->u.Out.u32DriverVersion = SUPDRVIOC_VERSION;
742 pReq->u.Out.pSession = NULL;
743 pReq->u.Out.cFunctions = 0;
744 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
745 return 0;
746 }
747#endif
748
749 /*
750 * Match the version.
751 * The current logic is very simple, match the major interface version.
752 */
753 if ( pReq->u.In.u32MinVersion > SUPDRVIOC_VERSION
754 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRVIOC_VERSION & 0xffff0000))
755 {
756 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
757 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRVIOC_VERSION));
758 pReq->u.Out.u32Cookie = 0xffffffff;
759 pReq->u.Out.u32SessionCookie = 0xffffffff;
760 pReq->u.Out.u32SessionVersion = 0xffffffff;
761 pReq->u.Out.u32DriverVersion = SUPDRVIOC_VERSION;
762 pReq->u.Out.pSession = NULL;
763 pReq->u.Out.cFunctions = 0;
764 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
765 return 0;
766 }
767
768 /*
769 * Fill in return data and be gone.
770 * N.B. The first one to change SUPDRVIOC_VERSION shall makes sure that
771 * u32SessionVersion <= u32ReqVersion!
772 */
773 /** @todo Somehow validate the client and negotiate a secure cookie... */
774 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
775 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
776 pReq->u.Out.u32SessionVersion = SUPDRVIOC_VERSION;
777 pReq->u.Out.u32DriverVersion = SUPDRVIOC_VERSION;
778 pReq->u.Out.pSession = pSession;
779 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
780 pReq->Hdr.rc = VINF_SUCCESS;
781 return 0;
782 }
783
784 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
785 {
786 /* validate */
787 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
788 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
789
790 /* execute */
791 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
792 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
793 pReq->Hdr.rc = VINF_SUCCESS;
794 return 0;
795 }
796
797 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_IDT_INSTALL):
798 {
799 /* validate */
800 PSUPIDTINSTALL pReq = (PSUPIDTINSTALL)pReqHdr;
801 REQ_CHECK_SIZES(SUP_IOCTL_IDT_INSTALL);
802
803 /* execute */
804#ifndef VBOX_WITHOUT_IDT_PATCHING
805 pReq->Hdr.rc = supdrvIOCtl_IdtInstall(pDevExt, pSession, pReq);
806#else
807 pReq->u.Out.u8Idt = 3;
808 pReq->Hdr.rc = VERR_NOT_SUPPORTED;
809#endif
810 return 0;
811 }
812
813 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_IDT_REMOVE):
814 {
815 /* validate */
816 PSUPIDTREMOVE pReq = (PSUPIDTREMOVE)pReqHdr;
817 REQ_CHECK_SIZES(SUP_IOCTL_IDT_REMOVE);
818
819 /* execute */
820#ifndef VBOX_WITHOUT_IDT_PATCHING
821 pReq->Hdr.rc = supdrvIOCtl_IdtRemoveAll(pDevExt, pSession);
822#else
823 pReq->Hdr.rc = VERR_NOT_SUPPORTED;
824#endif
825 return 0;
826 }
827
828 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
829 {
830 /* validate */
831 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
832 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
833 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
834 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
835 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
836
837 /* execute */
838 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
839 if (RT_FAILURE(pReq->Hdr.rc))
840 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
841 return 0;
842 }
843
844 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
845 {
846 /* validate */
847 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
848 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
849
850 /* execute */
851 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
852 return 0;
853 }
854
855 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
856 {
857 /* validate */
858 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
859 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
860
861 /* execute */
862 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
863 if (RT_FAILURE(pReq->Hdr.rc))
864 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
865 return 0;
866 }
867
868 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
869 {
870 /* validate */
871 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
872 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
873
874 /* execute */
875 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
876 return 0;
877 }
878
879 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
880 {
881 /* validate */
882 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
883 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
884 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage > 0);
885 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage < _1M*16);
886 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
887 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
888 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, !strpbrk(pReq->u.In.szName, ";:()[]{}/\\|&*%#@!~`\"'"));
889
890 /* execute */
891 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
892 return 0;
893 }
894
895 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
896 {
897 /* validate */
898 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
899 REQ_CHECK_EXPR(Name, pReq->Hdr.cbIn >= sizeof(*pReq));
900 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImage), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
901 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->u.In.cSymbols <= 16384);
902 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
903 || ( pReq->u.In.offSymbols < pReq->u.In.cbImage
904 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImage),
905 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImage=%#lx\n", (long)pReq->u.In.offSymbols,
906 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImage));
907 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
908 || ( pReq->u.In.offStrTab < pReq->u.In.cbImage
909 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImage
910 && pReq->u.In.cbStrTab <= pReq->u.In.cbImage),
911 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImage=%#lx\n", (long)pReq->u.In.offStrTab,
912 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImage));
913
914 if (pReq->u.In.cSymbols)
915 {
916 uint32_t i;
917 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.achImage[pReq->u.In.offSymbols];
918 for (i = 0; i < pReq->u.In.cSymbols; i++)
919 {
920 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImage,
921 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImage));
922 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
923 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImage));
924 REQ_CHECK_EXPR_FMT(memchr(&pReq->u.In.achImage[pReq->u.In.offStrTab + paSyms[i].offName], '\0', pReq->u.In.cbStrTab - paSyms[i].offName),
925 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImage));
926 }
927 }
928
929 /* execute */
930 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
931 return 0;
932 }
933
934 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
935 {
936 /* validate */
937 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
938 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
939
940 /* execute */
941 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
942 return 0;
943 }
944
945 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
946 {
947 /* validate */
948 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
949 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
950 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, memchr(pReq->u.In.szSymbol, '\0', sizeof(pReq->u.In.szSymbol)));
951
952 /* execute */
953 pReq->Hdr.rc = supdrvIOCtl_LdrGetSymbol(pDevExt, pSession, pReq);
954 return 0;
955 }
956
957 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0(0)):
958 {
959 /* validate */
960 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
961 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0(0))
962 {
963 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
964
965 /* execute */
966 if (RT_LIKELY(pDevExt->pfnVMMR0Entry))
967 pReq->Hdr.rc = pDevExt->pfnVMMR0Entry(pReq->u.In.pVMR0, pReq->u.In.uOperation, (void *)pReq->u.In.uArg);
968 else
969 pReq->Hdr.rc = VERR_WRONG_ORDER;
970 }
971 else
972 {
973 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
974 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)));
975 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
976 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
977
978 /* execute */
979 if (RT_LIKELY(pDevExt->pfnVMMR0Entry))
980 pReq->Hdr.rc = pDevExt->pfnVMMR0Entry(pReq->u.In.pVMR0, pReq->u.In.uOperation, pVMMReq);
981 else
982 pReq->Hdr.rc = VERR_WRONG_ORDER;
983 }
984 return 0;
985 }
986
987 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
988 {
989 /* validate */
990 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
991 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
992
993 /* execute */
994 pReq->Hdr.rc = VINF_SUCCESS;
995 pReq->u.Out.enmMode = supdrvIOCtl_GetPagingMode();
996 return 0;
997 }
998
999 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
1000 {
1001 /* validate */
1002 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
1003 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
1004 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1005
1006 /* execute */
1007 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1008 if (RT_FAILURE(pReq->Hdr.rc))
1009 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1010 return 0;
1011 }
1012
1013 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
1014 {
1015 /* validate */
1016 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
1017 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
1018
1019 /* execute */
1020 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1021 return 0;
1022 }
1023
1024 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
1025 {
1026 /* validate */
1027 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
1028 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
1029
1030 /* execute */
1031 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
1032 if (RT_SUCCESS(pReq->Hdr.rc))
1033 pReq->u.Out.pGipR0 = pDevExt->pGip;
1034 return 0;
1035 }
1036
1037 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
1038 {
1039 /* validate */
1040 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
1041 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
1042
1043 /* execute */
1044 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
1045 return 0;
1046 }
1047
1048 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
1049 {
1050 /* validate */
1051 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
1052 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
1053 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
1054 || ( VALID_PTR(pReq->u.In.pVMR0)
1055 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
1056 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
1057 /* execute */
1058#ifndef VBOX_WITHOUT_IDT_PATCHING
1059 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: !VBOX_WITHOUT_IDT_PATCHING\n"));
1060 pReq->Hdr.rc = VERR_NOT_SUPPORTED;
1061#else
1062 pSession->pVM = pReq->u.In.pVMR0;
1063 pReq->Hdr.rc = VINF_SUCCESS;
1064#endif
1065 return 0;
1066 }
1067
1068#ifdef USE_NEW_OS_INTERFACE_FOR_MM
1069 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC):
1070 {
1071 /* validate */
1072 PSUPPAGEALLOC pReq = (PSUPPAGEALLOC)pReqHdr;
1073 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_SIZE_IN);
1074 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC, SUP_IOCTL_PAGE_ALLOC_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1075
1076 /* execute */
1077 pReq->Hdr.rc = SUPR0PageAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1078 if (RT_FAILURE(pReq->Hdr.rc))
1079 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1080 return 0;
1081 }
1082
1083 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
1084 {
1085 /* validate */
1086 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
1087 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
1088
1089 /* execute */
1090 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
1091 return 0;
1092 }
1093#endif /* USE_NEW_OS_INTERFACE_FOR_MM */
1094
1095 default:
1096 dprintf(("Unknown IOCTL %#lx\n", (long)uIOCtl));
1097 break;
1098 }
1099 return SUPDRV_ERR_GENERAL_FAILURE;
1100}
1101
1102
1103/**
1104 * Register a object for reference counting.
1105 * The object is registered with one reference in the specified session.
1106 *
1107 * @returns Unique identifier on success (pointer).
1108 * All future reference must use this identifier.
1109 * @returns NULL on failure.
1110 * @param pfnDestructor The destructore function which will be called when the reference count reaches 0.
1111 * @param pvUser1 The first user argument.
1112 * @param pvUser2 The second user argument.
1113 */
1114SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
1115{
1116 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1117 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1118 PSUPDRVOBJ pObj;
1119 PSUPDRVUSAGE pUsage;
1120
1121 /*
1122 * Validate the input.
1123 */
1124 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
1125 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
1126 AssertPtrReturn(pfnDestructor, NULL);
1127
1128 /*
1129 * Allocate and initialize the object.
1130 */
1131 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
1132 if (!pObj)
1133 return NULL;
1134 pObj->u32Magic = SUPDRVOBJ_MAGIC;
1135 pObj->enmType = enmType;
1136 pObj->pNext = NULL;
1137 pObj->cUsage = 1;
1138 pObj->pfnDestructor = pfnDestructor;
1139 pObj->pvUser1 = pvUser1;
1140 pObj->pvUser2 = pvUser2;
1141 pObj->CreatorUid = pSession->Uid;
1142 pObj->CreatorGid = pSession->Gid;
1143 pObj->CreatorProcess= pSession->Process;
1144 supdrvOSObjInitCreator(pObj, pSession);
1145
1146 /*
1147 * Allocate the usage record.
1148 * (We keep freed usage records around to simplity SUPR0ObjAddRef().)
1149 */
1150 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1151
1152 pUsage = pDevExt->pUsageFree;
1153 if (pUsage)
1154 pDevExt->pUsageFree = pUsage->pNext;
1155 else
1156 {
1157 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1158 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
1159 if (!pUsage)
1160 {
1161 RTMemFree(pObj);
1162 return NULL;
1163 }
1164 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1165 }
1166
1167 /*
1168 * Insert the object and create the session usage record.
1169 */
1170 /* The object. */
1171 pObj->pNext = pDevExt->pObjs;
1172 pDevExt->pObjs = pObj;
1173
1174 /* The session record. */
1175 pUsage->cUsage = 1;
1176 pUsage->pObj = pObj;
1177 pUsage->pNext = pSession->pUsage;
1178 dprintf(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));
1179 pSession->pUsage = pUsage;
1180
1181 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1182
1183 dprintf(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser=%p)\n", pObj, pvUser1, pvUser2));
1184 return pObj;
1185}
1186
1187
1188/**
1189 * Increment the reference counter for the object associating the reference
1190 * with the specified session.
1191 *
1192 * @returns IPRT status code.
1193 * @param pvObj The identifier returned by SUPR0ObjRegister().
1194 * @param pSession The session which is referencing the object.
1195 */
1196SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
1197{
1198 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1199 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1200 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1201 PSUPDRVUSAGE pUsagePre;
1202 PSUPDRVUSAGE pUsage;
1203
1204 /*
1205 * Validate the input.
1206 */
1207 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1208 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
1209 ("Invalid pvObj=%p magic=%#x (exepcted %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
1210 VERR_INVALID_PARAMETER);
1211
1212 /*
1213 * Preallocate the usage record.
1214 */
1215 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1216
1217 pUsagePre = pDevExt->pUsageFree;
1218 if (pUsagePre)
1219 pDevExt->pUsageFree = pUsagePre->pNext;
1220 else
1221 {
1222 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1223 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
1224 if (!pUsagePre)
1225 return VERR_NO_MEMORY;
1226 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1227 }
1228
1229 /*
1230 * Reference the object.
1231 */
1232 pObj->cUsage++;
1233
1234 /*
1235 * Look for the session record.
1236 */
1237 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
1238 {
1239 dprintf(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));
1240 if (pUsage->pObj == pObj)
1241 break;
1242 }
1243 if (pUsage)
1244 pUsage->cUsage++;
1245 else
1246 {
1247 /* create a new session record. */
1248 pUsagePre->cUsage = 1;
1249 pUsagePre->pObj = pObj;
1250 pUsagePre->pNext = pSession->pUsage;
1251 pSession->pUsage = pUsagePre;
1252 dprintf(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));
1253
1254 pUsagePre = NULL;
1255 }
1256
1257 /*
1258 * Put any unused usage record into the free list..
1259 */
1260 if (pUsagePre)
1261 {
1262 pUsagePre->pNext = pDevExt->pUsageFree;
1263 pDevExt->pUsageFree = pUsagePre;
1264 }
1265
1266 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1267
1268 return VINF_SUCCESS;
1269}
1270
1271
1272/**
1273 * Decrement / destroy a reference counter record for an object.
1274 *
1275 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
1276 *
1277 * @returns IPRT status code.
1278 * @param pvObj The identifier returned by SUPR0ObjRegister().
1279 * @param pSession The session which is referencing the object.
1280 */
1281SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
1282{
1283 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1284 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1285 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1286 bool fDestroy = false;
1287 PSUPDRVUSAGE pUsage;
1288 PSUPDRVUSAGE pUsagePrev;
1289
1290 /*
1291 * Validate the input.
1292 */
1293 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1294 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
1295 ("Invalid pvObj=%p magic=%#x (exepcted %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
1296 VERR_INVALID_PARAMETER);
1297
1298 /*
1299 * Acquire the spinlock and look for the usage record.
1300 */
1301 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1302
1303 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
1304 pUsage;
1305 pUsagePrev = pUsage, pUsage = pUsage->pNext)
1306 {
1307 dprintf(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));
1308 if (pUsage->pObj == pObj)
1309 {
1310 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
1311 if (pUsage->cUsage > 1)
1312 {
1313 pObj->cUsage--;
1314 pUsage->cUsage--;
1315 }
1316 else
1317 {
1318 /*
1319 * Free the session record.
1320 */
1321 if (pUsagePrev)
1322 pUsagePrev->pNext = pUsage->pNext;
1323 else
1324 pSession->pUsage = pUsage->pNext;
1325 pUsage->pNext = pDevExt->pUsageFree;
1326 pDevExt->pUsageFree = pUsage;
1327
1328 /* What about the object? */
1329 if (pObj->cUsage > 1)
1330 pObj->cUsage--;
1331 else
1332 {
1333 /*
1334 * Object is to be destroyed, unlink it.
1335 */
1336 fDestroy = true;
1337 if (pDevExt->pObjs == pObj)
1338 pDevExt->pObjs = pObj->pNext;
1339 else
1340 {
1341 PSUPDRVOBJ pObjPrev;
1342 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
1343 if (pObjPrev->pNext == pObj)
1344 {
1345 pObjPrev->pNext = pObj->pNext;
1346 break;
1347 }
1348 Assert(pObjPrev);
1349 }
1350 }
1351 }
1352 break;
1353 }
1354 }
1355
1356 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1357
1358 /*
1359 * Call the destructor and free the object if required.
1360 */
1361 if (fDestroy)
1362 {
1363 pObj->u32Magic++;
1364 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
1365 RTMemFree(pObj);
1366 }
1367
1368 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
1369 return pUsage ? VINF_SUCCESS : VERR_INVALID_PARAMETER;
1370}
1371
1372/**
1373 * Verifies that the current process can access the specified object.
1374 *
1375 * @returns The following IPRT status code:
1376 * @retval VINF_SUCCESS if access was granted.
1377 * @retval VERR_PERMISSION_DENIED if denied access.
1378 * @retval VERR_INVALID_PARAMETER if invalid parameter.
1379 *
1380 * @param pvObj The identifier returned by SUPR0ObjRegister().
1381 * @param pSession The session which wishes to access the object.
1382 * @param pszObjName Object string name. This is optional and depends on the object type.
1383 *
1384 * @remark The caller is responsible for making sure the object isn't removed while
1385 * we're inside this function. If uncertain about this, just call AddRef before calling us.
1386 */
1387SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
1388{
1389 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1390 int rc;
1391
1392 /*
1393 * Validate the input.
1394 */
1395 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1396 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
1397 ("Invalid pvObj=%p magic=%#x (exepcted %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
1398 VERR_INVALID_PARAMETER);
1399
1400 /*
1401 * Check access. (returns true if a decision has been made.)
1402 */
1403 rc = VERR_INTERNAL_ERROR;
1404 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
1405 return rc;
1406
1407 /*
1408 * Default policy is to allow the user to access his own
1409 * stuff but nothing else.
1410 */
1411 if (pObj->CreatorUid == pSession->Uid)
1412 return VINF_SUCCESS;
1413 return VERR_PERMISSION_DENIED;
1414}
1415
1416
1417/**
1418 * Lock pages.
1419 *
1420 * @returns IPRT status code.
1421 * @param pSession Session to which the locked memory should be associated.
1422 * @param pvR3 Start of the memory range to lock.
1423 * This must be page aligned.
1424 * @param cb Size of the memory range to lock.
1425 * This must be page aligned.
1426 */
1427SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
1428{
1429 int rc;
1430 SUPDRVMEMREF Mem = {0};
1431 const size_t cb = (size_t)cPages << PAGE_SHIFT;
1432 dprintf(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
1433
1434 /*
1435 * Verify input.
1436 */
1437 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1438 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
1439 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
1440 || !pvR3)
1441 {
1442 dprintf(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
1443 return VERR_INVALID_PARAMETER;
1444 }
1445
1446#ifdef USE_NEW_OS_INTERFACE_FOR_MM
1447# ifdef RT_OS_WINDOWS /* A temporary hack for windows, will be removed once all ring-3 code has been cleaned up. */
1448 /* First check if we allocated it using SUPPageAlloc; if so then we don't need to lock it again */
1449 rc = supdrvPageGetPhys(pSession, pvR3, cPages, paPages);
1450 if (RT_SUCCESS(rc))
1451 return rc;
1452# endif
1453
1454 /*
1455 * Let IPRT do the job.
1456 */
1457 Mem.eType = MEMREF_TYPE_LOCKED;
1458 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTR0ProcHandleSelf());
1459 if (RT_SUCCESS(rc))
1460 {
1461 uint32_t iPage = cPages;
1462 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
1463 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
1464
1465 while (iPage-- > 0)
1466 {
1467 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
1468 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
1469 {
1470 AssertMsgFailed(("iPage=%d\n", iPage));
1471 rc = VERR_INTERNAL_ERROR;
1472 break;
1473 }
1474 }
1475 if (RT_SUCCESS(rc))
1476 rc = supdrvMemAdd(&Mem, pSession);
1477 if (RT_FAILURE(rc))
1478 {
1479 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
1480 AssertRC(rc2);
1481 }
1482 }
1483
1484#else /* !USE_NEW_OS_INTERFACE_FOR_MM */
1485
1486 /*
1487 * Let the OS specific code have a go.
1488 */
1489 Mem.pvR0 = NULL;
1490 Mem.pvR3 = pvR3;
1491 Mem.eType = MEMREF_TYPE_LOCKED;
1492 Mem.cb = cb;
1493 rc = supdrvOSLockMemOne(&Mem, paPages);
1494 if (rc)
1495 return rc;
1496
1497 /*
1498 * Everything when fine, add the memory reference to the session.
1499 */
1500 rc = supdrvMemAdd(&Mem, pSession);
1501 if (rc)
1502 supdrvOSUnlockMemOne(&Mem);
1503#endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
1504 return rc;
1505}
1506
1507
1508/**
1509 * Unlocks the memory pointed to by pv.
1510 *
1511 * @returns IPRT status code.
1512 * @param pSession Session to which the memory was locked.
1513 * @param pvR3 Memory to unlock.
1514 */
1515SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
1516{
1517 dprintf(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
1518 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1519#ifdef RT_OS_WINDOWS
1520 /*
1521 * Temporary hack for windows - SUPR0PageFree will unlock SUPR0PageAlloc
1522 * allocations; ignore this call.
1523 */
1524 if (supdrvPageWasLockedByPageAlloc(pSession, pvR3))
1525 {
1526 dprintf(("Page will be unlocked in SUPR0PageFree -> ignore\n"));
1527 return VINF_SUCCESS;
1528 }
1529#endif
1530 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
1531}
1532
1533
1534/**
1535 * Allocates a chunk of page aligned memory with contiguous and fixed physical
1536 * backing.
1537 *
1538 * @returns IPRT status code.
1539 * @param pSession Session data.
1540 * @param cb Number of bytes to allocate.
1541 * @param ppvR0 Where to put the address of Ring-0 mapping the allocated memory.
1542 * @param ppvR3 Where to put the address of Ring-3 mapping the allocated memory.
1543 * @param pHCPhys Where to put the physical address of allocated memory.
1544 */
1545SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
1546{
1547 int rc;
1548 SUPDRVMEMREF Mem = {0};
1549 dprintf(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
1550
1551 /*
1552 * Validate input.
1553 */
1554 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1555 if (!ppvR3 || !ppvR0 || !pHCPhys)
1556 {
1557 dprintf(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
1558 pSession, ppvR0, ppvR3, pHCPhys));
1559 return VERR_INVALID_PARAMETER;
1560
1561 }
1562 if (cPages < 1 || cPages >= 256)
1563 {
1564 dprintf(("Illegal request cPages=%d, must be greater than 0 and smaller than 256\n", cPages));
1565 return VERR_INVALID_PARAMETER;
1566 }
1567
1568#ifdef USE_NEW_OS_INTERFACE_FOR_MM
1569 /*
1570 * Let IPRT do the job.
1571 */
1572 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
1573 if (RT_SUCCESS(rc))
1574 {
1575 int rc2;
1576 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
1577 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
1578 if (RT_SUCCESS(rc))
1579 {
1580 Mem.eType = MEMREF_TYPE_CONT;
1581 rc = supdrvMemAdd(&Mem, pSession);
1582 if (!rc)
1583 {
1584 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
1585 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
1586 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
1587 return 0;
1588 }
1589
1590 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
1591 AssertRC(rc2);
1592 }
1593 rc2 = RTR0MemObjFree(Mem.MemObj, false);
1594 AssertRC(rc2);
1595 }
1596
1597#else /* !USE_NEW_OS_INTERFACE_FOR_MM */
1598
1599 /*
1600 * Let the OS specific code have a go.
1601 */
1602 Mem.pvR0 = NULL;
1603 Mem.pvR3 = NIL_RTR3PTR;
1604 Mem.eType = MEMREF_TYPE_CONT;
1605 Mem.cb = cPages << PAGE_SHIFT;
1606 rc = supdrvOSContAllocOne(&Mem, ppvR0, ppvR3, pHCPhys);
1607 if (rc)
1608 return rc;
1609 AssertMsg(!((uintptr_t)*ppvR3 & (PAGE_SIZE - 1)) || !(*pHCPhys & (PAGE_SIZE - 1)),
1610 ("Memory is not page aligned! *ppvR0=%p *ppvR3=%p phys=%VHp\n", ppvR0 ? *ppvR0 : NULL, *ppvR3, *pHCPhys));
1611
1612 /*
1613 * Everything when fine, add the memory reference to the session.
1614 */
1615 rc = supdrvMemAdd(&Mem, pSession);
1616 if (rc)
1617 supdrvOSContFreeOne(&Mem);
1618#endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
1619
1620 return rc;
1621}
1622
1623
1624/**
1625 * Frees memory allocated using SUPR0ContAlloc().
1626 *
1627 * @returns IPRT status code.
1628 * @param pSession The session to which the memory was allocated.
1629 * @param uPtr Pointer to the memory (ring-3 or ring-0).
1630 */
1631SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
1632{
1633 dprintf(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
1634 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1635 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
1636}
1637
1638
1639/**
1640 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
1641 *
1642 * @returns IPRT status code.
1643 * @param pSession Session data.
1644 * @param cPages Number of pages to allocate.
1645 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
1646 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
1647 * @param paPages Where to put the physical addresses of allocated memory.
1648 */
1649SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
1650{
1651 unsigned iPage;
1652 int rc;
1653 SUPDRVMEMREF Mem = {0};
1654 dprintf(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
1655
1656 /*
1657 * Validate input.
1658 */
1659 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1660 if (!ppvR3 || !ppvR0 || !paPages)
1661 {
1662 dprintf(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
1663 pSession, ppvR3, ppvR0, paPages));
1664 return VERR_INVALID_PARAMETER;
1665
1666 }
1667 if (cPages < 1 || cPages > 256)
1668 {
1669 dprintf(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
1670 return VERR_INVALID_PARAMETER;
1671 }
1672
1673#ifdef USE_NEW_OS_INTERFACE_FOR_MM
1674 /*
1675 * Let IPRT do the work.
1676 */
1677 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
1678 if (RT_SUCCESS(rc))
1679 {
1680 int rc2;
1681 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
1682 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
1683 if (RT_SUCCESS(rc))
1684 {
1685 Mem.eType = MEMREF_TYPE_LOW;
1686 rc = supdrvMemAdd(&Mem, pSession);
1687 if (!rc)
1688 {
1689 for (iPage = 0; iPage < cPages; iPage++)
1690 {
1691 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
1692 AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%VHp\n", paPages[iPage]));
1693 }
1694 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
1695 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
1696 return 0;
1697 }
1698
1699 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
1700 AssertRC(rc2);
1701 }
1702
1703 rc2 = RTR0MemObjFree(Mem.MemObj, false);
1704 AssertRC(rc2);
1705 }
1706
1707#else /* !USE_NEW_OS_INTERFACE_FOR_MM */
1708
1709 /*
1710 * Let the OS specific code have a go.
1711 */
1712 Mem.pvR0 = NULL;
1713 Mem.pvR3 = NIL_RTR3PTR;
1714 Mem.eType = MEMREF_TYPE_LOW;
1715 Mem.cb = cPages << PAGE_SHIFT;
1716 rc = supdrvOSLowAllocOne(&Mem, ppvR0, ppvR3, paPages);
1717 if (rc)
1718 return rc;
1719 AssertMsg(!((uintptr_t)*ppvR3 & (PAGE_SIZE - 1)), ("Memory is not page aligned! virt=%p\n", *ppvR3));
1720 AssertMsg(!((uintptr_t)*ppvR0 & (PAGE_SIZE - 1)), ("Memory is not page aligned! virt=%p\n", *ppvR0));
1721 for (iPage = 0; iPage < cPages; iPage++)
1722 AssertMsg(!(paPages[iPage].Phys & (PAGE_SIZE - 1)), ("iPage=%d Phys=%VHp\n", paPages[iPage].Phys));
1723
1724 /*
1725 * Everything when fine, add the memory reference to the session.
1726 */
1727 rc = supdrvMemAdd(&Mem, pSession);
1728 if (rc)
1729 supdrvOSLowFreeOne(&Mem);
1730#endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
1731 return rc;
1732}
1733
1734
1735/**
1736 * Frees memory allocated using SUPR0LowAlloc().
1737 *
1738 * @returns IPRT status code.
1739 * @param pSession The session to which the memory was allocated.
1740 * @param uPtr Pointer to the memory (ring-3 or ring-0).
1741 */
1742SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
1743{
1744 dprintf(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
1745 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1746 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
1747}
1748
1749
1750
1751/**
1752 * Allocates a chunk of memory with both R0 and R3 mappings.
1753 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
1754 *
1755 * @returns IPRT status code.
1756 * @param pSession The session to associated the allocation with.
1757 * @param cb Number of bytes to allocate.
1758 * @param ppvR0 Where to store the address of the Ring-0 mapping.
1759 * @param ppvR3 Where to store the address of the Ring-3 mapping.
1760 */
1761SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
1762{
1763 int rc;
1764 SUPDRVMEMREF Mem = {0};
1765 dprintf(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
1766
1767 /*
1768 * Validate input.
1769 */
1770 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1771 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
1772 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
1773 if (cb < 1 || cb >= _4M)
1774 {
1775 dprintf(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
1776 return VERR_INVALID_PARAMETER;
1777 }
1778
1779#ifdef USE_NEW_OS_INTERFACE_FOR_MM
1780 /*
1781 * Let IPRT do the work.
1782 */
1783 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
1784 if (RT_SUCCESS(rc))
1785 {
1786 int rc2;
1787 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
1788 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
1789 if (RT_SUCCESS(rc))
1790 {
1791 Mem.eType = MEMREF_TYPE_MEM;
1792 rc = supdrvMemAdd(&Mem, pSession);
1793 if (!rc)
1794 {
1795 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
1796 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
1797 return VINF_SUCCESS;
1798 }
1799 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
1800 AssertRC(rc2);
1801 }
1802
1803 rc2 = RTR0MemObjFree(Mem.MemObj, false);
1804 AssertRC(rc2);
1805 }
1806
1807#else /* !USE_NEW_OS_INTERFACE_FOR_MM */
1808
1809 /*
1810 * Let the OS specific code have a go.
1811 */
1812 Mem.pvR0 = NULL;
1813 Mem.pvR3 = NIL_RTR3PTR;
1814 Mem.eType = MEMREF_TYPE_MEM;
1815 Mem.cb = cb;
1816 rc = supdrvOSMemAllocOne(&Mem, ppvR0, ppvR3);
1817 if (rc)
1818 return rc;
1819 AssertMsg(!((uintptr_t)*ppvR0 & (PAGE_SIZE - 1)), ("Memory is not page aligned! pvR0=%p\n", *ppvR0));
1820 AssertMsg(!((uintptr_t)*ppvR3 & (PAGE_SIZE - 1)), ("Memory is not page aligned! pvR3=%p\n", *ppvR3));
1821
1822 /*
1823 * Everything when fine, add the memory reference to the session.
1824 */
1825 rc = supdrvMemAdd(&Mem, pSession);
1826 if (rc)
1827 supdrvOSMemFreeOne(&Mem);
1828#endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
1829 return rc;
1830}
1831
1832
1833/**
1834 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
1835 *
1836 * @returns IPRT status code.
1837 * @param pSession The session to which the memory was allocated.
1838 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
1839 * @param paPages Where to store the physical addresses.
1840 */
1841SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
1842{
1843 PSUPDRVBUNDLE pBundle;
1844 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1845 dprintf(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
1846
1847 /*
1848 * Validate input.
1849 */
1850 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1851 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
1852 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
1853
1854 /*
1855 * Search for the address.
1856 */
1857 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
1858 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
1859 {
1860 if (pBundle->cUsed > 0)
1861 {
1862 unsigned i;
1863 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
1864 {
1865#ifdef USE_NEW_OS_INTERFACE_FOR_MM
1866 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
1867 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
1868 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
1869 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
1870 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
1871 )
1872 )
1873 {
1874 const unsigned cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
1875 unsigned iPage;
1876 for (iPage = 0; iPage < cPages; iPage++)
1877 {
1878 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
1879 paPages[iPage].uReserved = 0;
1880 }
1881 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
1882 return VINF_SUCCESS;
1883 }
1884#else /* !USE_NEW_OS_INTERFACE_FOR_MM */
1885 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
1886 && ( (RTHCUINTPTR)pBundle->aMem[i].pvR0 == uPtr
1887 || (RTHCUINTPTR)pBundle->aMem[i].pvR3 == uPtr))
1888 {
1889 supdrvOSMemGetPages(&pBundle->aMem[i], paPages);
1890 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
1891 return 0;
1892 }
1893#endif
1894 }
1895 }
1896 }
1897 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
1898 dprintf(("Failed to find %p!!!\n", (void *)uPtr));
1899 return VERR_INVALID_PARAMETER;
1900}
1901
1902
1903/**
1904 * Free memory allocated by SUPR0MemAlloc().
1905 *
1906 * @returns IPRT status code.
1907 * @param pSession The session owning the allocation.
1908 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
1909 */
1910SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
1911{
1912 dprintf(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
1913 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1914 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
1915}
1916
1917
1918#ifdef USE_NEW_OS_INTERFACE_FOR_MM
1919/**
1920 * Allocates a chunk of memory with only a R3 mappings.
1921 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
1922 *
1923 * @returns IPRT status code.
1924 * @param pSession The session to associated the allocation with.
1925 * @param cPages The number of pages to allocate.
1926 * @param ppvR3 Where to store the address of the Ring-3 mapping.
1927 * @param paPages Where to store the addresses of the pages. Optional.
1928 */
1929SUPR0DECL(int) SUPR0PageAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR3PTR ppvR3, PRTHCPHYS paPages)
1930{
1931 int rc;
1932 SUPDRVMEMREF Mem = {0};
1933 dprintf(("SUPR0PageAlloc: pSession=%p cb=%d ppvR3=%p\n", pSession, cPages, ppvR3));
1934
1935 /*
1936 * Validate input.
1937 */
1938 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1939 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
1940 if (cPages < 1 || cPages >= 4096)
1941 {
1942 dprintf(("SUPR0PageAlloc: Illegal request cb=%u; must be greater than 0 and smaller than 16MB.\n", cPages));
1943 return VERR_INVALID_PARAMETER;
1944 }
1945
1946 /*
1947 * Let IPRT do the work.
1948 */
1949 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
1950 if (RT_SUCCESS(rc))
1951 {
1952 int rc2;
1953 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
1954 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
1955 if (RT_SUCCESS(rc))
1956 {
1957 Mem.eType = MEMREF_TYPE_LOCKED_SUP;
1958 rc = supdrvMemAdd(&Mem, pSession);
1959 if (!rc)
1960 {
1961 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
1962 if (paPages)
1963 {
1964 uint32_t iPage = cPages;
1965 while (iPage-- > 0)
1966 {
1967 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
1968 Assert(paPages[iPage] != NIL_RTHCPHYS);
1969 }
1970 }
1971 return VINF_SUCCESS;
1972 }
1973 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
1974 AssertRC(rc2);
1975 }
1976
1977 rc2 = RTR0MemObjFree(Mem.MemObj, false);
1978 AssertRC(rc2);
1979 }
1980 return rc;
1981}
1982
1983
1984#ifdef RT_OS_WINDOWS
1985/**
1986 * Check if the pages were locked by SUPR0PageAlloc
1987 *
1988 * This function will be removed along with the lock/unlock hacks when
1989 * we've cleaned up the ring-3 code properly.
1990 *
1991 * @returns boolean
1992 * @param pSession The session to which the memory was allocated.
1993 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc().
1994 */
1995static bool supdrvPageWasLockedByPageAlloc(PSUPDRVSESSION pSession, RTR3PTR pvR3)
1996{
1997 PSUPDRVBUNDLE pBundle;
1998 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1999 dprintf(("SUPR0PageIsLockedByPageAlloc: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2000
2001 /*
2002 * Search for the address.
2003 */
2004 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2005 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2006 {
2007 if (pBundle->cUsed > 0)
2008 {
2009 unsigned i;
2010 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2011 {
2012 if ( pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED_SUP
2013 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2014 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2015 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
2016 {
2017 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2018 return true;
2019 }
2020 }
2021 }
2022 }
2023 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2024 return false;
2025}
2026
2027
2028/**
2029 * Get the physical addresses of memory allocated using SUPR0PageAlloc().
2030 *
2031 * This function will be removed along with the lock/unlock hacks when
2032 * we've cleaned up the ring-3 code properly.
2033 *
2034 * @returns IPRT status code.
2035 * @param pSession The session to which the memory was allocated.
2036 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc().
2037 * @param cPages Number of pages in paPages
2038 * @param paPages Where to store the physical addresses.
2039 */
2040static int supdrvPageGetPhys(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
2041{
2042 PSUPDRVBUNDLE pBundle;
2043 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2044 dprintf(("supdrvPageGetPhys: pSession=%p pvR3=%p cPages=%#lx paPages=%p\n", pSession, (void *)pvR3, (long)cPages, paPages));
2045
2046 /*
2047 * Search for the address.
2048 */
2049 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2050 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2051 {
2052 if (pBundle->cUsed > 0)
2053 {
2054 unsigned i;
2055 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2056 {
2057 if ( pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED_SUP
2058 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2059 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2060 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
2061 {
2062 uint32_t iPage = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
2063 cPages = RT_MIN(iPage, cPages);
2064 for (iPage = 0; iPage < cPages; iPage++)
2065 paPages[iPage] = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
2066 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2067 return VINF_SUCCESS;
2068 }
2069 }
2070 }
2071 }
2072 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2073 return VERR_INVALID_PARAMETER;
2074}
2075#endif /* RT_OS_WINDOWS */
2076
2077/**
2078 * Free memory allocated by SUPR0PageAlloc().
2079 *
2080 * @returns IPRT status code.
2081 * @param pSession The session owning the allocation.
2082 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc().
2083 */
2084SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2085{
2086 dprintf(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2087 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2088 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED_SUP);
2089}
2090#endif /* USE_NEW_OS_INTERFACE_FOR_MM */
2091
2092
2093/**
2094 * Maps the GIP into userspace and/or get the physical address of the GIP.
2095 *
2096 * @returns IPRT status code.
2097 * @param pSession Session to which the GIP mapping should belong.
2098 * @param ppGipR3 Where to store the address of the ring-3 mapping. (optional)
2099 * @param pHCPhysGip Where to store the physical address. (optional)
2100 *
2101 * @remark There is no reference counting on the mapping, so one call to this function
2102 * count globally as one reference. One call to SUPR0GipUnmap() is will unmap GIP
2103 * and remove the session as a GIP user.
2104 */
2105SUPR0DECL(int) SUPR0GipMap(PSUPDRVSESSION pSession, PRTR3PTR ppGipR3, PRTHCPHYS pHCPhysGip)
2106{
2107 int rc = 0;
2108 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2109 RTR3PTR pGip = NIL_RTR3PTR;
2110 RTHCPHYS HCPhys = NIL_RTHCPHYS;
2111 dprintf(("SUPR0GipMap: pSession=%p ppGipR3=%p pHCPhysGip=%p\n", pSession, ppGipR3, pHCPhysGip));
2112
2113 /*
2114 * Validate
2115 */
2116 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2117 AssertPtrNullReturn(ppGipR3, VERR_INVALID_POINTER);
2118 AssertPtrNullReturn(pHCPhysGip, VERR_INVALID_POINTER);
2119
2120 RTSemFastMutexRequest(pDevExt->mtxGip);
2121 if (pDevExt->pGip)
2122 {
2123 /*
2124 * Map it?
2125 */
2126 if (ppGipR3)
2127 {
2128#ifdef USE_NEW_OS_INTERFACE_FOR_GIP
2129 if (pSession->GipMapObjR3 == NIL_RTR0MEMOBJ)
2130 rc = RTR0MemObjMapUser(&pSession->GipMapObjR3, pDevExt->GipMemObj, (RTR3PTR)-1, 0,
2131 RTMEM_PROT_READ, RTR0ProcHandleSelf());
2132 if (RT_SUCCESS(rc))
2133 {
2134 pGip = RTR0MemObjAddressR3(pSession->GipMapObjR3);
2135 rc = VINF_SUCCESS; /** @todo remove this and replace the !rc below with RT_SUCCESS(rc). */
2136 }
2137#else /* !USE_NEW_OS_INTERFACE_FOR_GIP */
2138 if (!pSession->pGip)
2139 rc = supdrvOSGipMap(pSession->pDevExt, &pSession->pGip);
2140 if (!rc)
2141 pGip = (RTR3PTR)pSession->pGip;
2142#endif /* !USE_NEW_OS_INTERFACE_FOR_GIP */
2143 }
2144
2145 /*
2146 * Get physical address.
2147 */
2148 if (pHCPhysGip && !rc)
2149 HCPhys = pDevExt->HCPhysGip;
2150
2151 /*
2152 * Reference globally.
2153 */
2154 if (!pSession->fGipReferenced && !rc)
2155 {
2156 pSession->fGipReferenced = 1;
2157 pDevExt->cGipUsers++;
2158 if (pDevExt->cGipUsers == 1)
2159 {
2160 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
2161 unsigned i;
2162
2163 dprintf(("SUPR0GipMap: Resumes GIP updating\n"));
2164
2165 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
2166 ASMAtomicXchgU32(&pGip->aCPUs[i].u32TransactionId, pGip->aCPUs[i].u32TransactionId & ~(GIP_UPDATEHZ_RECALC_FREQ * 2 - 1));
2167 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, 0);
2168
2169#ifdef USE_NEW_OS_INTERFACE_FOR_GIP
2170 rc = RTTimerStart(pDevExt->pGipTimer, 0);
2171 AssertRC(rc); rc = VINF_SUCCESS;
2172#else
2173 supdrvOSGipResume(pDevExt);
2174#endif
2175 }
2176 }
2177 }
2178 else
2179 {
2180 rc = SUPDRV_ERR_GENERAL_FAILURE;
2181 dprintf(("SUPR0GipMap: GIP is not available!\n"));
2182 }
2183 RTSemFastMutexRelease(pDevExt->mtxGip);
2184
2185 /*
2186 * Write returns.
2187 */
2188 if (pHCPhysGip)
2189 *pHCPhysGip = HCPhys;
2190 if (ppGipR3)
2191 *ppGipR3 = pGip;
2192
2193#ifdef DEBUG_DARWIN_GIP
2194 OSDBGPRINT(("SUPR0GipMap: returns %d *pHCPhysGip=%lx *ppGip=%p GipMapObjR3\n", rc, (unsigned long)HCPhys, pGip, pSession->GipMapObjR3));
2195#else
2196 dprintf(("SUPR0GipMap: returns %d *pHCPhysGip=%lx *ppGipR3=%p\n", rc, (unsigned long)HCPhys, (void *)(uintptr_t)pGip));
2197#endif
2198 return rc;
2199}
2200
2201
2202/**
2203 * Unmaps any user mapping of the GIP and terminates all GIP access
2204 * from this session.
2205 *
2206 * @returns IPRT status code.
2207 * @param pSession Session to which the GIP mapping should belong.
2208 */
2209SUPR0DECL(int) SUPR0GipUnmap(PSUPDRVSESSION pSession)
2210{
2211 int rc = VINF_SUCCESS;
2212 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2213#ifdef DEBUG_DARWIN_GIP
2214 OSDBGPRINT(("SUPR0GipUnmap: pSession=%p pGip=%p GipMapObjR3=%p\n",
2215 pSession,
2216 pSession->GipMapObjR3 != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pSession->GipMapObjR3) : NULL,
2217 pSession->GipMapObjR3));
2218#else
2219 dprintf(("SUPR0GipUnmap: pSession=%p\n", pSession));
2220#endif
2221 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2222
2223 RTSemFastMutexRequest(pDevExt->mtxGip);
2224
2225 /*
2226 * Unmap anything?
2227 */
2228#ifdef USE_NEW_OS_INTERFACE_FOR_GIP
2229 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
2230 {
2231 rc = RTR0MemObjFree(pSession->GipMapObjR3, false);
2232 AssertRC(rc);
2233 if (RT_SUCCESS(rc))
2234 pSession->GipMapObjR3 = NIL_RTR0MEMOBJ;
2235 }
2236#else
2237 if (pSession->pGip)
2238 {
2239 rc = supdrvOSGipUnmap(pDevExt, pSession->pGip);
2240 if (!rc)
2241 pSession->pGip = NULL;
2242 }
2243#endif
2244
2245 /*
2246 * Dereference global GIP.
2247 */
2248 if (pSession->fGipReferenced && !rc)
2249 {
2250 pSession->fGipReferenced = 0;
2251 if ( pDevExt->cGipUsers > 0
2252 && !--pDevExt->cGipUsers)
2253 {
2254 dprintf(("SUPR0GipUnmap: Suspends GIP updating\n"));
2255#ifdef USE_NEW_OS_INTERFACE_FOR_GIP
2256 rc = RTTimerStop(pDevExt->pGipTimer); AssertRC(rc); rc = 0;
2257#else
2258 supdrvOSGipSuspend(pDevExt);
2259#endif
2260 }
2261 }
2262
2263 RTSemFastMutexRelease(pDevExt->mtxGip);
2264
2265 return rc;
2266}
2267
2268
2269/**
2270 * Adds a memory object to the session.
2271 *
2272 * @returns IPRT status code.
2273 * @param pMem Memory tracking structure containing the
2274 * information to track.
2275 * @param pSession The session.
2276 */
2277static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
2278{
2279 PSUPDRVBUNDLE pBundle;
2280 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2281
2282 /*
2283 * Find free entry and record the allocation.
2284 */
2285 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2286 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2287 {
2288 if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
2289 {
2290 unsigned i;
2291 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2292 {
2293#ifdef USE_NEW_OS_INTERFACE_FOR_MM
2294 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
2295#else /* !USE_NEW_OS_INTERFACE_FOR_MM */
2296 if ( !pBundle->aMem[i].pvR0
2297 && !pBundle->aMem[i].pvR3)
2298#endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
2299 {
2300 pBundle->cUsed++;
2301 pBundle->aMem[i] = *pMem;
2302 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2303 return VINF_SUCCESS;
2304 }
2305 }
2306 AssertFailed(); /* !!this can't be happening!!! */
2307 }
2308 }
2309 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2310
2311 /*
2312 * Need to allocate a new bundle.
2313 * Insert into the last entry in the bundle.
2314 */
2315 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
2316 if (!pBundle)
2317 return VERR_NO_MEMORY;
2318
2319 /* take last entry. */
2320 pBundle->cUsed++;
2321 pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;
2322
2323 /* insert into list. */
2324 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2325 pBundle->pNext = pSession->Bundle.pNext;
2326 pSession->Bundle.pNext = pBundle;
2327 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2328
2329 return VINF_SUCCESS;
2330}
2331
2332
2333/**
2334 * Releases a memory object referenced by pointer and type.
2335 *
2336 * @returns IPRT status code.
2337 * @param pSession Session data.
2338 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
2339 * @param eType Memory type.
2340 */
2341static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
2342{
2343 PSUPDRVBUNDLE pBundle;
2344 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2345
2346 /*
2347 * Validate input.
2348 */
2349 if (!uPtr)
2350 {
2351 dprintf(("Illegal address %p\n", (void *)uPtr));
2352 return VERR_INVALID_PARAMETER;
2353 }
2354
2355 /*
2356 * Search for the address.
2357 */
2358 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2359 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2360 {
2361 if (pBundle->cUsed > 0)
2362 {
2363 unsigned i;
2364 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2365 {
2366#ifdef USE_NEW_OS_INTERFACE_FOR_MM
2367 if ( pBundle->aMem[i].eType == eType
2368 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2369 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
2370 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2371 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
2372 )
2373 {
2374 /* Make a copy of it and release it outside the spinlock. */
2375 SUPDRVMEMREF Mem = pBundle->aMem[i];
2376 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
2377 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
2378 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
2379 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2380
2381 if (Mem.MapObjR3)
2382 {
2383 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
2384 AssertRC(rc); /** @todo figure out how to handle this. */
2385 }
2386 if (Mem.MemObj)
2387 {
2388 int rc = RTR0MemObjFree(Mem.MemObj, false);
2389 AssertRC(rc); /** @todo figure out how to handle this. */
2390 }
2391 return VINF_SUCCESS;
2392 }
2393#else /* !USE_NEW_OS_INTERFACE_FOR_MM */
2394 if ( pBundle->aMem[i].eType == eType
2395 && ( (RTHCUINTPTR)pBundle->aMem[i].pvR0 == uPtr
2396 || (RTHCUINTPTR)pBundle->aMem[i].pvR3 == uPtr))
2397 {
2398 /* Make a copy of it and release it outside the spinlock. */
2399 SUPDRVMEMREF Mem = pBundle->aMem[i];
2400 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
2401 pBundle->aMem[i].pvR0 = NULL;
2402 pBundle->aMem[i].pvR3 = NIL_RTR3PTR;
2403 pBundle->aMem[i].cb = 0;
2404 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2405
2406 /* Type specific free operation. */
2407 switch (Mem.eType)
2408 {
2409 case MEMREF_TYPE_LOCKED:
2410 supdrvOSUnlockMemOne(&Mem);
2411 break;
2412 case MEMREF_TYPE_CONT:
2413 supdrvOSContFreeOne(&Mem);
2414 break;
2415 case MEMREF_TYPE_LOW:
2416 supdrvOSLowFreeOne(&Mem);
2417 break;
2418 case MEMREF_TYPE_MEM:
2419 supdrvOSMemFreeOne(&Mem);
2420 break;
2421 default:
2422 case MEMREF_TYPE_UNUSED:
2423 break;
2424 }
2425 return VINF_SUCCESS;
2426 }
2427#endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
2428 }
2429 }
2430 }
2431 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2432 dprintf(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
2433 return VERR_INVALID_PARAMETER;
2434}
2435
2436
2437#ifndef VBOX_WITHOUT_IDT_PATCHING
2438/**
2439 * Install IDT for the current CPU.
2440 *
2441 * @returns One of the following IPRT status codes:
2442 * @retval VINF_SUCCESS on success.
2443 * @retval VERR_IDT_FAILED.
2444 * @retval VERR_NO_MEMORY.
2445 * @param pDevExt The device extension.
2446 * @param pSession The session data.
2447 * @param pReq The request.
2448 */
2449static int supdrvIOCtl_IdtInstall(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPIDTINSTALL pReq)
2450{
2451 PSUPDRVPATCHUSAGE pUsagePre;
2452 PSUPDRVPATCH pPatchPre;
2453 RTIDTR Idtr;
2454 PSUPDRVPATCH pPatch;
2455 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2456 dprintf(("supdrvIOCtl_IdtInstall\n"));
2457
2458 /*
2459 * Preallocate entry for this CPU cause we don't wanna do
2460 * that inside the spinlock!
2461 */
2462 pUsagePre = (PSUPDRVPATCHUSAGE)RTMemAlloc(sizeof(*pUsagePre));
2463 if (!pUsagePre)
2464 return VERR_NO_MEMORY;
2465
2466 /*
2467 * Take the spinlock and see what we need to do.
2468 */
2469 RTSpinlockAcquireNoInts(pDevExt->Spinlock, &SpinlockTmp);
2470
2471 /* check if we already got a free patch. */
2472 if (!pDevExt->pIdtPatchesFree)
2473 {
2474 /*
2475 * Allocate a patch - outside the spinlock of course.
2476 */
2477 RTSpinlockReleaseNoInts(pDevExt->Spinlock, &SpinlockTmp);
2478
2479 pPatchPre = (PSUPDRVPATCH)RTMemExecAlloc(sizeof(*pPatchPre));
2480 if (!pPatchPre)
2481 return VERR_NO_MEMORY;
2482
2483 RTSpinlockAcquireNoInts(pDevExt->Spinlock, &SpinlockTmp);
2484 }
2485 else
2486 {
2487 pPatchPre = pDevExt->pIdtPatchesFree;
2488 pDevExt->pIdtPatchesFree = pPatchPre->pNext;
2489 }
2490
2491 /* look for matching patch entry */
2492 ASMGetIDTR(&Idtr);
2493 pPatch = pDevExt->pIdtPatches;
2494 while (pPatch && pPatch->pvIdt != (void *)Idtr.pIdt)
2495 pPatch = pPatch->pNext;
2496
2497 if (!pPatch)
2498 {
2499 /*
2500 * Create patch.
2501 */
2502 pPatch = supdrvIdtPatchOne(pDevExt, pPatchPre);
2503 if (pPatch)
2504 pPatchPre = NULL; /* mark as used. */
2505 }
2506 else
2507 {
2508 /*
2509 * Simply increment patch usage.
2510 */
2511 pPatch->cUsage++;
2512 }
2513
2514 if (pPatch)
2515 {
2516 /*
2517 * Increment and add if need be the session usage record for this patch.
2518 */
2519 PSUPDRVPATCHUSAGE pUsage = pSession->pPatchUsage;
2520 while (pUsage && pUsage->pPatch != pPatch)
2521 pUsage = pUsage->pNext;
2522
2523 if (!pUsage)
2524 {
2525 /*
2526 * Add usage record.
2527 */
2528 pUsagePre->cUsage = 1;
2529 pUsagePre->pPatch = pPatch;
2530 pUsagePre->pNext = pSession->pPatchUsage;
2531 pSession->pPatchUsage = pUsagePre;
2532 pUsagePre = NULL; /* mark as used. */
2533 }
2534 else
2535 {
2536 /*
2537 * Increment usage count.
2538 */
2539 pUsage->cUsage++;
2540 }
2541 }
2542
2543 /* free patch - we accumulate them for paranoid saftly reasons. */
2544 if (pPatchPre)
2545 {
2546 pPatchPre->pNext = pDevExt->pIdtPatchesFree;
2547 pDevExt->pIdtPatchesFree = pPatchPre;
2548 }
2549
2550 RTSpinlockReleaseNoInts(pDevExt->Spinlock, &SpinlockTmp);
2551
2552 /*
2553 * Free unused preallocated buffers.
2554 */
2555 if (pUsagePre)
2556 RTMemFree(pUsagePre);
2557
2558 pReq->u.Out.u8Idt = pDevExt->u8Idt;
2559
2560 return pPatch ? VINF_SUCCESS : VERR_IDT_FAILED;
2561}
2562
2563
2564/**
2565 * This creates a IDT patch entry.
2566 * If the first patch being installed it'll also determin the IDT entry
2567 * to use.
2568 *
2569 * @returns pPatch on success.
2570 * @returns NULL on failure.
2571 * @param pDevExt Pointer to globals.
2572 * @param pPatch Patch entry to use.
2573 * This will be linked into SUPDRVDEVEXT::pIdtPatches on
2574 * successful return.
2575 * @remark Call must be owning the SUPDRVDEVEXT::Spinlock!
2576 */
2577static PSUPDRVPATCH supdrvIdtPatchOne(PSUPDRVDEVEXT pDevExt, PSUPDRVPATCH pPatch)
2578{
2579 RTIDTR Idtr;
2580 PSUPDRVIDTE paIdt;
2581 dprintf(("supdrvIOCtl_IdtPatchOne: pPatch=%p\n", pPatch));
2582
2583 /*
2584 * Get IDT.
2585 */
2586 ASMGetIDTR(&Idtr);
2587 paIdt = (PSUPDRVIDTE)Idtr.pIdt;
2588 /*
2589 * Recent Linux kernels can be configured to 1G user /3G kernel.
2590 */
2591 if ((uintptr_t)paIdt < 0x40000000)
2592 {
2593 AssertMsgFailed(("bad paIdt=%p\n", paIdt));
2594 return NULL;
2595 }
2596
2597 if (!pDevExt->u8Idt)
2598 {
2599 /*
2600 * Test out the alternatives.
2601 *
2602 * At the moment we do not support chaining thus we ASSUME that one of
2603 * these 48 entries is unused (which is not a problem on Win32 and
2604 * Linux to my knowledge).
2605 */
2606 /** @todo we MUST change this detection to try grab an entry which is NOT in use. This can be
2607 * combined with gathering info about which guest system call gates we can hook up directly. */
2608 unsigned i;
2609 uint8_t u8Idt = 0;
2610 static uint8_t au8Ints[] =
2611 {
2612#ifdef RT_OS_WINDOWS /* We don't use 0xef and above because they are system stuff on linux (ef is IPI,
2613 * local apic timer, or some other frequently fireing thing). */
2614 0xef, 0xee, 0xed, 0xec,
2615#endif
2616 0xeb, 0xea, 0xe9, 0xe8,
2617 0xdf, 0xde, 0xdd, 0xdc,
2618 0x7b, 0x7a, 0x79, 0x78,
2619 0xbf, 0xbe, 0xbd, 0xbc,
2620 };
2621#if defined(RT_ARCH_AMD64) && defined(DEBUG)
2622 static int s_iWobble = 0;
2623 unsigned iMax = !(s_iWobble++ % 2) ? 0x80 : 0x100;
2624 dprintf(("IDT: Idtr=%p:%#x\n", (void *)Idtr.pIdt, (unsigned)Idtr.cbIdt));
2625 for (i = iMax - 0x80; i*16+15 < Idtr.cbIdt && i < iMax; i++)
2626 {
2627 dprintf(("%#x: %04x:%08x%04x%04x P=%d DPL=%d IST=%d Type1=%#x u32Reserved=%#x u5Reserved=%#x\n",
2628 i, paIdt[i].u16SegSel, paIdt[i].u32OffsetTop, paIdt[i].u16OffsetHigh, paIdt[i].u16OffsetLow,
2629 paIdt[i].u1Present, paIdt[i].u2DPL, paIdt[i].u3IST, paIdt[i].u5Type2,
2630 paIdt[i].u32Reserved, paIdt[i].u5Reserved));
2631 }
2632#endif
2633 /* look for entries which are not present or otherwise unused. */
2634 for (i = 0; i < sizeof(au8Ints) / sizeof(au8Ints[0]); i++)
2635 {
2636 u8Idt = au8Ints[i];
2637 if ( u8Idt * sizeof(SUPDRVIDTE) < Idtr.cbIdt
2638 && ( !paIdt[u8Idt].u1Present
2639 || paIdt[u8Idt].u5Type2 == 0))
2640 break;
2641 u8Idt = 0;
2642 }
2643 if (!u8Idt)
2644 {
2645 /* try again, look for a compatible entry .*/
2646 for (i = 0; i < sizeof(au8Ints) / sizeof(au8Ints[0]); i++)
2647 {
2648 u8Idt = au8Ints[i];
2649 if ( u8Idt * sizeof(SUPDRVIDTE) < Idtr.cbIdt
2650 && paIdt[u8Idt].u1Present
2651 && paIdt[u8Idt].u5Type2 == SUPDRV_IDTE_TYPE2_INTERRUPT_GATE
2652 && !(paIdt[u8Idt].u16SegSel & 3))
2653 break;
2654 u8Idt = 0;
2655 }
2656 if (!u8Idt)
2657 {
2658 dprintf(("Failed to find appropirate IDT entry!!\n"));
2659 return NULL;
2660 }
2661 }
2662 pDevExt->u8Idt = u8Idt;
2663 dprintf(("supdrvIOCtl_IdtPatchOne: u8Idt=%x\n", u8Idt));
2664 }
2665
2666 /*
2667 * Prepare the patch
2668 */
2669 memset(pPatch, 0, sizeof(*pPatch));
2670 pPatch->pvIdt = paIdt;
2671 pPatch->cUsage = 1;
2672 pPatch->pIdtEntry = &paIdt[pDevExt->u8Idt];
2673 pPatch->SavedIdt = paIdt[pDevExt->u8Idt];
2674 pPatch->ChangedIdt.u16OffsetLow = (uint32_t)((uintptr_t)&pPatch->auCode[0] & 0xffff);
2675 pPatch->ChangedIdt.u16OffsetHigh = (uint32_t)((uintptr_t)&pPatch->auCode[0] >> 16);
2676#ifdef RT_ARCH_AMD64
2677 pPatch->ChangedIdt.u32OffsetTop = (uint32_t)((uintptr_t)&pPatch->auCode[0] >> 32);
2678#endif
2679 pPatch->ChangedIdt.u16SegSel = ASMGetCS();
2680#ifdef RT_ARCH_AMD64
2681 pPatch->ChangedIdt.u3IST = 0;
2682 pPatch->ChangedIdt.u5Reserved = 0;
2683#else /* x86 */
2684 pPatch->ChangedIdt.u5Reserved = 0;
2685 pPatch->ChangedIdt.u3Type1 = 0;
2686#endif /* x86 */
2687 pPatch->ChangedIdt.u5Type2 = SUPDRV_IDTE_TYPE2_INTERRUPT_GATE;
2688 pPatch->ChangedIdt.u2DPL = 3;
2689 pPatch->ChangedIdt.u1Present = 1;
2690
2691 /*
2692 * Generate the patch code.
2693 */
2694 {
2695#ifdef RT_ARCH_AMD64
2696 union
2697 {
2698 uint8_t *pb;
2699 uint32_t *pu32;
2700 uint64_t *pu64;
2701 } u, uFixJmp, uFixCall, uNotNested;
2702 u.pb = &pPatch->auCode[0];
2703
2704 /* check the cookie */
2705 *u.pb++ = 0x3d; // cmp eax, GLOBALCOOKIE
2706 *u.pu32++ = pDevExt->u32Cookie;
2707
2708 *u.pb++ = 0x74; // jz @VBoxCall
2709 *u.pb++ = 2;
2710
2711 /* jump to forwarder code. */
2712 *u.pb++ = 0xeb;
2713 uFixJmp = u;
2714 *u.pb++ = 0xfe;
2715
2716 // @VBoxCall:
2717 *u.pb++ = 0x0f; // swapgs
2718 *u.pb++ = 0x01;
2719 *u.pb++ = 0xf8;
2720
2721 /*
2722 * Call VMMR0Entry
2723 * We don't have to push the arguments here, but we have top
2724 * reserve some stack space for the interrupt forwarding.
2725 */
2726# ifdef RT_OS_WINDOWS
2727 *u.pb++ = 0x50; // push rax ; alignment filler.
2728 *u.pb++ = 0x41; // push r8 ; uArg
2729 *u.pb++ = 0x50;
2730 *u.pb++ = 0x52; // push rdx ; uOperation
2731 *u.pb++ = 0x51; // push rcx ; pVM
2732# else
2733 *u.pb++ = 0x51; // push rcx ; alignment filler.
2734 *u.pb++ = 0x52; // push rdx ; uArg
2735 *u.pb++ = 0x56; // push rsi ; uOperation
2736 *u.pb++ = 0x57; // push rdi ; pVM
2737# endif
2738
2739 *u.pb++ = 0xff; // call qword [pfnVMMR0Entry wrt rip]
2740 *u.pb++ = 0x15;
2741 uFixCall = u;
2742 *u.pu32++ = 0;
2743
2744 *u.pb++ = 0x48; // add rsp, 20h ; remove call frame.
2745 *u.pb++ = 0x81;
2746 *u.pb++ = 0xc4;
2747 *u.pu32++ = 0x20;
2748
2749 *u.pb++ = 0x0f; // swapgs
2750 *u.pb++ = 0x01;
2751 *u.pb++ = 0xf8;
2752
2753 /* Return to R3. */
2754 uNotNested = u;
2755 *u.pb++ = 0x48; // iretq
2756 *u.pb++ = 0xcf;
2757
2758 while ((uintptr_t)u.pb & 0x7) // align 8
2759 *u.pb++ = 0xcc;
2760
2761 /* Pointer to the VMMR0Entry. */ // pfnVMMR0Entry dq StubVMMR0Entry
2762 *uFixCall.pu32 = (uint32_t)(u.pb - uFixCall.pb - 4); uFixCall.pb = NULL;
2763 pPatch->offVMMR0EntryFixup = (uint16_t)(u.pb - &pPatch->auCode[0]);
2764 *u.pu64++ = pDevExt->pvVMMR0 ? (uint64_t)pDevExt->pfnVMMR0Entry : (uint64_t)u.pb + 8;
2765
2766 /* stub entry. */ // StubVMMR0Entry:
2767 pPatch->offStub = (uint16_t)(u.pb - &pPatch->auCode[0]);
2768 *u.pb++ = 0x33; // xor eax, eax
2769 *u.pb++ = 0xc0;
2770
2771 *u.pb++ = 0x48; // dec rax
2772 *u.pb++ = 0xff;
2773 *u.pb++ = 0xc8;
2774
2775 *u.pb++ = 0xc3; // ret
2776
2777 /* forward to the original handler using a retf. */
2778 *uFixJmp.pb = (uint8_t)(u.pb - uFixJmp.pb - 1); uFixJmp.pb = NULL;
2779
2780 *u.pb++ = 0x68; // push <target cs>
2781 *u.pu32++ = !pPatch->SavedIdt.u5Type2 ? ASMGetCS() : pPatch->SavedIdt.u16SegSel;
2782
2783 *u.pb++ = 0x68; // push <low target rip>
2784 *u.pu32++ = !pPatch->SavedIdt.u5Type2
2785 ? (uint32_t)(uintptr_t)uNotNested.pb
2786 : (uint32_t)pPatch->SavedIdt.u16OffsetLow
2787 | (uint32_t)pPatch->SavedIdt.u16OffsetHigh << 16;
2788
2789 *u.pb++ = 0xc7; // mov dword [rsp + 4], <high target rip>
2790 *u.pb++ = 0x44;
2791 *u.pb++ = 0x24;
2792 *u.pb++ = 0x04;
2793 *u.pu32++ = !pPatch->SavedIdt.u5Type2
2794 ? (uint32_t)((uint64_t)uNotNested.pb >> 32)
2795 : pPatch->SavedIdt.u32OffsetTop;
2796
2797 *u.pb++ = 0x48; // retf ; does this require prefix?
2798 *u.pb++ = 0xcb;
2799
2800#else /* RT_ARCH_X86 */
2801
2802 union
2803 {
2804 uint8_t *pb;
2805 uint16_t *pu16;
2806 uint32_t *pu32;
2807 } u, uFixJmpNotNested, uFixJmp, uFixCall, uNotNested;
2808 u.pb = &pPatch->auCode[0];
2809
2810 /* check the cookie */
2811 *u.pb++ = 0x81; // cmp esi, GLOBALCOOKIE
2812 *u.pb++ = 0xfe;
2813 *u.pu32++ = pDevExt->u32Cookie;
2814
2815 *u.pb++ = 0x74; // jz VBoxCall
2816 uFixJmp = u;
2817 *u.pb++ = 0;
2818
2819 /* jump (far) to the original handler / not-nested-stub. */
2820 *u.pb++ = 0xea; // jmp far NotNested
2821 uFixJmpNotNested = u;
2822 *u.pu32++ = 0;
2823 *u.pu16++ = 0;
2824
2825 /* save selector registers. */ // VBoxCall:
2826 *uFixJmp.pb = (uint8_t)(u.pb - uFixJmp.pb - 1);
2827 *u.pb++ = 0x0f; // push fs
2828 *u.pb++ = 0xa0;
2829
2830 *u.pb++ = 0x1e; // push ds
2831
2832 *u.pb++ = 0x06; // push es
2833
2834 /* call frame */
2835 *u.pb++ = 0x51; // push ecx
2836
2837 *u.pb++ = 0x52; // push edx
2838
2839 *u.pb++ = 0x50; // push eax
2840
2841 /* load ds, es and perhaps fs before call. */
2842 *u.pb++ = 0xb8; // mov eax, KernelDS
2843 *u.pu32++ = ASMGetDS();
2844
2845 *u.pb++ = 0x8e; // mov ds, eax
2846 *u.pb++ = 0xd8;
2847
2848 *u.pb++ = 0x8e; // mov es, eax
2849 *u.pb++ = 0xc0;
2850
2851#ifdef RT_OS_WINDOWS
2852 *u.pb++ = 0xb8; // mov eax, KernelFS
2853 *u.pu32++ = ASMGetFS();
2854
2855 *u.pb++ = 0x8e; // mov fs, eax
2856 *u.pb++ = 0xe0;
2857#endif
2858
2859 /* do the call. */
2860 *u.pb++ = 0xe8; // call _VMMR0Entry / StubVMMR0Entry
2861 uFixCall = u;
2862 pPatch->offVMMR0EntryFixup = (uint16_t)(u.pb - &pPatch->auCode[0]);
2863 *u.pu32++ = 0xfffffffb;
2864
2865 *u.pb++ = 0x83; // add esp, 0ch ; cdecl
2866 *u.pb++ = 0xc4;
2867 *u.pb++ = 0x0c;
2868
2869 /* restore selector registers. */
2870 *u.pb++ = 0x07; // pop es
2871 //
2872 *u.pb++ = 0x1f; // pop ds
2873
2874 *u.pb++ = 0x0f; // pop fs
2875 *u.pb++ = 0xa1;
2876
2877 uNotNested = u; // NotNested:
2878 *u.pb++ = 0xcf; // iretd
2879
2880 /* the stub VMMR0Entry. */ // StubVMMR0Entry:
2881 pPatch->offStub = (uint16_t)(u.pb - &pPatch->auCode[0]);
2882 *u.pb++ = 0x33; // xor eax, eax
2883 *u.pb++ = 0xc0;
2884
2885 *u.pb++ = 0x48; // dec eax
2886
2887 *u.pb++ = 0xc3; // ret
2888
2889 /* Fixup the VMMR0Entry call. */
2890 if (pDevExt->pvVMMR0)
2891 *uFixCall.pu32 = (uint32_t)pDevExt->pfnVMMR0Entry - (uint32_t)(uFixCall.pu32 + 1);
2892 else
2893 *uFixCall.pu32 = (uint32_t)&pPatch->auCode[pPatch->offStub] - (uint32_t)(uFixCall.pu32 + 1);
2894
2895 /* Fixup the forward / nested far jump. */
2896 if (!pPatch->SavedIdt.u5Type2)
2897 {
2898 *uFixJmpNotNested.pu32++ = (uint32_t)uNotNested.pb;
2899 *uFixJmpNotNested.pu16++ = ASMGetCS();
2900 }
2901 else
2902 {
2903 *uFixJmpNotNested.pu32++ = ((uint32_t)pPatch->SavedIdt.u16OffsetHigh << 16) | pPatch->SavedIdt.u16OffsetLow;
2904 *uFixJmpNotNested.pu16++ = pPatch->SavedIdt.u16SegSel;
2905 }
2906#endif /* RT_ARCH_X86 */
2907 Assert(u.pb <= &pPatch->auCode[sizeof(pPatch->auCode)]);
2908#if 0
2909 /* dump the patch code */
2910 dprintf(("patch code: %p\n", &pPatch->auCode[0]));
2911 for (uFixCall.pb = &pPatch->auCode[0]; uFixCall.pb < u.pb; uFixCall.pb++)
2912 dprintf(("0x%02x,\n", *uFixCall.pb));
2913#endif
2914 }
2915
2916 /*
2917 * Install the patch.
2918 */
2919 supdrvIdtWrite(pPatch->pIdtEntry, &pPatch->ChangedIdt);
2920 AssertMsg(!memcmp((void *)pPatch->pIdtEntry, &pPatch->ChangedIdt, sizeof(pPatch->ChangedIdt)), ("The stupid change code didn't work!!!!!\n"));
2921
2922 /*
2923 * Link in the patch.
2924 */
2925 pPatch->pNext = pDevExt->pIdtPatches;
2926 pDevExt->pIdtPatches = pPatch;
2927
2928 return pPatch;
2929}
2930
2931
2932/**
2933 * Removes the sessions IDT references.
2934 * This will uninstall our IDT patch if we left unreferenced.
2935 *
2936 * @returns VINF_SUCCESS.
2937 * @param pDevExt Device globals.
2938 * @param pSession Session data.
2939 */
2940static int supdrvIOCtl_IdtRemoveAll(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
2941{
2942 PSUPDRVPATCHUSAGE pUsage;
2943 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2944 dprintf(("supdrvIOCtl_IdtRemoveAll: pSession=%p\n", pSession));
2945
2946 /*
2947 * Take the spinlock.
2948 */
2949 RTSpinlockAcquireNoInts(pDevExt->Spinlock, &SpinlockTmp);
2950
2951 /*
2952 * Walk usage list, removing patches as their usage count reaches zero.
2953 */
2954 pUsage = pSession->pPatchUsage;
2955 while (pUsage)
2956 {
2957 if (pUsage->pPatch->cUsage <= pUsage->cUsage)
2958 supdrvIdtRemoveOne(pDevExt, pUsage->pPatch);
2959 else
2960 pUsage->pPatch->cUsage -= pUsage->cUsage;
2961
2962 /* next */
2963 pUsage = pUsage->pNext;
2964 }
2965
2966 /*
2967 * Empty the usage chain and we're done inside the spinlock.
2968 */
2969 pUsage = pSession->pPatchUsage;
2970 pSession->pPatchUsage = NULL;
2971
2972 RTSpinlockReleaseNoInts(pDevExt->Spinlock, &SpinlockTmp);
2973
2974 /*
2975 * Free usage entries.
2976 */
2977 while (pUsage)
2978 {
2979 void *pvToFree = pUsage;
2980 pUsage->cUsage = 0;
2981 pUsage->pPatch = NULL;
2982 pUsage = pUsage->pNext;
2983 RTMemFree(pvToFree);
2984 }
2985
2986 return VINF_SUCCESS;
2987}
2988
2989
2990/**
2991 * Remove one patch.
2992 *
2993 * Worker for supdrvIOCtl_IdtRemoveAll.
2994 *
2995 * @param pDevExt Device globals.
2996 * @param pPatch Patch entry to remove.
2997 * @remark Caller must own SUPDRVDEVEXT::Spinlock!
2998 */
2999static void supdrvIdtRemoveOne(PSUPDRVDEVEXT pDevExt, PSUPDRVPATCH pPatch)
3000{
3001 dprintf(("supdrvIdtRemoveOne: pPatch=%p\n", pPatch));
3002
3003 pPatch->cUsage = 0;
3004
3005 /*
3006 * If the IDT entry was changed it have to kick around for ever!
3007 * This will be attempted freed again, perhaps next time we'll succeed :-)
3008 */
3009 if (memcmp((void *)pPatch->pIdtEntry, &pPatch->ChangedIdt, sizeof(pPatch->ChangedIdt)))
3010 {
3011 AssertMsgFailed(("The hijacked IDT entry has CHANGED!!!\n"));
3012 return;
3013 }
3014
3015 /*
3016 * Unlink it.
3017 */
3018 if (pDevExt->pIdtPatches != pPatch)
3019 {
3020 PSUPDRVPATCH pPatchPrev = pDevExt->pIdtPatches;
3021 while (pPatchPrev)
3022 {
3023 if (pPatchPrev->pNext == pPatch)
3024 {
3025 pPatchPrev->pNext = pPatch->pNext;
3026 break;
3027 }
3028 pPatchPrev = pPatchPrev->pNext;
3029 }
3030 Assert(!pPatchPrev);
3031 }
3032 else
3033 pDevExt->pIdtPatches = pPatch->pNext;
3034 pPatch->pNext = NULL;
3035
3036
3037 /*
3038 * Verify and restore the IDT.
3039 */
3040 AssertMsg(!memcmp((void *)pPatch->pIdtEntry, &pPatch->ChangedIdt, sizeof(pPatch->ChangedIdt)), ("The hijacked IDT entry has CHANGED!!!\n"));
3041 supdrvIdtWrite(pPatch->pIdtEntry, &pPatch->SavedIdt);
3042 AssertMsg(!memcmp((void *)pPatch->pIdtEntry, &pPatch->SavedIdt, sizeof(pPatch->SavedIdt)), ("The hijacked IDT entry has CHANGED!!!\n"));
3043
3044 /*
3045 * Put it in the free list.
3046 * (This free list stuff is to calm my paranoia.)
3047 */
3048 pPatch->pvIdt = NULL;
3049 pPatch->pIdtEntry = NULL;
3050
3051 pPatch->pNext = pDevExt->pIdtPatchesFree;
3052 pDevExt->pIdtPatchesFree = pPatch;
3053}
3054
3055
3056/**
3057 * Write to an IDT entry.
3058 *
3059 * @param pvIdtEntry Where to write.
3060 * @param pNewIDTEntry What to write.
3061 */
static void supdrvIdtWrite(volatile void *pvIdtEntry, const SUPDRVIDTE *pNewIDTEntry)
{
    RTUINTREG uCR0;
    RTUINTREG uFlags;

    /*
     * On SMP machines (P4 hyperthreading included) we must perform a
     * 64-bit locked write when updating the IDT entry (128-bit on AMD64)
     * so other CPUs never observe a half-written gate descriptor.
     *
     * The F00F bugfix for linux (and probably other OSes) causes
     * the IDT to be pointing to a readonly mapping. We get around that
     * by temporarily turning off WP (CR0 bit 16). Since we're inside a
     * spinlock at this point, interrupts are disabled and there isn't
     * any way the WP bit flipping can cause any trouble.
     */

    /* Save & clear interrupt flag; save & clear WP. */
    uFlags = ASMGetFlags();
    ASMSetFlags(uFlags & ~(RTUINTREG)(1 << 9)); /*X86_EFL_IF*/
    Assert(!(ASMGetFlags() & (1 << 9)));
    uCR0 = ASMGetCR0();
    ASMSetCR0(uCR0 & ~(RTUINTREG)(1 << 16)); /*X86_CR0_WP*/

    /* Update the IDT entry with a single atomic exchange. */
#ifdef RT_ARCH_AMD64
    ASMAtomicXchgU128((volatile uint128_t *)pvIdtEntry, *(uint128_t *)(uintptr_t)pNewIDTEntry);
#else
    ASMAtomicXchgU64((volatile uint64_t *)pvIdtEntry, *(uint64_t *)(uintptr_t)pNewIDTEntry);
#endif

    /* Restore CR0 & flags in reverse order. */
    ASMSetCR0(uCR0);
    ASMSetFlags(uFlags);
}
3096#endif /* !VBOX_WITHOUT_IDT_PATCHING */
3097
3098
3099/**
3100 * Opens an image. If it's the first time it's opened the call must upload
3101 * the bits using the supdrvIOCtl_LdrLoad() / SUPDRV_IOCTL_LDR_LOAD function.
3102 *
3103 * This is the 1st step of the loading.
3104 *
3105 * @returns IPRT status code.
3106 * @param pDevExt Device globals.
3107 * @param pSession Session data.
3108 * @param pReq The open request.
3109 */
3110static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
3111{
3112 PSUPDRVLDRIMAGE pImage;
3113 unsigned cb;
3114 void *pv;
3115 dprintf(("supdrvIOCtl_LdrOpen: szName=%s cbImage=%d\n", pReq->u.In.szName, pReq->u.In.cbImage));
3116
3117 /*
3118 * Check if we got an instance of the image already.
3119 */
3120 RTSemFastMutexRequest(pDevExt->mtxLdr);
3121 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
3122 {
3123 if (!strcmp(pImage->szName, pReq->u.In.szName))
3124 {
3125 pImage->cUsage++;
3126 pReq->u.Out.pvImageBase = pImage->pvImage;
3127 pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
3128 supdrvLdrAddUsage(pSession, pImage);
3129 RTSemFastMutexRelease(pDevExt->mtxLdr);
3130 return VINF_SUCCESS;
3131 }
3132 }
3133 /* (not found - add it!) */
3134
3135 /*
3136 * Allocate memory.
3137 */
3138 cb = pReq->u.In.cbImage + sizeof(SUPDRVLDRIMAGE) + 31;
3139 pv = RTMemExecAlloc(cb);
3140 if (!pv)
3141 {
3142 RTSemFastMutexRelease(pDevExt->mtxLdr);
3143 return VERR_NO_MEMORY;
3144 }
3145
3146 /*
3147 * Setup and link in the LDR stuff.
3148 */
3149 pImage = (PSUPDRVLDRIMAGE)pv;
3150 pImage->pvImage = ALIGNP(pImage + 1, 32);
3151 pImage->cbImage = pReq->u.In.cbImage;
3152 pImage->pfnModuleInit = NULL;
3153 pImage->pfnModuleTerm = NULL;
3154 pImage->uState = SUP_IOCTL_LDR_OPEN;
3155 pImage->cUsage = 1;
3156 strcpy(pImage->szName, pReq->u.In.szName);
3157
3158 pImage->pNext = pDevExt->pLdrImages;
3159 pDevExt->pLdrImages = pImage;
3160
3161 supdrvLdrAddUsage(pSession, pImage);
3162
3163 pReq->u.Out.pvImageBase = pImage->pvImage;
3164 pReq->u.Out.fNeedsLoading = true;
3165 RTSemFastMutexRelease(pDevExt->mtxLdr);
3166 return VINF_SUCCESS;
3167}
3168
3169
3170/**
3171 * Loads the image bits.
3172 *
3173 * This is the 2nd step of the loading.
3174 *
3175 * @returns IPRT status code.
3176 * @param pDevExt Device globals.
3177 * @param pSession Session data.
3178 * @param pReq The request.
3179 */
static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
{
    PSUPDRVLDRUSAGE pUsage;
    PSUPDRVLDRIMAGE pImage;
    int rc;
    dprintf(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImage=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImage));

    /*
     * Find the ldr image in this session's usage list, keyed on the load
     * address that supdrvIOCtl_LdrOpen handed out.
     */
    RTSemFastMutexRequest(pDevExt->mtxLdr);
    pUsage = pSession->pLdrUsage;
    while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
        pUsage = pUsage->pNext;
    if (!pUsage)
    {
        RTSemFastMutexRelease(pDevExt->mtxLdr);
        dprintf(("SUP_IOCTL_LDR_LOAD: couldn't find image!\n"));
        return VERR_INVALID_HANDLE;
    }
    pImage = pUsage->pImage;
    /* The size must match what was specified at open time. */
    if (pImage->cbImage != pReq->u.In.cbImage)
    {
        RTSemFastMutexRelease(pDevExt->mtxLdr);
        dprintf(("SUP_IOCTL_LDR_LOAD: image size mismatch!! %d(prep) != %d(load)\n", pImage->cbImage, pReq->u.In.cbImage));
        return VERR_INVALID_HANDLE;
    }
    /* Only an image in the OPEN state can take a load; LOAD means someone
       else beat us to it (not an assertion-worthy situation). */
    if (pImage->uState != SUP_IOCTL_LDR_OPEN)
    {
        unsigned uState = pImage->uState;
        RTSemFastMutexRelease(pDevExt->mtxLdr);
        if (uState != SUP_IOCTL_LDR_LOAD)
            AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
        return SUPDRV_ERR_ALREADY_LOADED;
    }
    /*
     * Validate the entry point request before touching anything.
     */
    switch (pReq->u.In.eEPType)
    {
        case SUPLDRLOADEP_NOTHING:
            break;
        case SUPLDRLOADEP_VMMR0:
            /* Both pointers must be given and the entry point must lie
               inside the image. */
            if (!pReq->u.In.EP.VMMR0.pvVMMR0 || !pReq->u.In.EP.VMMR0.pvVMMR0Entry)
            {
                RTSemFastMutexRelease(pDevExt->mtxLdr);
                dprintf(("pvVMMR0=%p or pReq->u.In.EP.VMMR0.pvVMMR0Entry=%p is NULL!\n",
                         pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0Entry));
                return VERR_INVALID_PARAMETER;
            }
            /* Unsigned subtraction also rejects addresses below the base. */
            if ((uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0Entry - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
            {
                RTSemFastMutexRelease(pDevExt->mtxLdr);
                dprintf(("SUP_IOCTL_LDR_LOAD: pvVMMR0Entry=%p is outside the image (%p %d bytes)\n",
                         pReq->u.In.EP.VMMR0.pvVMMR0Entry, pImage->pvImage, pReq->u.In.cbImage));
                return VERR_INVALID_PARAMETER;
            }
            break;
        default:
            RTSemFastMutexRelease(pDevExt->mtxLdr);
            dprintf(("Invalid eEPType=%d\n", pReq->u.In.eEPType));
            return VERR_INVALID_PARAMETER;
    }
    /* The module init/term callbacks, when present, must also point into
       the image. */
    if (    pReq->u.In.pfnModuleInit
        &&  (uintptr_t)pReq->u.In.pfnModuleInit - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
    {
        RTSemFastMutexRelease(pDevExt->mtxLdr);
        dprintf(("SUP_IOCTL_LDR_LOAD: pfnModuleInit=%p is outside the image (%p %d bytes)\n",
                 pReq->u.In.pfnModuleInit, pImage->pvImage, pReq->u.In.cbImage));
        return VERR_INVALID_PARAMETER;
    }
    if (    pReq->u.In.pfnModuleTerm
        &&  (uintptr_t)pReq->u.In.pfnModuleTerm - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
    {
        RTSemFastMutexRelease(pDevExt->mtxLdr);
        dprintf(("SUP_IOCTL_LDR_LOAD: pfnModuleTerm=%p is outside the image (%p %d bytes)\n",
                 pReq->u.In.pfnModuleTerm, pImage->pvImage, pReq->u.In.cbImage));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Copy the memory.
     */
    /* no need to do try/except as this is a buffered request. */
    memcpy(pImage->pvImage, &pReq->u.In.achImage[0], pImage->cbImage);
    pImage->uState = SUP_IOCTL_LDR_LOAD;
    pImage->pfnModuleInit = pReq->u.In.pfnModuleInit;
    pImage->pfnModuleTerm = pReq->u.In.pfnModuleTerm;
    pImage->offSymbols = pReq->u.In.offSymbols;
    pImage->cSymbols = pReq->u.In.cSymbols;
    pImage->offStrTab = pReq->u.In.offStrTab;
    pImage->cbStrTab = pReq->u.In.cbStrTab;

    /*
     * Update any entry points.
     */
    switch (pReq->u.In.eEPType)
    {
        default:
        case SUPLDRLOADEP_NOTHING:
            rc = VINF_SUCCESS;
            break;
        case SUPLDRLOADEP_VMMR0:
            rc = supdrvLdrSetR0EP(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0Entry);
            break;
    }

    /*
     * On success call the module initialization.
     */
    dprintf(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
    if (!rc && pImage->pfnModuleInit)
    {
        dprintf(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
        rc = pImage->pfnModuleInit();
        /* If init failed and we just registered this image as VMMR0, undo it. */
        if (rc && pDevExt->pvVMMR0 == pImage->pvImage)
            supdrvLdrUnsetR0EP(pDevExt);
    }

    /* On any failure, fall back to the OPEN state so the load can be retried. */
    if (rc)
        pImage->uState = SUP_IOCTL_LDR_OPEN;

    RTSemFastMutexRelease(pDevExt->mtxLdr);
    return rc;
}
3302
3303
3304/**
3305 * Frees a previously loaded (prep'ed) image.
3306 *
3307 * @returns IPRT status code.
3308 * @param pDevExt Device globals.
3309 * @param pSession Session data.
3310 * @param pReq The request.
3311 */
static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
{
    PSUPDRVLDRUSAGE pUsagePrev;
    PSUPDRVLDRUSAGE pUsage;
    PSUPDRVLDRIMAGE pImage;
    dprintf(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));

    /*
     * Find the ldr image in this session's usage list, remembering the
     * predecessor so the record can be unlinked below.
     */
    RTSemFastMutexRequest(pDevExt->mtxLdr);
    pUsagePrev = NULL;
    pUsage = pSession->pLdrUsage;
    while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
    {
        pUsagePrev = pUsage;
        pUsage = pUsage->pNext;
    }
    if (!pUsage)
    {
        RTSemFastMutexRelease(pDevExt->mtxLdr);
        dprintf(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
        return VERR_INVALID_HANDLE;
    }

    /*
     * Check if we can remove anything.
     */
    pImage = pUsage->pImage;
    if (pImage->cUsage <= 1 || pUsage->cUsage <= 1)
    {
        /* Last reference (globally or from this session):
           unlink the usage record from the session... */
        if (pUsagePrev)
            pUsagePrev->pNext = pUsage->pNext;
        else
            pSession->pLdrUsage = pUsage->pNext;
        /* ...and free it. */
        pUsage->pImage = NULL;
        pUsage->pNext = NULL;
        RTMemFree(pUsage);

        /*
         * Dereference the image, freeing it when nobody else uses it.
         */
        if (pImage->cUsage <= 1)
            supdrvLdrFree(pDevExt, pImage);
        else
            pImage->cUsage--;
    }
    else
    {
        /*
         * Dereference both image and usage.
         */
        pImage->cUsage--;
        pUsage->cUsage--;
    }

    RTSemFastMutexRelease(pDevExt->mtxLdr);
    return VINF_SUCCESS;
}
3373
3374
3375/**
3376 * Gets the address of a symbol in an open image.
3377 *
3378 * @returns 0 on success.
3379 * @returns SUPDRV_ERR_* on failure.
3380 * @param pDevExt Device globals.
3381 * @param pSession Session data.
3382 * @param pReq The request buffer.
3383 */
3384static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
3385{
3386 PSUPDRVLDRIMAGE pImage;
3387 PSUPDRVLDRUSAGE pUsage;
3388 uint32_t i;
3389 PSUPLDRSYM paSyms;
3390 const char *pchStrings;
3391 const size_t cbSymbol = strlen(pReq->u.In.szSymbol) + 1;
3392 void *pvSymbol = NULL;
3393 int rc = VERR_GENERAL_FAILURE;
3394 dprintf2(("supdrvIOCtl_LdrGetSymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
3395
3396 /*
3397 * Find the ldr image.
3398 */
3399 RTSemFastMutexRequest(pDevExt->mtxLdr);
3400 pUsage = pSession->pLdrUsage;
3401 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3402 pUsage = pUsage->pNext;
3403 if (!pUsage)
3404 {
3405 RTSemFastMutexRelease(pDevExt->mtxLdr);
3406 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
3407 return VERR_INVALID_HANDLE;
3408 }
3409 pImage = pUsage->pImage;
3410 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
3411 {
3412 unsigned uState = pImage->uState;
3413 RTSemFastMutexRelease(pDevExt->mtxLdr);
3414 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
3415 return VERR_ALREADY_LOADED;
3416 }
3417
3418 /*
3419 * Search the symbol string.
3420 */
3421 pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3422 paSyms = (PSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3423 for (i = 0; i < pImage->cSymbols; i++)
3424 {
3425 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3426 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3427 && !memcmp(pchStrings + paSyms[i].offName, pReq->u.In.szSymbol, cbSymbol))
3428 {
3429 pvSymbol = (uint8_t *)pImage->pvImage + paSyms[i].offSymbol;
3430 rc = VINF_SUCCESS;
3431 break;
3432 }
3433 }
3434 RTSemFastMutexRelease(pDevExt->mtxLdr);
3435 pReq->u.Out.pvSymbol = pvSymbol;
3436 return rc;
3437}
3438
3439
3440/**
3441 * Updates the IDT patches to point to the specified VMM R0 entry
3442 * point (i.e. VMMR0Enter()).
3443 *
3444 * @returns IPRT status code.
 * @param   pDevExt         Device globals.
 * @param   pvVMMR0         VMMR0 image handle (load address).
 * @param   pvVMMR0Entry    VMMR0Entry address.
3449 * @remark Caller must own the loader mutex.
3450 */
static int supdrvLdrSetR0EP(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0Entry)
{
    int rc = VINF_SUCCESS;
    dprintf(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0Entry=%p\n", pvVMMR0, pvVMMR0Entry));


    /*
     * Check if not yet set.
     */
    if (!pDevExt->pvVMMR0)
    {
#ifndef VBOX_WITHOUT_IDT_PATCHING
        PSUPDRVPATCH pPatch;
#endif

        /*
         * Set it and update IDT patch code.
         */
        pDevExt->pvVMMR0 = pvVMMR0;
        pDevExt->pfnVMMR0Entry = pvVMMR0Entry;
#ifndef VBOX_WITHOUT_IDT_PATCHING
        /* Patch the fixup location in every installed IDT stub. On AMD64
           an absolute 64-bit address is written; on x86 a rel32 operand
           (relative to the instruction following the fixup). */
        for (pPatch = pDevExt->pIdtPatches; pPatch; pPatch = pPatch->pNext)
        {
# ifdef RT_ARCH_AMD64
            ASMAtomicXchgU64((volatile uint64_t *)&pPatch->auCode[pPatch->offVMMR0EntryFixup], (uint64_t)pvVMMR0);
# else /* RT_ARCH_X86 */
            ASMAtomicXchgU32((volatile uint32_t *)&pPatch->auCode[pPatch->offVMMR0EntryFixup],
                             (uint32_t)pvVMMR0 - (uint32_t)&pPatch->auCode[pPatch->offVMMR0EntryFixup + 4]);
# endif
        }
#endif /* !VBOX_WITHOUT_IDT_PATCHING */
    }
    else
    {
        /*
         * Already set: succeed only if the caller supplies the exact same
         * module and entry point, fail otherwise.
         */
        if (    pDevExt->pvVMMR0 != pvVMMR0
            ||  (void *)pDevExt->pfnVMMR0Entry != pvVMMR0Entry)
        {
            AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
            rc = VERR_INVALID_PARAMETER;
        }
    }
    return rc;
}
3498
3499
3500/**
3501 * Unsets the R0 entry point installed by supdrvLdrSetR0EP.
3502 *
3503 * @param pDevExt Device globals.
3504 */
static void supdrvLdrUnsetR0EP(PSUPDRVDEVEXT pDevExt)
{
#ifndef VBOX_WITHOUT_IDT_PATCHING
    PSUPDRVPATCH pPatch;
#endif

    /* Forget the registered VMMR0 module and entry point. */
    pDevExt->pvVMMR0 = NULL;
    pDevExt->pfnVMMR0Entry = NULL;

#ifndef VBOX_WITHOUT_IDT_PATCHING
    /* Point every installed IDT patch back at its built-in stub code.
       AMD64 stores an absolute address, x86 a rel32 operand (relative to
       the instruction following the fixup). */
    for (pPatch = pDevExt->pIdtPatches; pPatch; pPatch = pPatch->pNext)
    {
# ifdef RT_ARCH_AMD64
        ASMAtomicXchgU64((volatile uint64_t *)&pPatch->auCode[pPatch->offVMMR0EntryFixup],
                         (uint64_t)&pPatch->auCode[pPatch->offStub]);
# else /* RT_ARCH_X86 */
        ASMAtomicXchgU32((volatile uint32_t *)&pPatch->auCode[pPatch->offVMMR0EntryFixup],
                         (uint32_t)&pPatch->auCode[pPatch->offStub] - (uint32_t)&pPatch->auCode[pPatch->offVMMR0EntryFixup + 4]);
# endif
    }
#endif /* !VBOX_WITHOUT_IDT_PATCHING */
}
3527
3528
3529/**
3530 * Adds a usage reference in the specified session of an image.
3531 *
3532 * @param pSession Session in question.
3533 * @param pImage Image which the session is using.
3534 */
3535static void supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage)
3536{
3537 PSUPDRVLDRUSAGE pUsage;
3538 dprintf(("supdrvLdrAddUsage: pImage=%p\n", pImage));
3539
3540 /*
3541 * Referenced it already?
3542 */
3543 pUsage = pSession->pLdrUsage;
3544 while (pUsage)
3545 {
3546 if (pUsage->pImage == pImage)
3547 {
3548 pUsage->cUsage++;
3549 return;
3550 }
3551 pUsage = pUsage->pNext;
3552 }
3553
3554 /*
3555 * Allocate new usage record.
3556 */
3557 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
3558 Assert(pUsage);
3559 if (pUsage)
3560 {
3561 pUsage->cUsage = 1;
3562 pUsage->pImage = pImage;
3563 pUsage->pNext = pSession->pLdrUsage;
3564 pSession->pLdrUsage = pUsage;
3565 }
3566 /* ignore errors... */
3567}
3568
3569
3570/**
3571 * Frees a load image.
3572 *
3573 * @param pDevExt Pointer to device extension.
3574 * @param pImage Pointer to the image we're gonna free.
 *                      This image must exist!
3576 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
3577 */
static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
{
    PSUPDRVLDRIMAGE pImagePrev;
    dprintf(("supdrvLdrFree: pImage=%p\n", pImage));

    /* find it - arg. should've used doubly linked list. */
    Assert(pDevExt->pLdrImages);
    pImagePrev = NULL;
    if (pDevExt->pLdrImages != pImage)
    {
        pImagePrev = pDevExt->pLdrImages;
        while (pImagePrev->pNext != pImage)
            pImagePrev = pImagePrev->pNext;
        Assert(pImagePrev->pNext == pImage);
    }

    /* unlink (pImagePrev is NULL when pImage is the list head) */
    if (pImagePrev)
        pImagePrev->pNext = pImage->pNext;
    else
        pDevExt->pLdrImages = pImage->pNext;

    /* check if this is VMMR0.r0 and fix the IDT patches if it is. */
    if (pDevExt->pvVMMR0 == pImage->pvImage)
        supdrvLdrUnsetR0EP(pDevExt);

    /* call the termination function, but only if the image got fully loaded. */
    if (    pImage->pfnModuleTerm
        &&  pImage->uState == SUP_IOCTL_LDR_LOAD)
    {
        dprintf(("supdrvIOCtl_LdrLoad: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
        pImage->pfnModuleTerm();
    }

    /* free the image (record and bits share one RTMemExecAlloc block) */
    pImage->cUsage = 0;
    pImage->pNext = 0;
    pImage->uState = SUP_IOCTL_LDR_FREE;
    RTMemExecFree(pImage);
}
3618
3619
3620/**
 * Gets the current paging mode of the CPU and returns it.
3622 */
3623static SUPPAGINGMODE supdrvIOCtl_GetPagingMode(void)
3624{
3625 SUPPAGINGMODE enmMode;
3626
3627 RTUINTREG cr0 = ASMGetCR0();
3628 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
3629 enmMode = SUPPAGINGMODE_INVALID;
3630 else
3631 {
3632 RTUINTREG cr4 = ASMGetCR4();
3633 uint32_t fNXEPlusLMA = 0;
3634 if (cr4 & X86_CR4_PAE)
3635 {
3636 uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
3637 if (fAmdFeatures & (X86_CPUID_AMD_FEATURE_EDX_NX | X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
3638 {
3639 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
3640 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
3641 fNXEPlusLMA |= BIT(0);
3642 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
3643 fNXEPlusLMA |= BIT(1);
3644 }
3645 }
3646
3647 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
3648 {
3649 case 0:
3650 enmMode = SUPPAGINGMODE_32_BIT;
3651 break;
3652
3653 case X86_CR4_PGE:
3654 enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
3655 break;
3656
3657 case X86_CR4_PAE:
3658 enmMode = SUPPAGINGMODE_PAE;
3659 break;
3660
3661 case X86_CR4_PAE | BIT(0):
3662 enmMode = SUPPAGINGMODE_PAE_NX;
3663 break;
3664
3665 case X86_CR4_PAE | X86_CR4_PGE:
3666 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
3667 break;
3668
3669 case X86_CR4_PAE | X86_CR4_PGE | BIT(0):
3670 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
3671 break;
3672
3673 case BIT(1) | X86_CR4_PAE:
3674 enmMode = SUPPAGINGMODE_AMD64;
3675 break;
3676
3677 case BIT(1) | X86_CR4_PAE | BIT(0):
3678 enmMode = SUPPAGINGMODE_AMD64_NX;
3679 break;
3680
3681 case BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
3682 enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
3683 break;
3684
3685 case BIT(1) | X86_CR4_PAE | X86_CR4_PGE | BIT(0):
3686 enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
3687 break;
3688
3689 default:
3690 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
3691 enmMode = SUPPAGINGMODE_INVALID;
3692 break;
3693 }
3694 }
3695 return enmMode;
3696}
3697
3698
3699#if !defined(SUPDRV_OS_HAVE_LOW) && !defined(USE_NEW_OS_INTERFACE_FOR_MM) /* Use same backend as the contiguous stuff */
3700/**
3701 * OS Specific code for allocating page aligned memory with fixed
3702 * physical backing below 4GB.
3703 *
3704 * @returns 0 on success.
3705 * @returns SUPDRV_ERR_* on failure.
3706 * @param pMem Memory reference record of the memory to be allocated.
3707 * (This is not linked in anywhere.)
 * @param   ppvR0       Where to store the Ring-0 mapping of the allocated memory.
 * @param   ppvR3       Where to store the Ring-3 mapping of the allocated memory.
 * @param   paPagesOut  Where to store the physical addresses.
3711 */
int VBOXCALL supdrvOSLowAllocOne(PSUPDRVMEMREF pMem, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PSUPPAGE paPagesOut)
{
#if defined(USE_NEW_OS_INTERFACE_FOR_LOW) /* a temp hack */
    /* Allocate low memory with a ring-0 mapping... */
    int rc = RTR0MemObjAllocLow(&pMem->u.iprt.MemObj, pMem->cb, true /* executable ring-0 mapping */);
    if (RT_SUCCESS(rc))
    {
        int rc2;
        /* ...and map it into the calling process as well. */
        rc = RTR0MemObjMapUser(&pMem->u.iprt.MapObjR3, pMem->u.iprt.MemObj, (RTR3PTR)-1, 0,
                               RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
        if (RT_SUCCESS(rc))
        {
            pMem->eType = MEMREF_TYPE_LOW;
            pMem->pvR0 = RTR0MemObjAddress(pMem->u.iprt.MemObj);
            pMem->pvR3 = RTR0MemObjAddressR3(pMem->u.iprt.MapObjR3);
            /*if (RT_SUCCESS(rc))*/
            {
                /* Report the physical address of each page to the caller. */
                size_t cPages = pMem->cb >> PAGE_SHIFT;
                size_t iPage;
                for (iPage = 0; iPage < cPages; iPage++)
                {
                    paPagesOut[iPage].Phys = RTR0MemObjGetPagePhysAddr(pMem->u.iprt.MemObj, iPage);
                    paPagesOut[iPage].uReserved = 0;
                    AssertMsg(!(paPagesOut[iPage].Phys & (PAGE_SIZE - 1)), ("iPage=%d Phys=%VHp\n", paPagesOut[iPage].Phys));
                }
                *ppvR0 = RTR0MemObjAddress(pMem->u.iprt.MemObj);
                *ppvR3 = RTR0MemObjAddressR3(pMem->u.iprt.MapObjR3);
                return VINF_SUCCESS;
            }

            /* Cleanup path (currently unreachable - kept for the commented
               out status check above). */
            rc2 = RTR0MemObjFree(pMem->u.iprt.MapObjR3, false);
            AssertRC(rc2);
        }

        rc2 = RTR0MemObjFree(pMem->u.iprt.MemObj, false);
        AssertRC(rc2);
    }
    return rc;
#else
    /* Fall back on the contiguous allocator (same backend - see the #if
       at the top of this section). */
    RTHCPHYS HCPhys;
    int rc = supdrvOSContAllocOne(pMem, ppvR0, ppvR3, &HCPhys);
    if (!rc)
    {
        /* Contiguous allocation: derive each page address from the base. */
        unsigned iPage = pMem->cb >> PAGE_SHIFT;
        while (iPage-- > 0)
        {
            paPagesOut[iPage].Phys = HCPhys + (iPage << PAGE_SHIFT);
            paPagesOut[iPage].uReserved = 0;
        }
    }
    return rc;
#endif
}
3764
3765
3766/**
3767 * Frees low memory.
3768 *
3769 * @param pMem Memory reference record of the memory to be freed.
3770 */
void VBOXCALL supdrvOSLowFreeOne(PSUPDRVMEMREF pMem)
{
# if defined(USE_NEW_OS_INTERFACE_FOR_LOW)
    /* Undo the ring-3 mapping first, then free the allocation itself. */
    if (pMem->u.iprt.MapObjR3)
    {
        int rc = RTR0MemObjFree(pMem->u.iprt.MapObjR3, false);
        AssertRC(rc); /** @todo figure out how to handle this. */
    }
    if (pMem->u.iprt.MemObj)
    {
        int rc = RTR0MemObjFree(pMem->u.iprt.MemObj, false);
        AssertRC(rc); /** @todo figure out how to handle this. */
    }
# else
    /* Allocated by the contiguous backend, so free it there too. */
    supdrvOSContFreeOne(pMem);
# endif
}
3788#endif /* !SUPDRV_OS_HAVE_LOW && !USE_NEW_OS_INTERFACE_FOR_MM */
3789
3790
3791#ifdef USE_NEW_OS_INTERFACE_FOR_GIP
3792/**
3793 * Creates the GIP.
3794 *
3795 * @returns negative errno.
3796 * @param pDevExt Instance data. GIP stuff may be updated.
3797 */
static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt)
{
    PSUPGLOBALINFOPAGE pGip;
    RTHCPHYS HCPhysGip;
    uint32_t u32SystemResolution;
    uint32_t u32Interval;
    int rc;

    dprintf(("supdrvGipCreate:\n"));

    /* assert order */
    Assert(pDevExt->u32SystemTimerGranularityGrant == 0);
    Assert(pDevExt->GipMemObj == NIL_RTR0MEMOBJ);
    Assert(!pDevExt->pGipTimer);

    /*
     * Allocate a suitable page with a default kernel mapping.
     */
    rc = RTR0MemObjAllocLow(&pDevExt->GipMemObj, PAGE_SIZE, false);
    if (RT_FAILURE(rc))
    {
        OSDBGPRINT(("supdrvGipCreate: failed to allocate the GIP page. rc=%d\n", rc));
        return rc;
    }
    pGip = (PSUPGLOBALINFOPAGE)RTR0MemObjAddress(pDevExt->GipMemObj); AssertPtr(pGip);
    HCPhysGip = RTR0MemObjGetPagePhysAddr(pDevExt->GipMemObj, 0); Assert(HCPhysGip != NIL_RTHCPHYS);

    /*
     * Try bump up the system timer resolution.
     * The more interrupts the better...
     * (Requests are in nanoseconds, tried from finest to coarsest; the
     * first one the system accepts wins and is remembered for release.)
     */
    if (    RT_SUCCESS(RTTimerRequestSystemGranularity(  976563 /* 1024 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity( 1000000 /* 1000 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity( 3906250 /*  256 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity( 4000000 /*  250 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity( 7812500 /*  128 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity(10000000 /*  100 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity(15625000 /*   64 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity(31250000 /*   32 HZ */, &u32SystemResolution))
       )
    {
        Assert(RTTimerGetSystemGranularity() <= u32SystemResolution);
        pDevExt->u32SystemTimerGranularityGrant = u32SystemResolution;
    }

    /*
     * Find a reasonable update interval, something close to 10ms would be nice,
     * and create a recurring timer. (The interval is rounded up to a whole
     * multiple of the actual system granularity.)
     */
    u32Interval = u32SystemResolution = RTTimerGetSystemGranularity();
    while (u32Interval < 10000000 /* 10 ms */)
        u32Interval += u32SystemResolution;

    rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, 0, supdrvGipTimer, pDevExt);
    if (RT_FAILURE(rc))
    {
        OSDBGPRINT(("supdrvGipCreate: failed create GIP timer at %RU32 ns interval. rc=%d\n", u32Interval, rc));
        Assert(!pDevExt->pGipTimer);
        /* supdrvGipDestroy releases the granularity grant and the page. */
        supdrvGipDestroy(pDevExt);
        return rc;
    }

    /*
     * We're good.
     */
    supdrvGipInit(pDevExt, pGip, HCPhysGip, RTTimeSystemNanoTS(), 1000000000 / u32Interval /*=Hz*/);
    return VINF_SUCCESS;
}
3866
3867
3868/**
3869 * Terminates the GIP.
3870 *
3871 * @param pDevExt Instance data. GIP stuff may be updated.
3872 */
static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt)
{
    int rc;
#ifdef DEBUG_DARWIN_GIP
    OSDBGPRINT(("supdrvGipDestroy: pDevExt=%p pGip=%p pGipTimer=%p GipMemObj=%p\n", pDevExt,
                pDevExt->GipMemObj != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pDevExt->GipMemObj) : NULL,
                pDevExt->pGipTimer, pDevExt->GipMemObj));
#endif

    /*
     * Invalidate the GIP data.
     */
    if (pDevExt->pGip)
    {
        supdrvGipTerm(pDevExt->pGip);
        pDevExt->pGip = NULL;
    }

    /*
     * Destroy the timer and free the GIP memory object.
     */
    if (pDevExt->pGipTimer)
    {
        rc = RTTimerDestroy(pDevExt->pGipTimer); AssertRC(rc);
        pDevExt->pGipTimer = NULL;
    }

    if (pDevExt->GipMemObj != NIL_RTR0MEMOBJ)
    {
        rc = RTR0MemObjFree(pDevExt->GipMemObj, true /* free mappings */); AssertRC(rc);
        pDevExt->GipMemObj = NIL_RTR0MEMOBJ;
    }

    /*
     * Finally, release the system timer resolution request if one succeeded.
     * (A zero grant means supdrvGipCreate never got one.)
     */
    if (pDevExt->u32SystemTimerGranularityGrant)
    {
        rc = RTTimerReleaseSystemGranularity(pDevExt->u32SystemTimerGranularityGrant); AssertRC(rc);
        pDevExt->u32SystemTimerGranularityGrant = 0;
    }
}
3915
3916
3917/**
3918 * Timer callback function.
3919 * @param pTimer The timer.
3920 * @param pvUser The device extension.
3921 */
static DECLCALLBACK(void) supdrvGipTimer(PRTTIMER pTimer, void *pvUser)
{
    PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
    /* Feed the current system timestamp into the GIP update machinery. */
    supdrvGipUpdate(pDevExt->pGip, RTTimeSystemNanoTS());
}
3927#endif /* USE_NEW_OS_INTERFACE_FOR_GIP */
3928
3929
3930/**
3931 * Initializes the GIP data.
3932 *
3933 * @returns IPRT status code.
3934 * @param pDevExt Pointer to the device instance data.
3935 * @param pGip Pointer to the read-write kernel mapping of the GIP.
3936 * @param HCPhys The physical address of the GIP.
3937 * @param u64NanoTS The current nanosecond timestamp.
 * @param   uUpdateHz   The update frequency.
3939 */
int VBOXCALL supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys, uint64_t u64NanoTS, unsigned uUpdateHz)
{
    unsigned i;
#ifdef DEBUG_DARWIN_GIP
    OSDBGPRINT(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
#else
    dprintf(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
#endif

    /*
     * Initialize the structure.
     */
    memset(pGip, 0, PAGE_SIZE);
    pGip->u32Magic = SUPGLOBALINFOPAGE_MAGIC;
    pGip->u32Version = SUPGLOBALINFOPAGE_VERSION;
    pGip->u32Mode = supdrvGipDeterminTscMode();
    pGip->u32UpdateHz = uUpdateHz;
    pGip->u32UpdateIntervalNS = 1000000000 / uUpdateHz;
    pGip->u64NanoTSLastUpdateHz = u64NanoTS;

    for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
    {
        /* NOTE(review): 2 is presumably an even "no update in progress"
           value for the transaction id protocol - confirm against the
           update/reader code. */
        pGip->aCPUs[i].u32TransactionId = 2;
        pGip->aCPUs[i].u64NanoTS = u64NanoTS;
        pGip->aCPUs[i].u64TSC = ASMReadTSC();

        /*
         * We don't know the following values until we've executed updates.
         * So, we'll just insert very high values.
         */
        pGip->aCPUs[i].u64CpuHz = _4G + 1;
        pGip->aCPUs[i].u32UpdateIntervalTSC = _2G / 4;
        pGip->aCPUs[i].au32TSCHistory[0] = _2G / 4;
        pGip->aCPUs[i].au32TSCHistory[1] = _2G / 4;
        pGip->aCPUs[i].au32TSCHistory[2] = _2G / 4;
        pGip->aCPUs[i].au32TSCHistory[3] = _2G / 4;
        pGip->aCPUs[i].au32TSCHistory[4] = _2G / 4;
        pGip->aCPUs[i].au32TSCHistory[5] = _2G / 4;
        pGip->aCPUs[i].au32TSCHistory[6] = _2G / 4;
        pGip->aCPUs[i].au32TSCHistory[7] = _2G / 4;
    }

    /*
     * Link it to the device extension.
     */
    pDevExt->pGip = pGip;
    pDevExt->HCPhysGip = HCPhys;
    pDevExt->cGipUsers = 0;

    return VINF_SUCCESS;
}
3991
3992
3993/**
 * Determine the GIP TSC mode.
3995 *
3996 * @returns The most suitable TSC mode.
3997 */
static SUPGIPMODE supdrvGipDeterminTscMode(void)
{
#ifndef USE_NEW_OS_INTERFACE_FOR_GIP
    /*
     * The problem here is that AMD processors with power management features
     * may easily end up with different TSCs because the CPUs or even cores
     * on the same physical chip run at different frequencies to save power.
     *
     * It is rumoured that this will be corrected with Barcelona and it's
     * expected that this will be indicated by the TscInvariant bit in
     * cpuid(0x80000007). So, the "difficult" bit here is to correctly
     * identify the older CPUs which don't do different frequency and
     * can be relied upon to have somewhat uniform TSC between the cpus.
     */
    if (supdrvOSGetCPUCount() > 1)
    {
        uint32_t uEAX, uEBX, uECX, uEDX;

        /* Permit the user to override the detection. */
        if (supdrvOSGetForcedAsyncTscMode())
            return SUPGIPMODE_ASYNC_TSC;

        /* Check for "AuthenticAMD" (the magic values are the vendor string
           spread over EBX/EDX/ECX). */
        ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
        if (uEAX >= 1 && uEBX == 0x68747541 && uECX == 0x444d4163 && uEDX == 0x69746e65)
        {
            /* Check for APM support and that TscInvariant is cleared. */
            ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
            if (uEAX >= 0x80000007)
            {
                ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX);
                if (    !(uEDX & BIT(8))/* TscInvariant */
                    &&  (uEDX & 0x3e))  /* STC|TM|THERMTRIP|VID|FID. Ignore TS. */
                    return SUPGIPMODE_ASYNC_TSC;
            }
        }
    }
#endif
    return SUPGIPMODE_SYNC_TSC;
}
4038
4039
4040/**
4041 * Invalidates the GIP data upon termination.
4042 *
4043 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4044 */
4045void VBOXCALL supdrvGipTerm(PSUPGLOBALINFOPAGE pGip)
4046{
4047 unsigned i;
4048 pGip->u32Magic = 0;
4049 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4050 {
4051 pGip->aCPUs[i].u64NanoTS = 0;
4052 pGip->aCPUs[i].u64TSC = 0;
4053 pGip->aCPUs[i].iTSCHistoryHead = 0;
4054 }
4055}
4056
4057
4058/**
4059 * Worker routine for supdrvGipUpdate and supdrvGipUpdatePerCpu that
4060 * updates all the per cpu data except the transaction id.
4061 *
4062 * @param pGip The GIP.
4063 * @param pGipCpu Pointer to the per cpu data.
4064 * @param u64NanoTS The current time stamp.
4065 */
4066static void supdrvGipDoUpdateCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pGipCpu, uint64_t u64NanoTS)
4067{
4068 uint64_t u64TSC;
4069 uint64_t u64TSCDelta;
4070 uint32_t u32UpdateIntervalTSC;
4071 uint32_t u32UpdateIntervalTSCSlack;
4072 unsigned iTSCHistoryHead;
4073 uint64_t u64CpuHz;
4074
4075 /*
4076 * Update the NanoTS.
4077 */
4078 ASMAtomicXchgU64(&pGipCpu->u64NanoTS, u64NanoTS);
4079
4080 /*
4081 * Calc TSC delta.
4082 */
4083 /** @todo validate the NanoTS delta, don't trust the OS to call us when it should... */
4084 u64TSC = ASMReadTSC();
4085 u64TSCDelta = u64TSC - pGipCpu->u64TSC;
4086 ASMAtomicXchgU64(&pGipCpu->u64TSC, u64TSC);
4087
4088 if (u64TSCDelta >> 32)
4089 {
4090 u64TSCDelta = pGipCpu->u32UpdateIntervalTSC;
4091 pGipCpu->cErrors++;
4092 }
4093
4094 /*
4095 * TSC History.
4096 */
4097 Assert(ELEMENTS(pGipCpu->au32TSCHistory) == 8);
4098
4099 iTSCHistoryHead = (pGipCpu->iTSCHistoryHead + 1) & 7;
4100 ASMAtomicXchgU32(&pGipCpu->iTSCHistoryHead, iTSCHistoryHead);
4101 ASMAtomicXchgU32(&pGipCpu->au32TSCHistory[iTSCHistoryHead], (uint32_t)u64TSCDelta);
4102
4103 /*
4104 * UpdateIntervalTSC = average of last 8,2,1 intervals depending on update HZ.
4105 */
4106 if (pGip->u32UpdateHz >= 1000)
4107 {
4108 uint32_t u32;
4109 u32 = pGipCpu->au32TSCHistory[0];
4110 u32 += pGipCpu->au32TSCHistory[1];
4111 u32 += pGipCpu->au32TSCHistory[2];
4112 u32 += pGipCpu->au32TSCHistory[3];
4113 u32 >>= 2;
4114 u32UpdateIntervalTSC = pGipCpu->au32TSCHistory[4];
4115 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[5];
4116 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[6];
4117 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[7];
4118 u32UpdateIntervalTSC >>= 2;
4119 u32UpdateIntervalTSC += u32;
4120 u32UpdateIntervalTSC >>= 1;
4121
4122 /* Value choosen for a 2GHz Athlon64 running linux 2.6.10/11, . */
4123 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 14;
4124 }
4125 else if (pGip->u32UpdateHz >= 90)
4126 {
4127 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4128 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[(iTSCHistoryHead - 1) & 7];
4129 u32UpdateIntervalTSC >>= 1;
4130
4131 /* value choosen on a 2GHz thinkpad running windows */
4132 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 7;
4133 }
4134 else
4135 {
4136 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4137
4138 /* This value hasn't be checked yet.. waiting for OS/2 and 33Hz timers.. :-) */
4139 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 6;
4140 }
4141 ASMAtomicXchgU32(&pGipCpu->u32UpdateIntervalTSC, u32UpdateIntervalTSC + u32UpdateIntervalTSCSlack);
4142
4143 /*
4144 * CpuHz.
4145 */
4146 u64CpuHz = ASMMult2xU32RetU64(u32UpdateIntervalTSC, pGip->u32UpdateHz);
4147 ASMAtomicXchgU64(&pGipCpu->u64CpuHz, u64CpuHz);
4148}
4149
4150
4151/**
4152 * Updates the GIP.
4153 *
4154 * @param pGip Pointer to the GIP.
4155 * @param u64NanoTS The current nanosecond timesamp.
4156 */
4157void VBOXCALL supdrvGipUpdate(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS)
4158{
4159 /*
4160 * Determin the relevant CPU data.
4161 */
4162 PSUPGIPCPU pGipCpu;
4163 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4164 pGipCpu = &pGip->aCPUs[0];
4165 else
4166 {
4167 unsigned iCpu = ASMGetApicId();
4168 if (RT_LIKELY(iCpu >= RT_ELEMENTS(pGip->aCPUs)))
4169 return;
4170 pGipCpu = &pGip->aCPUs[iCpu];
4171 }
4172
4173 /*
4174 * Start update transaction.
4175 */
4176 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4177 {
4178 /* this can happen on win32 if we're taking to long and there are more CPUs around. shouldn't happen though. */
4179 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4180 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4181 pGipCpu->cErrors++;
4182 return;
4183 }
4184
4185 /*
4186 * Recalc the update frequency every 0x800th time.
4187 */
4188 if (!(pGipCpu->u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ * 2 - 2)))
4189 {
4190 if (pGip->u64NanoTSLastUpdateHz)
4191 {
4192#ifdef RT_ARCH_AMD64 /** @todo fix 64-bit div here to work on x86 linux. */
4193 uint64_t u64Delta = u64NanoTS - pGip->u64NanoTSLastUpdateHz;
4194 uint32_t u32UpdateHz = (uint32_t)((UINT64_C(1000000000) * GIP_UPDATEHZ_RECALC_FREQ) / u64Delta);
4195 if (u32UpdateHz <= 2000 && u32UpdateHz >= 30)
4196 {
4197 ASMAtomicXchgU32(&pGip->u32UpdateHz, u32UpdateHz);
4198 ASMAtomicXchgU32(&pGip->u32UpdateIntervalNS, 1000000000 / u32UpdateHz);
4199 }
4200#endif
4201 }
4202 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, u64NanoTS);
4203 }
4204
4205 /*
4206 * Update the data.
4207 */
4208 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4209
4210 /*
4211 * Complete transaction.
4212 */
4213 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4214}
4215
4216
4217/**
4218 * Updates the per cpu GIP data for the calling cpu.
4219 *
4220 * @param pGip Pointer to the GIP.
4221 * @param u64NanoTS The current nanosecond timesamp.
4222 * @param iCpu The CPU index.
4223 */
4224void VBOXCALL supdrvGipUpdatePerCpu(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS, unsigned iCpu)
4225{
4226 PSUPGIPCPU pGipCpu;
4227
4228 if (RT_LIKELY(iCpu <= RT_ELEMENTS(pGip->aCPUs)))
4229 {
4230 pGipCpu = &pGip->aCPUs[iCpu];
4231
4232 /*
4233 * Start update transaction.
4234 */
4235 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4236 {
4237 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4238 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4239 pGipCpu->cErrors++;
4240 return;
4241 }
4242
4243 /*
4244 * Update the data.
4245 */
4246 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4247
4248 /*
4249 * Complete transaction.
4250 */
4251 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4252 }
4253}
4254
4255
#ifndef DEBUG /** @todo change #ifndef DEBUG -> #ifdef LOG_ENABLED */
/*
 * No-op logging stubs for non-debug builds: they satisfy the linker for
 * code compiled against the IPRT logging API while discarding all output.
 */

/**
 * Stub function for non-debug builds.
 * @returns NULL (no default logger instance in release driver builds).
 */
RTDECL(PRTLOGGER) RTLogDefaultInstance(void)
{
    return NULL;
}

/**
 * Stub function for non-debug builds.
 * @returns NULL (no release logger instance either).
 */
RTDECL(PRTLOGGER) RTLogRelDefaultInstance(void)
{
    return NULL;
}

/**
 * Stub function for non-debug builds.
 * @returns 0 unconditionally; the logger/key pair is ignored.
 */
RTDECL(int) RTLogSetDefaultInstanceThread(PRTLOGGER pLogger, uintptr_t uKey)
{
    return 0;
}

/**
 * Stub function for non-debug builds.
 */
RTDECL(void) RTLogLogger(PRTLOGGER pLogger, void *pvCallerRet, const char *pszFormat, ...)
{
}

/**
 * Stub function for non-debug builds.
 */
RTDECL(void) RTLogLoggerEx(PRTLOGGER pLogger, unsigned fFlags, unsigned iGroup, const char *pszFormat, ...)
{
}

/**
 * Stub function for non-debug builds.
 */
RTDECL(void) RTLogLoggerExV(PRTLOGGER pLogger, unsigned fFlags, unsigned iGroup, const char *pszFormat, va_list args)
{
}

/**
 * Stub function for non-debug builds.
 */
RTDECL(void) RTLogPrintf(const char *pszFormat, ...)
{
}

/**
 * Stub function for non-debug builds.
 */
RTDECL(void) RTLogPrintfV(const char *pszFormat, va_list args)
{
}
#endif /* !DEBUG */
4313
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette