VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp@ 109034

Last change on this file since 109034 was 109034, checked in by vboxsync, 3 weeks ago

SUPDrv: Omit the aarch32 capability registers if the CPU doesn't do aarch32 (unless they are zero). jiraref:VBP-1598

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 278.9 KB
Line 
1/* $Id: SUPDrv.cpp 109034 2025-04-21 11:08:11Z vboxsync $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.215389.xyz.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#define LOG_GROUP LOG_GROUP_SUP_DRV
42#define SUPDRV_AGNOSTIC
43#include "SUPDrvInternal.h"
44#ifndef PAGE_SHIFT
45# include <iprt/param.h>
46#endif
47#include <iprt/asm.h>
48#include <iprt/asm-math.h>
49#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
50# include <iprt/asm-amd64-x86.h>
51#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
52# include <iprt/asm-arm.h>
53#else
54# error "Port me!"
55#endif
56#include <iprt/cpuset.h>
57#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
58# include <iprt/dbg.h>
59#endif
60#include <iprt/handletable.h>
61#include <iprt/mem.h>
62#include <iprt/mp.h>
63#include <iprt/power.h>
64#include <iprt/process.h>
65#include <iprt/semaphore.h>
66#include <iprt/spinlock.h>
67#include <iprt/thread.h>
68#include <iprt/uuid.h>
69#include <iprt/net.h>
70#include <iprt/crc.h>
71#include <iprt/string.h>
72#include <iprt/timer.h>
73#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
74# include <iprt/rand.h>
75# include <iprt/path.h>
76#endif
77#include <iprt/uint128.h>
78#include <iprt/x86.h>
79#ifdef RT_ARCH_ARM64
80# include <iprt/armv8.h>
81#endif
82
83#include <VBox/param.h>
84#include <VBox/log.h>
85#include <VBox/err.h>
86#include <VBox/vmm/hm_vmx.h>
87
88#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
89# include "dtrace/SUPDrv.h"
90#else
91# define VBOXDRV_SESSION_CREATE(pvSession, fUser) do { } while (0)
92# define VBOXDRV_SESSION_CLOSE(pvSession) do { } while (0)
93# define VBOXDRV_IOCTL_ENTRY(pvSession, uIOCtl, pvReqHdr) do { } while (0)
94# define VBOXDRV_IOCTL_RETURN(pvSession, uIOCtl, pvReqHdr, rcRet, rcReq) do { } while (0)
95#endif
96
97#ifdef __cplusplus
98# if __cplusplus >= 201100 || RT_MSC_PREREQ(RT_MSC_VER_VS2019)
99# define SUPDRV_CAN_COUNT_FUNCTION_ARGS
100# ifdef _MSC_VER
101# pragma warning(push)
102# pragma warning(disable:4577)
103# include <type_traits>
104# pragma warning(pop)
105
106# elif defined(RT_OS_DARWIN)
107# define _LIBCPP_CSTDDEF
108# include <__nullptr>
109# include <type_traits>
110
111# else
112# include <type_traits>
113# endif
114# endif
115#endif
116
117
118/*
119 * Logging assignments:
120 * Log - useful stuff, like failures.
121 * LogFlow - program flow, except the really noisy bits.
122 * Log2 - Cleanup.
123 * Log3 - Loader flow noise.
124 * Log4 - Call VMMR0 flow noise.
125 * Log5 - Native yet-to-be-defined noise.
126 * Log6 - Native ioctl flow noise.
127 *
128 * Logging requires KBUILD_TYPE=debug and possibly changes to the logger
129 * instantiation in log-vbox.c(pp).
130 */
131
132
133/*********************************************************************************************************************************
134* Defined Constants And Macros *
135*********************************************************************************************************************************/
136/** @def VBOX_SVN_REV
137 * The makefile should define this if it can. */
138#ifndef VBOX_SVN_REV
139# define VBOX_SVN_REV 0
140#endif
141
142/** @def SUPDRV_CHECK_SMAP_SETUP
143 * SMAP check setup. */
144/** @def SUPDRV_CHECK_SMAP_CHECK
145 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it
146 * will be logged and @a a_BadExpr is executed. */
147#if (defined(RT_OS_DARWIN) || defined(RT_OS_LINUX)) && !defined(VBOX_WITHOUT_EFLAGS_AC_SET_IN_VBOXDRV)
148# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
149# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) \
150 do { \
151 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
152 { \
153 RTCCUINTREG fEfl = ASMGetFlags(); \
154 if (RT_LIKELY(fEfl & X86_EFL_AC)) \
155 { /* likely */ } \
156 else \
157 { \
158 supdrvBadContext(a_pDevExt, "SUPDrv.cpp", __LINE__, "EFLAGS.AC is 0!"); \
159 a_BadExpr; \
160 } \
161 } \
162 } while (0)
163#else
164# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
165# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) NOREF(fKernelFeatures)
166#endif
167
168
169/*********************************************************************************************************************************
170* Internal Functions *
171*********************************************************************************************************************************/
172static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser);
173static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser);
174static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
175static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
176static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
177static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
178static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
179static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt);
180static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
181static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
182static int supdrvLdrAddUsage(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage, bool fRing3Usage);
183DECLINLINE(void) supdrvLdrSubtractUsage(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, uint32_t cReference);
184static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
185DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt);
186DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt);
187static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
188static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq);
189#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
190static int supdrvIOCtl_X86MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq);
191#endif
192#if defined(RT_ARCH_ARM64)
193static int supdrvIOCtl_ArmGetSysRegs(PSUPARMGETSYSREGS pReq, uint32_t cMaxRegs, RTCPUID idCpu, uint32_t fFlags);
194#endif
195static int supdrvIOCtl_ResumeSuspendedKbds(void);
196
197
198/*********************************************************************************************************************************
199* Global Variables *
200*********************************************************************************************************************************/
201/** @def SUPEXP_CHECK_ARGS
202 * This is for checking the argument count of the function in the entry,
203 * just to make sure we don't accidentally export something the wrapper
204 * can't deal with.
205 *
206 * Using some C++11 magic to do the counting.
207 *
208 * The error is reported by overflowing the SUPFUNC::cArgs field, so the
209 * warnings can probably be a little mysterious.
210 *
211 * @note Doesn't work for CLANG 11. Works for Visual C++, unless there
212 * are function pointers in the argument list.
213 */
214#if defined(SUPDRV_CAN_COUNT_FUNCTION_ARGS) && RT_CLANG_PREREQ(99, 0)
/**
 * Compile-time helper that yields the argument count of a function pointer
 * type as a std::integral_constant.
 *
 * Never evaluated at runtime; it is only referenced in unevaluated context
 * (decltype) by the SUPEXP_CHECK_ARGS macro below to verify the hardcoded
 * argument counts given in the g_aFunctions table.
 */
template <typename RetType, typename ... Types>
constexpr std::integral_constant<unsigned, sizeof ...(Types)>
CountFunctionArguments(RetType(RTCALL *)(Types ...))
{
    return std::integral_constant<unsigned, sizeof ...(Types)>{};
}
221# define SUPEXP_CHECK_ARGS(a_cArgs, a_Name) \
222 ((a_cArgs) >= decltype(CountFunctionArguments(a_Name))::value ? (uint8_t)(a_cArgs) : 1023)
223
224#else
225# define SUPEXP_CHECK_ARGS(a_cArgs, a_Name) a_cArgs
226#endif
227
228/** @name Function table entry macros.
229 * @note The SUPEXP_STK_BACKF macro is because VC++ has trouble with functions
230 * with function pointer arguments (probably noexcept related).
231 * @{ */
232#define SUPEXP_CUSTOM(a_cArgs, a_Name, a_Value) { #a_Name, a_cArgs, (void *)(uintptr_t)(a_Value) }
233#define SUPEXP_STK_OKAY(a_cArgs, a_Name) { #a_Name, SUPEXP_CHECK_ARGS(a_cArgs, a_Name), (void *)(uintptr_t)a_Name }
234#if 0
235# define SUPEXP_STK_BACK(a_cArgs, a_Name) { "StkBack_" #a_Name, SUPEXP_CHECK_ARGS(a_cArgs, a_Name), (void *)(uintptr_t)a_Name }
236# define SUPEXP_STK_BACKF(a_cArgs, a_Name) { "StkBack_" #a_Name, SUPEXP_CHECK_ARGS(a_cArgs, a_Name), (void *)(uintptr_t)a_Name }
237#else
238# define SUPEXP_STK_BACK(a_cArgs, a_Name) { #a_Name, SUPEXP_CHECK_ARGS(a_cArgs, a_Name), (void *)(uintptr_t)a_Name }
239# ifdef _MSC_VER
240# define SUPEXP_STK_BACKF(a_cArgs, a_Name) { #a_Name, a_cArgs, (void *)(uintptr_t)a_Name }
241# else
242# define SUPEXP_STK_BACKF(a_cArgs, a_Name) { #a_Name, SUPEXP_CHECK_ARGS(a_cArgs, a_Name), (void *)(uintptr_t)a_Name }
243# endif
244#endif
245/** @} */
246
/**
 * Array of the R0 SUP API.
 *
 * While making changes to these exports, make sure to update the IOC
 * minor version (SUPDRV_IOC_VERSION).
 *
 * @remarks This array is processed by SUPR0-def-pe.sed and SUPR0-def-lx.sed to
 *          produce definition files from which import libraries are generated.
 *          Take care when commenting things and especially with \#ifdef'ing:
 *          do not add comments or reformat lines between the SED START and
 *          SED END markers, as the sed scripts parse that region literally.
 *
 * @note    The fixup code in supdrvInitDevExt() assumes the SUPR0Abs* entries
 *          come first and in exactly this order (indices 0..9) — do not
 *          reorder them.
 */
static SUPFUNC g_aFunctions[] =
{
/* SED: START */
    /* name function */
    /* Entries with absolute addresses determined at runtime, fixup
       code makes ugly ASSUMPTIONS about the order here: */
#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    SUPEXP_CUSTOM( 0, SUPR0AbsIs64bit, 0), /* not-arch-arm64 */
    SUPEXP_CUSTOM( 0, SUPR0Abs64bitKernelCS, 0), /* not-arch-arm64 */
    SUPEXP_CUSTOM( 0, SUPR0Abs64bitKernelSS, 0), /* not-arch-arm64 */
    SUPEXP_CUSTOM( 0, SUPR0Abs64bitKernelDS, 0), /* not-arch-arm64 */
    SUPEXP_CUSTOM( 0, SUPR0AbsKernelCS, 0), /* not-arch-arm64 */
    SUPEXP_CUSTOM( 0, SUPR0AbsKernelSS, 0), /* not-arch-arm64 */
    SUPEXP_CUSTOM( 0, SUPR0AbsKernelDS, 0), /* not-arch-arm64 */
    SUPEXP_CUSTOM( 0, SUPR0AbsKernelES, 0), /* not-arch-arm64 */
    SUPEXP_CUSTOM( 0, SUPR0AbsKernelFS, 0), /* not-arch-arm64 */
    SUPEXP_CUSTOM( 0, SUPR0AbsKernelGS, 0), /* not-arch-arm64 */
#endif
    /* Normal function & data pointers: */
    SUPEXP_CUSTOM( 0, g_pSUPGlobalInfoPage, &g_pSUPGlobalInfoPage), /* SED: DATA */
    SUPEXP_STK_OKAY( 0, SUPGetGIP),
    SUPEXP_STK_BACK( 1, SUPReadTscWithDelta),
    SUPEXP_STK_BACK( 1, SUPGetTscDeltaSlow),
    SUPEXP_STK_BACK( 1, SUPGetCpuHzFromGipForAsyncMode),
    SUPEXP_STK_OKAY( 3, SUPIsTscFreqCompatible),
    SUPEXP_STK_OKAY( 3, SUPIsTscFreqCompatibleEx),
    SUPEXP_STK_BACK( 4, SUPR0BadContext),
    SUPEXP_STK_BACK( 2, SUPR0ComponentDeregisterFactory),
    SUPEXP_STK_BACK( 4, SUPR0ComponentQueryFactory),
    SUPEXP_STK_BACK( 2, SUPR0ComponentRegisterFactory),
    SUPEXP_STK_BACK( 5, SUPR0ContAlloc),
    SUPEXP_STK_BACK( 2, SUPR0ContFree),
    SUPEXP_STK_OKAY( 0, SUPR0GetKernelFeatures),
    SUPEXP_STK_BACK( 0, SUPR0GetPagingMode),
    SUPEXP_STK_OKAY( 1, SUPR0FpuBegin),
    SUPEXP_STK_OKAY( 1, SUPR0FpuEnd),
#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    SUPEXP_STK_BACK( 2, SUPR0ChangeCR4), /* not-arch-arm64 */
    SUPEXP_STK_BACK( 1, SUPR0EnableVTx), /* not-arch-arm64 */
    SUPEXP_STK_BACK( 0, SUPR0SuspendVTxOnCpu), /* not-arch-arm64 */
    SUPEXP_STK_BACK( 1, SUPR0ResumeVTxOnCpu), /* not-arch-arm64 */
    SUPEXP_STK_OKAY( 1, SUPR0GetCurrentGdtRw), /* not-arch-arm64 */
    SUPEXP_STK_BACK( 3, SUPR0GetHwvirtMsrs), /* not-arch-arm64 */
    SUPEXP_STK_BACK( 1, SUPR0GetSvmUsability), /* not-arch-arm64 */
    SUPEXP_STK_BACK( 1, SUPR0GetVTSupport), /* not-arch-arm64 */
    SUPEXP_STK_BACK( 1, SUPR0GetVmxUsability), /* not-arch-arm64 */
#endif
    SUPEXP_STK_BACK( 2, SUPR0LdrIsLockOwnerByMod),
    SUPEXP_STK_BACK( 1, SUPR0LdrLock),
    SUPEXP_STK_BACK( 1, SUPR0LdrUnlock),
    SUPEXP_STK_BACK( 3, SUPR0LdrModByName),
    SUPEXP_STK_BACK( 2, SUPR0LdrModRelease),
    SUPEXP_STK_BACK( 2, SUPR0LdrModRetain),
    SUPEXP_STK_BACK( 4, SUPR0LockMem),
    SUPEXP_STK_BACK( 5, SUPR0LowAlloc),
    SUPEXP_STK_BACK( 2, SUPR0LowFree),
    SUPEXP_STK_BACK( 4, SUPR0MemAlloc),
    SUPEXP_STK_BACK( 2, SUPR0MemFree),
    SUPEXP_STK_BACK( 3, SUPR0MemGetPhys),
    SUPEXP_STK_BACK( 2, SUPR0ObjAddRef),
    SUPEXP_STK_BACK( 3, SUPR0ObjAddRefEx),
    SUPEXP_STK_BACKF( 5, SUPR0ObjRegister),
    SUPEXP_STK_BACK( 2, SUPR0ObjRelease),
    SUPEXP_STK_BACK( 3, SUPR0ObjVerifyAccess),
    SUPEXP_STK_BACK( 6, SUPR0PageAllocEx),
    SUPEXP_STK_BACK( 2, SUPR0PageFree),
    SUPEXP_STK_BACK( 6, SUPR0PageMapKernel),
    SUPEXP_STK_BACK( 6, SUPR0PageProtect),
#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
    SUPEXP_STK_OKAY( 2, SUPR0HCPhysToVirt), /* only-linux, only-solaris, only-freebsd */
#endif
    SUPEXP_STK_BACK( 2, SUPR0PrintfV),
    SUPEXP_STK_BACK( 1, SUPR0GetSessionGVM),
    SUPEXP_STK_BACK( 1, SUPR0GetSessionVM),
    SUPEXP_STK_BACK( 3, SUPR0SetSessionVM),
    SUPEXP_STK_BACK( 1, SUPR0GetSessionUid),
    SUPEXP_STK_BACK( 6, SUPR0TscDeltaMeasureBySetIndex),
    SUPEXP_STK_BACK( 1, SUPR0TracerDeregisterDrv),
    SUPEXP_STK_BACK( 2, SUPR0TracerDeregisterImpl),
    SUPEXP_STK_BACK( 6, SUPR0TracerFireProbe),
    SUPEXP_STK_BACK( 3, SUPR0TracerRegisterDrv),
    SUPEXP_STK_BACK( 4, SUPR0TracerRegisterImpl),
    SUPEXP_STK_BACK( 2, SUPR0TracerRegisterModule),
    SUPEXP_STK_BACK( 2, SUPR0TracerUmodProbeFire),
    SUPEXP_STK_BACK( 2, SUPR0UnlockMem),
#ifdef RT_OS_WINDOWS
    SUPEXP_STK_BACK( 4, SUPR0IoCtlSetupForHandle), /* only-windows */
    SUPEXP_STK_BACK( 9, SUPR0IoCtlPerform), /* only-windows */
    SUPEXP_STK_BACK( 1, SUPR0IoCtlCleanup), /* only-windows */
#endif
    SUPEXP_STK_BACK( 2, SUPSemEventClose),
    SUPEXP_STK_BACK( 2, SUPSemEventCreate),
    SUPEXP_STK_BACK( 1, SUPSemEventGetResolution),
    SUPEXP_STK_BACK( 2, SUPSemEventMultiClose),
    SUPEXP_STK_BACK( 2, SUPSemEventMultiCreate),
    SUPEXP_STK_BACK( 1, SUPSemEventMultiGetResolution),
    SUPEXP_STK_BACK( 2, SUPSemEventMultiReset),
    SUPEXP_STK_BACK( 2, SUPSemEventMultiSignal),
    SUPEXP_STK_BACK( 3, SUPSemEventMultiWait),
    SUPEXP_STK_BACK( 3, SUPSemEventMultiWaitNoResume),
    SUPEXP_STK_BACK( 3, SUPSemEventMultiWaitNsAbsIntr),
    SUPEXP_STK_BACK( 3, SUPSemEventMultiWaitNsRelIntr),
    SUPEXP_STK_BACK( 2, SUPSemEventSignal),
    SUPEXP_STK_BACK( 3, SUPSemEventWait),
    SUPEXP_STK_BACK( 3, SUPSemEventWaitNoResume),
    SUPEXP_STK_BACK( 3, SUPSemEventWaitNsAbsIntr),
    SUPEXP_STK_BACK( 3, SUPSemEventWaitNsRelIntr),

    SUPEXP_STK_BACK( 0, RTAssertAreQuiet),
    SUPEXP_STK_BACK( 0, RTAssertMayPanic),
    SUPEXP_STK_BACK( 4, RTAssertMsg1),
    SUPEXP_STK_BACK( 2, RTAssertMsg2AddV),
    SUPEXP_STK_BACK( 2, RTAssertMsg2V),
    SUPEXP_STK_BACK( 1, RTAssertSetMayPanic),
    SUPEXP_STK_BACK( 1, RTAssertSetQuiet),
    SUPEXP_STK_OKAY( 2, RTCrc32),
    SUPEXP_STK_OKAY( 1, RTCrc32Finish),
    SUPEXP_STK_OKAY( 3, RTCrc32Process),
    SUPEXP_STK_OKAY( 0, RTCrc32Start),
    SUPEXP_STK_OKAY( 1, RTErrConvertFromErrno),
    SUPEXP_STK_OKAY( 1, RTErrConvertToErrno),
    SUPEXP_STK_BACK( 4, RTHandleTableAllocWithCtx),
    SUPEXP_STK_BACK( 1, RTHandleTableCreate),
    SUPEXP_STK_BACKF( 6, RTHandleTableCreateEx),
    SUPEXP_STK_BACKF( 3, RTHandleTableDestroy),
    SUPEXP_STK_BACK( 3, RTHandleTableFreeWithCtx),
    SUPEXP_STK_BACK( 3, RTHandleTableLookupWithCtx),
    SUPEXP_STK_BACK( 4, RTLogBulkNestedWrite),
    SUPEXP_STK_BACK( 5, RTLogBulkUpdate),
    SUPEXP_STK_BACK( 2, RTLogCheckGroupFlags),
    SUPEXP_STK_BACKF( 17, RTLogCreateExV),
    SUPEXP_STK_BACK( 1, RTLogDestroy),
    SUPEXP_STK_BACK( 0, RTLogDefaultInstance),
    SUPEXP_STK_BACK( 1, RTLogDefaultInstanceEx),
    SUPEXP_STK_BACK( 1, SUPR0DefaultLogInstanceEx),
    SUPEXP_STK_BACK( 0, RTLogGetDefaultInstance),
    SUPEXP_STK_BACK( 1, RTLogGetDefaultInstanceEx),
    SUPEXP_STK_BACK( 1, SUPR0GetDefaultLogInstanceEx),
    SUPEXP_STK_BACK( 5, RTLogLoggerExV),
    SUPEXP_STK_BACK( 2, RTLogPrintfV),
    SUPEXP_STK_BACK( 0, RTLogRelGetDefaultInstance),
    SUPEXP_STK_BACK( 1, RTLogRelGetDefaultInstanceEx),
    SUPEXP_STK_BACK( 1, SUPR0GetDefaultLogRelInstanceEx),
    SUPEXP_STK_BACK( 2, RTLogSetDefaultInstanceThread),
    SUPEXP_STK_BACKF( 2, RTLogSetFlushCallback),
    SUPEXP_STK_BACK( 2, RTLogSetR0ProgramStart),
    SUPEXP_STK_BACK( 3, RTLogSetR0ThreadNameV),
    SUPEXP_STK_BACK( 5, RTMemAllocExTag),
    SUPEXP_STK_BACK( 2, RTMemAllocTag),
    SUPEXP_STK_BACK( 2, RTMemAllocVarTag),
    SUPEXP_STK_BACK( 2, RTMemAllocZTag),
    SUPEXP_STK_BACK( 2, RTMemAllocZVarTag),
    SUPEXP_STK_BACK( 4, RTMemDupExTag),
    SUPEXP_STK_BACK( 3, RTMemDupTag),
    SUPEXP_STK_BACK( 1, RTMemFree),
    SUPEXP_STK_BACK( 2, RTMemFreeEx),
    SUPEXP_STK_BACK( 3, RTMemReallocTag),
    SUPEXP_STK_BACK( 0, RTMpCpuId),
    SUPEXP_STK_BACK( 1, RTMpCpuIdFromSetIndex),
    SUPEXP_STK_BACK( 1, RTMpCpuIdToSetIndex),
    SUPEXP_STK_BACK( 0, RTMpCurSetIndex),
    SUPEXP_STK_BACK( 1, RTMpCurSetIndexAndId),
    SUPEXP_STK_BACK( 0, RTMpGetArraySize),
    SUPEXP_STK_BACK( 0, RTMpGetCount),
    SUPEXP_STK_BACK( 0, RTMpGetMaxCpuId),
    SUPEXP_STK_BACK( 0, RTMpGetOnlineCount),
    SUPEXP_STK_BACK( 1, RTMpGetOnlineSet),
    SUPEXP_STK_BACK( 1, RTMpGetSet),
    SUPEXP_STK_BACK( 1, RTMpIsCpuOnline),
    SUPEXP_STK_BACK( 1, RTMpIsCpuPossible),
    SUPEXP_STK_BACK( 0, RTMpIsCpuWorkPending),
    SUPEXP_STK_BACKF( 2, RTMpNotificationDeregister),
    SUPEXP_STK_BACKF( 2, RTMpNotificationRegister),
    SUPEXP_STK_BACKF( 3, RTMpOnAll),
    SUPEXP_STK_BACKF( 3, RTMpOnOthers),
    SUPEXP_STK_BACKF( 4, RTMpOnSpecific),
    SUPEXP_STK_BACK( 1, RTMpPokeCpu),
    SUPEXP_STK_OKAY( 4, RTNetIPv4AddDataChecksum),
    SUPEXP_STK_OKAY( 2, RTNetIPv4AddTCPChecksum),
    SUPEXP_STK_OKAY( 2, RTNetIPv4AddUDPChecksum),
    SUPEXP_STK_OKAY( 1, RTNetIPv4FinalizeChecksum),
    SUPEXP_STK_OKAY( 1, RTNetIPv4HdrChecksum),
    SUPEXP_STK_OKAY( 4, RTNetIPv4IsDHCPValid),
    SUPEXP_STK_OKAY( 4, RTNetIPv4IsHdrValid),
    SUPEXP_STK_OKAY( 4, RTNetIPv4IsTCPSizeValid),
    SUPEXP_STK_OKAY( 6, RTNetIPv4IsTCPValid),
    SUPEXP_STK_OKAY( 3, RTNetIPv4IsUDPSizeValid),
    SUPEXP_STK_OKAY( 5, RTNetIPv4IsUDPValid),
    SUPEXP_STK_OKAY( 1, RTNetIPv4PseudoChecksum),
    SUPEXP_STK_OKAY( 4, RTNetIPv4PseudoChecksumBits),
    SUPEXP_STK_OKAY( 3, RTNetIPv4TCPChecksum),
    SUPEXP_STK_OKAY( 3, RTNetIPv4UDPChecksum),
    SUPEXP_STK_OKAY( 1, RTNetIPv6PseudoChecksum),
    SUPEXP_STK_OKAY( 4, RTNetIPv6PseudoChecksumBits),
    SUPEXP_STK_OKAY( 3, RTNetIPv6PseudoChecksumEx),
    SUPEXP_STK_OKAY( 4, RTNetTCPChecksum),
    SUPEXP_STK_OKAY( 2, RTNetUDPChecksum),
    SUPEXP_STK_BACKF( 2, RTPowerNotificationDeregister),
    SUPEXP_STK_BACKF( 2, RTPowerNotificationRegister),
    SUPEXP_STK_BACK( 0, RTProcSelf),
    SUPEXP_STK_BACK( 0, RTR0AssertPanicSystem),
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
    SUPEXP_STK_BACK( 2, RTR0DbgKrnlInfoOpen), /* only-darwin, only-solaris, only-windows */
    SUPEXP_STK_BACK( 5, RTR0DbgKrnlInfoQueryMember), /* only-darwin, only-solaris, only-windows */
# if defined(RT_OS_SOLARIS)
    SUPEXP_STK_BACK( 4, RTR0DbgKrnlInfoQuerySize), /* only-solaris */
# endif
    SUPEXP_STK_BACK( 4, RTR0DbgKrnlInfoQuerySymbol), /* only-darwin, only-solaris, only-windows */
    SUPEXP_STK_BACK( 1, RTR0DbgKrnlInfoRelease), /* only-darwin, only-solaris, only-windows */
    SUPEXP_STK_BACK( 1, RTR0DbgKrnlInfoRetain), /* only-darwin, only-solaris, only-windows */
#endif
    SUPEXP_STK_BACK( 0, RTR0MemAreKrnlAndUsrDifferent),
    SUPEXP_STK_BACK( 1, RTR0MemKernelIsValidAddr),
    SUPEXP_STK_BACK( 3, RTR0MemKernelCopyFrom),
    SUPEXP_STK_BACK( 3, RTR0MemKernelCopyTo),
    SUPEXP_STK_OKAY( 1, RTR0MemObjAddress),
    SUPEXP_STK_OKAY( 1, RTR0MemObjAddressR3),
    SUPEXP_STK_BACK( 5, RTR0MemObjAllocContTag),
    SUPEXP_STK_BACK( 5, RTR0MemObjAllocLargeTag),
    SUPEXP_STK_BACK( 4, RTR0MemObjAllocLowTag),
    SUPEXP_STK_BACK( 4, RTR0MemObjAllocPageTag),
    SUPEXP_STK_BACK( 5, RTR0MemObjAllocPhysExTag),
    SUPEXP_STK_BACK( 4, RTR0MemObjAllocPhysNCTag),
    SUPEXP_STK_BACK( 4, RTR0MemObjAllocPhysTag),
    SUPEXP_STK_BACK( 5, RTR0MemObjEnterPhysTag),
    SUPEXP_STK_BACK( 2, RTR0MemObjFree),
    SUPEXP_STK_BACK( 2, RTR0MemObjGetPagePhysAddr),
    SUPEXP_STK_OKAY( 1, RTR0MemObjIsMapping),
    SUPEXP_STK_BACK( 6, RTR0MemObjLockUserTag),
    SUPEXP_STK_BACK( 5, RTR0MemObjLockKernelTag),
    SUPEXP_STK_BACK( 8, RTR0MemObjMapKernelExTag),
    SUPEXP_STK_BACK( 6, RTR0MemObjMapKernelTag),
    SUPEXP_STK_BACK( 9, RTR0MemObjMapUserExTag),
    SUPEXP_STK_BACK( 7, RTR0MemObjMapUserTag),
    SUPEXP_STK_BACK( 4, RTR0MemObjProtect),
    SUPEXP_STK_OKAY( 1, RTR0MemObjSize),
    SUPEXP_STK_OKAY( 1, RTR0MemObjWasZeroInitialized),
    SUPEXP_STK_OKAY( 2, RTR0MemObjZeroInitialize),
    SUPEXP_STK_BACK( 3, RTR0MemUserCopyFrom),
    SUPEXP_STK_BACK( 3, RTR0MemUserCopyTo),
    SUPEXP_STK_BACK( 1, RTR0MemUserIsValidAddr),
    SUPEXP_STK_BACK( 0, RTR0ProcHandleSelf),
    SUPEXP_STK_BACK( 1, RTSemEventCreate),
    SUPEXP_STK_BACK( 1, RTSemEventDestroy),
    SUPEXP_STK_BACK( 0, RTSemEventGetResolution),
    SUPEXP_STK_BACK( 0, RTSemEventIsSignalSafe),
    SUPEXP_STK_BACK( 1, RTSemEventMultiCreate),
    SUPEXP_STK_BACK( 1, RTSemEventMultiDestroy),
    SUPEXP_STK_BACK( 0, RTSemEventMultiGetResolution),
    SUPEXP_STK_BACK( 0, RTSemEventMultiIsSignalSafe),
    SUPEXP_STK_BACK( 1, RTSemEventMultiReset),
    SUPEXP_STK_BACK( 1, RTSemEventMultiSignal),
    SUPEXP_STK_BACK( 2, RTSemEventMultiWait),
    SUPEXP_STK_BACK( 3, RTSemEventMultiWaitEx),
    SUPEXP_STK_BACK( 7, RTSemEventMultiWaitExDebug),
    SUPEXP_STK_BACK( 2, RTSemEventMultiWaitNoResume),
    SUPEXP_STK_BACK( 1, RTSemEventSignal),
    SUPEXP_STK_BACK( 2, RTSemEventWait),
    SUPEXP_STK_BACK( 3, RTSemEventWaitEx),
    SUPEXP_STK_BACK( 7, RTSemEventWaitExDebug),
    SUPEXP_STK_BACK( 2, RTSemEventWaitNoResume),
    SUPEXP_STK_BACK( 1, RTSemFastMutexCreate),
    SUPEXP_STK_BACK( 1, RTSemFastMutexDestroy),
    SUPEXP_STK_BACK( 1, RTSemFastMutexRelease),
    SUPEXP_STK_BACK( 1, RTSemFastMutexRequest),
    SUPEXP_STK_BACK( 1, RTSemMutexCreate),
    SUPEXP_STK_BACK( 1, RTSemMutexDestroy),
    SUPEXP_STK_BACK( 1, RTSemMutexRelease),
    SUPEXP_STK_BACK( 2, RTSemMutexRequest),
    SUPEXP_STK_BACK( 6, RTSemMutexRequestDebug),
    SUPEXP_STK_BACK( 2, RTSemMutexRequestNoResume),
    SUPEXP_STK_BACK( 6, RTSemMutexRequestNoResumeDebug),
    SUPEXP_STK_BACK( 1, RTSpinlockAcquire),
    SUPEXP_STK_BACK( 3, RTSpinlockCreate),
    SUPEXP_STK_BACK( 1, RTSpinlockDestroy),
    SUPEXP_STK_BACK( 1, RTSpinlockRelease),
    SUPEXP_STK_OKAY( 3, RTStrCopy),
    SUPEXP_STK_BACK( 2, RTStrDupTag),
    SUPEXP_STK_BACK( 6, RTStrFormatNumber),
    SUPEXP_STK_BACK( 1, RTStrFormatTypeDeregister),
    SUPEXP_STK_BACKF( 3, RTStrFormatTypeRegister),
    SUPEXP_STK_BACKF( 2, RTStrFormatTypeSetUser),
    SUPEXP_STK_BACKF( 6, RTStrFormatV),
    SUPEXP_STK_BACK( 1, RTStrFree),
    SUPEXP_STK_OKAY( 3, RTStrNCmp),
    SUPEXP_STK_BACKF( 6, RTStrPrintfExV),
    SUPEXP_STK_BACK( 4, RTStrPrintfV),
    SUPEXP_STK_BACKF( 6, RTStrPrintf2ExV),
    SUPEXP_STK_BACK( 4, RTStrPrintf2V),
    SUPEXP_STK_BACKF( 7, RTThreadCreate),
    SUPEXP_STK_BACK( 1, RTThreadCtxHookIsEnabled),
    SUPEXP_STK_BACKF( 4, RTThreadCtxHookCreate),
    SUPEXP_STK_BACK( 1, RTThreadCtxHookDestroy),
    SUPEXP_STK_BACK( 1, RTThreadCtxHookDisable),
    SUPEXP_STK_BACK( 1, RTThreadCtxHookEnable),
    SUPEXP_STK_BACK( 1, RTThreadGetName),
    SUPEXP_STK_BACK( 1, RTThreadGetNative),
    SUPEXP_STK_BACK( 1, RTThreadGetType),
    SUPEXP_STK_BACK( 1, RTThreadIsInInterrupt),
    SUPEXP_STK_BACK( 0, RTThreadNativeSelf),
    SUPEXP_STK_BACK( 1, RTThreadPreemptDisable),
    SUPEXP_STK_BACK( 1, RTThreadPreemptIsEnabled),
    SUPEXP_STK_BACK( 1, RTThreadPreemptIsPending),
    SUPEXP_STK_BACK( 0, RTThreadPreemptIsPendingTrusty),
    SUPEXP_STK_BACK( 0, RTThreadPreemptIsPossible),
    SUPEXP_STK_BACK( 1, RTThreadPreemptRestore),
    SUPEXP_STK_BACK( 1, RTThreadQueryTerminationStatus),
    SUPEXP_STK_BACK( 0, RTThreadSelf),
    SUPEXP_STK_BACK( 0, RTThreadSelfName),
    SUPEXP_STK_BACK( 1, RTThreadSleep),
    SUPEXP_STK_BACK( 1, RTThreadUserReset),
    SUPEXP_STK_BACK( 1, RTThreadUserSignal),
    SUPEXP_STK_BACK( 2, RTThreadUserWait),
    SUPEXP_STK_BACK( 2, RTThreadUserWaitNoResume),
    SUPEXP_STK_BACK( 3, RTThreadWait),
    SUPEXP_STK_BACK( 3, RTThreadWaitNoResume),
    SUPEXP_STK_BACK( 0, RTThreadYield),
    SUPEXP_STK_BACK( 1, RTTimeNow),
    SUPEXP_STK_BACK( 0, RTTimerCanDoHighResolution),
    SUPEXP_STK_BACK( 2, RTTimerChangeInterval),
    SUPEXP_STK_BACKF( 4, RTTimerCreate),
    SUPEXP_STK_BACKF( 5, RTTimerCreateEx),
    SUPEXP_STK_BACK( 1, RTTimerDestroy),
    SUPEXP_STK_BACK( 0, RTTimerGetSystemGranularity),
    SUPEXP_STK_BACK( 1, RTTimerReleaseSystemGranularity),
    SUPEXP_STK_BACK( 2, RTTimerRequestSystemGranularity),
    SUPEXP_STK_BACK( 2, RTTimerStart),
    SUPEXP_STK_BACK( 1, RTTimerStop),
    SUPEXP_STK_BACK( 0, RTTimeSystemMilliTS),
    SUPEXP_STK_BACK( 0, RTTimeSystemNanoTS),
    SUPEXP_STK_OKAY( 2, RTUuidCompare),
    SUPEXP_STK_OKAY( 2, RTUuidCompareStr),
    SUPEXP_STK_OKAY( 2, RTUuidFromStr),
/* SED: END */
};
592
593#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on darwin.
 *
 * A NULL-terminated table of function pointers that is never called; it only
 * references the symbols so the linker keeps them in the driver for the
 * sibling kernel modules listed in the per-entry comments.
 */
struct CLANG11WERIDNESS { PFNRT pfn; } g_apfnVBoxDrvIPRTDeps[] =
{
    /* VBoxNetAdp */
    { (PFNRT)RTRandBytes },
    /* VBoxUSB */
    { (PFNRT)RTPathStripFilename },
#if !defined(RT_OS_FREEBSD)
    { (PFNRT)RTHandleTableAlloc },
    { (PFNRT)RTStrPurgeEncoding },
#endif
    { NULL } /* terminator */
};
610#endif /* RT_OS_DARWIN || RT_OS_SOLARIS || RT_OS_FREEBSD */
611
612
613
614/**
615 * Initializes the device extension structure.
616 *
617 * @returns IPRT status code.
618 * @param pDevExt The device extension to initialize.
619 * @param cbSession The size of the session structure. The size of
620 * SUPDRVSESSION may be smaller when SUPDRV_AGNOSTIC is
621 * defined because we're skipping the OS specific members
622 * then.
623 */
624int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt, size_t cbSession)
625{
626 int rc;
627
628#ifdef SUPDRV_WITH_RELEASE_LOGGER
629 /*
630 * Create the release log.
631 */
632 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
633 PRTLOGGER pRelLogger;
634 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
635 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups, RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
636 if (RT_SUCCESS(rc))
637 RTLogRelSetDefaultInstance(pRelLogger);
638 /** @todo Add native hook for getting logger config parameters and setting
639 * them. On linux we should use the module parameter stuff... */
640#endif
641
642#if (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)) && !defined(VBOX_WITH_OLD_CPU_SUPPORT)
643 /*
644 * Require SSE2 to be present.
645 */
646 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2))
647 {
648 SUPR0Printf("vboxdrv: Requires SSE2 (cpuid(0).EDX=%#x)\n", ASMCpuId_EDX(1));
649 return VERR_UNSUPPORTED_CPU;
650 }
651#endif
652
653 /*
654 * Initialize it.
655 */
656 memset(pDevExt, 0, sizeof(*pDevExt)); /* Does not wipe OS specific tail section of the structure. */
657 pDevExt->Spinlock = NIL_RTSPINLOCK;
658 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
659 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
660#ifdef SUPDRV_USE_MUTEX_FOR_LDR
661 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
662#else
663 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
664#endif
665#ifdef SUPDRV_USE_MUTEX_FOR_GIP
666 pDevExt->mtxGip = NIL_RTSEMMUTEX;
667 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
668#else
669 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
670 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
671#endif
672
673 rc = RTSpinlockCreate(&pDevExt->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvDevExt");
674 if (RT_SUCCESS(rc))
675 rc = RTSpinlockCreate(&pDevExt->hGipSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvGip");
676 if (RT_SUCCESS(rc))
677 rc = RTSpinlockCreate(&pDevExt->hSessionHashTabSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvSession");
678
679 if (RT_SUCCESS(rc))
680#ifdef SUPDRV_USE_MUTEX_FOR_LDR
681 rc = RTSemMutexCreate(&pDevExt->mtxLdr);
682#else
683 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
684#endif
685 if (RT_SUCCESS(rc))
686#ifdef SUPDRV_USE_MUTEX_FOR_GIP
687 rc = RTSemMutexCreate(&pDevExt->mtxTscDelta);
688#else
689 rc = RTSemFastMutexCreate(&pDevExt->mtxTscDelta);
690#endif
691 if (RT_SUCCESS(rc))
692 {
693 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
694 if (RT_SUCCESS(rc))
695 {
696#ifdef SUPDRV_USE_MUTEX_FOR_GIP
697 rc = RTSemMutexCreate(&pDevExt->mtxGip);
698#else
699 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
700#endif
701 if (RT_SUCCESS(rc))
702 {
703 rc = supdrvGipCreate(pDevExt);
704 if (RT_SUCCESS(rc))
705 {
706 rc = supdrvTracerInit(pDevExt);
707 if (RT_SUCCESS(rc))
708 {
709 pDevExt->pLdrInitImage = NULL;
710 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
711 pDevExt->hLdrTermThread = NIL_RTNATIVETHREAD;
712 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
713 pDevExt->cbSession = (uint32_t)cbSession;
714
715 /*
716 * Fixup the absolute symbols.
717 *
718 * Because of the table indexing assumptions we'll have a little #ifdef orgy
719 * here rather than distributing this to OS specific files. At least for now.
720 */
721#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
722# ifdef RT_OS_DARWIN
723# if ARCH_BITS == 32
724 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
725 {
726 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
727 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
728 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
729 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
730 }
731 else
732 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
733 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
734 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
735 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
736 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
737 g_aFunctions[8].pfn = (void *)0x10; /* SUPR0AbsKernelFS - KERNEL_DS, seg.h */
738 g_aFunctions[9].pfn = (void *)0x48; /* SUPR0AbsKernelGS - CPU_DATA_GS, seg.h */
739# else /* 64-bit darwin: */
740 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
741 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
742 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
743 g_aFunctions[3].pfn = (void *)0; /* SUPR0Abs64bitKernelDS */
744 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
745 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
746 g_aFunctions[6].pfn = (void *)0; /* SUPR0AbsKernelDS */
747 g_aFunctions[7].pfn = (void *)0; /* SUPR0AbsKernelES */
748 g_aFunctions[8].pfn = (void *)0; /* SUPR0AbsKernelFS */
749 g_aFunctions[9].pfn = (void *)0; /* SUPR0AbsKernelGS */
750
751# endif
752# else /* !RT_OS_DARWIN */
753# if ARCH_BITS == 64
754 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
755 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
756 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
757 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
758# else
759 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
760# endif
761 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
762 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
763 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
764 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
765 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
766 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
767# endif /* !RT_OS_DARWIN */
768#endif /* AMD64 || X86 */
769 return VINF_SUCCESS;
770 }
771
772 supdrvGipDestroy(pDevExt);
773 }
774
775#ifdef SUPDRV_USE_MUTEX_FOR_GIP
776 RTSemMutexDestroy(pDevExt->mtxGip);
777 pDevExt->mtxGip = NIL_RTSEMMUTEX;
778#else
779 RTSemFastMutexDestroy(pDevExt->mtxGip);
780 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
781#endif
782 }
783 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
784 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
785 }
786 }
787
788#ifdef SUPDRV_USE_MUTEX_FOR_GIP
789 RTSemMutexDestroy(pDevExt->mtxTscDelta);
790 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
791#else
792 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
793 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
794#endif
795#ifdef SUPDRV_USE_MUTEX_FOR_LDR
796 RTSemMutexDestroy(pDevExt->mtxLdr);
797 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
798#else
799 RTSemFastMutexDestroy(pDevExt->mtxLdr);
800 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
801#endif
802 RTSpinlockDestroy(pDevExt->Spinlock);
803 pDevExt->Spinlock = NIL_RTSPINLOCK;
804 RTSpinlockDestroy(pDevExt->hGipSpinlock);
805 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
806 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
807 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
808
809#ifdef SUPDRV_WITH_RELEASE_LOGGER
810 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
811 RTLogDestroy(RTLogSetDefaultInstance(NULL));
812#endif
813
814 return rc;
815}
816
817
818/**
819 * Delete the device extension (e.g. cleanup members).
820 *
821 * @param pDevExt The device extension to delete.
822 */
823void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
824{
825 PSUPDRVOBJ pObj;
826 PSUPDRVUSAGE pUsage;
827
828 /*
829 * Kill mutexes and spinlocks.
830 */
831#ifdef SUPDRV_USE_MUTEX_FOR_GIP
832 RTSemMutexDestroy(pDevExt->mtxGip);
833 pDevExt->mtxGip = NIL_RTSEMMUTEX;
834 RTSemMutexDestroy(pDevExt->mtxTscDelta);
835 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
836#else
837 RTSemFastMutexDestroy(pDevExt->mtxGip);
838 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
839 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
840 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
841#endif
842#ifdef SUPDRV_USE_MUTEX_FOR_LDR
843 RTSemMutexDestroy(pDevExt->mtxLdr);
844 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
845#else
846 RTSemFastMutexDestroy(pDevExt->mtxLdr);
847 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
848#endif
849 RTSpinlockDestroy(pDevExt->Spinlock);
850 pDevExt->Spinlock = NIL_RTSPINLOCK;
851 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
852 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
853 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
854 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
855
856 /*
857 * Free lists.
858 */
859 /* objects. */
860 pObj = pDevExt->pObjs;
861 Assert(!pObj); /* (can trigger on forced unloads) */
862 pDevExt->pObjs = NULL;
863 while (pObj)
864 {
865 void *pvFree = pObj;
866 pObj = pObj->pNext;
867 RTMemFree(pvFree);
868 }
869
870 /* usage records. */
871 pUsage = pDevExt->pUsageFree;
872 pDevExt->pUsageFree = NULL;
873 while (pUsage)
874 {
875 void *pvFree = pUsage;
876 pUsage = pUsage->pNext;
877 RTMemFree(pvFree);
878 }
879
880 /* kill the GIP. */
881 supdrvGipDestroy(pDevExt);
882 RTSpinlockDestroy(pDevExt->hGipSpinlock);
883 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
884
885 supdrvTracerTerm(pDevExt);
886
887#ifdef SUPDRV_WITH_RELEASE_LOGGER
888 /* destroy the loggers. */
889 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
890 RTLogDestroy(RTLogSetDefaultInstance(NULL));
891#endif
892}
893
894
895/**
896 * Create session.
897 *
898 * @returns IPRT status code.
899 * @param pDevExt Device extension.
900 * @param fUser Flag indicating whether this is a user or kernel
901 * session.
902 * @param fUnrestricted Unrestricted access (system) or restricted access
903 * (user)?
904 * @param ppSession Where to store the pointer to the session data.
905 */
906int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, bool fUnrestricted, PSUPDRVSESSION *ppSession)
907{
908 int rc;
909 PSUPDRVSESSION pSession;
910
911 if (!SUP_IS_DEVEXT_VALID(pDevExt))
912 return VERR_INVALID_PARAMETER;
913
914 /*
915 * Allocate memory for the session data.
916 */
917 pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(pDevExt->cbSession);
918 if (pSession)
919 {
920 /* Initialize session data. */
921 rc = RTSpinlockCreate(&pSession->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "SUPDrvSession");
922 if (!rc)
923 {
924 rc = RTHandleTableCreateEx(&pSession->hHandleTable,
925 RTHANDLETABLE_FLAGS_LOCKED_IRQ_SAFE | RTHANDLETABLE_FLAGS_CONTEXT,
926 1 /*uBase*/, 32768 /*cMax*/, supdrvSessionObjHandleRetain, pSession);
927 if (RT_SUCCESS(rc))
928 {
929 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
930 pSession->pDevExt = pDevExt;
931 pSession->u32Cookie = BIRD_INV;
932 pSession->fUnrestricted = fUnrestricted;
933 /*pSession->fInHashTable = false; */
934 pSession->cRefs = 1;
935 /*pSession->pCommonNextHash = NULL;
936 pSession->ppOsSessionPtr = NULL; */
937 if (fUser)
938 {
939 pSession->Process = RTProcSelf();
940 pSession->R0Process = RTR0ProcHandleSelf();
941 }
942 else
943 {
944 pSession->Process = NIL_RTPROCESS;
945 pSession->R0Process = NIL_RTR0PROCESS;
946 }
947 /*pSession->pLdrUsage = NULL;
948 pSession->pVM = NULL;
949 pSession->pUsage = NULL;
950 pSession->pGip = NULL;
951 pSession->fGipReferenced = false;
952 pSession->Bundle.cUsed = 0; */
953 pSession->Uid = NIL_RTUID;
954 pSession->Gid = NIL_RTGID;
955 /*pSession->uTracerData = 0;*/
956 pSession->hTracerCaller = NIL_RTNATIVETHREAD;
957 RTListInit(&pSession->TpProviders);
958 /*pSession->cTpProviders = 0;*/
959 /*pSession->cTpProbesFiring = 0;*/
960 RTListInit(&pSession->TpUmods);
961 /*RT_ZERO(pSession->apTpLookupTable);*/
962
963 VBOXDRV_SESSION_CREATE(pSession, fUser);
964 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
965 return VINF_SUCCESS;
966 }
967
968 RTSpinlockDestroy(pSession->Spinlock);
969 }
970 RTMemFree(pSession);
971 *ppSession = NULL;
972 Log(("Failed to create spinlock, rc=%d!\n", rc));
973 }
974 else
975 rc = VERR_NO_MEMORY;
976
977 return rc;
978}
979
980
981/**
982 * Cleans up the session in the context of the process to which it belongs, the
983 * caller will free the session and the session spinlock.
984 *
985 * This should normally occur when the session is closed or as the process
986 * exits. Careful reference counting in the OS specfic code makes sure that
987 * there cannot be any races between process/handle cleanup callbacks and
988 * threads doing I/O control calls.
989 *
990 * @param pDevExt The device extension.
991 * @param pSession Session data.
992 */
993static void supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
994{
995 int rc;
996 PSUPDRVBUNDLE pBundle;
997 LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));
998
999 Assert(!pSession->fInHashTable);
1000 Assert(!pSession->ppOsSessionPtr);
1001 AssertLogRelMsg(pSession->R0Process == RTR0ProcHandleSelf() || pSession->R0Process == NIL_RTR0PROCESS,
1002 ("R0Process=%p cur=%p; curpid=%u\n",
1003 pSession->R0Process, RTR0ProcHandleSelf(), RTProcSelf()));
1004
1005 /*
1006 * Remove logger instances related to this session.
1007 */
1008 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
1009
1010 /*
1011 * Destroy the handle table.
1012 */
1013 rc = RTHandleTableDestroy(pSession->hHandleTable, supdrvSessionObjHandleDelete, pSession);
1014 AssertRC(rc);
1015 pSession->hHandleTable = NIL_RTHANDLETABLE;
1016
1017 /*
1018 * Release object references made in this session.
1019 * In theory there should be noone racing us in this session.
1020 */
1021 Log2(("release objects - start\n"));
1022 if (pSession->pUsage)
1023 {
1024 PSUPDRVUSAGE pUsage;
1025 RTSpinlockAcquire(pDevExt->Spinlock);
1026
1027 while ((pUsage = pSession->pUsage) != NULL)
1028 {
1029 PSUPDRVOBJ pObj = pUsage->pObj;
1030 pSession->pUsage = pUsage->pNext;
1031
1032 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
1033 if (pUsage->cUsage < pObj->cUsage)
1034 {
1035 pObj->cUsage -= pUsage->cUsage;
1036 RTSpinlockRelease(pDevExt->Spinlock);
1037 }
1038 else
1039 {
1040 /* Destroy the object and free the record. */
1041 if (pDevExt->pObjs == pObj)
1042 pDevExt->pObjs = pObj->pNext;
1043 else
1044 {
1045 PSUPDRVOBJ pObjPrev;
1046 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
1047 if (pObjPrev->pNext == pObj)
1048 {
1049 pObjPrev->pNext = pObj->pNext;
1050 break;
1051 }
1052 Assert(pObjPrev);
1053 }
1054 RTSpinlockRelease(pDevExt->Spinlock);
1055
1056 Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
1057 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
1058 if (pObj->pfnDestructor)
1059 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
1060 RTMemFree(pObj);
1061 }
1062
1063 /* free it and continue. */
1064 RTMemFree(pUsage);
1065
1066 RTSpinlockAcquire(pDevExt->Spinlock);
1067 }
1068
1069 RTSpinlockRelease(pDevExt->Spinlock);
1070 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during desturction!\n"));
1071 }
1072 Log2(("release objects - done\n"));
1073
1074 /*
1075 * Make sure the associated VM pointers are NULL.
1076 */
1077 if (pSession->pSessionGVM || pSession->pSessionVM || pSession->pFastIoCtrlVM)
1078 {
1079 SUPR0Printf("supdrvCleanupSession: VM not disassociated! pSessionGVM=%p pSessionVM=%p pFastIoCtrlVM=%p\n",
1080 pSession->pSessionGVM, pSession->pSessionVM, pSession->pFastIoCtrlVM);
1081 pSession->pSessionGVM = NULL;
1082 pSession->pSessionVM = NULL;
1083 pSession->pFastIoCtrlVM = NULL;
1084 }
1085
1086 /*
1087 * Do tracer cleanups related to this session.
1088 */
1089 Log2(("release tracer stuff - start\n"));
1090 supdrvTracerCleanupSession(pDevExt, pSession);
1091 Log2(("release tracer stuff - end\n"));
1092
1093 /*
1094 * Release memory allocated in the session.
1095 *
1096 * We do not serialize this as we assume that the application will
1097 * not allocated memory while closing the file handle object.
1098 */
1099 Log2(("freeing memory:\n"));
1100 pBundle = &pSession->Bundle;
1101 while (pBundle)
1102 {
1103 PSUPDRVBUNDLE pToFree;
1104 unsigned i;
1105
1106 /*
1107 * Check and unlock all entries in the bundle.
1108 */
1109 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
1110 {
1111 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
1112 {
1113 Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
1114 (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
1115 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
1116 {
1117 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
1118 AssertRC(rc); /** @todo figure out how to handle this. */
1119 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
1120 }
1121 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
1122 AssertRC(rc); /** @todo figure out how to handle this. */
1123 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
1124 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
1125 }
1126 }
1127
1128 /*
1129 * Advance and free previous bundle.
1130 */
1131 pToFree = pBundle;
1132 pBundle = pBundle->pNext;
1133
1134 pToFree->pNext = NULL;
1135 pToFree->cUsed = 0;
1136 if (pToFree != &pSession->Bundle)
1137 RTMemFree(pToFree);
1138 }
1139 Log2(("freeing memory - done\n"));
1140
1141 /*
1142 * Deregister component factories.
1143 */
1144 RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
1145 Log2(("deregistering component factories:\n"));
1146 if (pDevExt->pComponentFactoryHead)
1147 {
1148 PSUPDRVFACTORYREG pPrev = NULL;
1149 PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
1150 while (pCur)
1151 {
1152 if (pCur->pSession == pSession)
1153 {
1154 /* unlink it */
1155 PSUPDRVFACTORYREG pNext = pCur->pNext;
1156 if (pPrev)
1157 pPrev->pNext = pNext;
1158 else
1159 pDevExt->pComponentFactoryHead = pNext;
1160
1161 /* free it */
1162 pCur->pNext = NULL;
1163 pCur->pSession = NULL;
1164 pCur->pFactory = NULL;
1165 RTMemFree(pCur);
1166
1167 /* next */
1168 pCur = pNext;
1169 }
1170 else
1171 {
1172 /* next */
1173 pPrev = pCur;
1174 pCur = pCur->pNext;
1175 }
1176 }
1177 }
1178 RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
1179 Log2(("deregistering component factories - done\n"));
1180
1181 /*
1182 * Loaded images needs to be dereferenced and possibly freed up.
1183 */
1184 supdrvLdrLock(pDevExt);
1185 Log2(("freeing images:\n"));
1186 if (pSession->pLdrUsage)
1187 {
1188 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
1189 pSession->pLdrUsage = NULL;
1190 while (pUsage)
1191 {
1192 void *pvFree = pUsage;
1193 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
1194 uint32_t cUsage = pUsage->cRing0Usage + pUsage->cRing3Usage;
1195 if (pImage->cImgUsage > cUsage)
1196 supdrvLdrSubtractUsage(pDevExt, pImage, cUsage);
1197 else
1198 supdrvLdrFree(pDevExt, pImage);
1199 pUsage->pImage = NULL;
1200 pUsage = pUsage->pNext;
1201 RTMemFree(pvFree);
1202 }
1203 }
1204 supdrvLdrUnlock(pDevExt);
1205 Log2(("freeing images - done\n"));
1206
1207 /*
1208 * Unmap the GIP.
1209 */
1210 Log2(("umapping GIP:\n"));
1211 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
1212 {
1213 SUPR0GipUnmap(pSession);
1214 pSession->fGipReferenced = 0;
1215 }
1216 Log2(("umapping GIP - done\n"));
1217}
1218
1219
1220/**
1221 * Common code for freeing a session when the reference count reaches zero.
1222 *
1223 * @param pDevExt Device extension.
1224 * @param pSession Session data.
1225 * This data will be freed by this routine.
1226 */
1227static void supdrvDestroySession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1228{
1229 VBOXDRV_SESSION_CLOSE(pSession);
1230
1231 /*
1232 * Cleanup the session first.
1233 */
1234 supdrvCleanupSession(pDevExt, pSession);
1235 supdrvOSCleanupSession(pDevExt, pSession);
1236
1237 /*
1238 * Free the rest of the session stuff.
1239 */
1240 RTSpinlockDestroy(pSession->Spinlock);
1241 pSession->Spinlock = NIL_RTSPINLOCK;
1242 pSession->pDevExt = NULL;
1243 RTMemFree(pSession);
1244 LogFlow(("supdrvDestroySession: returns\n"));
1245}
1246
1247
1248/**
1249 * Inserts the session into the global hash table.
1250 *
1251 * @retval VINF_SUCCESS on success.
1252 * @retval VERR_WRONG_ORDER if the session was already inserted (asserted).
1253 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1254 * session (asserted).
1255 * @retval VERR_DUPLICATE if there is already a session for that pid.
1256 *
1257 * @param pDevExt The device extension.
1258 * @param pSession The session.
1259 * @param ppOsSessionPtr Pointer to the OS session pointer, if any is
1260 * available and used. This will set to point to the
1261 * session while under the protection of the session
1262 * hash table spinlock. It will also be kept in
1263 * PSUPDRVSESSION::ppOsSessionPtr for lookup and
1264 * cleanup use.
1265 * @param pvUser Argument for supdrvOSSessionHashTabInserted.
1266 */
1267int VBOXCALL supdrvSessionHashTabInsert(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVSESSION *ppOsSessionPtr,
1268 void *pvUser)
1269{
1270 PSUPDRVSESSION pCur;
1271 unsigned iHash;
1272
1273 /*
1274 * Validate input.
1275 */
1276 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1277 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1278
1279 /*
1280 * Calculate the hash table index and acquire the spinlock.
1281 */
1282 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1283
1284 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1285
1286 /*
1287 * If there are a collisions, we need to carefully check if we got a
1288 * duplicate. There can only be one open session per process.
1289 */
1290 pCur = pDevExt->apSessionHashTab[iHash];
1291 if (pCur)
1292 {
1293 while (pCur && pCur->Process != pSession->Process)
1294 pCur = pCur->pCommonNextHash;
1295
1296 if (pCur)
1297 {
1298 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1299 if (pCur == pSession)
1300 {
1301 Assert(pSession->fInHashTable);
1302 AssertFailed();
1303 return VERR_WRONG_ORDER;
1304 }
1305 Assert(!pSession->fInHashTable);
1306 if (pCur->R0Process == pSession->R0Process)
1307 return VERR_RESOURCE_IN_USE;
1308 return VERR_DUPLICATE;
1309 }
1310 }
1311 Assert(!pSession->fInHashTable);
1312 Assert(!pSession->ppOsSessionPtr);
1313
1314 /*
1315 * Insert it, doing a callout to the OS specific code in case it has
1316 * anything it wishes to do while we're holding the spinlock.
1317 */
1318 pSession->pCommonNextHash = pDevExt->apSessionHashTab[iHash];
1319 pDevExt->apSessionHashTab[iHash] = pSession;
1320 pSession->fInHashTable = true;
1321 ASMAtomicIncS32(&pDevExt->cSessions);
1322
1323 pSession->ppOsSessionPtr = ppOsSessionPtr;
1324 if (ppOsSessionPtr)
1325 ASMAtomicWritePtr(ppOsSessionPtr, pSession);
1326
1327 supdrvOSSessionHashTabInserted(pDevExt, pSession, pvUser);
1328
1329 /*
1330 * Retain a reference for the pointer in the session table.
1331 */
1332 ASMAtomicIncU32(&pSession->cRefs);
1333
1334 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1335 return VINF_SUCCESS;
1336}
1337
1338
1339/**
1340 * Removes the session from the global hash table.
1341 *
1342 * @retval VINF_SUCCESS on success.
1343 * @retval VERR_NOT_FOUND if the session was already removed (asserted).
1344 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1345 * session (asserted).
1346 *
1347 * @param pDevExt The device extension.
1348 * @param pSession The session. The caller is expected to have a reference
1349 * to this so it won't croak on us when we release the hash
1350 * table reference.
1351 * @param pvUser OS specific context value for the
1352 * supdrvOSSessionHashTabInserted callback.
1353 */
1354int VBOXCALL supdrvSessionHashTabRemove(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, void *pvUser)
1355{
1356 PSUPDRVSESSION pCur;
1357 unsigned iHash;
1358 int32_t cRefs;
1359
1360 /*
1361 * Validate input.
1362 */
1363 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1364 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1365
1366 /*
1367 * Calculate the hash table index and acquire the spinlock.
1368 */
1369 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1370
1371 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1372
1373 /*
1374 * Unlink it.
1375 */
1376 pCur = pDevExt->apSessionHashTab[iHash];
1377 if (pCur == pSession)
1378 pDevExt->apSessionHashTab[iHash] = pSession->pCommonNextHash;
1379 else
1380 {
1381 PSUPDRVSESSION pPrev = pCur;
1382 while (pCur && pCur != pSession)
1383 {
1384 pPrev = pCur;
1385 pCur = pCur->pCommonNextHash;
1386 }
1387 if (pCur)
1388 pPrev->pCommonNextHash = pCur->pCommonNextHash;
1389 else
1390 {
1391 Assert(!pSession->fInHashTable);
1392 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1393 return VERR_NOT_FOUND;
1394 }
1395 }
1396
1397 pSession->pCommonNextHash = NULL;
1398 pSession->fInHashTable = false;
1399
1400 ASMAtomicDecS32(&pDevExt->cSessions);
1401
1402 /*
1403 * Clear OS specific session pointer if available and do the OS callback.
1404 */
1405 if (pSession->ppOsSessionPtr)
1406 {
1407 ASMAtomicCmpXchgPtr(pSession->ppOsSessionPtr, NULL, pSession);
1408 pSession->ppOsSessionPtr = NULL;
1409 }
1410
1411 supdrvOSSessionHashTabRemoved(pDevExt, pSession, pvUser);
1412
1413 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1414
1415 /*
1416 * Drop the reference the hash table had to the session. This shouldn't
1417 * be the last reference!
1418 */
1419 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1420 Assert(cRefs > 0 && cRefs < _1M);
1421 if (cRefs == 0)
1422 supdrvDestroySession(pDevExt, pSession);
1423
1424 return VINF_SUCCESS;
1425}
1426
1427
1428/**
1429 * Looks up the session for the current process in the global hash table or in
1430 * OS specific pointer.
1431 *
1432 * @returns Pointer to the session with a reference that the caller must
1433 * release. If no valid session was found, NULL is returned.
1434 *
1435 * @param pDevExt The device extension.
1436 * @param Process The process ID.
1437 * @param R0Process The ring-0 process handle.
1438 * @param ppOsSessionPtr The OS session pointer if available. If not NULL,
1439 * this is used instead of the hash table. For
1440 * additional safety it must then be equal to the
1441 * SUPDRVSESSION::ppOsSessionPtr member.
1442 * This can be NULL even if the OS has a session
1443 * pointer.
1444 */
1445PSUPDRVSESSION VBOXCALL supdrvSessionHashTabLookup(PSUPDRVDEVEXT pDevExt, RTPROCESS Process, RTR0PROCESS R0Process,
1446 PSUPDRVSESSION *ppOsSessionPtr)
1447{
1448 PSUPDRVSESSION pCur;
1449 unsigned iHash;
1450
1451 /*
1452 * Validate input.
1453 */
1454 AssertReturn(R0Process != NIL_RTR0PROCESS, NULL);
1455
1456 /*
1457 * Calculate the hash table index and acquire the spinlock.
1458 */
1459 iHash = SUPDRV_SESSION_HASH(Process);
1460
1461 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1462
1463 /*
1464 * If an OS session pointer is provided, always use it.
1465 */
1466 if (ppOsSessionPtr)
1467 {
1468 pCur = *ppOsSessionPtr;
1469 if ( pCur
1470 && ( pCur->ppOsSessionPtr != ppOsSessionPtr
1471 || pCur->Process != Process
1472 || pCur->R0Process != R0Process) )
1473 pCur = NULL;
1474 }
1475 else
1476 {
1477 /*
1478 * Otherwise, do the hash table lookup.
1479 */
1480 pCur = pDevExt->apSessionHashTab[iHash];
1481 while ( pCur
1482 && ( pCur->Process != Process
1483 || pCur->R0Process != R0Process) )
1484 pCur = pCur->pCommonNextHash;
1485 }
1486
1487 /*
1488 * Retain the session.
1489 */
1490 if (pCur)
1491 {
1492 uint32_t cRefs = ASMAtomicIncU32(&pCur->cRefs);
1493 NOREF(cRefs);
1494 Assert(cRefs > 1 && cRefs < _1M);
1495 }
1496
1497 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1498
1499 return pCur;
1500}
1501
1502
1503/**
1504 * Retain a session to make sure it doesn't go away while it is in use.
1505 *
1506 * @returns New reference count on success, UINT32_MAX on failure.
1507 * @param pSession Session data.
1508 */
1509uint32_t VBOXCALL supdrvSessionRetain(PSUPDRVSESSION pSession)
1510{
1511 uint32_t cRefs;
1512 AssertPtrReturn(pSession, UINT32_MAX);
1513 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1514
1515 cRefs = ASMAtomicIncU32(&pSession->cRefs);
1516 AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1517 return cRefs;
1518}
1519
1520
1521/**
1522 * Releases a given session.
1523 *
1524 * @returns New reference count on success (0 if closed), UINT32_MAX on failure.
1525 * @param pSession Session data.
1526 */
1527uint32_t VBOXCALL supdrvSessionRelease(PSUPDRVSESSION pSession)
1528{
1529 uint32_t cRefs;
1530 AssertPtrReturn(pSession, UINT32_MAX);
1531 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1532
1533 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1534 AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1535 if (cRefs == 0)
1536 supdrvDestroySession(pSession->pDevExt, pSession);
1537 return cRefs;
1538}
1539
1540
1541/**
1542 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1543 *
1544 * @returns IPRT status code, see SUPR0ObjAddRef.
1545 * @param hHandleTable The handle table handle. Ignored.
1546 * @param pvObj The object pointer.
1547 * @param pvCtx Context, the handle type. Ignored.
1548 * @param pvUser Session pointer.
1549 */
1550static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser)
1551{
1552 NOREF(pvCtx);
1553 NOREF(hHandleTable);
1554 return SUPR0ObjAddRefEx(pvObj, (PSUPDRVSESSION)pvUser, true /*fNoBlocking*/);
1555}
1556
1557
1558/**
1559 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1560 *
1561 * @param hHandleTable The handle table handle. Ignored.
1562 * @param h The handle value. Ignored.
1563 * @param pvObj The object pointer.
1564 * @param pvCtx Context, the handle type. Ignored.
1565 * @param pvUser Session pointer.
1566 */
1567static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser)
1568{
1569 NOREF(pvCtx);
1570 NOREF(h);
1571 NOREF(hHandleTable);
1572 SUPR0ObjRelease(pvObj, (PSUPDRVSESSION)pvUser);
1573}
1574
1575
1576/**
1577 * Fast path I/O Control worker.
1578 *
1579 * @returns VBox status code that should be passed down to ring-3 unchanged.
1580 * @param uOperation SUP_VMMR0_DO_XXX (not the I/O control number!).
1581 * @param idCpu VMCPU id.
1582 * @param pDevExt Device extention.
1583 * @param pSession Session data.
1584 */
1585int VBOXCALL supdrvIOCtlFast(uintptr_t uOperation, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1586{
1587 /*
1588 * Validate input and check that the VM has a session.
1589 */
1590 if (RT_LIKELY(RT_VALID_PTR(pSession)))
1591 {
1592 PVM pVM = pSession->pSessionVM;
1593 PGVM pGVM = pSession->pSessionGVM;
1594 if (RT_LIKELY( pGVM != NULL
1595 && pVM != NULL
1596 && pVM == pSession->pFastIoCtrlVM))
1597 {
1598 if (RT_LIKELY(pDevExt->pfnVMMR0EntryFast))
1599 {
1600 /*
1601 * Make the call.
1602 */
1603 pDevExt->pfnVMMR0EntryFast(pGVM, pVM, idCpu, uOperation);
1604 return VINF_SUCCESS;
1605 }
1606
1607 SUPR0Printf("supdrvIOCtlFast: pfnVMMR0EntryFast is NULL\n");
1608 }
1609 else
1610 SUPR0Printf("supdrvIOCtlFast: Misconfig session: pGVM=%p pVM=%p pFastIoCtrlVM=%p\n",
1611 pGVM, pVM, pSession->pFastIoCtrlVM);
1612 }
1613 else
1614 SUPR0Printf("supdrvIOCtlFast: Bad session pointer %p\n", pSession);
1615 return VERR_INTERNAL_ERROR;
1616}
1617
1618
1619/**
1620 * Helper for supdrvIOCtl used to validate module names passed to SUP_IOCTL_LDR_OPEN.
1621 *
1622 * Check if pszStr contains any character of pszChars. We would use strpbrk
1623 * here if this function would be contained in the RedHat kABI white list, see
1624 * http://www.kerneldrivers.org/RHEL5.
1625 *
1626 * @returns true if fine, false if not.
1627 * @param pszName The module name to check.
1628 */
1629static bool supdrvIsLdrModuleNameValid(const char *pszName)
1630{
1631 int chCur;
1632 while ((chCur = *pszName++) != '\0')
1633 {
1634 static const char s_szInvalidChars[] = ";:()[]{}/\\|&*%#@!~`\"'";
1635 unsigned offInv = RT_ELEMENTS(s_szInvalidChars);
1636 while (offInv-- > 0)
1637 if (s_szInvalidChars[offInv] == chCur)
1638 return false;
1639 }
1640 return true;
1641}
1642
1643
1644
1645/**
1646 * I/O Control inner worker (tracing reasons).
1647 *
1648 * @returns IPRT status code.
1649 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1650 *
1651 * @param uIOCtl Function number.
1652 * @param pDevExt Device extention.
1653 * @param pSession Session data.
1654 * @param pReqHdr The request header.
1655 */
1656static int supdrvIOCtlInnerUnrestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
1657{
1658 /*
1659 * Validation macros
1660 */
1661#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1662 do { \
1663 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1664 { \
1665 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1666 (long)pReqHdr->cbIn, (long)(cbInExpect), (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1667 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1668 } \
1669 } while (0)
1670
1671#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1672
1673#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1674 do { \
1675 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1676 { \
1677 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
1678 (long)pReqHdr->cbIn, (long)(cbInExpect))); \
1679 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1680 } \
1681 } while (0)
1682
1683#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1684 do { \
1685 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1686 { \
1687 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
1688 (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1689 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1690 } \
1691 } while (0)
1692
1693#define REQ_CHECK_EXPR(Name, expr) \
1694 do { \
1695 if (RT_UNLIKELY(!(expr))) \
1696 { \
1697 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1698 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1699 } \
1700 } while (0)
1701
1702#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1703 do { \
1704 if (RT_UNLIKELY(!(expr))) \
1705 { \
1706 OSDBGPRINT( fmt ); \
1707 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1708 } \
1709 } while (0)
1710
1711 /*
1712 * The switch.
1713 */
1714 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1715 {
        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
        {
            /* First request of every session: checks the magic, negotiates the
               interface version and hands out the device + session cookies. */
            PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
            /* strncmp over the full field so an unterminated magic cannot pass. */
            if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
            {
                OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
                pReq->Hdr.rc = VERR_INVALID_MAGIC;
                return 0;
            }

#if 0
            /*
             * Call out to the OS specific code and let it do permission checks on the
             * client process.
             */
            if (!supdrvOSValidateClientProcess(pDevExt, pSession))
            {
                pReq->u.Out.u32Cookie         = 0xffffffff;
                pReq->u.Out.u32SessionCookie  = 0xffffffff;
                pReq->u.Out.u32SessionVersion = 0xffffffff;
                pReq->u.Out.u32DriverVersion  = SUPDRV_IOC_VERSION;
                pReq->u.Out.pSession          = NULL;
                pReq->u.Out.cFunctions        = 0;
                pReq->Hdr.rc = VERR_PERMISSION_DENIED;
                return 0;
            }
#endif

            /*
             * Match the version.
             * The current logic is very simple, match the major interface version.
             */
            if (    pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
                ||  (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
            {
                /* On mismatch the Out union is still filled in (with sentinel
                   cookies) so the client can log the driver version. */
                OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x  Min: %#x  Current: %#x\n",
                            pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
                pReq->u.Out.u32Cookie         = 0xffffffff;
                pReq->u.Out.u32SessionCookie  = 0xffffffff;
                pReq->u.Out.u32SessionVersion = 0xffffffff;
                pReq->u.Out.u32DriverVersion  = SUPDRV_IOC_VERSION;
                pReq->u.Out.pSession          = NULL;
                pReq->u.Out.cFunctions        = 0;
                pReq->Hdr.rc = VERR_VERSION_MISMATCH;
                return 0;
            }

            /*
             * Fill in return data and be gone.
             * N.B. The first one to change SUPDRV_IOC_VERSION shall makes sure that
             *      u32SessionVersion <= u32ReqVersion!
             */
            /** @todo Somehow validate the client and negotiate a secure cookie... */
            pReq->u.Out.u32Cookie         = pDevExt->u32Cookie;
            pReq->u.Out.u32SessionCookie  = pSession->u32Cookie;
            pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
            pReq->u.Out.u32DriverVersion  = SUPDRV_IOC_VERSION;
            pReq->u.Out.pSession          = pSession;
            pReq->u.Out.cFunctions        = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
            pReq->Hdr.rc = VINF_SUCCESS;
            return 0;
        }
1779
        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
        {
            /* Returns the complete table of functions (g_aFunctions) the
               driver exports to ring-3 clients. */
            /* validate */
            PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
            REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));

            /* execute */
            pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
            RT_BCOPY_UNFORTIFIED(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
            pReq->Hdr.rc = VINF_SUCCESS;
            return 0;
        }
1792
        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
        {
            /* Locks a ring-3 buffer into memory and returns its page list. */
            /* validate */
            PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
            REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
            /* Output size is variable: one entry per page to be locked. */
            REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
            REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
            /* Rejects addresses inside the first page (catches NULL and
               near-NULL pointers). */
            REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);

            /* execute */
            pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
            if (RT_FAILURE(pReq->Hdr.rc))
                pReq->Hdr.cbOut = sizeof(pReq->Hdr); /* shrink reply to just the header on failure */
            return 0;
        }
1808
1809 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1810 {
1811 /* validate */
1812 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1813 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1814
1815 /* execute */
1816 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1817 return 0;
1818 }
1819
1820 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1821 {
1822 /* validate */
1823 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1824 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1825
1826 /* execute */
1827 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1828 if (RT_FAILURE(pReq->Hdr.rc))
1829 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1830 return 0;
1831 }
1832
1833 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1834 {
1835 /* validate */
1836 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1837 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1838
1839 /* execute */
1840 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1841 return 0;
1842 }
1843
1844 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1845 {
1846 /* validate */
1847 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1848 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1849 if ( pReq->u.In.cbImageWithEverything != 0
1850 || pReq->u.In.cbImageBits != 0)
1851 {
1852 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything > 0);
1853 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything < 16*_1M);
1854 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits > 0);
1855 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits < pReq->u.In.cbImageWithEverything);
1856 }
1857 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1858 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
1859 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, supdrvIsLdrModuleNameValid(pReq->u.In.szName));
1860 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szFilename, sizeof(pReq->u.In.szFilename)));
1861
1862 /* execute */
1863 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1864 return 0;
1865 }
1866
1867 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1868 {
1869 /* validate */
1870 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1871 uint8_t const * const pbSrcImage = pReq->u.In.abImage;
1872 REQ_CHECK_EXPR(Name, pReq->Hdr.cbIn >= SUP_IOCTL_LDR_LOAD_SIZE_IN(32));
1873 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImageWithEverything), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1874 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1875 || ( pReq->u.In.cSymbols <= 16384
1876 && pReq->u.In.offSymbols >= pReq->u.In.cbImageBits
1877 && pReq->u.In.offSymbols < pReq->u.In.cbImageWithEverything
1878 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImageWithEverything),
1879 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSymbols,
1880 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImageWithEverything));
1881 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1882 || ( pReq->u.In.offStrTab < pReq->u.In.cbImageWithEverything
1883 && pReq->u.In.offStrTab >= pReq->u.In.cbImageBits
1884 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything
1885 && pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything),
1886 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offStrTab,
1887 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImageWithEverything));
1888 REQ_CHECK_EXPR_FMT( pReq->u.In.cSegments >= 1
1889 && pReq->u.In.cSegments <= 128
1890 && pReq->u.In.cSegments <= (pReq->u.In.cbImageBits + PAGE_SIZE - 1) / PAGE_SIZE
1891 && pReq->u.In.offSegments >= pReq->u.In.cbImageBits
1892 && pReq->u.In.offSegments < pReq->u.In.cbImageWithEverything
1893 && pReq->u.In.offSegments + pReq->u.In.cSegments * sizeof(SUPLDRSEG) <= pReq->u.In.cbImageWithEverything,
1894 ("SUP_IOCTL_LDR_LOAD: offSegments=%#lx cSegments=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSegments,
1895 (long)pReq->u.In.cSegments, (long)pReq->u.In.cbImageWithEverything));
1896
1897 if (pReq->u.In.cSymbols)
1898 {
1899 uint32_t i;
1900 PSUPLDRSYM paSyms = (PSUPLDRSYM)(&pbSrcImage[pReq->u.In.offSymbols]);
1901 for (i = 0; i < pReq->u.In.cSymbols; i++)
1902 {
1903 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImageWithEverything,
1904 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImageWithEverything));
1905 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1906 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithEverything));
1907 REQ_CHECK_EXPR_FMT(RTStrEnd((char const *)(&pbSrcImage[pReq->u.In.offStrTab + paSyms[i].offName]),
1908 pReq->u.In.cbStrTab - paSyms[i].offName),
1909 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithEverything));
1910 }
1911 }
1912 {
1913 uint32_t i;
1914 uint32_t offPrevEnd = 0;
1915 PSUPLDRSEG paSegs = (PSUPLDRSEG)(&pbSrcImage[pReq->u.In.offSegments]);
1916 for (i = 0; i < pReq->u.In.cSegments; i++)
1917 {
1918 REQ_CHECK_EXPR_FMT(paSegs[i].off < pReq->u.In.cbImageBits && !(paSegs[i].off & PAGE_OFFSET_MASK),
1919 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)pReq->u.In.cbImageBits));
1920 REQ_CHECK_EXPR_FMT(paSegs[i].cb <= pReq->u.In.cbImageBits,
1921 ("SUP_IOCTL_LDR_LOAD: seg #%ld: cb %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].cb, (long)pReq->u.In.cbImageBits));
1922 REQ_CHECK_EXPR_FMT(paSegs[i].off + paSegs[i].cb <= pReq->u.In.cbImageBits,
1923 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx = %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb, (long)(paSegs[i].off + paSegs[i].cb), (long)pReq->u.In.cbImageBits));
1924 REQ_CHECK_EXPR_FMT(paSegs[i].fProt != 0,
1925 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb));
1926 REQ_CHECK_EXPR_FMT(paSegs[i].fUnused == 0, ("SUP_IOCTL_LDR_LOAD: seg #%ld: fUnused=1\n", (long)i));
1927 REQ_CHECK_EXPR_FMT(offPrevEnd == paSegs[i].off,
1928 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx offPrevEnd %#lx\n", (long)i, (long)paSegs[i].off, (long)offPrevEnd));
1929 offPrevEnd = paSegs[i].off + paSegs[i].cb;
1930 }
1931 REQ_CHECK_EXPR_FMT(offPrevEnd == pReq->u.In.cbImageBits,
1932 ("SUP_IOCTL_LDR_LOAD: offPrevEnd %#lx cbImageBits %#lx\n", (long)i, (long)offPrevEnd, (long)pReq->u.In.cbImageBits));
1933 }
1934 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fFlags & ~SUPLDRLOAD_F_VALID_MASK),
1935 ("SUP_IOCTL_LDR_LOAD: fFlags=%#x\n", (unsigned)pReq->u.In.fFlags));
1936
1937 /* execute */
1938 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1939 return 0;
1940 }
1941
1942 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1943 {
1944 /* validate */
1945 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1946 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1947
1948 /* execute */
1949 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1950 return 0;
1951 }
1952
1953 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOCK_DOWN):
1954 {
1955 /* validate */
1956 REQ_CHECK_SIZES(SUP_IOCTL_LDR_LOCK_DOWN);
1957
1958 /* execute */
1959 pReqHdr->rc = supdrvIOCtl_LdrLockDown(pDevExt);
1960 return 0;
1961 }
1962
1963 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1964 {
1965 /* validate */
1966 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1967 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1968 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, RTStrEnd(pReq->u.In.szSymbol, sizeof(pReq->u.In.szSymbol)));
1969
1970 /* execute */
1971 pReq->Hdr.rc = supdrvIOCtl_LdrQuerySymbol(pDevExt, pSession, pReq);
1972 return 0;
1973 }
1974
1975 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_NO_SIZE()):
1976 {
1977 /* validate */
1978 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1979 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1980 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1981
1982 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1983 {
1984 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1985
1986 /* execute */
1987 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1988 {
1989 if (pReq->u.In.pVMR0 == NULL)
1990 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
1991 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1992 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1993 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1994 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1995 else
1996 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1997 }
1998 else
1999 pReq->Hdr.rc = VERR_WRONG_ORDER;
2000 }
2001 else
2002 {
2003 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
2004 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
2005 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
2006 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
2007 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
2008
2009 /* execute */
2010 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
2011 {
2012 if (pReq->u.In.pVMR0 == NULL)
2013 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
2014 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
2015 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
2016 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
2017 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
2018 else
2019 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
2020 }
2021 else
2022 pReq->Hdr.rc = VERR_WRONG_ORDER;
2023 }
2024
2025 if ( RT_FAILURE(pReq->Hdr.rc)
2026 && pReq->Hdr.rc != VERR_INTERRUPTED
2027 && pReq->Hdr.rc != VERR_TIMEOUT)
2028 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2029 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2030 else
2031 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2032 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2033 return 0;
2034 }
2035
2036 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_BIG):
2037 {
2038 /* validate */
2039 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
2040 PSUPVMMR0REQHDR pVMMReq;
2041 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2042 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2043
2044 pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
2045 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR)),
2046 ("SUP_IOCTL_CALL_VMMR0_BIG: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR))));
2047 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0_BIG, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
2048 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0_BIG, SUP_IOCTL_CALL_VMMR0_BIG_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_BIG_SIZE_OUT(pVMMReq->cbReq));
2049
2050 /* execute */
2051 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
2052 {
2053 if (pReq->u.In.pVMR0 == NULL)
2054 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
2055 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
2056 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
2057 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
2058 else
2059 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
2060 }
2061 else
2062 pReq->Hdr.rc = VERR_WRONG_ORDER;
2063
2064 if ( RT_FAILURE(pReq->Hdr.rc)
2065 && pReq->Hdr.rc != VERR_INTERRUPTED
2066 && pReq->Hdr.rc != VERR_TIMEOUT)
2067 Log(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2068 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2069 else
2070 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2071 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2072 return 0;
2073 }
2074
2075 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
2076 {
2077 /* validate */
2078 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
2079 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
2080
2081 /* execute */
2082 pReq->Hdr.rc = VINF_SUCCESS;
2083 pReq->u.Out.enmMode = SUPR0GetPagingMode();
2084 return 0;
2085 }
2086
2087 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
2088 {
2089 /* validate */
2090 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
2091 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
2092 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
2093
2094 /* execute */
2095 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
2096 if (RT_FAILURE(pReq->Hdr.rc))
2097 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2098 return 0;
2099 }
2100
2101 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
2102 {
2103 /* validate */
2104 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
2105 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
2106
2107 /* execute */
2108 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
2109 return 0;
2110 }
2111
2112 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
2113 {
2114 /* validate */
2115 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
2116 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
2117
2118 /* execute */
2119 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
2120 if (RT_SUCCESS(pReq->Hdr.rc))
2121 pReq->u.Out.pGipR0 = pDevExt->pGip;
2122 return 0;
2123 }
2124
2125 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
2126 {
2127 /* validate */
2128 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
2129 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
2130
2131 /* execute */
2132 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
2133 return 0;
2134 }
2135
        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
        {
            /* Registers the VM handle used by the fast-path I/O control codes.
               Requires the handle to already be the session VM and may only be
               done once per session. */
            /* validate */
            PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
            /* NULL is allowed; a non-NULL handle must be a valid, page aligned pointer. */
            REQ_CHECK_EXPR_FMT(    !pReq->u.In.pVMR0
                               ||  (   RT_VALID_PTR(pReq->u.In.pVMR0)
                                    && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
                               ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));

            /* execute */
            /* pSessionVM/pFastIoCtrlVM are read and written under the device
               extension spinlock; every path below releases it before logging
               or touching the request header. */
            RTSpinlockAcquire(pDevExt->Spinlock);
            if (pSession->pSessionVM == pReq->u.In.pVMR0)
            {
                if (pSession->pFastIoCtrlVM == NULL)
                {
                    pSession->pFastIoCtrlVM = pSession->pSessionVM;
                    RTSpinlockRelease(pDevExt->Spinlock);
                    pReq->Hdr.rc = VINF_SUCCESS;
                }
                else
                {
                    /* Already set once for this session. */
                    RTSpinlockRelease(pDevExt->Spinlock);
                    OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pFastIoCtrlVM=%p! (pVMR0=%p)\n",
                                pSession->pFastIoCtrlVM, pReq->u.In.pVMR0));
                    pReq->Hdr.rc = VERR_ALREADY_EXISTS;
                }
            }
            else
            {
                /* Handle does not match the session VM: either another VM is
                   registered (denied) or no session VM exists yet (wrong order). */
                RTSpinlockRelease(pDevExt->Spinlock);
                OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pSessionVM=%p vs pVMR0=%p)\n",
                            pSession->pSessionVM, pReq->u.In.pVMR0));
                pReq->Hdr.rc = pSession->pSessionVM ? VERR_ACCESS_DENIED : VERR_WRONG_ORDER;
            }
            return 0;
        }
2173
2174 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
2175 {
2176 /* validate */
2177 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
2178 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
2179 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
2180 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
2181 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
2182 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
2183 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
2184 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
2185 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
2186
2187 /* execute */
2188 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
2189 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
2190 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
2191 &pReq->u.Out.aPages[0]);
2192 if (RT_FAILURE(pReq->Hdr.rc))
2193 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2194 return 0;
2195 }
2196
2197 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
2198 {
2199 /* validate */
2200 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
2201 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
2202 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
2203 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
2204 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2205 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
2206
2207 /* execute */
2208 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
2209 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
2210 if (RT_FAILURE(pReq->Hdr.rc))
2211 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2212 return 0;
2213 }
2214
2215 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_PROTECT):
2216 {
2217 /* validate */
2218 PSUPPAGEPROTECT pReq = (PSUPPAGEPROTECT)pReqHdr;
2219 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_PROTECT);
2220 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)),
2221 ("SUP_IOCTL_PAGE_PROTECT: fProt=%#x!\n", pReq->u.In.fProt));
2222 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_PROTECT: offSub=%#x\n", pReq->u.In.offSub));
2223 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2224 ("SUP_IOCTL_PAGE_PROTECT: cbSub=%#x\n", pReq->u.In.cbSub));
2225
2226 /* execute */
2227 pReq->Hdr.rc = SUPR0PageProtect(pSession, pReq->u.In.pvR3, pReq->u.In.pvR0, pReq->u.In.offSub, pReq->u.In.cbSub, pReq->u.In.fProt);
2228 return 0;
2229 }
2230
2231 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
2232 {
2233 /* validate */
2234 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
2235 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
2236
2237 /* execute */
2238 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
2239 return 0;
2240 }
2241
2242 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE_NO_SIZE()):
2243 {
2244 /* validate */
2245 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
2246 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2247 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2248
2249 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
2250 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
2251 else
2252 {
2253 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
2254 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
2255 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
2256 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
2257 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
2258 }
2259 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
2260
2261 /* execute */
2262 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
2263 return 0;
2264 }
2265
2266 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOGGER_SETTINGS_NO_SIZE()):
2267 {
2268 /* validate */
2269 PSUPLOGGERSETTINGS pReq = (PSUPLOGGERSETTINGS)pReqHdr;
2270 size_t cbStrTab;
2271 REQ_CHECK_SIZE_OUT(SUP_IOCTL_LOGGER_SETTINGS, SUP_IOCTL_LOGGER_SETTINGS_SIZE_OUT);
2272 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->Hdr.cbIn >= SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(1));
2273 cbStrTab = pReq->Hdr.cbIn - SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(0);
2274 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offGroups < cbStrTab);
2275 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offFlags < cbStrTab);
2276 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offDestination < cbStrTab);
2277 REQ_CHECK_EXPR_FMT(pReq->u.In.szStrings[cbStrTab - 1] == '\0',
2278 ("SUP_IOCTL_LOGGER_SETTINGS: cbIn=%#x cbStrTab=%#zx LastChar=%d\n",
2279 pReq->Hdr.cbIn, cbStrTab, pReq->u.In.szStrings[cbStrTab - 1]));
2280 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhich <= SUPLOGGERSETTINGS_WHICH_RELEASE);
2281 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhat <= SUPLOGGERSETTINGS_WHAT_DESTROY);
2282
2283 /* execute */
2284 pReq->Hdr.rc = supdrvIOCtl_LoggerSettings(pReq);
2285 return 0;
2286 }
2287
2288 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP2):
2289 {
2290 /* validate */
2291 PSUPSEMOP2 pReq = (PSUPSEMOP2)pReqHdr;
2292 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP2, SUP_IOCTL_SEM_OP2_SIZE_IN, SUP_IOCTL_SEM_OP2_SIZE_OUT);
2293 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP2, pReq->u.In.uReserved == 0);
2294
2295 /* execute */
2296 switch (pReq->u.In.uType)
2297 {
2298 case SUP_SEM_TYPE_EVENT:
2299 {
2300 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2301 switch (pReq->u.In.uOp)
2302 {
2303 case SUPSEMOP2_WAIT_MS_REL:
2304 pReq->Hdr.rc = SUPSemEventWaitNoResume(pSession, hEvent, pReq->u.In.uArg.cRelMsTimeout);
2305 break;
2306 case SUPSEMOP2_WAIT_NS_ABS:
2307 pReq->Hdr.rc = SUPSemEventWaitNsAbsIntr(pSession, hEvent, pReq->u.In.uArg.uAbsNsTimeout);
2308 break;
2309 case SUPSEMOP2_WAIT_NS_REL:
2310 pReq->Hdr.rc = SUPSemEventWaitNsRelIntr(pSession, hEvent, pReq->u.In.uArg.cRelNsTimeout);
2311 break;
2312 case SUPSEMOP2_SIGNAL:
2313 pReq->Hdr.rc = SUPSemEventSignal(pSession, hEvent);
2314 break;
2315 case SUPSEMOP2_CLOSE:
2316 pReq->Hdr.rc = SUPSemEventClose(pSession, hEvent);
2317 break;
2318 case SUPSEMOP2_RESET:
2319 default:
2320 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2321 break;
2322 }
2323 break;
2324 }
2325
2326 case SUP_SEM_TYPE_EVENT_MULTI:
2327 {
2328 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2329 switch (pReq->u.In.uOp)
2330 {
2331 case SUPSEMOP2_WAIT_MS_REL:
2332 pReq->Hdr.rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, pReq->u.In.uArg.cRelMsTimeout);
2333 break;
2334 case SUPSEMOP2_WAIT_NS_ABS:
2335 pReq->Hdr.rc = SUPSemEventMultiWaitNsAbsIntr(pSession, hEventMulti, pReq->u.In.uArg.uAbsNsTimeout);
2336 break;
2337 case SUPSEMOP2_WAIT_NS_REL:
2338 pReq->Hdr.rc = SUPSemEventMultiWaitNsRelIntr(pSession, hEventMulti, pReq->u.In.uArg.cRelNsTimeout);
2339 break;
2340 case SUPSEMOP2_SIGNAL:
2341 pReq->Hdr.rc = SUPSemEventMultiSignal(pSession, hEventMulti);
2342 break;
2343 case SUPSEMOP2_CLOSE:
2344 pReq->Hdr.rc = SUPSemEventMultiClose(pSession, hEventMulti);
2345 break;
2346 case SUPSEMOP2_RESET:
2347 pReq->Hdr.rc = SUPSemEventMultiReset(pSession, hEventMulti);
2348 break;
2349 default:
2350 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2351 break;
2352 }
2353 break;
2354 }
2355
2356 default:
2357 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2358 break;
2359 }
2360 return 0;
2361 }
2362
2363 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP3):
2364 {
2365 /* validate */
2366 PSUPSEMOP3 pReq = (PSUPSEMOP3)pReqHdr;
2367 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP3, SUP_IOCTL_SEM_OP3_SIZE_IN, SUP_IOCTL_SEM_OP3_SIZE_OUT);
2368 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, pReq->u.In.u32Reserved == 0 && pReq->u.In.u64Reserved == 0);
2369
2370 /* execute */
2371 switch (pReq->u.In.uType)
2372 {
2373 case SUP_SEM_TYPE_EVENT:
2374 {
2375 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2376 switch (pReq->u.In.uOp)
2377 {
2378 case SUPSEMOP3_CREATE:
2379 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2380 pReq->Hdr.rc = SUPSemEventCreate(pSession, &hEvent);
2381 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEvent;
2382 break;
2383 case SUPSEMOP3_GET_RESOLUTION:
2384 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2385 pReq->Hdr.rc = VINF_SUCCESS;
2386 pReq->Hdr.cbOut = sizeof(*pReq);
2387 pReq->u.Out.cNsResolution = SUPSemEventGetResolution(pSession);
2388 break;
2389 default:
2390 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2391 break;
2392 }
2393 break;
2394 }
2395
2396 case SUP_SEM_TYPE_EVENT_MULTI:
2397 {
2398 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2399 switch (pReq->u.In.uOp)
2400 {
2401 case SUPSEMOP3_CREATE:
2402 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2403 pReq->Hdr.rc = SUPSemEventMultiCreate(pSession, &hEventMulti);
2404 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEventMulti;
2405 break;
2406 case SUPSEMOP3_GET_RESOLUTION:
2407 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2408 pReq->Hdr.rc = VINF_SUCCESS;
2409 pReq->u.Out.cNsResolution = SUPSemEventMultiGetResolution(pSession);
2410 break;
2411 default:
2412 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2413 break;
2414 }
2415 break;
2416 }
2417
2418 default:
2419 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2420 break;
2421 }
2422 return 0;
2423 }
2424
2425 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_OPEN):
2426 {
2427 /* validate */
2428 PSUPTRACEROPEN pReq = (PSUPTRACEROPEN)pReqHdr;
2429 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_OPEN);
2430
2431 /* execute */
2432 pReq->Hdr.rc = supdrvIOCtl_TracerOpen(pDevExt, pSession, pReq->u.In.uCookie, pReq->u.In.uArg);
2433 return 0;
2434 }
2435
2436 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_CLOSE):
2437 {
2438 /* validate */
2439 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_CLOSE);
2440
2441 /* execute */
2442 pReqHdr->rc = supdrvIOCtl_TracerClose(pDevExt, pSession);
2443 return 0;
2444 }
2445
2446 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_IOCTL):
2447 {
2448 /* validate */
2449 PSUPTRACERIOCTL pReq = (PSUPTRACERIOCTL)pReqHdr;
2450 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_IOCTL);
2451
2452 /* execute */
2453 pReqHdr->rc = supdrvIOCtl_TracerIOCtl(pDevExt, pSession, pReq->u.In.uCmd, pReq->u.In.uArg, &pReq->u.Out.iRetVal);
2454 return 0;
2455 }
2456
2457 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_REG):
2458 {
2459 /* validate */
2460 PSUPTRACERUMODREG pReq = (PSUPTRACERUMODREG)pReqHdr;
2461 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_REG);
2462 if (!RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)))
2463 return VERR_INVALID_PARAMETER;
2464
2465 /* execute */
2466 pReqHdr->rc = supdrvIOCtl_TracerUmodRegister(pDevExt, pSession,
2467 pReq->u.In.R3PtrVtgHdr, pReq->u.In.uVtgHdrAddr,
2468 pReq->u.In.R3PtrStrTab, pReq->u.In.cbStrTab,
2469 pReq->u.In.szName, pReq->u.In.fFlags);
2470 return 0;
2471 }
2472
2473 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_DEREG):
2474 {
2475 /* validate */
2476 PSUPTRACERUMODDEREG pReq = (PSUPTRACERUMODDEREG)pReqHdr;
2477 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_DEREG);
2478
2479 /* execute */
2480 pReqHdr->rc = supdrvIOCtl_TracerUmodDeregister(pDevExt, pSession, pReq->u.In.pVtgHdr);
2481 return 0;
2482 }
2483
2484 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE):
2485 {
2486 /* validate */
2487 PSUPTRACERUMODFIREPROBE pReq = (PSUPTRACERUMODFIREPROBE)pReqHdr;
2488 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE);
2489
2490 supdrvIOCtl_TracerUmodProbeFire(pDevExt, pSession, &pReq->u.In);
2491 pReqHdr->rc = VINF_SUCCESS;
2492 return 0;
2493 }
2494
2495#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
2496 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_MSR_PROBER):
2497 {
2498 /* validate */
2499 PSUPMSRPROBER pReq = (PSUPMSRPROBER)pReqHdr;
2500 REQ_CHECK_SIZES(SUP_IOCTL_MSR_PROBER);
2501 REQ_CHECK_EXPR(SUP_IOCTL_MSR_PROBER,
2502 pReq->u.In.enmOp > SUPMSRPROBEROP_INVALID && pReq->u.In.enmOp < SUPMSRPROBEROP_END);
2503
2504 pReqHdr->rc = supdrvIOCtl_X86MsrProber(pDevExt, pReq);
2505
2506 return 0;
2507 }
2508
2509#elif defined(RT_ARCH_ARM64)
2510 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_ARM_GET_SYSREGS):
2511 {
2512 /* validate */
2513 PSUPARMGETSYSREGS pReq = (PSUPARMGETSYSREGS)pReqHdr;
2514 uint32_t const cMaxRegs = pReq->Hdr.cbOut <= RT_UOFFSETOF(SUPARMGETSYSREGS, u.Out.aRegs) ? 0
2515 : (pReq->Hdr.cbOut - RT_UOFFSETOF(SUPARMGETSYSREGS, u.Out.aRegs)) / sizeof(SUPARMSYSREGVAL);
2516 REQ_CHECK_SIZE_IN(SUP_IOCTL_ARM_GET_SYSREGS, SUP_IOCTL_ARM_GET_SYSREGS_SIZE_IN);
2517
2518 REQ_CHECK_SIZE_OUT(SUP_IOCTL_ARM_GET_SYSREGS, SUP_IOCTL_ARM_GET_SYSREGS_SIZE_OUT(cMaxRegs));
2519 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fFlags & ~SUP_ARM_SYS_REG_F_VALID_MASK),
2520 ("SUP_IOCTL_ARM_GET_SYSREGS: fFlags=%#x!\n", pReq->u.In.fFlags));
2521
2522 pReqHdr->rc = supdrvIOCtl_ArmGetSysRegs(pReq, cMaxRegs, pReq->u.In.idCpu, pReq->u.In.fFlags);
2523 if (RT_FAILURE(pReqHdr->rc))
2524 pReqHdr->cbOut = sizeof(*pReqHdr);
2525
2526 return 0;
2527 }
2528#endif
2529
2530 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_RESUME_SUSPENDED_KBDS):
2531 {
2532 /* validate */
2533 REQ_CHECK_SIZES(SUP_IOCTL_RESUME_SUSPENDED_KBDS);
2534
2535 pReqHdr->rc = supdrvIOCtl_ResumeSuspendedKbds();
2536 return 0;
2537 }
2538
2539 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_DELTA_MEASURE):
2540 {
2541 /* validate */
2542 PSUPTSCDELTAMEASURE pReq = (PSUPTSCDELTAMEASURE)pReqHdr;
2543 REQ_CHECK_SIZES(SUP_IOCTL_TSC_DELTA_MEASURE);
2544
2545 pReqHdr->rc = supdrvIOCtl_TscDeltaMeasure(pDevExt, pSession, pReq);
2546 return 0;
2547 }
2548
2549 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_READ):
2550 {
2551 /* validate */
2552 PSUPTSCREAD pReq = (PSUPTSCREAD)pReqHdr;
2553 REQ_CHECK_SIZES(SUP_IOCTL_TSC_READ);
2554
2555 pReqHdr->rc = supdrvIOCtl_TscRead(pDevExt, pSession, pReq);
2556 return 0;
2557 }
2558
2559 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_SET_FLAGS):
2560 {
2561 /* validate */
2562 PSUPGIPSETFLAGS pReq = (PSUPGIPSETFLAGS)pReqHdr;
2563 REQ_CHECK_SIZES(SUP_IOCTL_GIP_SET_FLAGS);
2564
2565 pReqHdr->rc = supdrvIOCtl_GipSetFlags(pDevExt, pSession, pReq->u.In.fOrMask, pReq->u.In.fAndMask);
2566 return 0;
2567 }
2568
2569#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
2570
2571 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2572 {
2573 /* validate */
2574 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2575 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2576
2577 /* execute */
2578 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2579 if (RT_FAILURE(pReq->Hdr.rc))
2580 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2581 return 0;
2582 }
2583
2584 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_UCODE_REV):
2585 {
2586 /* validate */
2587 PSUPUCODEREV pReq = (PSUPUCODEREV)pReqHdr;
2588 REQ_CHECK_SIZES(SUP_IOCTL_UCODE_REV);
2589
2590 /* execute */
2591 pReq->Hdr.rc = SUPR0QueryUcodeRev(pSession, &pReq->u.Out.MicrocodeRev);
2592 if (RT_FAILURE(pReq->Hdr.rc))
2593 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2594 return 0;
2595 }
2596
2597 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_HWVIRT_MSRS):
2598 {
2599 /* validate */
2600 PSUPGETHWVIRTMSRS pReq = (PSUPGETHWVIRTMSRS)pReqHdr;
2601 REQ_CHECK_SIZES(SUP_IOCTL_GET_HWVIRT_MSRS);
2602 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1 && !pReq->u.In.fReserved2,
2603 ("SUP_IOCTL_GET_HWVIRT_MSRS: fReserved0=%d fReserved1=%d fReserved2=%d\n", pReq->u.In.fReserved0,
2604 pReq->u.In.fReserved1, pReq->u.In.fReserved2));
2605
2606 /* execute */
2607 pReq->Hdr.rc = SUPR0GetHwvirtMsrs(&pReq->u.Out.HwvirtMsrs, 0 /* fCaps */, pReq->u.In.fForce);
2608 if (RT_FAILURE(pReq->Hdr.rc))
2609 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2610 return 0;
2611 }
2612
2613#endif /* defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) */
2614
2615 default:
2616 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2617 break;
2618 }
2619 return VERR_GENERAL_FAILURE;
2620}
2621
2622
2623/**
2624 * I/O Control inner worker for the restricted operations.
2625 *
2626 * @returns IPRT status code.
2627 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2628 *
2629 * @param uIOCtl Function number.
2630 * @param pDevExt Device extention.
2631 * @param pSession Session data.
2632 * @param pReqHdr The request header.
2633 */
2634static int supdrvIOCtlInnerRestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
2635{
2636 /*
2637 * The switch.
2638 */
2639 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
2640 {
2641 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
2642 {
2643 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
2644 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
2645 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
2646 {
2647 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
2648 pReq->Hdr.rc = VERR_INVALID_MAGIC;
2649 return 0;
2650 }
2651
2652 /*
2653 * Match the version.
2654 * The current logic is very simple, match the major interface version.
2655 */
2656 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
2657 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
2658 {
2659 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2660 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
2661 pReq->u.Out.u32Cookie = 0xffffffff;
2662 pReq->u.Out.u32SessionCookie = 0xffffffff;
2663 pReq->u.Out.u32SessionVersion = 0xffffffff;
2664 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2665 pReq->u.Out.pSession = NULL;
2666 pReq->u.Out.cFunctions = 0;
2667 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2668 return 0;
2669 }
2670
2671 /*
2672 * Fill in return data and be gone.
2673 * N.B. The first one to change SUPDRV_IOC_VERSION shall makes sure that
2674 * u32SessionVersion <= u32ReqVersion!
2675 */
2676 /** @todo Somehow validate the client and negotiate a secure cookie... */
2677 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
2678 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
2679 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
2680 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2681 pReq->u.Out.pSession = NULL;
2682 pReq->u.Out.cFunctions = 0;
2683 pReq->Hdr.rc = VINF_SUCCESS;
2684 return 0;
2685 }
2686
2687#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
2688 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2689 {
2690 /* validate */
2691 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2692 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2693
2694 /* execute */
2695 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2696 if (RT_FAILURE(pReq->Hdr.rc))
2697 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2698 return 0;
2699 }
2700#endif /* defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) */
2701
2702 default:
2703 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2704 break;
2705 }
2706 return VERR_GENERAL_FAILURE;
2707}
2708
2709
2710/**
2711 * I/O Control worker.
2712 *
2713 * @returns IPRT status code.
2714 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2715 *
2716 * @param uIOCtl Function number.
2717 * @param pDevExt Device extention.
2718 * @param pSession Session data.
2719 * @param pReqHdr The request header.
2720 * @param cbReq The size of the request buffer.
2721 */
2722int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr, size_t cbReq)
2723{
2724 int rc;
2725 VBOXDRV_IOCTL_ENTRY(pSession, uIOCtl, pReqHdr);
2726
2727 /*
2728 * Validate the request.
2729 */
2730 if (RT_UNLIKELY(cbReq < sizeof(*pReqHdr)))
2731 {
2732 OSDBGPRINT(("vboxdrv: Bad ioctl request size; cbReq=%#lx\n", (long)cbReq));
2733 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2734 return VERR_INVALID_PARAMETER;
2735 }
2736 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
2737 || pReqHdr->cbIn < sizeof(*pReqHdr)
2738 || pReqHdr->cbIn > cbReq
2739 || pReqHdr->cbOut < sizeof(*pReqHdr)
2740 || pReqHdr->cbOut > cbReq))
2741 {
2742 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
2743 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
2744 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2745 return VERR_INVALID_PARAMETER;
2746 }
2747 if (RT_UNLIKELY(!RT_VALID_PTR(pSession)))
2748 {
2749 OSDBGPRINT(("vboxdrv: Invalid pSession value %p (ioctl=%p)\n", pSession, (void *)uIOCtl));
2750 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2751 return VERR_INVALID_PARAMETER;
2752 }
2753 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
2754 {
2755 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
2756 {
2757 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
2758 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2759 return VERR_INVALID_PARAMETER;
2760 }
2761 }
2762 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
2763 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
2764 {
2765 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
2766 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2767 return VERR_INVALID_PARAMETER;
2768 }
2769
2770 /*
2771 * Hand it to an inner function to avoid lots of unnecessary return tracepoints.
2772 */
2773 if (pSession->fUnrestricted)
2774 rc = supdrvIOCtlInnerUnrestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2775 else
2776 rc = supdrvIOCtlInnerRestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2777
2778 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, pReqHdr->rc, rc);
2779 return rc;
2780}
2781
2782
2783/**
2784 * Inter-Driver Communication (IDC) worker.
2785 *
2786 * @returns VBox status code.
2787 * @retval VINF_SUCCESS on success.
2788 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2789 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
2790 *
2791 * @param uReq The request (function) code.
2792 * @param pDevExt Device extention.
2793 * @param pSession Session data.
2794 * @param pReqHdr The request header.
2795 */
2796int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
2797{
2798 /*
2799 * The OS specific code has already validated the pSession
2800 * pointer, and the request size being greater or equal to
2801 * size of the header.
2802 *
2803 * So, just check that pSession is a kernel context session.
2804 */
2805 if (RT_UNLIKELY( pSession
2806 && pSession->R0Process != NIL_RTR0PROCESS))
2807 return VERR_INVALID_PARAMETER;
2808
2809/*
2810 * Validation macro.
2811 */
2812#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
2813 do { \
2814 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
2815 { \
2816 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
2817 (long)pReqHdr->cb, (long)(cbExpect))); \
2818 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
2819 } \
2820 } while (0)
2821
2822 switch (uReq)
2823 {
2824 case SUPDRV_IDC_REQ_CONNECT:
2825 {
2826 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
2827 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
2828
2829 /*
2830 * Validate the cookie and other input.
2831 */
2832 if (pReq->Hdr.pSession != NULL)
2833 {
2834 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Hdr.pSession=%p expected NULL!\n", pReq->Hdr.pSession));
2835 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2836 }
2837 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
2838 {
2839 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
2840 (unsigned)pReq->u.In.u32MagicCookie, (unsigned)SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
2841 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2842 }
2843 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
2844 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
2845 {
2846 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
2847 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
2848 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2849 }
2850 if (pSession != NULL)
2851 {
2852 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pSession));
2853 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2854 }
2855
2856 /*
2857 * Match the version.
2858 * The current logic is very simple, match the major interface version.
2859 */
2860 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
2861 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
2862 {
2863 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2864 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, (unsigned)SUPDRV_IDC_VERSION));
2865 pReq->u.Out.pSession = NULL;
2866 pReq->u.Out.uSessionVersion = 0xffffffff;
2867 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2868 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2869 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2870 return VINF_SUCCESS;
2871 }
2872
2873 pReq->u.Out.pSession = NULL;
2874 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
2875 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2876 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2877
2878 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, true /*fUnrestricted*/, &pSession);
2879 if (RT_FAILURE(pReq->Hdr.rc))
2880 {
2881 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
2882 return VINF_SUCCESS;
2883 }
2884
2885 pReq->u.Out.pSession = pSession;
2886 pReq->Hdr.pSession = pSession;
2887
2888 return VINF_SUCCESS;
2889 }
2890
2891 case SUPDRV_IDC_REQ_DISCONNECT:
2892 {
2893 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
2894
2895 supdrvSessionRelease(pSession);
2896 return pReqHdr->rc = VINF_SUCCESS;
2897 }
2898
2899 case SUPDRV_IDC_REQ_GET_SYMBOL:
2900 {
2901 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
2902 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
2903
2904 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
2905 return VINF_SUCCESS;
2906 }
2907
2908 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
2909 {
2910 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
2911 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
2912
2913 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
2914 return VINF_SUCCESS;
2915 }
2916
2917 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
2918 {
2919 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
2920 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
2921
2922 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
2923 return VINF_SUCCESS;
2924 }
2925
2926 default:
2927 Log(("Unknown IDC %#lx\n", (long)uReq));
2928 break;
2929 }
2930
2931#undef REQ_CHECK_IDC_SIZE
2932 return VERR_NOT_SUPPORTED;
2933}
2934
2935
2936/**
2937 * Register a object for reference counting.
2938 * The object is registered with one reference in the specified session.
2939 *
2940 * @returns Unique identifier on success (pointer).
2941 * All future reference must use this identifier.
2942 * @returns NULL on failure.
2943 * @param pSession The caller's session.
2944 * @param enmType The object type.
2945 * @param pfnDestructor The destructore function which will be called when the reference count reaches 0.
2946 * @param pvUser1 The first user argument.
2947 * @param pvUser2 The second user argument.
2948 */
2949SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
2950{
2951 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2952 PSUPDRVOBJ pObj;
2953 PSUPDRVUSAGE pUsage;
2954
2955 /*
2956 * Validate the input.
2957 */
2958 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
2959 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
2960 AssertPtrReturn(pfnDestructor, NULL);
2961
2962 /*
2963 * Allocate and initialize the object.
2964 */
2965 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
2966 if (!pObj)
2967 return NULL;
2968 pObj->u32Magic = SUPDRVOBJ_MAGIC;
2969 pObj->enmType = enmType;
2970 pObj->pNext = NULL;
2971 pObj->cUsage = 1;
2972 pObj->pfnDestructor = pfnDestructor;
2973 pObj->pvUser1 = pvUser1;
2974 pObj->pvUser2 = pvUser2;
2975 pObj->CreatorUid = pSession->Uid;
2976 pObj->CreatorGid = pSession->Gid;
2977 pObj->CreatorProcess= pSession->Process;
2978 supdrvOSObjInitCreator(pObj, pSession);
2979
2980 /*
2981 * Allocate the usage record.
2982 * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
2983 */
2984 RTSpinlockAcquire(pDevExt->Spinlock);
2985
2986 pUsage = pDevExt->pUsageFree;
2987 if (pUsage)
2988 pDevExt->pUsageFree = pUsage->pNext;
2989 else
2990 {
2991 RTSpinlockRelease(pDevExt->Spinlock);
2992 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
2993 if (!pUsage)
2994 {
2995 RTMemFree(pObj);
2996 return NULL;
2997 }
2998 RTSpinlockAcquire(pDevExt->Spinlock);
2999 }
3000
3001 /*
3002 * Insert the object and create the session usage record.
3003 */
3004 /* The object. */
3005 pObj->pNext = pDevExt->pObjs;
3006 pDevExt->pObjs = pObj;
3007
3008 /* The session record. */
3009 pUsage->cUsage = 1;
3010 pUsage->pObj = pObj;
3011 pUsage->pNext = pSession->pUsage;
3012 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
3013 pSession->pUsage = pUsage;
3014
3015 RTSpinlockRelease(pDevExt->Spinlock);
3016
3017 Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser=%p)\n", pObj, pvUser1, pvUser2));
3018 return pObj;
3019}
3020SUPR0_EXPORT_SYMBOL(SUPR0ObjRegister);
3021
3022
3023/**
3024 * Increment the reference counter for the object associating the reference
3025 * with the specified session.
3026 *
3027 * @returns IPRT status code.
3028 * @param pvObj The identifier returned by SUPR0ObjRegister().
3029 * @param pSession The session which is referencing the object.
3030 *
3031 * @remarks The caller should not own any spinlocks and must carefully protect
3032 * itself against potential race with the destructor so freed memory
3033 * isn't accessed here.
3034 */
3035SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
3036{
3037 return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
3038}
3039SUPR0_EXPORT_SYMBOL(SUPR0ObjAddRef);
3040
3041
3042/**
3043 * Increment the reference counter for the object associating the reference
3044 * with the specified session.
3045 *
3046 * @returns IPRT status code.
3047 * @retval VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
3048 * couldn't be allocated. (If you see this you're not doing the right
3049 * thing and it won't ever work reliably.)
3050 *
3051 * @param pvObj The identifier returned by SUPR0ObjRegister().
3052 * @param pSession The session which is referencing the object.
3053 * @param fNoBlocking Set if it's not OK to block. Never try to make the
3054 * first reference to an object in a session with this
3055 * argument set.
3056 *
3057 * @remarks The caller should not own any spinlocks and must carefully protect
3058 * itself against potential race with the destructor so freed memory
3059 * isn't accessed here.
3060 */
3061SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
3062{
3063 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
3064 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3065 int rc = VINF_SUCCESS;
3066 PSUPDRVUSAGE pUsagePre;
3067 PSUPDRVUSAGE pUsage;
3068
3069 /*
3070 * Validate the input.
3071 * Be ready for the destruction race (someone might be stuck in the
3072 * destructor waiting a lock we own).
3073 */
3074 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3075 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
3076 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC_DEAD,
3077 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC_DEAD),
3078 VERR_INVALID_PARAMETER);
3079
3080 RTSpinlockAcquire(pDevExt->Spinlock);
3081
3082 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
3083 {
3084 RTSpinlockRelease(pDevExt->Spinlock);
3085
3086 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
3087 return VERR_WRONG_ORDER;
3088 }
3089
3090 /*
3091 * Preallocate the usage record if we can.
3092 */
3093 pUsagePre = pDevExt->pUsageFree;
3094 if (pUsagePre)
3095 pDevExt->pUsageFree = pUsagePre->pNext;
3096 else if (!fNoBlocking)
3097 {
3098 RTSpinlockRelease(pDevExt->Spinlock);
3099 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
3100 if (!pUsagePre)
3101 return VERR_NO_MEMORY;
3102
3103 RTSpinlockAcquire(pDevExt->Spinlock);
3104 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
3105 {
3106 RTSpinlockRelease(pDevExt->Spinlock);
3107
3108 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
3109 return VERR_WRONG_ORDER;
3110 }
3111 }
3112
3113 /*
3114 * Reference the object.
3115 */
3116 pObj->cUsage++;
3117
3118 /*
3119 * Look for the session record.
3120 */
3121 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
3122 {
3123 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
3124 if (pUsage->pObj == pObj)
3125 break;
3126 }
3127 if (pUsage)
3128 pUsage->cUsage++;
3129 else if (pUsagePre)
3130 {
3131 /* create a new session record. */
3132 pUsagePre->cUsage = 1;
3133 pUsagePre->pObj = pObj;
3134 pUsagePre->pNext = pSession->pUsage;
3135 pSession->pUsage = pUsagePre;
3136 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
3137
3138 pUsagePre = NULL;
3139 }
3140 else
3141 {
3142 pObj->cUsage--;
3143 rc = VERR_TRY_AGAIN;
3144 }
3145
3146 /*
3147 * Put any unused usage record into the free list..
3148 */
3149 if (pUsagePre)
3150 {
3151 pUsagePre->pNext = pDevExt->pUsageFree;
3152 pDevExt->pUsageFree = pUsagePre;
3153 }
3154
3155 RTSpinlockRelease(pDevExt->Spinlock);
3156
3157 return rc;
3158}
3159SUPR0_EXPORT_SYMBOL(SUPR0ObjAddRefEx);
3160
3161
3162/**
3163 * Decrement / destroy a reference counter record for an object.
3164 *
3165 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
3166 *
3167 * @returns IPRT status code.
3168 * @retval VINF_SUCCESS if not destroyed.
3169 * @retval VINF_OBJECT_DESTROYED if it's destroyed by this release call.
3170 * @retval VERR_INVALID_PARAMETER if the object isn't valid. Will assert in
3171 * string builds.
3172 *
3173 * @param pvObj The identifier returned by SUPR0ObjRegister().
3174 * @param pSession The session which is referencing the object.
3175 */
3176SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
3177{
3178 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
3179 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3180 int rc = VERR_INVALID_PARAMETER;
3181 PSUPDRVUSAGE pUsage;
3182 PSUPDRVUSAGE pUsagePrev;
3183
3184 /*
3185 * Validate the input.
3186 */
3187 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3188 AssertMsgReturn(RT_VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
3189 ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3190 VERR_INVALID_PARAMETER);
3191
3192 /*
3193 * Acquire the spinlock and look for the usage record.
3194 */
3195 RTSpinlockAcquire(pDevExt->Spinlock);
3196
3197 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
3198 pUsage;
3199 pUsagePrev = pUsage, pUsage = pUsage->pNext)
3200 {
3201 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
3202 if (pUsage->pObj == pObj)
3203 {
3204 rc = VINF_SUCCESS;
3205 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
3206 if (pUsage->cUsage > 1)
3207 {
3208 pObj->cUsage--;
3209 pUsage->cUsage--;
3210 }
3211 else
3212 {
3213 /*
3214 * Free the session record.
3215 */
3216 if (pUsagePrev)
3217 pUsagePrev->pNext = pUsage->pNext;
3218 else
3219 pSession->pUsage = pUsage->pNext;
3220 pUsage->pNext = pDevExt->pUsageFree;
3221 pDevExt->pUsageFree = pUsage;
3222
3223 /* What about the object? */
3224 if (pObj->cUsage > 1)
3225 pObj->cUsage--;
3226 else
3227 {
3228 /*
3229 * Object is to be destroyed, unlink it.
3230 */
3231 pObj->u32Magic = SUPDRVOBJ_MAGIC_DEAD;
3232 rc = VINF_OBJECT_DESTROYED;
3233 if (pDevExt->pObjs == pObj)
3234 pDevExt->pObjs = pObj->pNext;
3235 else
3236 {
3237 PSUPDRVOBJ pObjPrev;
3238 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
3239 if (pObjPrev->pNext == pObj)
3240 {
3241 pObjPrev->pNext = pObj->pNext;
3242 break;
3243 }
3244 Assert(pObjPrev);
3245 }
3246 }
3247 }
3248 break;
3249 }
3250 }
3251
3252 RTSpinlockRelease(pDevExt->Spinlock);
3253
3254 /*
3255 * Call the destructor and free the object if required.
3256 */
3257 if (rc == VINF_OBJECT_DESTROYED)
3258 {
3259 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
3260 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
3261 if (pObj->pfnDestructor)
3262 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
3263 RTMemFree(pObj);
3264 }
3265
3266 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
3267 return rc;
3268}
3269SUPR0_EXPORT_SYMBOL(SUPR0ObjRelease);
3270
3271
3272/**
3273 * Verifies that the current process can access the specified object.
3274 *
3275 * @returns The following IPRT status code:
3276 * @retval VINF_SUCCESS if access was granted.
3277 * @retval VERR_PERMISSION_DENIED if denied access.
3278 * @retval VERR_INVALID_PARAMETER if invalid parameter.
3279 *
3280 * @param pvObj The identifier returned by SUPR0ObjRegister().
3281 * @param pSession The session which wishes to access the object.
3282 * @param pszObjName Object string name. This is optional and depends on the object type.
3283 *
3284 * @remark The caller is responsible for making sure the object isn't removed while
3285 * we're inside this function. If uncertain about this, just call AddRef before calling us.
3286 */
3287SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
3288{
3289 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3290 int rc;
3291
3292 /*
3293 * Validate the input.
3294 */
3295 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3296 AssertMsgReturn(RT_VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
3297 ("Invalid pvObj=%p magic=%#x (exepcted %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3298 VERR_INVALID_PARAMETER);
3299
3300 /*
3301 * Check access. (returns true if a decision has been made.)
3302 */
3303 rc = VERR_INTERNAL_ERROR;
3304 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
3305 return rc;
3306
3307 /*
3308 * Default policy is to allow the user to access his own
3309 * stuff but nothing else.
3310 */
3311 if (pObj->CreatorUid == pSession->Uid)
3312 return VINF_SUCCESS;
3313 return VERR_PERMISSION_DENIED;
3314}
3315SUPR0_EXPORT_SYMBOL(SUPR0ObjVerifyAccess);
3316
3317
3318/**
3319 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionVM member.
3320 *
3321 * @returns The associated VM pointer.
3322 * @param pSession The session of the current thread.
3323 */
3324SUPR0DECL(PVM) SUPR0GetSessionVM(PSUPDRVSESSION pSession)
3325{
3326 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
3327 return pSession->pSessionVM;
3328}
3329SUPR0_EXPORT_SYMBOL(SUPR0GetSessionVM);
3330
3331
3332/**
3333 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionGVM member.
3334 *
3335 * @returns The associated GVM pointer.
3336 * @param pSession The session of the current thread.
3337 */
3338SUPR0DECL(PGVM) SUPR0GetSessionGVM(PSUPDRVSESSION pSession)
3339{
3340 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
3341 return pSession->pSessionGVM;
3342}
3343SUPR0_EXPORT_SYMBOL(SUPR0GetSessionGVM);
3344
3345
3346/**
3347 * API for the VMMR0 module to work the SUPDRVSESSION::pSessionVM member.
3348 *
3349 * This will fail if there is already a VM associated with the session and pVM
3350 * isn't NULL.
3351 *
3352 * @retval VINF_SUCCESS
3353 * @retval VERR_ALREADY_EXISTS if there already is a VM associated with the
3354 * session.
3355 * @retval VERR_INVALID_PARAMETER if only one of the parameters are NULL or if
3356 * the session is invalid.
3357 *
3358 * @param pSession The session of the current thread.
3359 * @param pGVM The GVM to associate with the session. Pass NULL to
3360 * dissassociate.
3361 * @param pVM The VM to associate with the session. Pass NULL to
3362 * dissassociate.
3363 */
3364SUPR0DECL(int) SUPR0SetSessionVM(PSUPDRVSESSION pSession, PGVM pGVM, PVM pVM)
3365{
3366 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3367 AssertReturn((pGVM != NULL) == (pVM != NULL), VERR_INVALID_PARAMETER);
3368
3369 RTSpinlockAcquire(pSession->pDevExt->Spinlock);
3370 if (pGVM)
3371 {
3372 if (!pSession->pSessionGVM)
3373 {
3374 pSession->pSessionGVM = pGVM;
3375 pSession->pSessionVM = pVM;
3376 pSession->pFastIoCtrlVM = NULL;
3377 }
3378 else
3379 {
3380 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3381 SUPR0Printf("SUPR0SetSessionVM: Unable to associated GVM/VM %p/%p with session %p as it has %p/%p already!\n",
3382 pGVM, pVM, pSession, pSession->pSessionGVM, pSession->pSessionVM);
3383 return VERR_ALREADY_EXISTS;
3384 }
3385 }
3386 else
3387 {
3388 pSession->pSessionGVM = NULL;
3389 pSession->pSessionVM = NULL;
3390 pSession->pFastIoCtrlVM = NULL;
3391 }
3392 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3393 return VINF_SUCCESS;
3394}
3395SUPR0_EXPORT_SYMBOL(SUPR0SetSessionVM);
3396
3397
3398/**
3399 * For getting SUPDRVSESSION::Uid.
3400 *
3401 * @returns The session UID. NIL_RTUID if invalid pointer or not successfully
3402 * set by the host code.
3403 * @param pSession The session of the current thread.
3404 */
3405SUPR0DECL(RTUID) SUPR0GetSessionUid(PSUPDRVSESSION pSession)
3406{
3407 AssertReturn(SUP_IS_SESSION_VALID(pSession), NIL_RTUID);
3408 return pSession->Uid;
3409}
3410SUPR0_EXPORT_SYMBOL(SUPR0GetSessionUid);
3411
3412
3413/** @copydoc RTLogDefaultInstanceEx
3414 * @remarks To allow overriding RTLogDefaultInstanceEx locally. */
3415SUPR0DECL(struct RTLOGGER *) SUPR0DefaultLogInstanceEx(uint32_t fFlagsAndGroup)
3416{
3417 return RTLogDefaultInstanceEx(fFlagsAndGroup);
3418}
3419SUPR0_EXPORT_SYMBOL(SUPR0DefaultLogInstanceEx);
3420
3421
3422/** @copydoc RTLogGetDefaultInstanceEx
3423 * @remarks To allow overriding RTLogGetDefaultInstanceEx locally. */
3424SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogInstanceEx(uint32_t fFlagsAndGroup)
3425{
3426 return RTLogGetDefaultInstanceEx(fFlagsAndGroup);
3427}
3428SUPR0_EXPORT_SYMBOL(SUPR0GetDefaultLogInstanceEx);
3429
3430
3431/** @copydoc RTLogRelGetDefaultInstanceEx
3432 * @remarks To allow overriding RTLogRelGetDefaultInstanceEx locally. */
3433SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogRelInstanceEx(uint32_t fFlagsAndGroup)
3434{
3435 return RTLogRelGetDefaultInstanceEx(fFlagsAndGroup);
3436}
3437SUPR0_EXPORT_SYMBOL(SUPR0GetDefaultLogRelInstanceEx);
3438
3439
3440/**
3441 * Lock pages.
3442 *
3443 * @returns IPRT status code.
3444 * @param pSession Session to which the locked memory should be associated.
3445 * @param pvR3 Start of the memory range to lock.
3446 * This must be page aligned.
3447 * @param cPages Number of pages to lock.
3448 * @param paPages Where to put the physical addresses of locked memory.
3449 */
3450SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
3451{
3452 int rc;
3453 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3454 const size_t cb = (size_t)cPages << PAGE_SHIFT;
3455 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
3456
3457 /*
3458 * Verify input.
3459 */
3460 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3461 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
3462 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
3463 || !pvR3)
3464 {
3465 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
3466 return VERR_INVALID_PARAMETER;
3467 }
3468
3469 /*
3470 * Let IPRT do the job.
3471 */
3472 Mem.eType = MEMREF_TYPE_LOCKED;
3473 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
3474 if (RT_SUCCESS(rc))
3475 {
3476 uint32_t iPage = cPages;
3477 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
3478 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
3479
3480 while (iPage-- > 0)
3481 {
3482 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3483 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
3484 {
3485 AssertMsgFailed(("iPage=%d\n", iPage));
3486 rc = VERR_INTERNAL_ERROR;
3487 break;
3488 }
3489 }
3490 if (RT_SUCCESS(rc))
3491 rc = supdrvMemAdd(&Mem, pSession);
3492 if (RT_FAILURE(rc))
3493 {
3494 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
3495 AssertRC(rc2);
3496 }
3497 }
3498
3499 return rc;
3500}
3501SUPR0_EXPORT_SYMBOL(SUPR0LockMem);
3502
3503
3504/**
3505 * Unlocks the memory pointed to by pv.
3506 *
3507 * @returns IPRT status code.
3508 * @param pSession Session to which the memory was locked.
3509 * @param pvR3 Memory to unlock.
3510 */
3511SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3512{
3513 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3514 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3515 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
3516}
3517SUPR0_EXPORT_SYMBOL(SUPR0UnlockMem);
3518
3519
3520/**
3521 * Allocates a chunk of page aligned memory with contiguous and fixed physical
3522 * backing.
3523 *
3524 * @returns IPRT status code.
3525 * @param pSession Session data.
3526 * @param cPages Number of pages to allocate.
3527 * @param ppvR0 Where to put the address of Ring-0 mapping the allocated memory.
3528 * @param ppvR3 Where to put the address of Ring-3 mapping the allocated memory.
3529 * @param pHCPhys Where to put the physical address of allocated memory.
3530 */
3531SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
3532{
3533 int rc;
3534 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3535 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
3536
3537 /*
3538 * Validate input.
3539 */
3540 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3541 if (!ppvR3 || !ppvR0 || !pHCPhys)
3542 {
3543 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
3544 pSession, ppvR0, ppvR3, pHCPhys));
3545 return VERR_INVALID_PARAMETER;
3546
3547 }
3548 if (cPages < 1 || cPages >= 256)
3549 {
3550 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3551 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3552 }
3553
3554 /*
3555 * Let IPRT do the job.
3556 */
3557 /** @todo Is the 4GiB requirement actually necessray? */
3558 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, _4G-1 /*PhysHighest*/, true /* executable R0 mapping */);
3559 if (RT_SUCCESS(rc))
3560 {
3561 int rc2;
3562 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3563 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3564 if (RT_SUCCESS(rc))
3565 {
3566 Mem.eType = MEMREF_TYPE_CONT;
3567 rc = supdrvMemAdd(&Mem, pSession);
3568 if (!rc)
3569 {
3570 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3571 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3572 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
3573 return 0;
3574 }
3575
3576 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3577 AssertRC(rc2);
3578 }
3579 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3580 AssertRC(rc2);
3581 }
3582
3583 return rc;
3584}
3585SUPR0_EXPORT_SYMBOL(SUPR0ContAlloc);
3586
3587
3588/**
3589 * Frees memory allocated using SUPR0ContAlloc().
3590 *
3591 * @returns IPRT status code.
3592 * @param pSession The session to which the memory was allocated.
3593 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3594 */
3595SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3596{
3597 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3598 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3599 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
3600}
3601SUPR0_EXPORT_SYMBOL(SUPR0ContFree);
3602
3603
3604/**
3605 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
3606 *
3607 * The memory isn't zeroed.
3608 *
3609 * @returns IPRT status code.
3610 * @param pSession Session data.
3611 * @param cPages Number of pages to allocate.
3612 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
3613 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
3614 * @param paPages Where to put the physical addresses of allocated memory.
3615 */
3616SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
3617{
3618 unsigned iPage;
3619 int rc;
3620 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3621 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
3622
3623 /*
3624 * Validate input.
3625 */
3626 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3627 if (!ppvR3 || !ppvR0 || !paPages)
3628 {
3629 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
3630 pSession, ppvR3, ppvR0, paPages));
3631 return VERR_INVALID_PARAMETER;
3632
3633 }
3634 if (cPages < 1 || cPages >= 256)
3635 {
3636 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3637 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3638 }
3639
3640 /*
3641 * Let IPRT do the work.
3642 */
3643 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
3644 if (RT_SUCCESS(rc))
3645 {
3646 int rc2;
3647 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3648 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3649 if (RT_SUCCESS(rc))
3650 {
3651 Mem.eType = MEMREF_TYPE_LOW;
3652 rc = supdrvMemAdd(&Mem, pSession);
3653 if (!rc)
3654 {
3655 for (iPage = 0; iPage < cPages; iPage++)
3656 {
3657 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3658 AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", paPages[iPage]));
3659 }
3660 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3661 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3662 return 0;
3663 }
3664
3665 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3666 AssertRC(rc2);
3667 }
3668
3669 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3670 AssertRC(rc2);
3671 }
3672
3673 return rc;
3674}
3675SUPR0_EXPORT_SYMBOL(SUPR0LowAlloc);
3676
3677
3678/**
3679 * Frees memory allocated using SUPR0LowAlloc().
3680 *
3681 * @returns IPRT status code.
3682 * @param pSession The session to which the memory was allocated.
3683 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3684 */
3685SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3686{
3687 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3688 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3689 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
3690}
3691SUPR0_EXPORT_SYMBOL(SUPR0LowFree);
3692
3693
3694
3695/**
3696 * Allocates a chunk of memory with both R0 and R3 mappings.
3697 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
3698 *
3699 * @returns IPRT status code.
3700 * @param pSession The session to associated the allocation with.
3701 * @param cb Number of bytes to allocate.
3702 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3703 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3704 */
3705SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
3706{
3707 int rc;
3708 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3709 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
3710
3711 /*
3712 * Validate input.
3713 */
3714 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3715 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
3716 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
3717 if (cb < 1 || cb >= _4M)
3718 {
3719 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
3720 return VERR_INVALID_PARAMETER;
3721 }
3722
3723 /*
3724 * Let IPRT do the work.
3725 */
3726 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
3727 if (RT_SUCCESS(rc))
3728 {
3729 int rc2;
3730 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3731 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3732 if (RT_SUCCESS(rc))
3733 {
3734 Mem.eType = MEMREF_TYPE_MEM;
3735 rc = supdrvMemAdd(&Mem, pSession);
3736 if (!rc)
3737 {
3738 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3739 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3740 return VINF_SUCCESS;
3741 }
3742
3743 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3744 AssertRC(rc2);
3745 }
3746
3747 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3748 AssertRC(rc2);
3749 }
3750
3751 return rc;
3752}
3753SUPR0_EXPORT_SYMBOL(SUPR0MemAlloc);
3754
3755
/**
 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
 *
 * @returns IPRT status code.
 * @param   pSession    The session to which the memory was allocated.
 * @param   uPtr        The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
 * @param   paPages     Where to store the physical addresses.  NOTE(review):
 *                      must have room for one SUPPAGE entry per allocated page;
 *                      the caller is trusted to size it correctly - confirm at
 *                      call sites.
 */
SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
{
    PSUPDRVBUNDLE pBundle;
    LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));

    /*
     * Validate input.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertPtrReturn(paPages, VERR_INVALID_POINTER);
    AssertReturn(uPtr, VERR_INVALID_PARAMETER);

    /*
     * Search for the address.
     * Walk the session's bundle list under the spinlock, looking for a
     * MEMREF_TYPE_MEM entry whose ring-0 address or ring-3 mapping matches.
     */
    RTSpinlockAcquire(pSession->Spinlock);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                if (   pBundle->aMem[i].eType == MEMREF_TYPE_MEM
                    && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                    && (   (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
                        || (   pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                            && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
                       )
                   )
                {
                    /* Found it - copy out the physical address of every page
                       while still holding the spinlock. */
                    const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
                    size_t iPage;
                    for (iPage = 0; iPage < cPages; iPage++)
                    {
                        paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
                        paPages[iPage].uReserved = 0;
                    }
                    RTSpinlockRelease(pSession->Spinlock);
                    return VINF_SUCCESS;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock);
    Log(("Failed to find %p!!!\n", (void *)uPtr));
    return VERR_INVALID_PARAMETER;
}
SUPR0_EXPORT_SYMBOL(SUPR0MemGetPhys);
3813
3814
3815/**
3816 * Free memory allocated by SUPR0MemAlloc().
3817 *
3818 * @returns IPRT status code.
3819 * @param pSession The session owning the allocation.
3820 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3821 */
3822SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3823{
3824 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3825 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3826 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
3827}
3828SUPR0_EXPORT_SYMBOL(SUPR0MemFree);
3829
3830
3831/**
3832 * Allocates a chunk of memory with a kernel or/and a user mode mapping.
3833 *
3834 * The memory is fixed and it's possible to query the physical addresses using
3835 * SUPR0MemGetPhys().
3836 *
3837 * @returns IPRT status code.
3838 * @param pSession The session to associated the allocation with.
3839 * @param cPages The number of pages to allocate.
3840 * @param fFlags Flags, reserved for the future. Must be zero.
3841 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3842 * NULL if no ring-3 mapping.
3843 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3844 * NULL if no ring-0 mapping.
3845 * @param paPages Where to store the addresses of the pages. Optional.
3846 */
3847SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
3848{
3849 int rc;
3850 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3851 LogFlow(("SUPR0PageAlloc: pSession=%p cb=%d ppvR3=%p\n", pSession, cPages, ppvR3));
3852
3853 /*
3854 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
3855 */
3856 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3857 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
3858 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3859 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
3860 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3861 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
3862 {
3863 Log(("SUPR0PageAlloc: Illegal request cb=%u; must be greater than 0 and smaller than %uMB (VBOX_MAX_ALLOC_PAGE_COUNT pages).\n", cPages, VBOX_MAX_ALLOC_PAGE_COUNT * (_1M / _4K)));
3864 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3865 }
3866
3867 /*
3868 * Let IPRT do the work.
3869 */
3870 if (ppvR0)
3871 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, false /*fExecutable*/);
3872 else
3873 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
3874 if (RT_SUCCESS(rc))
3875 {
3876 int rc2;
3877 if (ppvR3)
3878 {
3879 /* Make sure memory mapped into ring-3 is zero initialized if we can: */
3880 if ( ppvR0
3881 && !RTR0MemObjWasZeroInitialized(Mem.MemObj))
3882 {
3883 void *pv = RTR0MemObjAddress(Mem.MemObj);
3884 Assert(pv || !ppvR0);
3885 if (pv)
3886 RT_BZERO(pv, (size_t)cPages * PAGE_SIZE);
3887 }
3888
3889 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3890 }
3891 else
3892 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
3893 if (RT_SUCCESS(rc))
3894 {
3895 Mem.eType = MEMREF_TYPE_PAGE;
3896 rc = supdrvMemAdd(&Mem, pSession);
3897 if (!rc)
3898 {
3899 if (ppvR3)
3900 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3901 if (ppvR0)
3902 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3903 if (paPages)
3904 {
3905 uint32_t iPage = cPages;
3906 while (iPage-- > 0)
3907 {
3908 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
3909 Assert(paPages[iPage] != NIL_RTHCPHYS);
3910 }
3911 }
3912 return VINF_SUCCESS;
3913 }
3914
3915 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3916 AssertRC(rc2);
3917 }
3918
3919 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3920 AssertRC(rc2);
3921 }
3922 return rc;
3923}
3924SUPR0_EXPORT_SYMBOL(SUPR0PageAllocEx);
3925
3926
/**
 * Maps a chunk of memory previously allocated by SUPR0PageAllocEx into kernel
 * space.
 *
 * @returns IPRT status code.
 * @param   pSession    The session to associated the allocation with.
 * @param   pvR3        The ring-3 address returned by SUPR0PageAllocEx.
 * @param   offSub      Where to start mapping. Must be page aligned.
 * @param   cbSub       How much to map. Must be page aligned.
 * @param   fFlags      Flags, MBZ.
 * @param   ppvR0       Where to return the address of the ring-0 mapping on
 *                      success.
 */
SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
                                  uint32_t fFlags, PRTR0PTR ppvR0)
{
    int             rc;
    PSUPDRVBUNDLE   pBundle;
    RTR0MEMOBJ      hMemObj = NIL_RTR0MEMOBJ;
    LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));

    /*
     * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub, VERR_INVALID_PARAMETER);

    /*
     * Find the memory object.
     * Accepts either a PAGE allocation's ring-3 mapping address or a LOCKED
     * (user-locked) range's ring-3 address.
     */
    RTSpinlockAcquire(pSession->Spinlock);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                if (   (   pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
                        && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                        && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                        && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
                    || (   pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
                        && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                        && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
                        && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
                {
                    hMemObj = pBundle->aMem[i].MemObj;
                    break;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock);
    /* NOTE(review): hMemObj is used after dropping the spinlock; presumably the
       object stays alive because the owning session holds the reference until
       SUPR0PageFree/session close - confirm the lifetime contract. */

    rc = VERR_INVALID_PARAMETER;
    if (hMemObj != NIL_RTR0MEMOBJ)
    {
        /*
         * Do some further input validations before calling IPRT.
         * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
         */
        size_t cbMemObj = RTR0MemObjSize(hMemObj);
        if (    offSub < cbMemObj
            &&  cbSub <= cbMemObj
            &&  offSub + cbSub <= cbMemObj)
        {
            RTR0MEMOBJ hMapObj;
            rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
                                       RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
            if (RT_SUCCESS(rc))
                *ppvR0 = RTR0MemObjAddress(hMapObj);
        }
        else
            SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);

    }
    return rc;
}
SUPR0_EXPORT_SYMBOL(SUPR0PageMapKernel);
4011
4012
4013/**
4014 * Changes the page level protection of one or more pages previously allocated
4015 * by SUPR0PageAllocEx.
4016 *
4017 * @returns IPRT status code.
4018 * @param pSession The session to associated the allocation with.
4019 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
4020 * NIL_RTR3PTR if the ring-3 mapping should be unaffected.
4021 * @param pvR0 The ring-0 address returned by SUPR0PageAllocEx.
4022 * NIL_RTR0PTR if the ring-0 mapping should be unaffected.
4023 * @param offSub Where to start changing. Must be page aligned.
4024 * @param cbSub How much to change. Must be page aligned.
4025 * @param fProt The new page level protection, see RTMEM_PROT_*.
4026 */
4027SUPR0DECL(int) SUPR0PageProtect(PSUPDRVSESSION pSession, RTR3PTR pvR3, RTR0PTR pvR0, uint32_t offSub, uint32_t cbSub, uint32_t fProt)
4028{
4029 int rc;
4030 PSUPDRVBUNDLE pBundle;
4031 RTR0MEMOBJ hMemObjR0 = NIL_RTR0MEMOBJ;
4032 RTR0MEMOBJ hMemObjR3 = NIL_RTR0MEMOBJ;
4033 LogFlow(("SUPR0PageProtect: pSession=%p pvR3=%p pvR0=%p offSub=%#x cbSub=%#x fProt-%#x\n", pSession, pvR3, pvR0, offSub, cbSub, fProt));
4034
4035 /*
4036 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
4037 */
4038 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4039 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)), VERR_INVALID_PARAMETER);
4040 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
4041 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
4042 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
4043
4044 /*
4045 * Find the memory object.
4046 */
4047 RTSpinlockAcquire(pSession->Spinlock);
4048 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
4049 {
4050 if (pBundle->cUsed > 0)
4051 {
4052 unsigned i;
4053 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
4054 {
4055 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
4056 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
4057 && ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
4058 || pvR3 == NIL_RTR3PTR)
4059 && ( pvR0 == NIL_RTR0PTR
4060 || RTR0MemObjAddress(pBundle->aMem[i].MemObj) == pvR0)
4061 && ( pvR3 == NIL_RTR3PTR
4062 || RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3))
4063 {
4064 if (pvR0 != NIL_RTR0PTR)
4065 hMemObjR0 = pBundle->aMem[i].MemObj;
4066 if (pvR3 != NIL_RTR3PTR)
4067 hMemObjR3 = pBundle->aMem[i].MapObjR3;
4068 break;
4069 }
4070 }
4071 }
4072 }
4073 RTSpinlockRelease(pSession->Spinlock);
4074
4075 rc = VERR_INVALID_PARAMETER;
4076 if ( hMemObjR0 != NIL_RTR0MEMOBJ
4077 || hMemObjR3 != NIL_RTR0MEMOBJ)
4078 {
4079 /*
4080 * Do some further input validations before calling IPRT.
4081 */
4082 size_t cbMemObj = hMemObjR0 != NIL_RTR0PTR ? RTR0MemObjSize(hMemObjR0) : RTR0MemObjSize(hMemObjR3);
4083 if ( offSub < cbMemObj
4084 && cbSub <= cbMemObj
4085 && offSub + cbSub <= cbMemObj)
4086 {
4087 rc = VINF_SUCCESS;
4088 if (hMemObjR3 != NIL_RTR0PTR)
4089 rc = RTR0MemObjProtect(hMemObjR3, offSub, cbSub, fProt);
4090 if (hMemObjR0 != NIL_RTR0PTR && RT_SUCCESS(rc))
4091 rc = RTR0MemObjProtect(hMemObjR0, offSub, cbSub, fProt);
4092 }
4093 else
4094 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
4095
4096 }
4097 return rc;
4098
4099}
4100SUPR0_EXPORT_SYMBOL(SUPR0PageProtect);
4101
4102
4103/**
4104 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
4105 *
4106 * @returns IPRT status code.
4107 * @param pSession The session owning the allocation.
4108 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
4109 * SUPR0PageAllocEx().
4110 */
4111SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
4112{
4113 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
4114 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4115 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
4116}
4117SUPR0_EXPORT_SYMBOL(SUPR0PageFree);
4118
4119
4120/**
4121 * Reports a bad context, currenctly that means EFLAGS.AC is 0 instead of 1.
4122 *
4123 * @param pDevExt The device extension.
4124 * @param pszFile The source file where the caller detected the bad
4125 * context.
4126 * @param uLine The line number in @a pszFile.
4127 * @param pszExtra Optional additional message to give further hints.
4128 */
4129void VBOXCALL supdrvBadContext(PSUPDRVDEVEXT pDevExt, const char *pszFile, uint32_t uLine, const char *pszExtra)
4130{
4131 uint32_t cCalls;
4132
4133 /*
4134 * Shorten the filename before displaying the message.
4135 */
4136 for (;;)
4137 {
4138 const char *pszTmp = strchr(pszFile, '/');
4139 if (!pszTmp)
4140 pszTmp = strchr(pszFile, '\\');
4141 if (!pszTmp)
4142 break;
4143 pszFile = pszTmp + 1;
4144 }
4145 if (RT_VALID_PTR(pszExtra) && *pszExtra)
4146 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s: %s\n", uLine, pszFile, pszExtra);
4147 else
4148 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s!\n", uLine, pszFile);
4149
4150 /*
4151 * Record the incident so that we stand a chance of blocking I/O controls
4152 * before panicing the system.
4153 */
4154 cCalls = ASMAtomicIncU32(&pDevExt->cBadContextCalls);
4155 if (cCalls > UINT32_MAX - _1K)
4156 ASMAtomicWriteU32(&pDevExt->cBadContextCalls, UINT32_MAX - _1K);
4157}
4158
4159
4160/**
4161 * Reports a bad context, currenctly that means EFLAGS.AC is 0 instead of 1.
4162 *
4163 * @param pSession The session of the caller.
4164 * @param pszFile The source file where the caller detected the bad
4165 * context.
4166 * @param uLine The line number in @a pszFile.
4167 * @param pszExtra Optional additional message to give further hints.
4168 */
4169SUPR0DECL(void) SUPR0BadContext(PSUPDRVSESSION pSession, const char *pszFile, uint32_t uLine, const char *pszExtra)
4170{
4171 PSUPDRVDEVEXT pDevExt;
4172
4173 AssertReturnVoid(SUP_IS_SESSION_VALID(pSession));
4174 pDevExt = pSession->pDevExt;
4175
4176 supdrvBadContext(pDevExt, pszFile, uLine, pszExtra);
4177}
4178SUPR0_EXPORT_SYMBOL(SUPR0BadContext);
4179
4180
/**
 * Gets the paging mode of the current CPU.
 *
 * @returns Paging mode, SUPPAGEINGMODE_INVALID on error.
 */
SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
{
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    SUPPAGINGMODE enmMode;

    /* Paging must be enabled in protected mode, otherwise there is no mode to report. */
    RTR0UINTREG cr0 = ASMGetCR0();
    if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
        enmMode = SUPPAGINGMODE_INVALID;
    else
    {
        RTR0UINTREG cr4 = ASMGetCR4();

        /* Bit 0 = EFER.NXE active, bit 1 = EFER.LMA (long mode active); only
           meaningful when PAE is on and the CPU advertises NX/long mode. */
        uint32_t fNXEPlusLMA = 0;
        if (cr4 & X86_CR4_PAE)
        {
            uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
            if (fExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
            {
                /* Only read EFER after confirming it exists (NX or LM support). */
                uint64_t efer = ASMRdMsr(MSR_K6_EFER);
                if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX)        && (efer & MSR_K6_EFER_NXE))
                    fNXEPlusLMA |= RT_BIT(0);
                if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
                    fNXEPlusLMA |= RT_BIT(1);
            }
        }

        /* Decode the combination of CR4.PAE, CR4.PGE, EFER.NXE and EFER.LMA
           into one of the SUPPAGINGMODE values. */
        switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
        {
            case 0:
                enmMode = SUPPAGINGMODE_32_BIT;
                break;

            case X86_CR4_PGE:
                enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
                break;

            case X86_CR4_PAE:
                enmMode = SUPPAGINGMODE_PAE;
                break;

            case X86_CR4_PAE | RT_BIT(0):
                enmMode = SUPPAGINGMODE_PAE_NX;
                break;

            case X86_CR4_PAE | X86_CR4_PGE:
                enmMode = SUPPAGINGMODE_PAE_GLOBAL;
                break;

            /* NOTE(review): PAE+PGE+NX maps to PAE_GLOBAL (not PAE_GLOBAL_NX)
               here, unlike the AMD64 cases below - looks intentional upstream,
               but worth confirming. */
            case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
                enmMode = SUPPAGINGMODE_PAE_GLOBAL;
                break;

            case RT_BIT(1) | X86_CR4_PAE:
                enmMode = SUPPAGINGMODE_AMD64;
                break;

            case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
                enmMode = SUPPAGINGMODE_AMD64_NX;
                break;

            case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
                enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
                break;

            case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
                enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
                break;

            default:
                AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
                enmMode = SUPPAGINGMODE_INVALID;
                break;
        }
    }
    return enmMode;

#elif defined(RT_ARCH_ARM64)
    /** @todo portme? */
    return SUPPAGINGMODE_INVALID;

#else
# error "port me"
#endif
}
SUPR0_EXPORT_SYMBOL(SUPR0GetPagingMode);
4270
4271
4272#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
4273
4274/**
4275 * Change CR4 and take care of the kernel CR4 shadow if applicable.
4276 *
4277 * CR4 shadow handling is required for Linux >= 4.0. Calling this function
4278 * instead of ASMSetCR4() is only necessary for semi-permanent CR4 changes
4279 * for code with interrupts enabled.
4280 *
4281 * @returns the old CR4 value.
4282 *
4283 * @param fOrMask bits to be set in CR4.
4284 * @param fAndMask bits to be cleard in CR4.
4285 *
4286 * @remarks Must be called with preemption/interrupts disabled.
4287 */
4288SUPR0DECL(RTCCUINTREG) SUPR0ChangeCR4(RTCCUINTREG fOrMask, RTCCUINTREG fAndMask)
4289{
4290# ifdef RT_OS_LINUX
4291 return supdrvOSChangeCR4(fOrMask, fAndMask);
4292# else
4293 RTCCUINTREG uOld = ASMGetCR4();
4294 RTCCUINTREG uNew = (uOld & fAndMask) | fOrMask;
4295 if (uNew != uOld)
4296 ASMSetCR4(uNew);
4297 return uOld;
4298# endif
4299}
4300SUPR0_EXPORT_SYMBOL(SUPR0ChangeCR4);
4301
4302
4303/**
4304 * Enables or disabled hardware virtualization extensions using native OS APIs.
4305 *
4306 * @returns VBox status code.
4307 * @retval VINF_SUCCESS on success.
4308 * @retval VERR_NOT_SUPPORTED if not supported by the native OS.
4309 *
4310 * @param fEnable Whether to enable or disable.
4311 */
4312SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
4313{
4314# if defined(RT_OS_DARWIN) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
4315 return supdrvOSEnableVTx(fEnable);
4316# else
4317 RT_NOREF1(fEnable);
4318 return VERR_NOT_SUPPORTED;
4319# endif
4320}
4321SUPR0_EXPORT_SYMBOL(SUPR0EnableVTx);
4322
4323
4324/**
4325 * Suspends hardware virtualization extensions using the native OS API.
4326 *
4327 * This is called prior to entering raw-mode context.
4328 *
4329 * @returns @c true if suspended, @c false if not.
4330 */
4331SUPR0DECL(bool) SUPR0SuspendVTxOnCpu(void)
4332{
4333# if defined(RT_OS_DARWIN) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
4334 return supdrvOSSuspendVTxOnCpu();
4335# else
4336 return false;
4337# endif
4338}
4339SUPR0_EXPORT_SYMBOL(SUPR0SuspendVTxOnCpu);
4340
4341
4342/**
4343 * Resumes hardware virtualization extensions using the native OS API.
4344 *
4345 * This is called after to entering raw-mode context.
4346 *
4347 * @param fSuspended The return value of SUPR0SuspendVTxOnCpu.
4348 */
4349SUPR0DECL(void) SUPR0ResumeVTxOnCpu(bool fSuspended)
4350{
4351# if defined(RT_OS_DARWIN) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
4352 supdrvOSResumeVTxOnCpu(fSuspended);
4353# else
4354 RT_NOREF1(fSuspended);
4355 Assert(!fSuspended);
4356# endif
4357}
4358SUPR0_EXPORT_SYMBOL(SUPR0ResumeVTxOnCpu);
4359
4360
4361SUPR0DECL(int) SUPR0GetCurrentGdtRw(RTHCUINTPTR *pGdtRw)
4362{
4363# if defined(RT_OS_LINUX) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
4364 return supdrvOSGetCurrentGdtRw(pGdtRw);
4365# else
4366 NOREF(pGdtRw);
4367 return VERR_NOT_IMPLEMENTED;
4368# endif
4369}
4370SUPR0_EXPORT_SYMBOL(SUPR0GetCurrentGdtRw);
4371
4372
/**
 * Gets AMD-V and VT-x support for the calling CPU.
 *
 * @returns VBox status code.
 * @retval  VERR_VMX_NO_VMX on Intel-compatible CPUs lacking usable VT-x.
 * @retval  VERR_SVM_NO_SVM on AMD-compatible CPUs lacking usable AMD-V.
 * @retval  VERR_UNSUPPORTED_CPU when the vendor is unrecognized or CPUID is absent.
 * @param   pfCaps      Where to store whether VT-x (SUPVTCAPS_VT_X) or AMD-V
 *                      (SUPVTCAPS_AMD_V) is supported.  Always written (zeroed
 *                      on failure).
 */
SUPR0DECL(int) SUPR0GetVTSupport(uint32_t *pfCaps)
{
    Assert(pfCaps);
    *pfCaps = 0;

    /* Check if the CPU even supports CPUID (extremely ancient CPUs). */
    if (ASMHasCpuId())
    {
        /* Check the range of standard CPUID leafs. */
        uint32_t uMaxLeaf, uVendorEbx, uVendorEcx, uVendorEdx;
        ASMCpuId(0, &uMaxLeaf, &uVendorEbx, &uVendorEcx, &uVendorEdx);
        if (RTX86IsValidStdRange(uMaxLeaf))
        {
            /* Query the standard CPUID leaf. */
            uint32_t fFeatEcx, fFeatEdx, uDummy;
            ASMCpuId(1, &uDummy, &uDummy, &fFeatEcx, &fFeatEdx);

            /* Check if the vendor is Intel (or compatible). */
            if (   RTX86IsIntelCpu(uVendorEbx, uVendorEcx, uVendorEdx)
                || RTX86IsViaCentaurCpu(uVendorEbx, uVendorEcx, uVendorEdx)
                || RTX86IsShanghaiCpu(uVendorEbx, uVendorEcx, uVendorEdx))
            {
                /* Check VT-x support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function. */
                if (   (fFeatEcx & X86_CPUID_FEATURE_ECX_VMX)
                    && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
                    && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
                {
                    *pfCaps = SUPVTCAPS_VT_X;
                    return VINF_SUCCESS;
                }
                return VERR_VMX_NO_VMX;
            }

            /* Check if the vendor is AMD (or compatible). */
            if (   RTX86IsAmdCpu(uVendorEbx, uVendorEcx, uVendorEdx)
                || RTX86IsHygonCpu(uVendorEbx, uVendorEcx, uVendorEdx))
            {
                uint32_t fExtFeatEcx, uExtMaxId;
                ASMCpuId(0x80000000, &uExtMaxId, &uDummy, &uDummy, &uDummy);
                ASMCpuId(0x80000001, &uDummy, &uDummy, &fExtFeatEcx, &uDummy);

                /* Check AMD-V support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function.
                   Leaf 0x8000000a is the SVM feature leaf, hence the uExtMaxId floor. */
                if (   RTX86IsValidExtRange(uExtMaxId)
                    && uExtMaxId >= 0x8000000a
                    && (fExtFeatEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
                    && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
                    && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
                {
                    *pfCaps = SUPVTCAPS_AMD_V;
                    return VINF_SUCCESS;
                }
                return VERR_SVM_NO_SVM;
            }
        }
    }
    return VERR_UNSUPPORTED_CPU;
}
SUPR0_EXPORT_SYMBOL(SUPR0GetVTSupport);
4438
4439
/**
 * Checks if Intel VT-x feature is usable on this CPU.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if VMXON is permitted (possibly after this function
 *          programmed and locked MSR_IA32_FEATURE_CONTROL itself).
 * @retval  VERR_VMX_MSR_ALL_VMX_DISABLED, VERR_VMX_MSR_VMX_DISABLED if the
 *          BIOS locked the MSR with the relevant VMXON bits clear.
 * @retval  VERR_VMX_MSR_LOCKING_FAILED, VERR_VMX_MSR_VMX_ENABLE_FAILED,
 *          VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED if our own attempt to program
 *          and lock the MSR did not stick.
 *
 * @param   pfIsSmxModeAmbiguous    Where to return whether the SMX mode causes
 *                                  ambiguity that makes us unsure whether we
 *                                  really can use VT-x or not.
 *
 * @remarks Must be called with preemption disabled.
 *          The caller is also expected to check that the CPU is an Intel (or
 *          VIA/Shanghai) CPU -and- that it supports VT-x.  Otherwise, this
 *          function might throw a \#GP fault as it tries to read/write MSRs
 *          that may not be present!
 */
SUPR0DECL(int) SUPR0GetVmxUsability(bool *pfIsSmxModeAmbiguous)
{
    uint64_t fFeatMsr;
    bool     fMaybeSmxMode;
    bool     fMsrLocked;
    bool     fSmxVmxAllowed;
    bool     fVmxAllowed;
    bool     fIsSmxModeAmbiguous;
    int      rc;

    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    /* Snapshot the feature control MSR and the SMX hint from CR4. */
    fFeatMsr            = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
    fMaybeSmxMode       = RT_BOOL(ASMGetCR4() & X86_CR4_SMXE);
    fMsrLocked          = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
    fSmxVmxAllowed      = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
    fVmxAllowed         = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
    fIsSmxModeAmbiguous = false;
    rc                  = VERR_INTERNAL_ERROR_5;

    /* Check if the LOCK bit is set but excludes the required VMXON bit. */
    if (fMsrLocked)
    {
        /* MSR is locked by the BIOS/firmware; we can only report what it allows. */
        if (fVmxAllowed && fSmxVmxAllowed)
            rc = VINF_SUCCESS;
        else if (!fVmxAllowed && !fSmxVmxAllowed)
            rc = VERR_VMX_MSR_ALL_VMX_DISABLED;
        else if (!fMaybeSmxMode)
        {
            if (fVmxAllowed)
                rc = VINF_SUCCESS;
            else
                rc = VERR_VMX_MSR_VMX_DISABLED;
        }
        else
        {
            /*
             * CR4.SMXE is set but this doesn't mean the CPU is necessarily in SMX mode. We shall assume
             * that it is -not- and that it is a stupid BIOS/OS setting CR4.SMXE for no good reason.
             * See @bugref{6873}.
             */
            Assert(fMaybeSmxMode == true);
            fIsSmxModeAmbiguous = true;
            rc = VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * MSR is not yet locked; we can change it ourselves here. Once the lock bit is set,
         * this MSR can no longer be modified.
         *
         * Set both the VMX and SMX_VMX bits (if supported) as we can't determine SMX mode
         * accurately. See @bugref{6873}.
         *
         * We need to check for SMX hardware support here, before writing the MSR as
         * otherwise we will #GP fault on CPUs that do not support it. Callers do not check
         * for it.
         */
        uint32_t fFeaturesECX, uDummy;
# ifdef VBOX_STRICT
        /* Callers should have verified these at some point. */
        uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
        ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
        Assert(RTX86IsValidStdRange(uMaxId));
        Assert(   RTX86IsIntelCpu(     uVendorEBX, uVendorECX, uVendorEDX)
               || RTX86IsViaCentaurCpu(uVendorEBX, uVendorECX, uVendorEDX)
               || RTX86IsShanghaiCpu(  uVendorEBX, uVendorECX, uVendorEDX));
# endif
        ASMCpuId(1, &uDummy, &uDummy, &fFeaturesECX, &uDummy);
        bool fSmxVmxHwSupport = false;
        if (   (fFeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
            && (fFeaturesECX & X86_CPUID_FEATURE_ECX_SMX))
            fSmxVmxHwSupport = true;

        fFeatMsr |= MSR_IA32_FEATURE_CONTROL_LOCK
                  | MSR_IA32_FEATURE_CONTROL_VMXON;
        if (fSmxVmxHwSupport)
            fFeatMsr |= MSR_IA32_FEATURE_CONTROL_SMX_VMXON;

        /*
         * Commit.
         */
        ASMWrMsr(MSR_IA32_FEATURE_CONTROL, fFeatMsr);

        /*
         * Verify.
         */
        /* Re-read the MSR to confirm the write (and the lock bit) actually took. */
        fFeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
        fMsrLocked = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
        if (fMsrLocked)
        {
            fSmxVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
            fVmxAllowed    = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
            if (   fVmxAllowed
                && (   !fSmxVmxHwSupport
                    || fSmxVmxAllowed))
                rc = VINF_SUCCESS;
            else
                rc = !fSmxVmxHwSupport ? VERR_VMX_MSR_VMX_ENABLE_FAILED : VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED;
        }
        else
            rc = VERR_VMX_MSR_LOCKING_FAILED;
    }

    if (pfIsSmxModeAmbiguous)
        *pfIsSmxModeAmbiguous = fIsSmxModeAmbiguous;

    return rc;
}
SUPR0_EXPORT_SYMBOL(SUPR0GetVmxUsability);
4565
4566
/**
 * Checks if AMD-V SVM feature is usable on this CPU.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if SVM is not disabled by the BIOS and, when
 *          @a fInitSvm is true, EFER.SVME could be set (the probe is undone
 *          before returning, leaving EFER as we found it).
 * @retval  VERR_SVM_DISABLED if VM_CR has the SVM disable bit set.
 * @retval  VERR_SVM_IN_USE if EFER.SVME was already set by someone else.
 * @retval  VERR_SVM_ILLEGAL_EFER_MSR if the SVME bit did not stick.
 *
 * @param   fInitSvm    If usable, try to initialize SVM on this CPU.
 *
 * @remarks Must be called with preemption disabled.
 */
SUPR0DECL(int) SUPR0GetSvmUsability(bool fInitSvm)
{
    int      rc;
    uint64_t fVmCr;
    uint64_t fEfer;

    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    fVmCr = ASMRdMsr(MSR_K8_VM_CR);
    if (!(fVmCr & MSR_K8_VM_CR_SVM_DISABLE))
    {
        rc = VINF_SUCCESS;
        if (fInitSvm)
        {
            /* Turn on SVM in the EFER MSR. */
            fEfer = ASMRdMsr(MSR_K6_EFER);
            if (fEfer & MSR_K6_EFER_SVME)
                rc = VERR_SVM_IN_USE; /* Someone (probably another hypervisor) got there first. */
            else
            {
                ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);

                /* Paranoia. */
                /* Read the MSR back to confirm the SVME bit actually stuck. */
                fEfer = ASMRdMsr(MSR_K6_EFER);
                if (fEfer & MSR_K6_EFER_SVME)
                {
                    /* Restore previous value. */
                    ASMWrMsr(MSR_K6_EFER, fEfer & ~MSR_K6_EFER_SVME);
                }
                else
                    rc = VERR_SVM_ILLEGAL_EFER_MSR;
            }
        }
    }
    else
        rc = VERR_SVM_DISABLED;
    return rc;
}
SUPR0_EXPORT_SYMBOL(SUPR0GetSvmUsability);
4613
4614
/**
 * Queries the AMD-V and VT-x capabilities of the calling CPU.
 *
 * @returns VBox status code.
 * @retval  VERR_VMX_NO_VMX
 * @retval  VERR_VMX_MSR_ALL_VMX_DISABLED
 * @retval  VERR_VMX_MSR_VMX_DISABLED
 * @retval  VERR_VMX_MSR_LOCKING_FAILED
 * @retval  VERR_VMX_MSR_VMX_ENABLE_FAILED
 * @retval  VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
 * @retval  VERR_SVM_NO_SVM
 * @retval  VERR_SVM_DISABLED
 * @retval  VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
 *          (centaur)/Shanghai CPU.
 *
 * @param   pfCaps      Where to store the capabilities (SUPVTCAPS_XXX).
 */
int VBOXCALL supdrvQueryVTCapsInternal(uint32_t *pfCaps)
{
    int rc = VERR_UNSUPPORTED_CPU;
    bool fIsSmxModeAmbiguous = false;
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    /*
     * Input validation.
     */
    AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
    *pfCaps = 0;

    /* We may modify MSRs and re-read them, disable preemption so we make sure we don't migrate CPUs. */
    RTThreadPreemptDisable(&PreemptState);

    /* Check if VT-x/AMD-V is supported. */
    rc = SUPR0GetVTSupport(pfCaps);
    if (RT_SUCCESS(rc))
    {
        /* Check if VT-x is supported. */
        if (*pfCaps & SUPVTCAPS_VT_X)
        {
            /* Check if VT-x is usable. */
            rc = SUPR0GetVmxUsability(&fIsSmxModeAmbiguous);
            if (RT_SUCCESS(rc))
            {
                /* Query some basic VT-x capabilities (mainly required by our GUI). */
                VMXCTLSMSR vtCaps;
                vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
                if (vtCaps.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
                {
                    /* Secondary controls exist; translate the interesting ones to SUPVTCAPS flags. */
                    vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
                    if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_EPT)
                        *pfCaps |= SUPVTCAPS_NESTED_PAGING;
                    if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
                        *pfCaps |= SUPVTCAPS_VTX_UNRESTRICTED_GUEST;
                    if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING)
                        *pfCaps |= SUPVTCAPS_VTX_VMCS_SHADOWING;
                }
            }
        }
        /* Check if AMD-V is supported. */
        else if (*pfCaps & SUPVTCAPS_AMD_V)
        {
            /* Check if SVM is usable. */
            rc = SUPR0GetSvmUsability(false /* fInitSvm */);
            if (RT_SUCCESS(rc))
            {
                /* Query some basic AMD-V capabilities (mainly required by our GUI). */
                uint32_t uDummy, fSvmFeatures;
                ASMCpuId(0x8000000a, &uDummy, &uDummy, &uDummy, &fSvmFeatures);
                if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
                    *pfCaps |= SUPVTCAPS_NESTED_PAGING;
                if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD)
                    *pfCaps |= SUPVTCAPS_AMDV_VIRT_VMSAVE_VMLOAD;
            }
        }
    }

    /* Restore preemption. */
    RTThreadPreemptRestore(&PreemptState);

    /* After restoring preemption, if we may be in SMX mode, print a warning as it's difficult to debug such problems. */
    if (fIsSmxModeAmbiguous)
        SUPR0Printf(("WARNING! CR4 hints SMX mode but your CPU is too secretive. Proceeding anyway... We wish you good luck!\n"));

    return rc;
}
4700
4701
4702/**
4703 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4704 *
4705 * @returns VBox status code.
4706 * @retval VERR_VMX_NO_VMX
4707 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4708 * @retval VERR_VMX_MSR_VMX_DISABLED
4709 * @retval VERR_VMX_MSR_LOCKING_FAILED
4710 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4711 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4712 * @retval VERR_SVM_NO_SVM
4713 * @retval VERR_SVM_DISABLED
4714 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4715 * (centaur)/Shanghai CPU.
4716 *
4717 * @param pSession The session handle.
4718 * @param pfCaps Where to store the capabilities.
4719 */
4720SUPR0DECL(int) SUPR0QueryVTCaps(PSUPDRVSESSION pSession, uint32_t *pfCaps)
4721{
4722 /*
4723 * Input validation.
4724 */
4725 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4726 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4727
4728 /*
4729 * Call common worker.
4730 */
4731 return supdrvQueryVTCapsInternal(pfCaps);
4732}
4733SUPR0_EXPORT_SYMBOL(SUPR0QueryVTCaps);
4734
4735
4736/**
4737 * Queries the CPU microcode revision.
4738 *
4739 * @returns VBox status code.
4740 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4741 * readable microcode rev.
4742 *
4743 * @param puRevision Where to store the microcode revision.
4744 */
4745static int VBOXCALL supdrvQueryUcodeRev(uint32_t *puRevision)
4746{
4747 int rc = VERR_UNSUPPORTED_CPU;
4748 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4749
4750 /*
4751 * Input validation.
4752 */
4753 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4754
4755 *puRevision = 0;
4756
4757 /* Disable preemption so we make sure we don't migrate CPUs, just in case. */
4758 /* NB: We assume that there aren't mismatched microcode revs in the system. */
4759 RTThreadPreemptDisable(&PreemptState);
4760
4761 if (ASMHasCpuId())
4762 {
4763 uint32_t uDummy, uTFMSEAX;
4764 uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
4765
4766 ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
4767 ASMCpuId(1, &uTFMSEAX, &uDummy, &uDummy, &uDummy);
4768
4769 if (RTX86IsValidStdRange(uMaxId))
4770 {
4771 uint64_t uRevMsr;
4772 if (RTX86IsIntelCpu(uVendorEBX, uVendorECX, uVendorEDX))
4773 {
4774 /* Architectural MSR available on Pentium Pro and later. */
4775 if (RTX86GetCpuFamily(uTFMSEAX) >= 6)
4776 {
4777 /* Revision is in the high dword. */
4778 uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
4779 *puRevision = RT_HIDWORD(uRevMsr);
4780 rc = VINF_SUCCESS;
4781 }
4782 }
4783 else if ( RTX86IsAmdCpu(uVendorEBX, uVendorECX, uVendorEDX)
4784 || RTX86IsHygonCpu(uVendorEBX, uVendorECX, uVendorEDX))
4785 {
4786 /* Not well documented, but at least all AMD64 CPUs support this. */
4787 if (RTX86GetCpuFamily(uTFMSEAX) >= 15)
4788 {
4789 /* Revision is in the low dword. */
4790 uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID); /* Same MSR as Intel. */
4791 *puRevision = RT_LODWORD(uRevMsr);
4792 rc = VINF_SUCCESS;
4793 }
4794 }
4795 }
4796 }
4797
4798 RTThreadPreemptRestore(&PreemptState);
4799
4800 return rc;
4801}
4802
4803
4804/**
4805 * Queries the CPU microcode revision.
4806 *
4807 * @returns VBox status code.
4808 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4809 * readable microcode rev.
4810 *
4811 * @param pSession The session handle.
4812 * @param puRevision Where to store the microcode revision.
4813 */
4814SUPR0DECL(int) SUPR0QueryUcodeRev(PSUPDRVSESSION pSession, uint32_t *puRevision)
4815{
4816 /*
4817 * Input validation.
4818 */
4819 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4820 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4821
4822 /*
4823 * Call common worker.
4824 */
4825 return supdrvQueryUcodeRev(puRevision);
4826}
4827SUPR0_EXPORT_SYMBOL(SUPR0QueryUcodeRev);
4828
4829
/**
 * Gets hardware-virtualization MSRs of the calling CPU.
 *
 * @returns VBox status code.
 * @retval  VERR_INTERNAL_ERROR_2 if @a fCaps claims support but neither VT-x
 *          nor AMD-V is flagged (should not happen).
 *
 * @param   pMsrs       Where to store the hardware-virtualization MSRs.
 * @param   fCaps       Hardware virtualization capabilities (SUPVTCAPS_XXX). Pass 0
 *                      to explicitly check for the presence of VT-x/AMD-V before
 *                      querying MSRs.
 * @param   fForce      Force querying of MSRs from the hardware (currently unused).
 */
SUPR0DECL(int) SUPR0GetHwvirtMsrs(PSUPHWVIRTMSRS pMsrs, uint32_t fCaps, bool fForce)
{
    int rc;
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RT_NOREF_PV(fForce);

    /*
     * Input validation.
     */
    AssertPtrReturn(pMsrs, VERR_INVALID_POINTER);

    /*
     * Disable preemption so we make sure we don't migrate CPUs and because
     * we access global data.
     */
    RTThreadPreemptDisable(&PreemptState);

    /*
     * Query the MSRs from the hardware.
     */
    /* Assemble everything in a zeroed stack copy first; only copy out on success. */
    SUPHWVIRTMSRS Msrs;
    RT_ZERO(Msrs);

    /* If the caller claims VT-x/AMD-V is supported, don't need to recheck it. */
    if (!(fCaps & (SUPVTCAPS_VT_X | SUPVTCAPS_AMD_V)))
        rc = SUPR0GetVTSupport(&fCaps);
    else
        rc = VINF_SUCCESS;
    if (RT_SUCCESS(rc))
    {
        if (fCaps & SUPVTCAPS_VT_X)
        {
            /* Unconditional VMX capability MSRs. */
            Msrs.u.vmx.u64FeatCtrl  = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
            Msrs.u.vmx.u64Basic     = ASMRdMsr(MSR_IA32_VMX_BASIC);
            Msrs.u.vmx.PinCtls.u    = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
            Msrs.u.vmx.ProcCtls.u   = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
            Msrs.u.vmx.ExitCtls.u   = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
            Msrs.u.vmx.EntryCtls.u  = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
            Msrs.u.vmx.u64Misc      = ASMRdMsr(MSR_IA32_VMX_MISC);
            Msrs.u.vmx.u64Cr0Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
            Msrs.u.vmx.u64Cr0Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
            Msrs.u.vmx.u64Cr4Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
            Msrs.u.vmx.u64Cr4Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
            Msrs.u.vmx.u64VmcsEnum  = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);

            /* The TRUE_* control MSRs only exist when VMX_BASIC says so. */
            if (RT_BF_GET(Msrs.u.vmx.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
            {
                Msrs.u.vmx.TruePinCtls.u   = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
                Msrs.u.vmx.TrueProcCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
                Msrs.u.vmx.TrueEntryCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
                Msrs.u.vmx.TrueExitCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
            }

            /* Secondary processor-based controls and their dependents. */
            if (Msrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
            {
                Msrs.u.vmx.ProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);

                if (Msrs.u.vmx.ProcCtls2.n.allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
                    Msrs.u.vmx.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);

                if (Msrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMFUNC)
                    Msrs.u.vmx.u64VmFunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
            }

            /* Tertiary processor-based controls. */
            if (Msrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
                Msrs.u.vmx.u64ProcCtls3 = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS3);

            /* Secondary exit controls. */
            if (Msrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_USE_SECONDARY_CTLS)
                Msrs.u.vmx.u64ExitCtls2 = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS2);
        }
        else if (fCaps & SUPVTCAPS_AMD_V)
        {
            Msrs.u.svm.u64MsrHwcr    = ASMRdMsr(MSR_K8_HWCR);
            Msrs.u.svm.u64MsrSmmAddr = ASMRdMsr(MSR_K7_SMM_ADDR);
            Msrs.u.svm.u64MsrSmmMask = ASMRdMsr(MSR_K7_SMM_MASK);
        }
        else
        {
            RTThreadPreemptRestore(&PreemptState);
            AssertMsgFailedReturn(("SUPR0GetVTSupport returns success but neither VT-x nor AMD-V reported!\n"),
                                  VERR_INTERNAL_ERROR_2);
        }

        /*
         * Copy the MSRs out.
         */
        memcpy(pMsrs, &Msrs, sizeof(*pMsrs));
    }

    RTThreadPreemptRestore(&PreemptState);

    return rc;
}
SUPR0_EXPORT_SYMBOL(SUPR0GetHwvirtMsrs);
4934
4935#endif /* defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) */
4936
4937
4938/**
4939 * Register a component factory with the support driver.
4940 *
4941 * This is currently restricted to kernel sessions only.
4942 *
4943 * @returns VBox status code.
4944 * @retval VINF_SUCCESS on success.
4945 * @retval VERR_NO_MEMORY if we're out of memory.
4946 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
4947 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4948 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4949 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4950 *
4951 * @param pSession The SUPDRV session (must be a ring-0 session).
4952 * @param pFactory Pointer to the component factory registration structure.
4953 *
4954 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
4955 */
4956SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4957{
4958 PSUPDRVFACTORYREG pNewReg;
4959 const char *psz;
4960 int rc;
4961
4962 /*
4963 * Validate parameters.
4964 */
4965 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4966 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4967 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4968 AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
4969 psz = RTStrEnd(pFactory->szName, sizeof(pFactory->szName));
4970 AssertReturn(psz, VERR_INVALID_PARAMETER);
4971
4972 /*
4973 * Allocate and initialize a new registration structure.
4974 */
4975 pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
4976 if (pNewReg)
4977 {
4978 pNewReg->pNext = NULL;
4979 pNewReg->pFactory = pFactory;
4980 pNewReg->pSession = pSession;
4981 pNewReg->cchName = psz - &pFactory->szName[0];
4982
4983 /*
4984 * Add it to the tail of the list after checking for prior registration.
4985 */
4986 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4987 if (RT_SUCCESS(rc))
4988 {
4989 PSUPDRVFACTORYREG pPrev = NULL;
4990 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4991 while (pCur && pCur->pFactory != pFactory)
4992 {
4993 pPrev = pCur;
4994 pCur = pCur->pNext;
4995 }
4996 if (!pCur)
4997 {
4998 if (pPrev)
4999 pPrev->pNext = pNewReg;
5000 else
5001 pSession->pDevExt->pComponentFactoryHead = pNewReg;
5002 rc = VINF_SUCCESS;
5003 }
5004 else
5005 rc = VERR_ALREADY_EXISTS;
5006
5007 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
5008 }
5009
5010 if (RT_FAILURE(rc))
5011 RTMemFree(pNewReg);
5012 }
5013 else
5014 rc = VERR_NO_MEMORY;
5015 return rc;
5016}
5017SUPR0_EXPORT_SYMBOL(SUPR0ComponentRegisterFactory);
5018
5019
5020/**
5021 * Deregister a component factory.
5022 *
5023 * @returns VBox status code.
5024 * @retval VINF_SUCCESS on success.
5025 * @retval VERR_NOT_FOUND if the factory wasn't registered.
5026 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
5027 * @retval VERR_INVALID_PARAMETER on invalid parameter.
5028 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
5029 *
5030 * @param pSession The SUPDRV session (must be a ring-0 session).
5031 * @param pFactory Pointer to the component factory registration structure
5032 * previously passed SUPR0ComponentRegisterFactory().
5033 *
5034 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
5035 */
5036SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
5037{
5038 int rc;
5039
5040 /*
5041 * Validate parameters.
5042 */
5043 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
5044 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
5045 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
5046
5047 /*
5048 * Take the lock and look for the registration record.
5049 */
5050 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
5051 if (RT_SUCCESS(rc))
5052 {
5053 PSUPDRVFACTORYREG pPrev = NULL;
5054 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
5055 while (pCur && pCur->pFactory != pFactory)
5056 {
5057 pPrev = pCur;
5058 pCur = pCur->pNext;
5059 }
5060 if (pCur)
5061 {
5062 if (!pPrev)
5063 pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
5064 else
5065 pPrev->pNext = pCur->pNext;
5066
5067 pCur->pNext = NULL;
5068 pCur->pFactory = NULL;
5069 pCur->pSession = NULL;
5070 rc = VINF_SUCCESS;
5071 }
5072 else
5073 rc = VERR_NOT_FOUND;
5074
5075 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
5076
5077 RTMemFree(pCur);
5078 }
5079 return rc;
5080}
5081SUPR0_EXPORT_SYMBOL(SUPR0ComponentDeregisterFactory);
5082
5083
5084/**
5085 * Queries a component factory.
5086 *
5087 * @returns VBox status code.
5088 * @retval VERR_INVALID_PARAMETER on invalid parameter.
5089 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
5090 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
5091 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
5092 *
5093 * @param pSession The SUPDRV session.
5094 * @param pszName The name of the component factory.
5095 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
5096 * @param ppvFactoryIf Where to store the factory interface.
5097 */
5098SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
5099{
5100 const char *pszEnd;
5101 size_t cchName;
5102 int rc;
5103
5104 /*
5105 * Validate parameters.
5106 */
5107 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
5108
5109 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
5110 pszEnd = RTStrEnd(pszName, RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
5111 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
5112 cchName = pszEnd - pszName;
5113
5114 AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
5115 pszEnd = RTStrEnd(pszInterfaceUuid, RTUUID_STR_LENGTH);
5116 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
5117
5118 AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
5119 *ppvFactoryIf = NULL;
5120
5121 /*
5122 * Take the lock and try all factories by this name.
5123 */
5124 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
5125 if (RT_SUCCESS(rc))
5126 {
5127 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
5128 rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
5129 while (pCur)
5130 {
5131 if ( pCur->cchName == cchName
5132 && !memcmp(pCur->pFactory->szName, pszName, cchName))
5133 {
5134 void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
5135 if (pvFactory)
5136 {
5137 *ppvFactoryIf = pvFactory;
5138 rc = VINF_SUCCESS;
5139 break;
5140 }
5141 rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
5142 }
5143
5144 /* next */
5145 pCur = pCur->pNext;
5146 }
5147
5148 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
5149 }
5150 return rc;
5151}
5152SUPR0_EXPORT_SYMBOL(SUPR0ComponentQueryFactory);
5153
5154
5155/**
5156 * Adds a memory object to the session.
5157 *
5158 * @returns IPRT status code.
5159 * @param pMem Memory tracking structure containing the
5160 * information to track.
5161 * @param pSession The session.
5162 */
5163static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
5164{
5165 PSUPDRVBUNDLE pBundle;
5166
5167 /*
5168 * Find free entry and record the allocation.
5169 */
5170 RTSpinlockAcquire(pSession->Spinlock);
5171 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
5172 {
5173 if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
5174 {
5175 unsigned i;
5176 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
5177 {
5178 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
5179 {
5180 pBundle->cUsed++;
5181 pBundle->aMem[i] = *pMem;
5182 RTSpinlockRelease(pSession->Spinlock);
5183 return VINF_SUCCESS;
5184 }
5185 }
5186 AssertFailed(); /* !!this can't be happening!!! */
5187 }
5188 }
5189 RTSpinlockRelease(pSession->Spinlock);
5190
5191 /*
5192 * Need to allocate a new bundle.
5193 * Insert into the last entry in the bundle.
5194 */
5195 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
5196 if (!pBundle)
5197 return VERR_NO_MEMORY;
5198
5199 /* take last entry. */
5200 pBundle->cUsed++;
5201 pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;
5202
5203 /* insert into list. */
5204 RTSpinlockAcquire(pSession->Spinlock);
5205 pBundle->pNext = pSession->Bundle.pNext;
5206 pSession->Bundle.pNext = pBundle;
5207 RTSpinlockRelease(pSession->Spinlock);
5208
5209 return VINF_SUCCESS;
5210}
5211
5212
/**
 * Releases a memory object referenced by pointer and type.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_PARAMETER if @a uPtr is NULL or not found in the
 *          session's bundles with the given type.
 *
 * @param   pSession    Session data.
 * @param   uPtr        Pointer to memory. This is matched against both the R0 and R3 addresses.
 * @param   eType       Memory type.
 */
static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
{
    PSUPDRVBUNDLE pBundle;

    /*
     * Validate input.
     */
    if (!uPtr)
    {
        Log(("Illegal address %p\n", (void *)uPtr));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Search for the address.
     */
    RTSpinlockAcquire(pSession->Spinlock);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                /* Match on type and either the ring-0 address or, when mapped, the ring-3 address. */
                if (    pBundle->aMem[i].eType == eType
                    &&  pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                    &&  (   (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
                         || (   pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                             && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
                   )
                {
                    /* Make a copy of it and release it outside the spinlock. */
                    SUPDRVMEMREF Mem = pBundle->aMem[i];
                    pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
                    pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
                    pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
                    RTSpinlockRelease(pSession->Spinlock);

                    /* Free the ring-3 mapping first, then the backing object (and any remaining mappings). */
                    if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
                    {
                        int rc = RTR0MemObjFree(Mem.MapObjR3, false);
                        AssertRC(rc); /** @todo figure out how to handle this. */
                    }
                    if (Mem.MemObj != NIL_RTR0MEMOBJ)
                    {
                        int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
                        AssertRC(rc); /** @todo figure out how to handle this. */
                    }
                    return VINF_SUCCESS;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock);
    Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
    return VERR_INVALID_PARAMETER;
}
5278
5279
/**
 * Opens an image. If it's the first time it's opened the call must upload
 * the bits using the supdrvIOCtl_LdrLoad() / SUPDRV_IOCTL_LDR_LOAD function.
 *
 * This is the 1st step of the loading.
 *
 * @returns IPRT status code.
 * @retval  VERR_TOO_MANY_REFERENCES if the image usage count is maxed out.
 * @retval  VERR_PERMISSION_DENIED if the loader interface is locked down.
 * @retval  VERR_MODULE_NOT_FOUND if the image isn't loaded yet and the caller
 *          didn't supply a size for preparing it.
 *
 * @param   pDevExt     Device globals.
 * @param   pSession    Session data.
 * @param   pReq        The open request.
 */
static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
{
    int             rc;
    PSUPDRVLDRIMAGE pImage;
    void            *pv;
    size_t          cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
    SUPDRV_CHECK_SMAP_SETUP();
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
    LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImageWithEverything=%d\n", pReq->u.In.szName, pReq->u.In.cbImageWithEverything));

    /*
     * Check if we got an instance of the image already.
     */
    supdrvLdrLock(pDevExt);
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
    for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
    {
        /* Compare names by checking the terminator position first, then the bytes. */
        if (    pImage->szName[cchName] == '\0'
            &&  !memcmp(pImage->szName, pReq->u.In.szName, cchName))
        {
            /** @todo Add an _1M (or something) per session reference. */
            if (RT_LIKELY(pImage->cImgUsage < UINT32_MAX / 2U))
            {
                /** @todo check cbImageBits and cbImageWithEverything here, if they differs
                 *        that indicates that the images are different. */
                pReq->u.Out.pvImageBase   = pImage->pvImage;
                pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
                pReq->u.Out.fNativeLoader = pImage->fNative;
                supdrvLdrAddUsage(pDevExt, pSession, pImage, true /*fRing3Usage*/);
                supdrvLdrUnlock(pDevExt);
                SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
                return VINF_SUCCESS;
            }
            supdrvLdrUnlock(pDevExt);
            Log(("supdrvIOCtl_LdrOpen: Too many existing references to '%s'!\n", pReq->u.In.szName));
            return VERR_TOO_MANY_REFERENCES;
        }
    }
    /* (not found - add it!) */

    /* If the loader interface is locked down, make userland fail early */
    if (pDevExt->fLdrLockedDown)
    {
        supdrvLdrUnlock(pDevExt);
        Log(("supdrvIOCtl_LdrOpen: Not adding '%s' to image list, loader interface is locked down!\n", pReq->u.In.szName));
        return VERR_PERMISSION_DENIED;
    }

    /* Stop if caller doesn't wish to prepare loading things. */
    if (!pReq->u.In.cbImageBits)
    {
        supdrvLdrUnlock(pDevExt);
        Log(("supdrvIOCtl_LdrOpen: Returning VERR_MODULE_NOT_FOUND for '%s'!\n", pReq->u.In.szName));
        return VERR_MODULE_NOT_FOUND;
    }

    /*
     * Allocate memory.
     */
    Assert(cchName < sizeof(pImage->szName));
    pv = RTMemAllocZ(sizeof(SUPDRVLDRIMAGE));
    if (!pv)
    {
        supdrvLdrUnlock(pDevExt);
        Log(("supdrvIOCtl_LdrOpen: RTMemAllocZ() failed\n"));
        return VERR_NO_MEMORY;
    }
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);

    /*
     * Setup and link in the LDR stuff.
     */
    pImage = (PSUPDRVLDRIMAGE)pv;
    pImage->pvImage         = NULL;
    pImage->hMemObjImage    = NIL_RTR0MEMOBJ;
    pImage->cbImageWithEverything = pReq->u.In.cbImageWithEverything;
    pImage->cbImageBits     = pReq->u.In.cbImageBits;
    pImage->cSymbols        = 0;
    pImage->paSymbols       = NULL;
    pImage->pachStrTab      = NULL;
    pImage->cbStrTab        = 0;
    pImage->cSegments       = 0;
    pImage->paSegments      = NULL;
    pImage->pfnModuleInit   = NULL;
    pImage->pfnModuleTerm   = NULL;
    pImage->pfnServiceReqHandler = NULL;
    pImage->uState          = SUP_IOCTL_LDR_OPEN;
    pImage->cImgUsage       = 0; /* Increased by supdrvLdrAddUsage later */
    pImage->pDevExt         = pDevExt;
    pImage->pImageImport    = NULL;
    pImage->uMagic          = SUPDRVLDRIMAGE_MAGIC;
    pImage->pWrappedModInfo = NULL;
    memcpy(pImage->szName, pReq->u.In.szName, cchName + 1);

    /*
     * Try load it using the native loader, if that isn't supported, fall back
     * on the older method.
     */
    pImage->fNative         = true;
    rc = supdrvOSLdrOpen(pDevExt, pImage, pReq->u.In.szFilename);
    if (rc == VERR_NOT_SUPPORTED)
    {
        /* Non-native path: allocate executable memory for ring-3 to upload the bits into. */
        rc = RTR0MemObjAllocPage(&pImage->hMemObjImage, pImage->cbImageBits, true /*fExecutable*/);
        if (RT_SUCCESS(rc))
        {
            pImage->pvImage = RTR0MemObjAddress(pImage->hMemObjImage);
            pImage->fNative = false;
        }
        SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
    }
    if (RT_SUCCESS(rc))
        rc = supdrvLdrAddUsage(pDevExt, pSession, pImage, true /*fRing3Usage*/);
    if (RT_FAILURE(rc))
    {
        /* Mark the structure dead before freeing it so stale pointers are caught. */
        supdrvLdrUnlock(pDevExt);
        pImage->uMagic = SUPDRVLDRIMAGE_MAGIC_DEAD;
        RTMemFree(pImage);
        Log(("supdrvIOCtl_LdrOpen(%s): failed - %Rrc\n", pReq->u.In.szName, rc));
        return rc;
    }
    Assert(RT_VALID_PTR(pImage->pvImage) || RT_FAILURE(rc));

    /*
     * Link it.
     */
    pImage->pNext           = pDevExt->pLdrImages;
    pDevExt->pLdrImages     = pImage;

    pReq->u.Out.pvImageBase   = pImage->pvImage;
    pReq->u.Out.fNeedsLoading = true;
    pReq->u.Out.fNativeLoader = pImage->fNative;
    supdrvOSLdrNotifyOpened(pDevExt, pImage, pReq->u.In.szFilename);

    supdrvLdrUnlock(pDevExt);
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
    return VINF_SUCCESS;
}
5428
5429
5430/**
5431 * Formats a load error message.
5432 *
5433 * @returns @a rc
5434 * @param rc Return code.
5435 * @param pReq The request.
5436 * @param pszFormat The error message format string.
5437 * @param ... Argument to the format string.
5438 */
5439int VBOXCALL supdrvLdrLoadError(int rc, PSUPLDRLOAD pReq, const char *pszFormat, ...)
5440{
5441 va_list va;
5442 va_start(va, pszFormat);
5443 pReq->u.Out.uErrorMagic = SUPLDRLOAD_ERROR_MAGIC;
5444 RTStrPrintfV(pReq->u.Out.szError, sizeof(pReq->u.Out.szError), pszFormat, va);
5445 va_end(va);
5446 Log(("SUP_IOCTL_LDR_LOAD: %s [rc=%Rrc]\n", pReq->u.Out.szError, rc));
5447 return rc;
5448}
5449
5450
/**
 * Worker that validates a pointer to an image entrypoint.
 *
 * Calls supdrvLdrLoadError on error, which formats the message into the
 * request's output buffer.
 *
 * @returns IPRT status code.
 * @param   pDevExt     The device globals.
 * @param   pImage      The loader image.
 * @param   pv          The pointer into the image.
 * @param   fMayBeNull  Whether it may be NULL.
 * @param   pbImageBits The image bits prepared by ring-3.
 * @param   pszSymbol   The entrypoint name or log name.  If the symbol is
 *                      capitalized it signifies a specific symbol, otherwise it
 *                      is for logging.
 * @param   pReq        The request for passing to supdrvLdrLoadError.
 *
 * @note    Will leave the loader lock on failure!
 */
static int supdrvLdrValidatePointer(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, void *pv, bool fMayBeNull,
                                    const uint8_t *pbImageBits, const char *pszSymbol, PSUPLDRLOAD pReq)
{
    if (!fMayBeNull || pv)
    {
        uint32_t iSeg;

        /* Must be within the image bits: */
        /* Note! Unsigned subtraction: a pointer below pvImage wraps to a huge
                 RVA and is rejected by this very check too. */
        uintptr_t const uRva = (uintptr_t)pv - (uintptr_t)pImage->pvImage;
        if (uRva >= pImage->cbImageBits)
        {
            supdrvLdrUnlock(pDevExt);
            return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
                                      "Invalid entry point address %p given for %s: RVA %#zx, image size %#zx",
                                      pv, pszSymbol, uRva, pImage->cbImageBits);
        }

        /* Must be in an executable segment: */
        for (iSeg = 0; iSeg < pImage->cSegments; iSeg++)
            if (uRva - pImage->paSegments[iSeg].off < (uintptr_t)pImage->paSegments[iSeg].cb)
            {
                if (pImage->paSegments[iSeg].fProt & SUPLDR_PROT_EXEC)
                    break;
                supdrvLdrUnlock(pDevExt);
                return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
                                          "Bad entry point %p given for %s: not executable (seg #%u: %#RX32 LB %#RX32 prot %#x)",
                                          pv, pszSymbol, iSeg, pImage->paSegments[iSeg].off, pImage->paSegments[iSeg].cb,
                                          pImage->paSegments[iSeg].fProt);
            }
        /* Loop ran to completion without finding a containing segment. */
        if (iSeg >= pImage->cSegments)
        {
            supdrvLdrUnlock(pDevExt);
            return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
                                      "Bad entry point %p given for %s: no matching segment found (RVA %#zx)!",
                                      pv, pszSymbol, uRva);
        }

        /* Give the OS specific loader code a chance to inspect it as well. */
        if (pImage->fNative)
        {
            /** @todo pass pReq along to the native code. */
            int rc = supdrvOSLdrValidatePointer(pDevExt, pImage, pv, pbImageBits, pszSymbol);
            if (RT_FAILURE(rc))
            {
                supdrvLdrUnlock(pDevExt);
                return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
                                          "Bad entry point address %p for %s: rc=%Rrc\n", pv, pszSymbol, rc);
            }
        }
    }
    return VINF_SUCCESS;
}
5520
5521
/**
 * Loads the image bits.
 *
 * This is the 2nd step of the loading.
 *
 * @returns IPRT status code.
 * @param   pDevExt     Device globals.
 * @param   pSession    Session data.
 * @param   pReq        The request.
 */
static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
{
    PSUPDRVLDRUSAGE pUsage;
    PSUPDRVLDRIMAGE pImage;
    PSUPDRVLDRIMAGE pImageImport;
    int             rc;
    SUPDRV_CHECK_SMAP_SETUP();
    LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImageWithEverything=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImageWithEverything));
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);

    /*
     * Find the ldr image.
     */
    supdrvLdrLock(pDevExt);
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);

    /* The image is identified by the ring-3 mapping address prep'ed earlier. */
    pUsage = pSession->pLdrUsage;
    while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
        pUsage = pUsage->pNext;
    if (!pUsage)
    {
        supdrvLdrUnlock(pDevExt);
        return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image not found");
    }
    pImage = pUsage->pImage;

    /*
     * Validate input.
     */
    if (   pImage->cbImageWithEverything != pReq->u.In.cbImageWithEverything
        || pImage->cbImageBits != pReq->u.In.cbImageBits)
    {
        supdrvLdrUnlock(pDevExt);
        return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image size mismatch found: %u(prep) != %u(load) or %u != %u",
                                  pImage->cbImageWithEverything, pReq->u.In.cbImageWithEverything, pImage->cbImageBits, pReq->u.In.cbImageBits);
    }

    if (pImage->uState != SUP_IOCTL_LDR_OPEN)
    {
        /* Copy the state before dropping the lock so the assertion uses a stable value. */
        unsigned uState = pImage->uState;
        supdrvLdrUnlock(pDevExt);
        if (uState != SUP_IOCTL_LDR_LOAD)
            AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
        pReq->u.Out.uErrorMagic = 0;
        return VERR_ALREADY_LOADED;
    }

    /* If the loader interface is locked down, don't load new images */
    if (pDevExt->fLdrLockedDown)
    {
        supdrvLdrUnlock(pDevExt);
        return supdrvLdrLoadError(VERR_PERMISSION_DENIED, pReq, "Loader is locked down");
    }

    /*
     * If the new image is a dependant of VMMR0.r0, resolve it via the
     * caller's usage list and make sure it's in ready state.
     */
    pImageImport = NULL;
    if (pReq->u.In.fFlags & SUPLDRLOAD_F_DEP_VMMR0)
    {
        PSUPDRVLDRUSAGE pUsageDependency = pSession->pLdrUsage;
        while (pUsageDependency && pUsageDependency->pImage->pvImage != pDevExt->pvVMMR0)
            pUsageDependency = pUsageDependency->pNext;
        if (!pUsageDependency || !pDevExt->pvVMMR0)
        {
            supdrvLdrUnlock(pDevExt);
            return supdrvLdrLoadError(VERR_MODULE_NOT_FOUND, pReq, "VMMR0.r0 not loaded by session");
        }
        pImageImport = pUsageDependency->pImage;
        if (pImageImport->uState != SUP_IOCTL_LDR_LOAD)
        {
            supdrvLdrUnlock(pDevExt);
            return supdrvLdrLoadError(VERR_MODULE_NOT_FOUND, pReq, "VMMR0.r0 is not ready (state %#x)", pImageImport->uState);
        }
    }

    /*
     * Copy the segments before we start using supdrvLdrValidatePointer for entrypoint validation.
     */
    pImage->cSegments = pReq->u.In.cSegments;
    {
        size_t cbSegments = pImage->cSegments * sizeof(SUPLDRSEG);
        uint8_t const * const pbSrcImage = pReq->u.In.abImage;
        pImage->paSegments = (PSUPLDRSEG)RTMemDup(&pbSrcImage[pReq->u.In.offSegments], cbSegments);
        if (pImage->paSegments) /* Align the last segment size to avoid upsetting RTR0MemObjProtect. */ /** @todo relax RTR0MemObjProtect */
            pImage->paSegments[pImage->cSegments - 1].cb = RT_ALIGN_32(pImage->paSegments[pImage->cSegments - 1].cb, PAGE_SIZE);
        else
        {
            supdrvLdrUnlock(pDevExt);
            return supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for segment table: %#x", cbSegments);
        }
        SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
    }

    /*
     * Validate entrypoints.
     *
     * Note! supdrvLdrValidatePointer drops the loader lock on failure, which
     *       is why the bare 'return rc' statements below are correct.
     */
    switch (pReq->u.In.eEPType)
    {
        case SUPLDRLOADEP_NOTHING:
            break;

        case SUPLDRLOADEP_VMMR0:
            rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, false, pReq->u.In.abImage, "VMMR0EntryFast", pReq);
            if (RT_FAILURE(rc))
                return rc;
            rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx, false, pReq->u.In.abImage, "VMMR0EntryEx", pReq);
            if (RT_FAILURE(rc))
                return rc;

            /* Fail here if there is already a VMMR0 module. */
            if (pDevExt->pvVMMR0 != NULL)
            {
                supdrvLdrUnlock(pDevExt);
                return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "There is already a VMMR0 module loaded (%p)", pDevExt->pvVMMR0);
            }
            break;

        case SUPLDRLOADEP_SERVICE:
            rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.Service.pfnServiceReq, false, pReq->u.In.abImage, "pfnServiceReq", pReq);
            if (RT_FAILURE(rc))
                return rc;
            if (   pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
                || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
                || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
            {
                supdrvLdrUnlock(pDevExt);
                return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "apvReserved={%p,%p,%p} MBZ!",
                                          pReq->u.In.EP.Service.apvReserved[0], pReq->u.In.EP.Service.apvReserved[1],
                                          pReq->u.In.EP.Service.apvReserved[2]);
            }
            break;

        default:
            supdrvLdrUnlock(pDevExt);
            return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "Invalid eEPType=%d", pReq->u.In.eEPType);
    }

    /* ModuleInit/ModuleTerm are optional (fMayBeNull=true). */
    rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleInit, true, pReq->u.In.abImage, "ModuleInit", pReq);
    if (RT_FAILURE(rc))
        return rc;
    rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleTerm, true, pReq->u.In.abImage, "ModuleTerm", pReq);
    if (RT_FAILURE(rc))
        return rc;
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);

    /*
     * Allocate and copy the tables if non-native.
     * (No need to do try/except as this is a buffered request.)
     */
    if (!pImage->fNative)
    {
        uint8_t const * const pbSrcImage = pReq->u.In.abImage;
        pImage->cbStrTab = pReq->u.In.cbStrTab;
        if (pImage->cbStrTab)
        {
            pImage->pachStrTab = (char *)RTMemDup(&pbSrcImage[pReq->u.In.offStrTab], pImage->cbStrTab);
            if (!pImage->pachStrTab)
                rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for string table: %#x", pImage->cbStrTab);
            SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
        }

        pImage->cSymbols = pReq->u.In.cSymbols;
        if (RT_SUCCESS(rc) && pImage->cSymbols)
        {
            size_t cbSymbols = pImage->cSymbols * sizeof(SUPLDRSYM);
            pImage->paSymbols = (PSUPLDRSYM)RTMemDup(&pbSrcImage[pReq->u.In.offSymbols], cbSymbols);
            if (!pImage->paSymbols)
                rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for symbol table: %#x", cbSymbols);
            SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
        }
    }

    /*
     * Copy the bits and apply permissions / complete native loading.
     */
    if (RT_SUCCESS(rc))
    {
        pImage->uState = SUP_IOCTL_LDR_LOAD;
        pImage->pfnModuleInit = (PFNR0MODULEINIT)(uintptr_t)pReq->u.In.pfnModuleInit;
        pImage->pfnModuleTerm = (PFNR0MODULETERM)(uintptr_t)pReq->u.In.pfnModuleTerm;

        if (pImage->fNative)
            rc = supdrvOSLdrLoad(pDevExt, pImage, pReq->u.In.abImage, pReq);
        else
        {
            uint32_t i;
            memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);

            /* Apply the per-segment protections; best effort where the host
               doesn't support RTR0MemObjProtect (VERR_NOT_SUPPORTED). */
            for (i = 0; i < pImage->cSegments; i++)
            {
                rc = RTR0MemObjProtect(pImage->hMemObjImage, pImage->paSegments[i].off, pImage->paSegments[i].cb,
                                       pImage->paSegments[i].fProt);
                if (RT_SUCCESS(rc))
                    continue;
                if (rc == VERR_NOT_SUPPORTED)
                    rc = VINF_SUCCESS;
                else
                    rc = supdrvLdrLoadError(rc, pReq, "RTR0MemObjProtect failed on seg#%u %#RX32 LB %#RX32 fProt=%#x",
                                            i, pImage->paSegments[i].off, pImage->paSegments[i].cb, pImage->paSegments[i].fProt);
                break;
            }
            Log(("vboxdrv: Loaded '%s' at %p\n", pImage->szName, pImage->pvImage));
        }
        SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
    }

    /*
     * On success call the module initialization.
     */
    LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
    if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
    {
        Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
        /* Record the initializing image + thread in the device extension while
           the callback runs, clearing both again afterwards. */
        pDevExt->pLdrInitImage  = pImage;
        pDevExt->hLdrInitThread = RTThreadNativeSelf();
        SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
        rc = pImage->pfnModuleInit(pImage);
        SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
        pDevExt->pLdrInitImage  = NULL;
        pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
        if (RT_FAILURE(rc))
            supdrvLdrLoadError(rc, pReq, "ModuleInit failed: %Rrc", rc);
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Publish any standard entry points.
         */
        switch (pReq->u.In.eEPType)
        {
            case SUPLDRLOADEP_VMMR0:
                Assert(!pDevExt->pvVMMR0);
                Assert(!pDevExt->pfnVMMR0EntryFast);
                Assert(!pDevExt->pfnVMMR0EntryEx);
                ASMAtomicWritePtrVoid(&pDevExt->pvVMMR0, pImage->pvImage);
                ASMAtomicWritePtrVoid((void * volatile *)(uintptr_t)&pDevExt->pfnVMMR0EntryFast,
                                      (void *)(uintptr_t) pReq->u.In.EP.VMMR0.pvVMMR0EntryFast);
                ASMAtomicWritePtrVoid((void * volatile *)(uintptr_t)&pDevExt->pfnVMMR0EntryEx,
                                      (void *)(uintptr_t) pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
                break;
            case SUPLDRLOADEP_SERVICE:
                pImage->pfnServiceReqHandler = (PFNSUPR0SERVICEREQHANDLER)(uintptr_t)pReq->u.In.EP.Service.pfnServiceReq;
                break;
            default:
                break;
        }

        /*
         * Increase the usage counter of any imported image.
         */
        if (pImageImport)
        {
            pImageImport->cImgUsage++;
            /* First cross-image reference to a wrapped module: retain the wrapper. */
            if (pImageImport->cImgUsage == 2 && pImageImport->pWrappedModInfo)
                supdrvOSLdrRetainWrapperModule(pDevExt, pImageImport);
            pImage->pImageImport = pImageImport;
        }

        /*
         * Done!
         */
        SUPR0Printf("vboxdrv: %RKv %s\n", pImage->pvImage, pImage->szName);
        pReq->u.Out.uErrorMagic = 0;
        pReq->u.Out.szError[0] = '\0';
    }
    else
    {
        /* Inform the tracing component in case ModuleInit registered TPs. */
        supdrvTracerModuleUnloading(pDevExt, pImage);

        /* Roll the image back to the 'opened' state and free the tables
           allocated above, so the image can be freed or reloaded. */
        pImage->uState = SUP_IOCTL_LDR_OPEN;
        pImage->pfnModuleInit = NULL;
        pImage->pfnModuleTerm = NULL;
        pImage->pfnServiceReqHandler= NULL;
        pImage->cbStrTab = 0;
        RTMemFree(pImage->pachStrTab);
        pImage->pachStrTab = NULL;
        RTMemFree(pImage->paSymbols);
        pImage->paSymbols = NULL;
        pImage->cSymbols = 0;
    }

    supdrvLdrUnlock(pDevExt);
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
    return rc;
}
5820
5821
5822/**
5823 * Registers a .r0 module wrapped in a native one and manually loaded.
5824 *
5825 * @returns VINF_SUCCESS or error code (no info statuses).
5826 * @param pDevExt Device globals.
5827 * @param pWrappedModInfo The wrapped module info.
5828 * @param pvNative OS specific information.
5829 * @param phMod Where to store the module handle.
5830 */
5831int VBOXCALL supdrvLdrRegisterWrappedModule(PSUPDRVDEVEXT pDevExt, PCSUPLDRWRAPPEDMODULE pWrappedModInfo,
5832 void *pvNative, void **phMod)
5833{
5834 size_t cchName;
5835 PSUPDRVLDRIMAGE pImage;
5836 PCSUPLDRWRAPMODSYMBOL paSymbols;
5837 uint16_t idx;
5838 const char *pszPrevSymbol;
5839 int rc;
5840 SUPDRV_CHECK_SMAP_SETUP();
5841 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5842
5843 /*
5844 * Validate input.
5845 */
5846 AssertPtrReturn(phMod, VERR_INVALID_POINTER);
5847 *phMod = NULL;
5848 AssertPtrReturn(pDevExt, VERR_INTERNAL_ERROR_2);
5849
5850 AssertPtrReturn(pWrappedModInfo, VERR_INVALID_POINTER);
5851 AssertMsgReturn(pWrappedModInfo->uMagic == SUPLDRWRAPPEDMODULE_MAGIC,
5852 ("uMagic=%#x, expected %#x\n", pWrappedModInfo->uMagic, SUPLDRWRAPPEDMODULE_MAGIC),
5853 VERR_INVALID_MAGIC);
5854 AssertMsgReturn(pWrappedModInfo->uVersion == SUPLDRWRAPPEDMODULE_VERSION,
5855 ("Unsupported uVersion=%#x, current version %#x\n", pWrappedModInfo->uVersion, SUPLDRWRAPPEDMODULE_VERSION),
5856 VERR_VERSION_MISMATCH);
5857 AssertMsgReturn(pWrappedModInfo->uEndMagic == SUPLDRWRAPPEDMODULE_MAGIC,
5858 ("uEndMagic=%#x, expected %#x\n", pWrappedModInfo->uEndMagic, SUPLDRWRAPPEDMODULE_MAGIC),
5859 VERR_INVALID_MAGIC);
5860 AssertMsgReturn(pWrappedModInfo->fFlags <= SUPLDRWRAPPEDMODULE_F_VMMR0, ("Unknown flags in: %#x\n", pWrappedModInfo->fFlags),
5861 VERR_INVALID_FLAGS);
5862
5863 /* szName: */
5864 AssertReturn(RTStrEnd(pWrappedModInfo->szName, sizeof(pWrappedModInfo->szName)) != NULL, VERR_INVALID_NAME);
5865 AssertReturn(supdrvIsLdrModuleNameValid(pWrappedModInfo->szName), VERR_INVALID_NAME);
5866 AssertCompile(sizeof(pImage->szName) == sizeof(pWrappedModInfo->szName));
5867 cchName = strlen(pWrappedModInfo->szName);
5868
5869 /* Image range: */
5870 AssertPtrReturn(pWrappedModInfo->pvImageStart, VERR_INVALID_POINTER);
5871 AssertPtrReturn(pWrappedModInfo->pvImageEnd, VERR_INVALID_POINTER);
5872 AssertReturn((uintptr_t)pWrappedModInfo->pvImageEnd > (uintptr_t)pWrappedModInfo->pvImageStart, VERR_INVALID_PARAMETER);
5873
5874 /* Symbol table: */
5875 AssertMsgReturn(pWrappedModInfo->cSymbols <= _8K, ("Too many symbols: %u, max 8192\n", pWrappedModInfo->cSymbols),
5876 VERR_TOO_MANY_SYMLINKS);
5877 pszPrevSymbol = "\x7f";
5878 paSymbols = pWrappedModInfo->paSymbols;
5879 idx = pWrappedModInfo->cSymbols;
5880 while (idx-- > 0)
5881 {
5882 const char *pszSymbol = paSymbols[idx].pszSymbol;
5883 AssertMsgReturn(RT_VALID_PTR(pszSymbol) && RT_VALID_PTR(paSymbols[idx].pfnValue),
5884 ("paSymbols[%u]: %p/%p\n", idx, pszSymbol, paSymbols[idx].pfnValue),
5885 VERR_INVALID_POINTER);
5886 AssertReturn(*pszSymbol != '\0', VERR_EMPTY_STRING);
5887 AssertMsgReturn(strcmp(pszSymbol, pszPrevSymbol) < 0,
5888 ("symbol table out of order at index %u: '%s' vs '%s'\n", idx, pszSymbol, pszPrevSymbol),
5889 VERR_WRONG_ORDER);
5890 pszPrevSymbol = pszSymbol;
5891 }
5892
5893 /* Standard entry points: */
5894 AssertPtrNullReturn(pWrappedModInfo->pfnModuleInit, VERR_INVALID_POINTER);
5895 AssertPtrNullReturn(pWrappedModInfo->pfnModuleTerm, VERR_INVALID_POINTER);
5896 AssertReturn((uintptr_t)pWrappedModInfo->pfnModuleInit != (uintptr_t)pWrappedModInfo->pfnModuleTerm || pWrappedModInfo->pfnModuleInit == NULL,
5897 VERR_INVALID_PARAMETER);
5898 if (pWrappedModInfo->fFlags & SUPLDRWRAPPEDMODULE_F_VMMR0)
5899 {
5900 AssertReturn(pWrappedModInfo->pfnServiceReqHandler == NULL, VERR_INVALID_PARAMETER);
5901 AssertPtrReturn(pWrappedModInfo->pfnVMMR0EntryFast, VERR_INVALID_POINTER);
5902 AssertPtrReturn(pWrappedModInfo->pfnVMMR0EntryEx, VERR_INVALID_POINTER);
5903 AssertReturn(pWrappedModInfo->pfnVMMR0EntryFast != pWrappedModInfo->pfnVMMR0EntryEx, VERR_INVALID_PARAMETER);
5904 }
5905 else
5906 {
5907 AssertPtrNullReturn(pWrappedModInfo->pfnServiceReqHandler, VERR_INVALID_POINTER);
5908 AssertReturn(pWrappedModInfo->pfnVMMR0EntryFast == NULL, VERR_INVALID_PARAMETER);
5909 AssertReturn(pWrappedModInfo->pfnVMMR0EntryEx == NULL, VERR_INVALID_PARAMETER);
5910 }
5911
5912 /*
5913 * Check if we got an instance of the image already.
5914 */
5915 supdrvLdrLock(pDevExt);
5916 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5917 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5918 {
5919 if ( pImage->szName[cchName] == '\0'
5920 && !memcmp(pImage->szName, pWrappedModInfo->szName, cchName))
5921 {
5922 supdrvLdrUnlock(pDevExt);
5923 Log(("supdrvLdrRegisterWrappedModule: '%s' already loaded!\n", pWrappedModInfo->szName));
5924 return VERR_ALREADY_LOADED;
5925 }
5926 }
5927 /* (not found - add it!) */
5928
5929 /* If the loader interface is locked down, make userland fail early */
5930 if (pDevExt->fLdrLockedDown)
5931 {
5932 supdrvLdrUnlock(pDevExt);
5933 Log(("supdrvLdrRegisterWrappedModule: Not adding '%s' to image list, loader interface is locked down!\n", pWrappedModInfo->szName));
5934 return VERR_PERMISSION_DENIED;
5935 }
5936
5937 /* Only one VMMR0: */
5938 if ( pDevExt->pvVMMR0 != NULL
5939 && (pWrappedModInfo->fFlags & SUPLDRWRAPPEDMODULE_F_VMMR0))
5940 {
5941 supdrvLdrUnlock(pDevExt);
5942 Log(("supdrvLdrRegisterWrappedModule: Rejecting '%s' as we already got a VMMR0 module!\n", pWrappedModInfo->szName));
5943 return VERR_ALREADY_EXISTS;
5944 }
5945
5946 /*
5947 * Allocate memory.
5948 */
5949 Assert(cchName < sizeof(pImage->szName));
5950 pImage = (PSUPDRVLDRIMAGE)RTMemAllocZ(sizeof(SUPDRVLDRIMAGE));
5951 if (!pImage)
5952 {
5953 supdrvLdrUnlock(pDevExt);
5954 Log(("supdrvLdrRegisterWrappedModule: RTMemAllocZ() failed\n"));
5955 return VERR_NO_MEMORY;
5956 }
5957 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5958
5959 /*
5960 * Setup and link in the LDR stuff.
5961 */
5962 pImage->pvImage = (void *)pWrappedModInfo->pvImageStart;
5963 pImage->hMemObjImage = NIL_RTR0MEMOBJ;
5964 pImage->cbImageWithEverything
5965 = pImage->cbImageBits = (uintptr_t)pWrappedModInfo->pvImageEnd - (uintptr_t)pWrappedModInfo->pvImageStart;
5966 pImage->cSymbols = 0;
5967 pImage->paSymbols = NULL;
5968 pImage->pachStrTab = NULL;
5969 pImage->cbStrTab = 0;
5970 pImage->cSegments = 0;
5971 pImage->paSegments = NULL;
5972 pImage->pfnModuleInit = pWrappedModInfo->pfnModuleInit;
5973 pImage->pfnModuleTerm = pWrappedModInfo->pfnModuleTerm;
5974 pImage->pfnServiceReqHandler = NULL; /* Only setting this after module init */
5975 pImage->uState = SUP_IOCTL_LDR_LOAD;
5976 pImage->cImgUsage = 1; /* Held by the wrapper module till unload. */
5977 pImage->pDevExt = pDevExt;
5978 pImage->pImageImport = NULL;
5979 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC;
5980 pImage->pWrappedModInfo = pWrappedModInfo;
5981 pImage->pvWrappedNative = pvNative;
5982 pImage->fNative = true;
5983 memcpy(pImage->szName, pWrappedModInfo->szName, cchName + 1);
5984
5985 /*
5986 * Link it.
5987 */
5988 pImage->pNext = pDevExt->pLdrImages;
5989 pDevExt->pLdrImages = pImage;
5990
5991 /*
5992 * Call module init function if found.
5993 */
5994 rc = VINF_SUCCESS;
5995 if (pImage->pfnModuleInit)
5996 {
5997 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
5998 pDevExt->pLdrInitImage = pImage;
5999 pDevExt->hLdrInitThread = RTThreadNativeSelf();
6000 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
6001 rc = pImage->pfnModuleInit(pImage);
6002 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
6003 pDevExt->pLdrInitImage = NULL;
6004 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
6005 }
6006 if (RT_SUCCESS(rc))
6007 {
6008 /*
6009 * Update entry points.
6010 */
6011 if (pWrappedModInfo->fFlags & SUPLDRWRAPPEDMODULE_F_VMMR0)
6012 {
6013 Assert(!pDevExt->pvVMMR0);
6014 Assert(!pDevExt->pfnVMMR0EntryFast);
6015 Assert(!pDevExt->pfnVMMR0EntryEx);
6016 ASMAtomicWritePtrVoid(&pDevExt->pvVMMR0, pImage->pvImage);
6017 ASMAtomicWritePtrVoid((void * volatile *)(uintptr_t)&pDevExt->pfnVMMR0EntryFast,
6018 (void *)(uintptr_t) pWrappedModInfo->pfnVMMR0EntryFast);
6019 ASMAtomicWritePtrVoid((void * volatile *)(uintptr_t)&pDevExt->pfnVMMR0EntryEx,
6020 (void *)(uintptr_t) pWrappedModInfo->pfnVMMR0EntryEx);
6021 }
6022 else
6023 pImage->pfnServiceReqHandler = pWrappedModInfo->pfnServiceReqHandler;
6024#ifdef IN_RING3
6025# error "WTF?"
6026#endif
6027 *phMod = pImage;
6028 }
6029 else
6030 {
6031 /*
6032 * Module init failed - bail, no module term callout.
6033 */
6034 SUPR0Printf("ModuleInit failed for '%s': %Rrc\n", pImage->szName, rc);
6035
6036 pImage->pfnModuleTerm = NULL;
6037 pImage->uState = SUP_IOCTL_LDR_OPEN;
6038 supdrvLdrFree(pDevExt, pImage);
6039 }
6040
6041 supdrvLdrUnlock(pDevExt);
6042 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
6043 return VINF_SUCCESS;
6044}
6045
6046
6047/**
6048 * Decrements SUPDRVLDRIMAGE::cImgUsage when two or greater.
6049 *
6050 * @param pDevExt Device globals.
6051 * @param pImage The image.
6052 * @param cReference Number of references being removed.
6053 */
6054DECLINLINE(void) supdrvLdrSubtractUsage(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, uint32_t cReference)
6055{
6056 Assert(cReference > 0);
6057 Assert(pImage->cImgUsage > cReference);
6058 pImage->cImgUsage -= cReference;
6059 if (pImage->cImgUsage == 1 && pImage->pWrappedModInfo)
6060 supdrvOSLdrReleaseWrapperModule(pDevExt, pImage);
6061}
6062
6063
/**
 * Frees a previously loaded (prep'ed) image.
 *
 * @returns IPRT status code.
 * @param   pDevExt     Device globals.
 * @param   pSession    Session data.
 * @param   pReq        The request.
 */
static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
{
    int rc;
    PSUPDRVLDRUSAGE pUsagePrev;
    PSUPDRVLDRUSAGE pUsage;
    PSUPDRVLDRIMAGE pImage;
    LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));

    /*
     * Find the ldr image.
     */
    supdrvLdrLock(pDevExt);
    /* Track the predecessor so we can unlink the usage record below. */
    pUsagePrev = NULL;
    pUsage = pSession->pLdrUsage;
    while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
    {
        pUsagePrev = pUsage;
        pUsage = pUsage->pNext;
    }
    if (!pUsage)
    {
        supdrvLdrUnlock(pDevExt);
        Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
        return VERR_INVALID_HANDLE;
    }
    if (pUsage->cRing3Usage == 0)
    {
        supdrvLdrUnlock(pDevExt);
        Log(("SUP_IOCTL_LDR_FREE: No ring-3 reference to the image!\n"));
        return VERR_CALLER_NO_REFERENCE;
    }

    /*
     * Check if we can remove anything.
     */
    rc = VINF_SUCCESS;
    pImage = pUsage->pImage;
    Log(("SUP_IOCTL_LDR_FREE: pImage=%p %s cImgUsage=%d r3=%d r0=%u\n",
         pImage, pImage->szName, pImage->cImgUsage, pUsage->cRing3Usage, pUsage->cRing0Usage));
    if (pImage->cImgUsage <= 1 || pUsage->cRing3Usage + pUsage->cRing0Usage <= 1)
    {
        /*
         * Check if there are any objects with destructors in the image, if
         * so leave it for the session cleanup routine so we get a chance to
         * clean things up in the right order and not leave them all dangling.
         *
         * The "destructor - pvImage < cbImageBits" comparisons below rely on
         * unsigned wrap-around to test "destructor lies within the image" in
         * a single compare.
         */
        RTSpinlockAcquire(pDevExt->Spinlock);
        if (pImage->cImgUsage <= 1)
        {
            /* Last reference overall: scan the global object list. */
            PSUPDRVOBJ pObj;
            for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
                if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
                {
                    rc = VERR_DANGLING_OBJECTS;
                    break;
                }
        }
        else
        {
            /* Other images still reference it: scanning this session's usage records suffices. */
            PSUPDRVUSAGE pGenUsage;
            for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
                if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
                {
                    rc = VERR_DANGLING_OBJECTS;
                    break;
                }
        }
        RTSpinlockRelease(pDevExt->Spinlock);
        if (rc == VINF_SUCCESS)
        {
            /* unlink it */
            if (pUsagePrev)
                pUsagePrev->pNext = pUsage->pNext;
            else
                pSession->pLdrUsage = pUsage->pNext;

            /* free it */
            pUsage->pImage = NULL;
            pUsage->pNext = NULL;
            RTMemFree(pUsage);

            /*
             * Dereference the image.
             */
            if (pImage->cImgUsage <= 1)
                supdrvLdrFree(pDevExt, pImage);
            else
                supdrvLdrSubtractUsage(pDevExt, pImage, 1);
        }
        else
            Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
    }
    else
    {
        /*
         * Dereference both image and usage.
         */
        pUsage->cRing3Usage--;
        supdrvLdrSubtractUsage(pDevExt, pImage, 1);
    }

    supdrvLdrUnlock(pDevExt);
    return rc;
}
6176
6177
/**
 * Deregisters a wrapped .r0 module.
 *
 * Blocks (polling once a second) until the image usage count drops to the
 * single reference held by the wrapper module itself, then frees the image.
 *
 * @returns VBox status code (VINF_SUCCESS, or a validation error code).
 * @param   pDevExt             Device globals.
 * @param   pWrappedModInfo     The wrapped module info.
 * @param   phMod               Where to store the module is stored (NIL'ed on
 *                              success).
 */
int VBOXCALL supdrvLdrDeregisterWrappedModule(PSUPDRVDEVEXT pDevExt, PCSUPLDRWRAPPEDMODULE pWrappedModInfo, void **phMod)
{
    PSUPDRVLDRIMAGE pImage;
    uint32_t cSleeps;

    /*
     * Validate input.
     */
    AssertPtrReturn(pWrappedModInfo, VERR_INVALID_POINTER);
    AssertMsgReturn(pWrappedModInfo->uMagic == SUPLDRWRAPPEDMODULE_MAGIC,
                    ("uMagic=%#x, expected %#x\n", pWrappedModInfo->uMagic, SUPLDRWRAPPEDMODULE_MAGIC),
                    VERR_INVALID_MAGIC);
    AssertMsgReturn(pWrappedModInfo->uEndMagic == SUPLDRWRAPPEDMODULE_MAGIC,
                    ("uEndMagic=%#x, expected %#x\n", pWrappedModInfo->uEndMagic, SUPLDRWRAPPEDMODULE_MAGIC),
                    VERR_INVALID_MAGIC);

    AssertPtrReturn(phMod, VERR_INVALID_POINTER);
    pImage = *(PSUPDRVLDRIMAGE *)phMod;
    if (!pImage)
        return VINF_SUCCESS; /* Nothing registered (or already deregistered) - nothing to do. */
    AssertPtrReturn(pImage, VERR_INVALID_POINTER);
    AssertMsgReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, ("pImage=%p uMagic=%#x\n", pImage, pImage->uMagic),
                    VERR_INVALID_MAGIC);
    AssertMsgReturn(pImage->pvImage == pWrappedModInfo->pvImageStart,
                    ("pWrappedModInfo(%p)->pvImageStart=%p vs. pImage(=%p)->pvImage=%p\n",
                     pWrappedModInfo, pWrappedModInfo->pvImageStart, pImage, pImage->pvImage),
                    VERR_MISMATCH);

    AssertPtrReturn(pDevExt, VERR_INVALID_POINTER);

    /*
     * Try free it, but first we have to wait for its usage count to reach 1 (our).
     */
    supdrvLdrLock(pDevExt);
    for (cSleeps = 0; ; cSleeps++)
    {
        PSUPDRVLDRIMAGE pCur;

        /* Check that the image is in the list. */
        for (pCur = pDevExt->pLdrImages; pCur; pCur = pCur->pNext)
            if (pCur == pImage)
                break;
        AssertBreak(pCur == pImage);

        /* Anyone still using it? */
        if (pImage->cImgUsage <= 1)
            break;

        /* Someone is using it, wait and check again. */
        /* (Nag in the log roughly once a minute while waiting.) */
        if (!(cSleeps % 60))
            SUPR0Printf("supdrvLdrUnregisterWrappedModule: Still %u users of wrapped image '%s' ...\n",
                        pImage->cImgUsage, pImage->szName);
        /* Drop the lock while sleeping so the other users can actually release their references. */
        supdrvLdrUnlock(pDevExt);
        RTThreadSleep(1000);
        supdrvLdrLock(pDevExt);
    }

    /* We're the last 'user', free it. */
    supdrvLdrFree(pDevExt, pImage);

    supdrvLdrUnlock(pDevExt);

    *phMod = NULL;
    return VINF_SUCCESS;
}
6251
6252
6253/**
6254 * Lock down the image loader interface.
6255 *
6256 * @returns IPRT status code.
6257 * @param pDevExt Device globals.
6258 */
6259static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt)
6260{
6261 LogFlow(("supdrvIOCtl_LdrLockDown:\n"));
6262
6263 supdrvLdrLock(pDevExt);
6264 if (!pDevExt->fLdrLockedDown)
6265 {
6266 pDevExt->fLdrLockedDown = true;
6267 Log(("supdrvIOCtl_LdrLockDown: Image loader interface locked down\n"));
6268 }
6269 supdrvLdrUnlock(pDevExt);
6270
6271 return VINF_SUCCESS;
6272}
6273
6274
6275/**
6276 * Worker for getting the address of a symbol in an image.
6277 *
6278 * @returns IPRT status code.
6279 * @param pDevExt Device globals.
6280 * @param pImage The image to search.
6281 * @param pszSymbol The symbol name.
6282 * @param cchSymbol The length of the symbol name.
6283 * @param ppvValue Where to return the symbol
6284 * @note Caller owns the loader lock.
6285 */
6286static int supdrvLdrQuerySymbolWorker(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage,
6287 const char *pszSymbol, size_t cchSymbol, void **ppvValue)
6288{
6289 int rc = VERR_SYMBOL_NOT_FOUND;
6290 if (pImage->fNative && !pImage->pWrappedModInfo)
6291 rc = supdrvOSLdrQuerySymbol(pDevExt, pImage, pszSymbol, cchSymbol, ppvValue);
6292 else if (pImage->fNative && pImage->pWrappedModInfo)
6293 {
6294 PCSUPLDRWRAPMODSYMBOL paSymbols = pImage->pWrappedModInfo->paSymbols;
6295 uint32_t iEnd = pImage->pWrappedModInfo->cSymbols;
6296 uint32_t iStart = 0;
6297 while (iStart < iEnd)
6298 {
6299 uint32_t const i = iStart + (iEnd - iStart) / 2;
6300 int const iDiff = strcmp(paSymbols[i].pszSymbol, pszSymbol);
6301 if (iDiff < 0)
6302 iStart = i + 1;
6303 else if (iDiff > 0)
6304 iEnd = i;
6305 else
6306 {
6307 *ppvValue = (void *)(uintptr_t)paSymbols[i].pfnValue;
6308 rc = VINF_SUCCESS;
6309 break;
6310 }
6311 }
6312#ifdef VBOX_STRICT
6313 if (rc != VINF_SUCCESS)
6314 for (iStart = 0, iEnd = pImage->pWrappedModInfo->cSymbols; iStart < iEnd; iStart++)
6315 Assert(strcmp(paSymbols[iStart].pszSymbol, pszSymbol));
6316#endif
6317 }
6318 else
6319 {
6320 const char *pchStrings = pImage->pachStrTab;
6321 PSUPLDRSYM paSyms = pImage->paSymbols;
6322 uint32_t i;
6323 Assert(!pImage->pWrappedModInfo);
6324 for (i = 0; i < pImage->cSymbols; i++)
6325 {
6326 if ( paSyms[i].offName + cchSymbol + 1 <= pImage->cbStrTab
6327 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cchSymbol + 1))
6328 {
6329 /*
6330 * Note! The int32_t is for native loading on solaris where the data
6331 * and text segments are in very different places.
6332 */
6333 *ppvValue = (uint8_t *)pImage->pvImage + (int32_t)paSyms[i].offSymbol;
6334 rc = VINF_SUCCESS;
6335 break;
6336 }
6337 }
6338 }
6339 return rc;
6340}
6341
6342
6343/**
6344 * Queries the address of a symbol in an open image.
6345 *
6346 * @returns IPRT status code.
6347 * @param pDevExt Device globals.
6348 * @param pSession Session data.
6349 * @param pReq The request buffer.
6350 */
6351static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
6352{
6353 PSUPDRVLDRIMAGE pImage;
6354 PSUPDRVLDRUSAGE pUsage;
6355 const size_t cchSymbol = strlen(pReq->u.In.szSymbol);
6356 void *pvSymbol = NULL;
6357 int rc;
6358 Log3(("supdrvIOCtl_LdrQuerySymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
6359
6360 /*
6361 * Find the ldr image.
6362 */
6363 supdrvLdrLock(pDevExt);
6364
6365 pUsage = pSession->pLdrUsage;
6366 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
6367 pUsage = pUsage->pNext;
6368 if (pUsage)
6369 {
6370 pImage = pUsage->pImage;
6371 if (pImage->uState == SUP_IOCTL_LDR_LOAD)
6372 {
6373 /*
6374 * Search the image exports / symbol strings.
6375 */
6376 rc = supdrvLdrQuerySymbolWorker(pDevExt, pImage, pReq->u.In.szSymbol, cchSymbol, &pvSymbol);
6377 }
6378 else
6379 {
6380 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", pImage->uState, pImage->uState));
6381 rc = VERR_WRONG_ORDER;
6382 }
6383 }
6384 else
6385 {
6386 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
6387 rc = VERR_INVALID_HANDLE;
6388 }
6389
6390 supdrvLdrUnlock(pDevExt);
6391
6392 pReq->u.Out.pvSymbol = pvSymbol;
6393 return rc;
6394}
6395
6396
6397/**
6398 * Gets the address of a symbol in an open image or the support driver.
6399 *
6400 * @returns VBox status code.
6401 * @param pDevExt Device globals.
6402 * @param pSession Session data.
6403 * @param pReq The request buffer.
6404 */
6405static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
6406{
6407 const char *pszSymbol = pReq->u.In.pszSymbol;
6408 const char *pszModule = pReq->u.In.pszModule;
6409 size_t cchSymbol;
6410 char const *pszEnd;
6411 uint32_t i;
6412 int rc;
6413
6414 /*
6415 * Input validation.
6416 */
6417 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
6418 pszEnd = RTStrEnd(pszSymbol, 512);
6419 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
6420 cchSymbol = pszEnd - pszSymbol;
6421
6422 if (pszModule)
6423 {
6424 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
6425 pszEnd = RTStrEnd(pszModule, 64);
6426 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
6427 }
6428 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
6429
6430 if ( !pszModule
6431 || !strcmp(pszModule, "SupDrv"))
6432 {
6433 /*
6434 * Search the support driver export table.
6435 */
6436 rc = VERR_SYMBOL_NOT_FOUND;
6437 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
6438 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
6439 {
6440 pReq->u.Out.pfnSymbol = (PFNRT)(uintptr_t)g_aFunctions[i].pfn;
6441 rc = VINF_SUCCESS;
6442 break;
6443 }
6444 }
6445 else
6446 {
6447 /*
6448 * Find the loader image.
6449 */
6450 PSUPDRVLDRIMAGE pImage;
6451
6452 supdrvLdrLock(pDevExt);
6453
6454 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
6455 if (!strcmp(pImage->szName, pszModule))
6456 break;
6457 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
6458 {
6459 /*
6460 * Search the image exports / symbol strings. Do usage counting on the session.
6461 */
6462 rc = supdrvLdrQuerySymbolWorker(pDevExt, pImage, pszSymbol, cchSymbol, (void **)&pReq->u.Out.pfnSymbol);
6463 if (RT_SUCCESS(rc))
6464 rc = supdrvLdrAddUsage(pDevExt, pSession, pImage, true /*fRing3Usage*/);
6465 }
6466 else
6467 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
6468
6469 supdrvLdrUnlock(pDevExt);
6470 }
6471 return rc;
6472}
6473
6474
6475/**
6476 * Looks up a symbol in g_aFunctions
6477 *
6478 * @returns VINF_SUCCESS on success, VERR_SYMBOL_NOT_FOUND on failure.
6479 * @param pszSymbol The symbol to look up.
6480 * @param puValue Where to return the value.
6481 */
6482int VBOXCALL supdrvLdrGetExportedSymbol(const char *pszSymbol, uintptr_t *puValue)
6483{
6484 uint32_t i;
6485 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
6486 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
6487 {
6488 *puValue = (uintptr_t)g_aFunctions[i].pfn;
6489 return VINF_SUCCESS;
6490 }
6491
6492 if (!strcmp(pszSymbol, "g_SUPGlobalInfoPage"))
6493 {
6494 *puValue = (uintptr_t)g_pSUPGlobalInfoPage;
6495 return VINF_SUCCESS;
6496 }
6497
6498 return VERR_SYMBOL_NOT_FOUND;
6499}
6500
6501
6502/**
6503 * Adds a usage reference in the specified session of an image.
6504 *
6505 * Called while owning the loader semaphore.
6506 *
6507 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
6508 * @param pDevExt Pointer to device extension.
6509 * @param pSession Session in question.
6510 * @param pImage Image which the session is using.
6511 * @param fRing3Usage Set if it's ring-3 usage, clear if ring-0.
6512 */
6513static int supdrvLdrAddUsage(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage, bool fRing3Usage)
6514{
6515 PSUPDRVLDRUSAGE pUsage;
6516 LogFlow(("supdrvLdrAddUsage: pImage=%p %d\n", pImage, fRing3Usage));
6517
6518 /*
6519 * Referenced it already?
6520 */
6521 pUsage = pSession->pLdrUsage;
6522 while (pUsage)
6523 {
6524 if (pUsage->pImage == pImage)
6525 {
6526 if (fRing3Usage)
6527 pUsage->cRing3Usage++;
6528 else
6529 pUsage->cRing0Usage++;
6530 Assert(pImage->cImgUsage > 1 || !pImage->pWrappedModInfo);
6531 pImage->cImgUsage++;
6532 return VINF_SUCCESS;
6533 }
6534 pUsage = pUsage->pNext;
6535 }
6536
6537 /*
6538 * Allocate new usage record.
6539 */
6540 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
6541 AssertReturn(pUsage, VERR_NO_MEMORY);
6542 pUsage->cRing3Usage = fRing3Usage ? 1 : 0;
6543 pUsage->cRing0Usage = fRing3Usage ? 0 : 1;
6544 pUsage->pImage = pImage;
6545 pUsage->pNext = pSession->pLdrUsage;
6546 pSession->pLdrUsage = pUsage;
6547
6548 /*
6549 * Wrapped modules needs to retain a native module reference.
6550 */
6551 pImage->cImgUsage++;
6552 if (pImage->cImgUsage == 2 && pImage->pWrappedModInfo)
6553 supdrvOSLdrRetainWrapperModule(pDevExt, pImage);
6554
6555 return VINF_SUCCESS;
6556}
6557
6558
/**
 * Frees a load image.
 *
 * Unlinks the image from the device list, runs its termination callback,
 * notifies the tracer and the native loader glue, and finally releases all
 * memory.  If the image has an import image (pImageImport), the loop tail
 * frees that too when this was its last reference.
 *
 * @param   pDevExt     Pointer to device extension.
 * @param   pImage      Pointer to the image we're gonna free.
 *                      This image must exist!
 * @remark  The caller MUST own SUPDRVDEVEXT::mtxLdr!
 */
static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
{
    unsigned cLoops;
    /* The loop exists solely to process the import image chain; images are
       observed to have at most one import here (see AssertBreak below). */
    for (cLoops = 0; ; cLoops++)
    {
        PSUPDRVLDRIMAGE pImagePrev;
        PSUPDRVLDRIMAGE pImageImport;
        LogFlow(("supdrvLdrFree: pImage=%p %s [loop %u]\n", pImage, pImage->szName, cLoops));
        AssertBreak(cLoops < 2);

        /*
         * Warn if we're releasing images while the image loader interface is
         * locked down -- we won't be able to reload them!
         */
        if (pDevExt->fLdrLockedDown)
            Log(("supdrvLdrFree: Warning: unloading '%s' image, while loader interface is locked down!\n", pImage->szName));

        /* find it - arg. should've used doubly linked list. */
        Assert(pDevExt->pLdrImages);
        pImagePrev = NULL;
        if (pDevExt->pLdrImages != pImage)
        {
            pImagePrev = pDevExt->pLdrImages;
            while (pImagePrev->pNext != pImage)
                pImagePrev = pImagePrev->pNext;
            Assert(pImagePrev->pNext == pImage);
        }

        /* unlink */
        if (pImagePrev)
            pImagePrev->pNext = pImage->pNext;
        else
            pDevExt->pLdrImages = pImage->pNext;

        /* check if this is VMMR0.r0 unset its entry point pointers. */
        if (pDevExt->pvVMMR0 == pImage->pvImage)
        {
            pDevExt->pvVMMR0 = NULL;
            pDevExt->pfnVMMR0EntryFast = NULL;
            pDevExt->pfnVMMR0EntryEx = NULL;
        }

        /* check for objects with destructors in this image. (Shouldn't happen.) */
        if (pDevExt->pObjs)
        {
            unsigned cObjs = 0;
            PSUPDRVOBJ pObj;
            RTSpinlockAcquire(pDevExt->Spinlock);
            for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
                /* Destructor address inside the image range?  Clear it so we
                   don't call into memory that is about to be freed. */
                if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
                {
                    pObj->pfnDestructor = NULL;
                    cObjs++;
                }
            RTSpinlockRelease(pDevExt->Spinlock);
            if (cObjs)
                OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
        }

        /* call termination function if fully loaded. */
        if (    pImage->pfnModuleTerm
            &&  pImage->uState == SUP_IOCTL_LDR_LOAD)
        {
            LogFlow(("supdrvIOCtl_LdrLoad: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
            /* hLdrTermThread lets SUPR0LdrIsLockOwnerByMod() identify us while
               the callback runs. */
            pDevExt->hLdrTermThread = RTThreadNativeSelf();
            pImage->pfnModuleTerm(pImage);
            pDevExt->hLdrTermThread = NIL_RTNATIVETHREAD;
        }

        /* Inform the tracing component. */
        supdrvTracerModuleUnloading(pDevExt, pImage);

        /* Do native unload if appropriate, then inform the native code about the
           unloading (mainly for non-native loading case). */
        if (pImage->fNative)
            supdrvOSLdrUnload(pDevExt, pImage);
        supdrvOSLdrNotifyUnloaded(pDevExt, pImage);

        /* free the image */
        pImage->uMagic = SUPDRVLDRIMAGE_MAGIC_DEAD; /* poison the magic first to catch use-after-free */
        pImage->cImgUsage = 0;
        pImage->pDevExt = NULL;
        pImage->pNext = NULL;
        pImage->uState = SUP_IOCTL_LDR_FREE;
        RTR0MemObjFree(pImage->hMemObjImage, true /*fMappings*/);
        pImage->hMemObjImage = NIL_RTR0MEMOBJ;
        pImage->pvImage = NULL;
        RTMemFree(pImage->pachStrTab);
        pImage->pachStrTab = NULL;
        RTMemFree(pImage->paSymbols);
        pImage->paSymbols = NULL;
        RTMemFree(pImage->paSegments);
        pImage->paSegments = NULL;

        /* Save the import pointer before the structure itself is freed. */
        pImageImport = pImage->pImageImport;
        pImage->pImageImport = NULL;

        RTMemFree(pImage);

        /*
         * Deal with any import image: either drop one usage reference, or -
         * if this was the last one - loop around and free it as well.
         */
        if (!pImageImport)
            break;
        if (pImageImport->cImgUsage > 1)
        {
            supdrvLdrSubtractUsage(pDevExt, pImageImport, 1);
            break;
        }
        pImage = pImageImport;
    }
}
6679
6680
6681/**
6682 * Acquires the loader lock.
6683 *
6684 * @returns IPRT status code.
6685 * @param pDevExt The device extension.
6686 * @note Not recursive on all platforms yet.
6687 */
6688DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt)
6689{
6690#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6691 int rc = RTSemMutexRequest(pDevExt->mtxLdr, RT_INDEFINITE_WAIT);
6692#else
6693 int rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
6694#endif
6695 AssertRC(rc);
6696 return rc;
6697}
6698
6699
6700/**
6701 * Releases the loader lock.
6702 *
6703 * @returns IPRT status code.
6704 * @param pDevExt The device extension.
6705 */
6706DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt)
6707{
6708#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6709 return RTSemMutexRelease(pDevExt->mtxLdr);
6710#else
6711 return RTSemFastMutexRelease(pDevExt->mtxLdr);
6712#endif
6713}
6714
6715
6716/**
6717 * Acquires the global loader lock.
6718 *
6719 * This can be useful when accessing structures being modified by the ModuleInit
6720 * and ModuleTerm. Use SUPR0LdrUnlock() to unlock.
6721 *
6722 * @returns VBox status code.
6723 * @param pSession The session doing the locking.
6724 *
6725 * @note Cannot be used during ModuleInit or ModuleTerm callbacks.
6726 */
6727SUPR0DECL(int) SUPR0LdrLock(PSUPDRVSESSION pSession)
6728{
6729 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6730 return supdrvLdrLock(pSession->pDevExt);
6731}
6732SUPR0_EXPORT_SYMBOL(SUPR0LdrLock);
6733
6734
6735/**
6736 * Releases the global loader lock.
6737 *
6738 * Must correspond to a SUPR0LdrLock call!
6739 *
6740 * @returns VBox status code.
6741 * @param pSession The session doing the locking.
6742 *
6743 * @note Cannot be used during ModuleInit or ModuleTerm callbacks.
6744 */
6745SUPR0DECL(int) SUPR0LdrUnlock(PSUPDRVSESSION pSession)
6746{
6747 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6748 return supdrvLdrUnlock(pSession->pDevExt);
6749}
6750SUPR0_EXPORT_SYMBOL(SUPR0LdrUnlock);
6751
6752
6753/**
6754 * For checking lock ownership in Assert() statements during ModuleInit and
6755 * ModuleTerm.
6756 *
6757 * @returns Whether we own the loader lock or not.
6758 * @param hMod The module in question.
6759 * @param fWantToHear For hosts where it is difficult to know who owns the
6760 * lock, this will be returned instead.
6761 */
6762SUPR0DECL(bool) SUPR0LdrIsLockOwnerByMod(void *hMod, bool fWantToHear)
6763{
6764 PSUPDRVDEVEXT pDevExt;
6765 RTNATIVETHREAD hOwner;
6766
6767 PSUPDRVLDRIMAGE pImage = (PSUPDRVLDRIMAGE)hMod;
6768 AssertPtrReturn(pImage, fWantToHear);
6769 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, fWantToHear);
6770
6771 pDevExt = pImage->pDevExt;
6772 AssertPtrReturn(pDevExt, fWantToHear);
6773
6774 /*
6775 * Expecting this to be called at init/term time only, so this will be sufficient.
6776 */
6777 hOwner = pDevExt->hLdrInitThread;
6778 if (hOwner == NIL_RTNATIVETHREAD)
6779 hOwner = pDevExt->hLdrTermThread;
6780 if (hOwner != NIL_RTNATIVETHREAD)
6781 return hOwner == RTThreadNativeSelf();
6782
6783 /*
6784 * Neither of the two semaphore variants currently offers very good
6785 * introspection, so we wing it for now. This API is VBOX_STRICT only.
6786 */
6787#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6788 return RTSemMutexIsOwned(pDevExt->mtxLdr) && fWantToHear;
6789#else
6790 return fWantToHear;
6791#endif
6792}
6793SUPR0_EXPORT_SYMBOL(SUPR0LdrIsLockOwnerByMod);
6794
6795
6796/**
6797 * Locates and retains the given module for ring-0 usage.
6798 *
6799 * @returns VBox status code.
6800 * @param pSession The session to associate the module reference with.
6801 * @param pszName The module name (no path).
6802 * @param phMod Where to return the module handle. The module is
6803 * referenced and a call to SUPR0LdrModRelease() is
6804 * necessary when done with it.
6805 */
6806SUPR0DECL(int) SUPR0LdrModByName(PSUPDRVSESSION pSession, const char *pszName, void **phMod)
6807{
6808 int rc;
6809 size_t cchName;
6810 PSUPDRVDEVEXT pDevExt;
6811
6812 /*
6813 * Validate input.
6814 */
6815 AssertPtrReturn(phMod, VERR_INVALID_POINTER);
6816 *phMod = NULL;
6817 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6818 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
6819 cchName = strlen(pszName);
6820 AssertReturn(cchName > 0, VERR_EMPTY_STRING);
6821 AssertReturn(cchName < RT_SIZEOFMEMB(SUPDRVLDRIMAGE, szName), VERR_MODULE_NOT_FOUND);
6822
6823 /*
6824 * Do the lookup.
6825 */
6826 pDevExt = pSession->pDevExt;
6827 rc = supdrvLdrLock(pDevExt);
6828 if (RT_SUCCESS(rc))
6829 {
6830 PSUPDRVLDRIMAGE pImage;
6831 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
6832 {
6833 if ( pImage->szName[cchName] == '\0'
6834 && !memcmp(pImage->szName, pszName, cchName))
6835 {
6836 /*
6837 * Check the state and make sure we don't overflow the reference counter before return it.
6838 */
6839 uint32_t uState = pImage->uState;
6840 if (uState == SUP_IOCTL_LDR_LOAD)
6841 {
6842 if (RT_LIKELY(pImage->cImgUsage < UINT32_MAX / 2U))
6843 {
6844 supdrvLdrAddUsage(pDevExt, pSession, pImage, false /*fRing3Usage*/);
6845 *phMod = pImage;
6846 supdrvLdrUnlock(pDevExt);
6847 return VINF_SUCCESS;
6848 }
6849 supdrvLdrUnlock(pDevExt);
6850 Log(("SUPR0LdrModByName: Too many existing references to '%s'!\n", pszName));
6851 return VERR_TOO_MANY_REFERENCES;
6852 }
6853 supdrvLdrUnlock(pDevExt);
6854 Log(("SUPR0LdrModByName: Module '%s' is not in the loaded state (%d)!\n", pszName, uState));
6855 return VERR_INVALID_STATE;
6856 }
6857 }
6858 supdrvLdrUnlock(pDevExt);
6859 Log(("SUPR0LdrModByName: Module '%s' not found!\n", pszName));
6860 rc = VERR_MODULE_NOT_FOUND;
6861 }
6862 return rc;
6863}
6864SUPR0_EXPORT_SYMBOL(SUPR0LdrModByName);
6865
6866
6867/**
6868 * Retains a ring-0 module reference.
6869 *
6870 * Release reference when done by calling SUPR0LdrModRelease().
6871 *
6872 * @returns VBox status code.
6873 * @param pSession The session to reference the module in. A usage
6874 * record is added if needed.
6875 * @param hMod The handle to the module to retain.
6876 */
6877SUPR0DECL(int) SUPR0LdrModRetain(PSUPDRVSESSION pSession, void *hMod)
6878{
6879 PSUPDRVDEVEXT pDevExt;
6880 PSUPDRVLDRIMAGE pImage;
6881 int rc;
6882
6883 /* Validate input a little. */
6884 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6885 AssertPtrReturn(hMod, VERR_INVALID_HANDLE);
6886 pImage = (PSUPDRVLDRIMAGE)hMod;
6887 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, VERR_INVALID_HANDLE);
6888
6889 /* Reference the module: */
6890 pDevExt = pSession->pDevExt;
6891 rc = supdrvLdrLock(pDevExt);
6892 if (RT_SUCCESS(rc))
6893 {
6894 if (pImage->uMagic == SUPDRVLDRIMAGE_MAGIC)
6895 {
6896 if (RT_LIKELY(pImage->cImgUsage < UINT32_MAX / 2U))
6897 rc = supdrvLdrAddUsage(pDevExt, pSession, pImage, false /*fRing3Usage*/);
6898 else
6899 AssertFailedStmt(rc = VERR_TOO_MANY_REFERENCES);
6900 }
6901 else
6902 AssertFailedStmt(rc = VERR_INVALID_HANDLE);
6903 supdrvLdrUnlock(pDevExt);
6904 }
6905 return rc;
6906}
6907SUPR0_EXPORT_SYMBOL(SUPR0LdrModRetain);
6908
6909
/**
 * Releases a ring-0 module reference retained by SUPR0LdrModByName() or
 * SUPR0LdrModRetain().
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if a reference was dropped but the image lives on.
 * @retval  VINF_OBJECT_DESTROYED if this was the last reference and the
 *          image (and the session's usage record) was freed.
 * @param   pSession    The session that the module was retained in.
 * @param   hMod        The module handle.  NULL is silently ignored.
 */
SUPR0DECL(int) SUPR0LdrModRelease(PSUPDRVSESSION pSession, void *hMod)
{
    PSUPDRVDEVEXT pDevExt;
    PSUPDRVLDRIMAGE pImage;
    int rc;

    /*
     * Validate input.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    if (!hMod)
        return VINF_SUCCESS;
    AssertPtrReturn(hMod, VERR_INVALID_HANDLE);
    pImage = (PSUPDRVLDRIMAGE)hMod;
    AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Take the loader lock and revalidate the module:
     */
    pDevExt = pSession->pDevExt;
    rc = supdrvLdrLock(pDevExt);
    if (RT_SUCCESS(rc))
    {
        /* Re-check the magic under the lock; it may have been freed meanwhile. */
        if (pImage->uMagic == SUPDRVLDRIMAGE_MAGIC)
        {
            /*
             * Find the usage record for the module:
             */
            PSUPDRVLDRUSAGE pPrevUsage = NULL;
            PSUPDRVLDRUSAGE pUsage;

            rc = VERR_MODULE_NOT_FOUND;
            for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
            {
                if (pUsage->pImage == pImage)
                {
                    /*
                     * Drop a ring-0 reference:
                     */
                    Assert(pImage->cImgUsage >= pUsage->cRing0Usage + pUsage->cRing3Usage);
                    if (pUsage->cRing0Usage > 0)
                    {
                        if (pImage->cImgUsage > 1)
                        {
                            /* Not the last image reference - just decrement. */
                            pUsage->cRing0Usage -= 1;
                            supdrvLdrSubtractUsage(pDevExt, pImage, 1);
                            rc = VINF_SUCCESS;
                        }
                        else
                        {
                            /* Last reference: free the image first, then unlink
                               and free the session's usage record. */
                            Assert(!pImage->pWrappedModInfo /* (The wrapper kmod has the last reference.) */);
                            supdrvLdrFree(pDevExt, pImage);

                            if (pPrevUsage)
                                pPrevUsage->pNext = pUsage->pNext;
                            else
                                pSession->pLdrUsage = pUsage->pNext;
                            pUsage->pNext = NULL;
                            pUsage->pImage = NULL;
                            pUsage->cRing0Usage = 0;
                            pUsage->cRing3Usage = 0;
                            RTMemFree(pUsage);

                            rc = VINF_OBJECT_DESTROYED;
                        }
                    }
                    else
                        /* The caller never took a ring-0 reference on this image. */
                        AssertFailedStmt(rc = VERR_CALLER_NO_REFERENCE);
                    break;
                }
                pPrevUsage = pUsage;
            }
        }
        else
            AssertFailedStmt(rc = VERR_INVALID_HANDLE);
        supdrvLdrUnlock(pDevExt);
    }
    return rc;

}
SUPR0_EXPORT_SYMBOL(SUPR0LdrModRelease);
6999
7000
7001/**
7002 * Implements the service call request.
7003 *
7004 * @returns VBox status code.
7005 * @param pDevExt The device extension.
7006 * @param pSession The calling session.
7007 * @param pReq The request packet, valid.
7008 */
7009static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
7010{
7011#if !defined(RT_OS_WINDOWS) || defined(RT_ARCH_AMD64) || defined(DEBUG)
7012 int rc;
7013
7014 /*
7015 * Find the module first in the module referenced by the calling session.
7016 */
7017 rc = supdrvLdrLock(pDevExt);
7018 if (RT_SUCCESS(rc))
7019 {
7020 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
7021 PSUPDRVLDRUSAGE pUsage;
7022
7023 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
7024 if ( pUsage->pImage->pfnServiceReqHandler
7025 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
7026 {
7027 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
7028 break;
7029 }
7030 supdrvLdrUnlock(pDevExt);
7031
7032 if (pfnServiceReqHandler)
7033 {
7034 /*
7035 * Call it.
7036 */
7037 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
7038 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
7039 else
7040 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
7041 }
7042 else
7043 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
7044 }
7045
7046 /* log it */
7047 if ( RT_FAILURE(rc)
7048 && rc != VERR_INTERRUPTED
7049 && rc != VERR_TIMEOUT)
7050 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
7051 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
7052 else
7053 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
7054 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
7055 return rc;
7056#else /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
7057 RT_NOREF3(pDevExt, pSession, pReq);
7058 return VERR_NOT_IMPLEMENTED;
7059#endif /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
7060}
7061
7062
/**
 * Implements the logger settings request.
 *
 * Depending on SUPLOGGERSETTINGS::u.In.fWhat this updates the settings of,
 * creates, or destroys the default debug or release logger instance.
 *
 * @returns VBox status code.
 * @param   pReq        The request.
 */
static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq)
{
    const char *pszGroup = &pReq->u.In.szStrings[pReq->u.In.offGroups];
    const char *pszFlags = &pReq->u.In.szStrings[pReq->u.In.offFlags];
    const char *pszDest = &pReq->u.In.szStrings[pReq->u.In.offDestination];
    PRTLOGGER pLogger = NULL;
    int rc;

    /*
     * Some further validation.
     */
    switch (pReq->u.In.fWhat)
    {
        case SUPLOGGERSETTINGS_WHAT_SETTINGS:
        case SUPLOGGERSETTINGS_WHAT_CREATE:
            break;

        case SUPLOGGERSETTINGS_WHAT_DESTROY:
            /* Destroy takes no setting strings, and the release logger may
               not be torn down via this interface. */
            if (*pszGroup || *pszFlags || *pszDest)
                return VERR_INVALID_PARAMETER;
            if (pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_RELEASE)
                return VERR_ACCESS_DENIED;
            break;

        default:
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Get the logger.
     */
    switch (pReq->u.In.fWhich)
    {
        case SUPLOGGERSETTINGS_WHICH_DEBUG:
            pLogger = RTLogGetDefaultInstance();
            break;

        case SUPLOGGERSETTINGS_WHICH_RELEASE:
            pLogger = RTLogRelGetDefaultInstance();
            break;

        default:
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Do the job.
     */
    switch (pReq->u.In.fWhat)
    {
        case SUPLOGGERSETTINGS_WHAT_SETTINGS:
            if (pLogger)
            {
                rc = RTLogFlags(pLogger, pszFlags);
                if (RT_SUCCESS(rc))
                    rc = RTLogGroupSettings(pLogger, pszGroup);
                NOREF(pszDest); /* destination string is currently ignored */
            }
            else
                rc = VERR_NOT_FOUND;
            break;

        case SUPLOGGERSETTINGS_WHAT_CREATE:
        {
            if (pLogger)
                rc = VERR_ALREADY_EXISTS; /* only one default instance per kind */
            else
            {
                static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;

                rc = RTLogCreate(&pLogger,
                                 0 /* fFlags */,
                                 pszGroup,
                                 pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_DEBUG
                                 ? "VBOX_LOG"
                                 : "VBOX_RELEASE_LOG",
                                 RT_ELEMENTS(s_apszGroups),
                                 s_apszGroups,
                                 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER,
                                 NULL);
                if (RT_SUCCESS(rc))
                {
                    rc = RTLogFlags(pLogger, pszFlags);
                    NOREF(pszDest); /* destination string is currently ignored */
                    if (RT_SUCCESS(rc))
                    {
                        /* Install the new logger; pLogger now holds whatever
                           instance was installed before (NULL here, since we
                           only get this far when there was no default). */
                        switch (pReq->u.In.fWhich)
                        {
                            case SUPLOGGERSETTINGS_WHICH_DEBUG:
                                pLogger = RTLogSetDefaultInstance(pLogger);
                                break;
                            case SUPLOGGERSETTINGS_WHICH_RELEASE:
                                pLogger = RTLogRelSetDefaultInstance(pLogger);
                                break;
                        }
                    }
                    /* On RTLogFlags failure this destroys the logger we just
                       created; on success it is a no-op on the old (NULL)
                       instance. */
                    RTLogDestroy(pLogger);
                }
            }
            break;
        }

        case SUPLOGGERSETTINGS_WHAT_DESTROY:
            /* Deinstall the default instance first, then destroy it. */
            switch (pReq->u.In.fWhich)
            {
                case SUPLOGGERSETTINGS_WHICH_DEBUG:
                    pLogger = RTLogSetDefaultInstance(NULL);
                    break;
                case SUPLOGGERSETTINGS_WHICH_RELEASE:
                    pLogger = RTLogRelSetDefaultInstance(NULL);
                    break;
            }
            rc = RTLogDestroy(pLogger);
            break;

        default:
        {
            rc = VERR_INTERNAL_ERROR;
            break;
        }
    }

    return rc;
}
7193
7194
7195#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
7196/**
7197 * Implements the MSR prober operations.
7198 *
7199 * @returns VBox status code.
7200 * @param pDevExt The device extension.
7201 * @param pReq The request.
7202 */
7203static int supdrvIOCtl_X86MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq)
7204{
7205# ifdef SUPDRV_WITH_MSR_PROBER
7206 RTCPUID const idCpu = pReq->u.In.idCpu == UINT32_MAX ? NIL_RTCPUID : pReq->u.In.idCpu;
7207 int rc;
7208
7209 switch (pReq->u.In.enmOp)
7210 {
7211 case SUPMSRPROBEROP_READ:
7212 {
7213 uint64_t uValue;
7214 rc = supdrvOSMsrProberRead(pReq->u.In.uMsr, idCpu, &uValue);
7215 if (RT_SUCCESS(rc))
7216 {
7217 pReq->u.Out.uResults.Read.uValue = uValue;
7218 pReq->u.Out.uResults.Read.fGp = false;
7219 }
7220 else if (rc == VERR_ACCESS_DENIED)
7221 {
7222 pReq->u.Out.uResults.Read.uValue = 0;
7223 pReq->u.Out.uResults.Read.fGp = true;
7224 rc = VINF_SUCCESS;
7225 }
7226 break;
7227 }
7228
7229 case SUPMSRPROBEROP_WRITE:
7230 rc = supdrvOSMsrProberWrite(pReq->u.In.uMsr, idCpu, pReq->u.In.uArgs.Write.uToWrite);
7231 if (RT_SUCCESS(rc))
7232 pReq->u.Out.uResults.Write.fGp = false;
7233 else if (rc == VERR_ACCESS_DENIED)
7234 {
7235 pReq->u.Out.uResults.Write.fGp = true;
7236 rc = VINF_SUCCESS;
7237 }
7238 break;
7239
7240 case SUPMSRPROBEROP_MODIFY:
7241 case SUPMSRPROBEROP_MODIFY_FASTER:
7242 rc = supdrvOSMsrProberModify(idCpu, pReq);
7243 break;
7244
7245 default:
7246 return VERR_INVALID_FUNCTION;
7247 }
7248 RT_NOREF1(pDevExt);
7249 return rc;
7250# else
7251 RT_NOREF2(pDevExt, pReq);
7252 return VERR_NOT_IMPLEMENTED;
7253# endif
7254}
7255#endif /* RT_ARCH_AMD64 || RT_ARCH_X86 */
7256
7257
7258#if defined(RT_ARCH_ARM64)
7259
7260/**
7261 * Gathers ARM system registers.
7262 *
7263 * This is either called directly or via RTMpOnSpecific. The latter means that
7264 * we must not trigger any paging activity or block.
7265 */
7266static void supdrvIOCtl_ArmGetSysRegsOnCpu(PSUPARMGETSYSREGS pReq, uint32_t const cMaxRegs, uint32_t fFlags)
7267{
7268 /*
7269 * Reader macro.
7270 */
7271 uint32_t const fSavedFlags = fFlags;
7272 uint32_t idxReg = 0;
7273 uint64_t uRegVal;
7274# ifdef _MSC_VER
7275# define COMPILER_READ_SYS_REG(a_u64Dst, a_Op0, a_Op1, a_CRn, a_CRm, a_Op2) \
7276 (a_u64Dst) = (uint64_t)_ReadStatusReg(ARMV8_AARCH64_SYSREG_ID_CREATE(a_Op0, a_Op1, a_CRn, a_CRm, a_Op2) & 0x7fff)
7277# define COMPILER_READ_SYS_REG_NAMED(a_u64Dst, a_SysRegName) \
7278 (a_u64Dst) = (uint64_t)_ReadStatusReg(RT_CONCAT(ARMV8_AARCH64_SYSREG_,a_SysRegName) & 0x7fff)
7279# else
7280# define COMPILER_READ_SYS_REG(a_u64Dst, a_Op0, a_Op1, a_CRn, a_CRm, a_Op2) \
7281 __asm__ __volatile__ ("mrs %0, s" #a_Op0 "_" #a_Op1 "_c" #a_CRn "_c" #a_CRm "_" #a_Op2 : "=r" (a_u64Dst))
7282# define COMPILER_READ_SYS_REG_NAMED(a_u64Dst, a_SysRegName) \
7283 __asm__ __volatile__ ("mrs %0, " #a_SysRegName : "=r" (a_u64Dst))
7284# endif
7285# define READ_SYS_REG_UNDEF(a_Op0, a_Op1, a_CRn, a_CRm, a_Op2) do { \
7286 uRegVal = 0; \
7287 COMPILER_READ_SYS_REG(uRegVal, a_Op0, a_Op1, a_CRn, a_CRm, a_Op2); \
7288 if (uRegVal != 0 || (fFlags & SUP_ARM_SYS_REG_F_INC_ZERO_REG_VAL)) \
7289 { \
7290 if (idxReg < cMaxRegs) \
7291 { \
7292 pReq->u.Out.aRegs[idxReg].uValue = uRegVal; \
7293 pReq->u.Out.aRegs[idxReg].idReg = ARMV8_AARCH64_SYSREG_ID_CREATE(a_Op0, a_Op1, a_CRn, a_CRm, a_Op2); \
7294 pReq->u.Out.aRegs[idxReg].fFlags = 0; \
7295 } \
7296 idxReg += 1; \
7297 } \
7298 } while (0)
7299
7300# define READ_SYS_REG_NAMED(a_Op0, a_Op1, a_CRn, a_CRm, a_Op2, a_SysRegName) do { \
7301 AssertCompile( ARMV8_AARCH64_SYSREG_ID_CREATE(a_Op0, a_Op1, a_CRn, a_CRm, a_Op2) \
7302 == RT_CONCAT(ARMV8_AARCH64_SYSREG_,a_SysRegName)); \
7303 READ_SYS_REG_UNDEF(a_Op0, a_Op1, a_CRn, a_CRm, a_Op2); \
7304 } while (0)
7305
7306# define READ_SYS_REG__TODO(a_Op0, a_Op1, a_CRn, a_CRm, a_Op2, a_SysRegName) READ_SYS_REG_UNDEF(a_Op0, a_Op1, a_CRn, a_CRm, a_Op2)
7307 /*
7308 * Standard ID registers.
7309 */
7310 READ_SYS_REG_NAMED(3, 0, 0, 0, 0, MIDR_EL1);
7311 READ_SYS_REG_NAMED(3, 0, 0, 0, 5, MPIDR_EL1);
7312 READ_SYS_REG_NAMED(3, 0, 0, 0, 6, REVIDR_EL1);
7313 READ_SYS_REG__TODO(3, 1, 0, 0, 0, CCSIDR_EL1);
7314 READ_SYS_REG__TODO(3, 1, 0, 0, 1, CLIDR_EL1);
7315 READ_SYS_REG__TODO(3, 1, 0, 0, 7, AIDR_EL1);
7316 READ_SYS_REG_NAMED(3, 3, 0, 0, 7, DCZID_EL0);
7317 READ_SYS_REG_NAMED(3, 3,14, 0, 0, CNTFRQ_EL0);
7318
7319
7320 READ_SYS_REG_NAMED(3, 0, 0, 4, 0, ID_AA64PFR0_EL1);
7321 uint64_t const fPfr0 = uRegVal;
7322 bool const fA32 = ((fPfr0 & ARMV8_ID_AA64PFR0_EL1_EL0_MASK) >> ARMV8_ID_AA64PFR0_EL1_EL0_SHIFT) == ARMV8_ID_AA64PFR0_EL1_EL0_AARCH64_AARCH32
7323 || ((fPfr0 & ARMV8_ID_AA64PFR0_EL1_EL1_MASK) >> ARMV8_ID_AA64PFR0_EL1_EL1_SHIFT) == ARMV8_ID_AA64PFR0_EL1_EL1_AARCH64_AARCH32
7324 || ((fPfr0 & ARMV8_ID_AA64PFR0_EL1_EL2_MASK) >> ARMV8_ID_AA64PFR0_EL1_EL2_SHIFT) == ARMV8_ID_AA64PFR0_EL1_EL2_AARCH64_AARCH32
7325 || ((fPfr0 & ARMV8_ID_AA64PFR0_EL1_EL3_MASK) >> ARMV8_ID_AA64PFR0_EL1_EL3_SHIFT) == ARMV8_ID_AA64PFR0_EL1_EL3_AARCH64_AARCH32;
7326 READ_SYS_REG_NAMED(3, 0, 0, 4, 1, ID_AA64PFR1_EL1);
7327 uint64_t const fPfr1 = uRegVal;
7328 READ_SYS_REG_UNDEF(3, 0, 0, 4, 2);
7329 READ_SYS_REG_UNDEF(3, 0, 0, 4, 3);
7330 READ_SYS_REG_NAMED(3, 0, 0, 4, 4, ID_AA64ZFR0_EL1);
7331 READ_SYS_REG_NAMED(3, 0, 0, 4, 5, ID_AA64SMFR0_EL1);
7332 READ_SYS_REG_UNDEF(3, 0, 0, 4, 6);
7333 READ_SYS_REG_UNDEF(3, 0, 0, 4, 7);
7334
7335 READ_SYS_REG_NAMED(3, 0, 0, 5, 0, ID_AA64DFR0_EL1);
7336 uint64_t const fDfr0 = uRegVal;
7337 READ_SYS_REG_NAMED(3, 0, 0, 5, 1, ID_AA64DFR1_EL1);
7338 READ_SYS_REG_UNDEF(3, 0, 0, 5, 2);
7339 READ_SYS_REG_UNDEF(3, 0, 0, 5, 3);
7340 READ_SYS_REG_NAMED(3, 0, 0, 5, 4, ID_AA64AFR0_EL1);
7341 READ_SYS_REG_NAMED(3, 0, 0, 5, 5, ID_AA64AFR1_EL1);
7342 READ_SYS_REG_UNDEF(3, 0, 0, 5, 6);
7343 READ_SYS_REG_UNDEF(3, 0, 0, 5, 7);
7344
7345 READ_SYS_REG_NAMED(3, 0, 0, 6, 0, ID_AA64ISAR0_EL1);
7346 READ_SYS_REG_NAMED(3, 0, 0, 6, 1, ID_AA64ISAR1_EL1);
7347 READ_SYS_REG_NAMED(3, 0, 0, 6, 2, ID_AA64ISAR2_EL1);
7348 READ_SYS_REG__TODO(3, 0, 0, 6, 3, ID_AA64ISAR3_EL1);
7349 READ_SYS_REG_UNDEF(3, 0, 0, 6, 4);
7350 READ_SYS_REG_UNDEF(3, 0, 0, 6, 5);
7351 READ_SYS_REG_UNDEF(3, 0, 0, 6, 6);
7352 READ_SYS_REG_UNDEF(3, 0, 0, 6, 7);
7353
7354 READ_SYS_REG_NAMED(3, 0, 0, 7, 0, ID_AA64MMFR0_EL1);
7355 READ_SYS_REG_NAMED(3, 0, 0, 7, 1, ID_AA64MMFR1_EL1);
7356 READ_SYS_REG_NAMED(3, 0, 0, 7, 2, ID_AA64MMFR2_EL1);
7357 uint64_t const fMmfr2 = uRegVal;
7358 READ_SYS_REG__TODO(3, 0, 0, 7, 3, ID_AA64MMFR3_EL1);
7359 READ_SYS_REG__TODO(3, 0, 0, 7, 4, ID_AA64MMFR4_EL1);
7360 READ_SYS_REG_UNDEF(3, 0, 0, 7, 5);
7361 READ_SYS_REG_UNDEF(3, 0, 0, 7, 6);
7362 READ_SYS_REG_UNDEF(3, 0, 0, 7, 7);
7363
7364 /*
7365 * AArch32 feature registers.
7366 * If AA64PFR0 doesn't indicate any AARCH32 support, switch to only report
7367 * these registers if they are non-zero.
7368 */
7369 if (!fA32)
7370 fFlags &= ~SUP_ARM_SYS_REG_F_INC_ZERO_REG_VAL;
7371 READ_SYS_REG_NAMED(3, 0, 0, 1, 0, ID_PFR0_EL1);
7372 READ_SYS_REG_NAMED(3, 0, 0, 1, 1, ID_PFR1_EL1);
7373
7374 READ_SYS_REG_NAMED(3, 0, 0, 1, 2, ID_DFR0_EL1);
7375
7376 READ_SYS_REG_NAMED(3, 0, 0, 1, 3, ID_AFR0_EL1);
7377
7378 READ_SYS_REG_NAMED(3, 0, 0, 1, 4, ID_MMFR0_EL1);
7379 READ_SYS_REG_NAMED(3, 0, 0, 1, 5, ID_MMFR1_EL1);
7380 READ_SYS_REG_NAMED(3, 0, 0, 1, 6, ID_MMFR2_EL1);
7381 READ_SYS_REG_NAMED(3, 0, 0, 1, 7, ID_MMFR3_EL1);
7382
7383 READ_SYS_REG_NAMED(3, 0, 0, 2, 0, ID_ISAR0_EL1);
7384 READ_SYS_REG_NAMED(3, 0, 0, 2, 1, ID_ISAR1_EL1);
7385 READ_SYS_REG_NAMED(3, 0, 0, 2, 2, ID_ISAR2_EL1);
7386 READ_SYS_REG_NAMED(3, 0, 0, 2, 3, ID_ISAR3_EL1);
7387 READ_SYS_REG_NAMED(3, 0, 0, 2, 4, ID_ISAR4_EL1);
7388 READ_SYS_REG_NAMED(3, 0, 0, 2, 5, ID_ISAR5_EL1);
7389
7390 READ_SYS_REG_NAMED(3, 0, 0, 2, 6, ID_MMFR4_EL1);
7391
7392 READ_SYS_REG_NAMED(3, 0, 0, 2, 7, ID_ISAR6_EL1);
7393
7394 READ_SYS_REG_NAMED(3, 0, 0, 3, 0, MVFR0_EL1);
7395 READ_SYS_REG_NAMED(3, 0, 0, 3, 1, MVFR1_EL1);
7396 READ_SYS_REG_NAMED(3, 0, 0, 3, 2, MVFR2_EL1);
7397
7398 READ_SYS_REG_NAMED(3, 0, 0, 3, 4, ID_PFR2_EL1);
7399
7400 READ_SYS_REG_NAMED(3, 0, 0, 3, 5, ID_DFR1_EL1);
7401
7402 READ_SYS_REG_NAMED(3, 0, 0, 3, 6, ID_MMFR5_EL1);
7403 fFlags = fSavedFlags; /* restore SUP_ARM_SYS_REG_F_INC_ZERO_REG_VAL */
7404
7405 /*
7406 * Feature dependent registers:
7407 */
7408 if ((fMmfr2 & (UINT32_C(15) << 20) /*CCIDX*/) == RT_BIT_32(20))
7409 READ_SYS_REG__TODO(3, 1, 0, 0, 2, CCSIDR2_EL1); /*?*/
7410
7411 if (fPfr0 & ARMV8_ID_AA64PFR0_EL1_RAS_MASK)
7412 READ_SYS_REG_NAMED(3, 0, 5, 3, 0, ERRIDR_EL1);
7413
7414 if ((fPfr1 & ARMV8_ID_AA64PFR1_EL1_MTE_MASK) >= (ARMV8_ID_AA64PFR1_EL1_MTE_FULL << ARMV8_ID_AA64PFR1_EL1_MTE_SHIFT))
7415 READ_SYS_REG__TODO(3, 1, 0, 0, 4, GMID_EL1);
7416 if ((fPfr0 & ARMV8_ID_AA64PFR0_EL1_MPAM_MASK) || (fPfr1 & ARMV8_ID_AA64PFR1_EL1_MPAMFRAC_MASK))
7417 {
7418 READ_SYS_REG__TODO(3, 0, 10, 4, 4, MPAMIDR_EL1);
7419 uint64_t fMpamidr;
7420 COMPILER_READ_SYS_REG(fMpamidr, 3, 0, 10, 4, 4);
7421 if (fMpamidr & RT_BIT_64(56) /*HAS_BW_CTRL*/)
7422 READ_SYS_REG__TODO(3, 0, 10, 4, 5, MPAMBWIDR_EL1);
7423 }
7424
7425 if (fDfr0 & (UINT64_C(15) << 32) /*PMSVer*/)
7426 {
7427 READ_SYS_REG__TODO(3, 0, 9, 10, 7, PMBIDR_EL1);
7428 READ_SYS_REG__TODO(3, 0, 9, 8, 7, PMSIDR_EL1);
7429 }
7430 if (fDfr0 & (UINT64_C(15) << 44) /*TraceBuffer*/)
7431 READ_SYS_REG__TODO(3, 0, 9, 11, 7, TRBIDR_EL1);
7432
7433 /** @todo FEAT_ETE: READ_SYS_REG(TRCIDR0); */
7434 /** @todo FEAT_ETE: READ_SYS_REG(TRCIDR1); */
7435 /** @todo FEAT_ETE: READ_SYS_REG(TRCIDR2); */
7436 /** @todo FEAT_ETE: READ_SYS_REG(TRCIDR3); */
7437 /** @todo FEAT_ETE: READ_SYS_REG(TRCIDR4); */
7438 /** @todo FEAT_ETE: READ_SYS_REG(TRCIDR5); */
7439 /** @todo FEAT_ETE: READ_SYS_REG(TRCIDR6); */
7440 /** @todo FEAT_ETE: READ_SYS_REG(TRCIDR7); */
7441 /** @todo FEAT_ETE: READ_SYS_REG(TRCIDR8); */
7442 /** @todo FEAT_ETE: READ_SYS_REG(TRCIDR9); */
7443
7444
7445# undef READ_SYS_REG
7446# undef COMPILER_READ_SYS_REG
7447
7448 /*
7449 * Complete the request output.
7450 */
7451 pReq->u.Out.cRegsAvailable = idxReg;
7452 if (idxReg > cMaxRegs)
7453 idxReg = cMaxRegs;
7454 pReq->u.Out.cRegs = idxReg;
7455 pReq->Hdr.cbOut = SUP_IOCTL_ARM_GET_SYSREGS_SIZE_OUT(idxReg);
7456}
7457
7458
/** Argument package for supdrvIOCtl_ArmGetSysRegsOnCpuCallback. */
typedef struct SUPARMGETSYSREGSONCPUARGS
{
    uint32_t cMaxRegs;  /**< Maximum number of registers the request buffer can hold. */
    uint32_t fFlags;    /**< SUP_ARM_SYS_REG_F_XXX flags for the register reader. */
} SUPARMGETSYSREGSONCPUARGS;
7465
7466
7467/** @callback_method_impl{FNRTMPWORKER} */
7468DECLCALLBACK(void) supdrvIOCtl_ArmGetSysRegsOnCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
7469{
7470 const SUPARMGETSYSREGSONCPUARGS *pArgs = (const SUPARMGETSYSREGSONCPUARGS *)pvUser2;
7471 supdrvIOCtl_ArmGetSysRegsOnCpu((PSUPARMGETSYSREGS)pvUser1, pArgs->cMaxRegs, pArgs->fFlags);
7472 RT_NOREF(idCpu);
7473}
7474
7475
7476/**
7477 * Implements the ARM ID (and system) register getter.
7478 *
7479 * @returns VBox status code.
7480 * @param pReq The request.
7481 * @param cMaxRegs The maximum number of register we can return.
7482 * @param idCpu The CPU to get system registers for.
7483 * @param fFlags The request flags.
7484 */
7485static int supdrvIOCtl_ArmGetSysRegs(PSUPARMGETSYSREGS pReq, uint32_t const cMaxRegs, RTCPUID idCpu, uint32_t fFlags)
7486{
7487 int rc;
7488
7489 /* Zero the request array just in case someone hands us a pagable buffer. */
7490 RT_BZERO(&pReq->u.Out.aRegs[0], cMaxRegs * sizeof(pReq->u.Out.aRegs[0]));
7491
7492 if (idCpu == NIL_RTCPUID)
7493 {
7494 supdrvIOCtl_ArmGetSysRegsOnCpu(pReq, cMaxRegs, fFlags);
7495 rc = VINF_SUCCESS;
7496 }
7497 else
7498 {
7499 SUPARMGETSYSREGSONCPUARGS Args;
7500 Args.cMaxRegs = cMaxRegs;
7501 Args.fFlags = fFlags;
7502 rc = RTMpOnSpecific(idCpu, supdrvIOCtl_ArmGetSysRegsOnCpuCallback, pReq, &Args);
7503 }
7504 return rc;
7505}
7506
7507#endif /* RT_ARCH_ARM64 */
7508
7509/**
7510 * Resume built-in keyboard on MacBook Air and Pro hosts.
7511 * If there is no built-in keyboard device, return success anyway.
7512 *
7513 * @returns 0 on Mac OS X platform, VERR_NOT_IMPLEMENTED on the other ones.
7514 */
7515static int supdrvIOCtl_ResumeSuspendedKbds(void)
7516{
7517#if defined(RT_OS_DARWIN)
7518 return supdrvDarwinResumeSuspendedKbds();
7519#else
7520 return VERR_NOT_IMPLEMENTED;
7521#endif
7522}
7523
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette