VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 50372

Last change on this file since 50372 was 50372, checked in by vboxsync, 11 years ago

Additions/common/VBoxGuest: make CANCEL_ALL_WAITEVENTS cancel the next WAITEVENT if none is in progress.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 111.0 KB
Line 
1/* $Id: VBoxGuest.cpp 50372 2014-02-09 16:05:37Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define LOG_GROUP LOG_GROUP_DEFAULT
32#include "VBoxGuestInternal.h"
33#include "VBoxGuest2.h"
34#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
35#include <VBox/log.h>
36#include <iprt/mem.h>
37#include <iprt/time.h>
38#include <iprt/memobj.h>
39#include <iprt/asm.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <iprt/process.h>
43#include <iprt/assert.h>
44#include <iprt/param.h>
45#ifdef VBOX_WITH_HGCM
46# include <iprt/thread.h>
47#endif
48#include "version-generated.h"
49#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
50# include "revision-generated.h"
51#endif
52#ifdef RT_OS_WINDOWS
53# ifndef CTL_CODE
54# include <Windows.h>
55# endif
56#endif
57#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
58# include <iprt/rand.h>
59#endif
60
61
62/*******************************************************************************
63* Internal Functions *
64*******************************************************************************/
65#ifdef VBOX_WITH_HGCM
66static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
67#endif
68#ifdef DEBUG
69static void testSetMouseStatus(void);
70#endif
71static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures);
72
73static int VBoxGuestCommonGuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask, uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags);
74
75#define VBOXGUEST_ACQUIRE_STYLE_EVENTS (VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST | VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
76
77DECLINLINE(uint32_t) VBoxGuestCommonGetHandledEventsLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
78{
79 if (!pDevExt->u32AcquireModeGuestCaps)
80 return VMMDEV_EVENT_VALID_EVENT_MASK;
81
82 uint32_t u32AllowedGuestCaps = pSession->u32AquiredGuestCaps | (VMMDEV_EVENT_VALID_EVENT_MASK & ~pDevExt->u32AcquireModeGuestCaps);
83 uint32_t u32CleanupEvents = VBOXGUEST_ACQUIRE_STYLE_EVENTS;
84 if (u32AllowedGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)
85 u32CleanupEvents &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
86 if (u32AllowedGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
87 u32CleanupEvents &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
88
89 return VMMDEV_EVENT_VALID_EVENT_MASK & ~u32CleanupEvents;
90}
91
92DECLINLINE(uint32_t) VBoxGuestCommonGetAndCleanPendingEventsLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fReqEvents)
93{
94 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents & VBoxGuestCommonGetHandledEventsLocked(pDevExt, pSession);
95 if (fMatches)
96 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
97 return fMatches;
98}
99
100DECLINLINE(bool) VBoxGuestCommonGuestCapsModeSet(PVBOXGUESTDEVEXT pDevExt, uint32_t fCaps, bool fAcquire, uint32_t *pu32OtherVal)
101{
102 uint32_t *pVal = fAcquire ? &pDevExt->u32AcquireModeGuestCaps : &pDevExt->u32SetModeGuestCaps;
103 const uint32_t fNotVal = !fAcquire ? pDevExt->u32AcquireModeGuestCaps : pDevExt->u32SetModeGuestCaps;
104 bool fResult = true;
105 RTSpinlockAcquire(pDevExt->EventSpinlock);
106
107 if (!(fNotVal & fCaps))
108 *pVal |= fCaps;
109 else
110 {
111 AssertMsgFailed(("trying to change caps mode\n"));
112 fResult = false;
113 }
114
115 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
116
117 if (pu32OtherVal)
118 *pu32OtherVal = fNotVal;
119 return fResult;
120}
121
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Size of a VMMDevChangeMemBalloon request including the full physical page
 * array for one balloon chunk (VMMDEV_MEMORY_BALLOON_CHUNK_PAGES entries). */
static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on Solaris.
 */
PFNRT g_apfnVBoxGuestIPRTDeps[] =
{
    /* VirtioNet */
    (PFNRT)RTRandBytes,
    /* RTSemMutex* */
    (PFNRT)RTSemMutexCreate,
    (PFNRT)RTSemMutexDestroy,
    (PFNRT)RTSemMutexRequest,
    (PFNRT)RTSemMutexRequestNoResume,
    (PFNRT)RTSemMutexRequestDebug,
    (PFNRT)RTSemMutexRequestNoResumeDebug,
    (PFNRT)RTSemMutexRelease,
    (PFNRT)RTSemMutexIsOwned,
    NULL    /* terminator */
};
#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
148
149
150/**
151 * Reserves memory in which the VMM can relocate any guest mappings
152 * that are floating around.
153 *
154 * This operation is a little bit tricky since the VMM might not accept
155 * just any address because of address clashes between the three contexts
156 * it operates in, so use a small stack to perform this operation.
157 *
158 * @returns VBox status code (ignored).
159 * @param pDevExt The device extension.
160 */
161static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
162{
163 /*
164 * Query the required space.
165 */
166 VMMDevReqHypervisorInfo *pReq;
167 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
168 if (RT_FAILURE(rc))
169 return rc;
170 pReq->hypervisorStart = 0;
171 pReq->hypervisorSize = 0;
172 rc = VbglGRPerform(&pReq->header);
173 if (RT_FAILURE(rc)) /* this shouldn't happen! */
174 {
175 VbglGRFree(&pReq->header);
176 return rc;
177 }
178
179 /*
180 * The VMM will report back if there is nothing it wants to map, like for
181 * instance in VT-x and AMD-V mode.
182 */
183 if (pReq->hypervisorSize == 0)
184 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
185 else
186 {
187 /*
188 * We have to try several times since the host can be picky
189 * about certain addresses.
190 */
191 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
192 uint32_t cbHypervisor = pReq->hypervisorSize;
193 RTR0MEMOBJ ahTries[5];
194 uint32_t iTry;
195 bool fBitched = false;
196 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
197 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
198 {
199 /*
200 * Reserve space, or if that isn't supported, create a object for
201 * some fictive physical memory and map that in to kernel space.
202 *
203 * To make the code a bit uglier, most systems cannot help with
204 * 4MB alignment, so we have to deal with that in addition to
205 * having two ways of getting the memory.
206 */
207 uint32_t uAlignment = _4M;
208 RTR0MEMOBJ hObj;
209 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
210 if (rc == VERR_NOT_SUPPORTED)
211 {
212 uAlignment = PAGE_SIZE;
213 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
214 }
215 /*
216 * If both RTR0MemObjReserveKernel calls above failed because either not supported or
217 * not implemented at all at the current platform, try to map the memory object into the
218 * virtual kernel space.
219 */
220 if (rc == VERR_NOT_SUPPORTED)
221 {
222 if (hFictive == NIL_RTR0MEMOBJ)
223 {
224 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
225 if (RT_FAILURE(rc))
226 break;
227 hFictive = hObj;
228 }
229 uAlignment = _4M;
230 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
231 if (rc == VERR_NOT_SUPPORTED)
232 {
233 uAlignment = PAGE_SIZE;
234 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
235 }
236 }
237 if (RT_FAILURE(rc))
238 {
239 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
240 rc, cbHypervisor, uAlignment, iTry));
241 fBitched = true;
242 break;
243 }
244
245 /*
246 * Try set it.
247 */
248 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
249 pReq->header.rc = VERR_INTERNAL_ERROR;
250 pReq->hypervisorSize = cbHypervisor;
251 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
252 if ( uAlignment == PAGE_SIZE
253 && pReq->hypervisorStart & (_4M - 1))
254 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
255 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
256
257 rc = VbglGRPerform(&pReq->header);
258 if (RT_SUCCESS(rc))
259 {
260 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
261 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
262 RTR0MemObjAddress(pDevExt->hGuestMappings),
263 RTR0MemObjSize(pDevExt->hGuestMappings),
264 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
265 break;
266 }
267 ahTries[iTry] = hObj;
268 }
269
270 /*
271 * Cleanup failed attempts.
272 */
273 while (iTry-- > 0)
274 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
275 if ( RT_FAILURE(rc)
276 && hFictive != NIL_RTR0PTR)
277 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
278 if (RT_FAILURE(rc) && !fBitched)
279 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
280 }
281 VbglGRFree(&pReq->header);
282
283 /*
284 * We ignore failed attempts for now.
285 */
286 return VINF_SUCCESS;
287}
288
289
290/**
291 * Undo what vboxGuestInitFixateGuestMappings did.
292 *
293 * @param pDevExt The device extension.
294 */
295static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
296{
297 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
298 {
299 /*
300 * Tell the host that we're going to free the memory we reserved for
301 * it, the free it up. (Leak the memory if anything goes wrong here.)
302 */
303 VMMDevReqHypervisorInfo *pReq;
304 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
305 if (RT_SUCCESS(rc))
306 {
307 pReq->hypervisorStart = 0;
308 pReq->hypervisorSize = 0;
309 rc = VbglGRPerform(&pReq->header);
310 VbglGRFree(&pReq->header);
311 }
312 if (RT_SUCCESS(rc))
313 {
314 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
315 AssertRC(rc);
316 }
317 else
318 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
319
320 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
321 }
322}
323
324
325/**
326 * Sets the interrupt filter mask during initialization and termination.
327 *
328 * This will ASSUME that we're the ones in carge over the mask, so
329 * we'll simply clear all bits we don't set.
330 *
331 * @returns VBox status code (ignored).
332 * @param pDevExt The device extension.
333 * @param fMask The new mask.
334 */
335static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
336{
337 VMMDevCtlGuestFilterMask *pReq;
338 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
339 if (RT_SUCCESS(rc))
340 {
341 pReq->u32OrMask = fMask;
342 pReq->u32NotMask = ~fMask;
343 rc = VbglGRPerform(&pReq->header);
344 if (RT_FAILURE(rc))
345 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
346 VbglGRFree(&pReq->header);
347 }
348 return rc;
349}
350
351
352/**
353 * Inflate the balloon by one chunk represented by an R0 memory object.
354 *
355 * The caller owns the balloon mutex.
356 *
357 * @returns IPRT status code.
358 * @param pMemObj Pointer to the R0 memory object.
359 * @param pReq The pre-allocated request for performing the VMMDev call.
360 */
361static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
362{
363 uint32_t iPage;
364 int rc;
365
366 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
367 {
368 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
369 pReq->aPhysPage[iPage] = phys;
370 }
371
372 pReq->fInflate = true;
373 pReq->header.size = cbChangeMemBalloonReq;
374 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
375
376 rc = VbglGRPerform(&pReq->header);
377 if (RT_FAILURE(rc))
378 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
379 return rc;
380}
381
382
383/**
384 * Deflate the balloon by one chunk - info the host and free the memory object.
385 *
386 * The caller owns the balloon mutex.
387 *
388 * @returns IPRT status code.
389 * @param pMemObj Pointer to the R0 memory object.
390 * The memory object will be freed afterwards.
391 * @param pReq The pre-allocated request for performing the VMMDev call.
392 */
393static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
394{
395 uint32_t iPage;
396 int rc;
397
398 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
399 {
400 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
401 pReq->aPhysPage[iPage] = phys;
402 }
403
404 pReq->fInflate = false;
405 pReq->header.size = cbChangeMemBalloonReq;
406 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
407
408 rc = VbglGRPerform(&pReq->header);
409 if (RT_FAILURE(rc))
410 {
411 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
412 return rc;
413 }
414
415 rc = RTR0MemObjFree(*pMemObj, true);
416 if (RT_FAILURE(rc))
417 {
418 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
419 return rc;
420 }
421
422 *pMemObj = NIL_RTR0MEMOBJ;
423 return VINF_SUCCESS;
424}
425
426
427/**
428 * Inflate/deflate the memory balloon and notify the host.
429 *
430 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
431 * the mutex.
432 *
433 * @returns VBox status code.
434 * @param pDevExt The device extension.
435 * @param pSession The session.
436 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
437 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
438 * (VINF_SUCCESS if set).
439 */
440static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
441{
442 int rc = VINF_SUCCESS;
443
444 if (pDevExt->MemBalloon.fUseKernelAPI)
445 {
446 VMMDevChangeMemBalloon *pReq;
447 uint32_t i;
448
449 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
450 {
451 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
452 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
453 return VERR_INVALID_PARAMETER;
454 }
455
456 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
457 return VINF_SUCCESS; /* nothing to do */
458
459 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
460 && !pDevExt->MemBalloon.paMemObj)
461 {
462 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
463 if (!pDevExt->MemBalloon.paMemObj)
464 {
465 LogRel(("vboxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
466 return VERR_NO_MEMORY;
467 }
468 }
469
470 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
471 if (RT_FAILURE(rc))
472 return rc;
473
474 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
475 {
476 /* inflate */
477 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
478 {
479 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
480 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
481 if (RT_FAILURE(rc))
482 {
483 if (rc == VERR_NOT_SUPPORTED)
484 {
485 /* not supported -- fall back to the R3-allocated memory. */
486 rc = VINF_SUCCESS;
487 pDevExt->MemBalloon.fUseKernelAPI = false;
488 Assert(pDevExt->MemBalloon.cChunks == 0);
489 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
490 }
491 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
492 * cannot allocate more memory => don't try further, just stop here */
493 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
494 break;
495 }
496
497 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
498 if (RT_FAILURE(rc))
499 {
500 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
501 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
502 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
503 break;
504 }
505 pDevExt->MemBalloon.cChunks++;
506 }
507 }
508 else
509 {
510 /* deflate */
511 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
512 {
513 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
514 if (RT_FAILURE(rc))
515 {
516 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
517 break;
518 }
519 pDevExt->MemBalloon.cChunks--;
520 }
521 }
522
523 VbglGRFree(&pReq->header);
524 }
525
526 /*
527 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
528 * the balloon changes via the other API.
529 */
530 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
531
532 return rc;
533}
534
535
536/**
537 * Helper to reinit the VBoxVMM communication after hibernation.
538 *
539 * @returns VBox status code.
540 * @param pDevExt The device extension.
541 * @param enmOSType The OS type.
542 */
543int VBoxGuestReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
544{
545 int rc = VBoxGuestReportGuestInfo(enmOSType);
546 if (RT_SUCCESS(rc))
547 {
548 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
549 if (RT_FAILURE(rc))
550 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
551 }
552 else
553 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
554 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
555 return rc;
556}
557
558
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   u64ChunkAddr    The address of the chunk to add to / remove from the
 *                          balloon.  (Ring-3 address of user-allocated memory.)
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        /* Reject inflation past the host-reported maximum, or when the
           maximum was never queried at all. */
        if (   pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily allocate the per-chunk memory object array, marking every
           slot free (NIL). */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        /* Cannot deflate an empty balloon. */
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * For inflation this also remembers the first free slot; for deflation it
     * locates the chunk that matches the given ring-3 address.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (   fInflate
            && !pMemObj
            && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Lock the user memory into the chosen slot, then hand it to the host. */
        rc = RTR0MemObjLockUser(pMemObj, (RTR3PTR)u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                /* Undo the locking on failure so the slot stays free. */
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        /* vboxGuestBalloonDeflate frees the memory object on success. */
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
675
676
/**
 * Cleanup the memory balloon of a session.
 *
 * Will request the balloon mutex, so it must be valid and the caller must not
 * own it already.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.  Can be NULL at unload.
 */
static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    /* Only clean up when this session owns the balloon, or on driver unload. */
    if (   pDevExt->MemBalloon.pOwner == pSession
        || pSession == NULL /*unload*/)
    {
        if (pDevExt->MemBalloon.paMemObj)
        {
            VMMDevChangeMemBalloon *pReq;
            int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
            if (RT_SUCCESS(rc))
            {
                /* Deflate chunk by chunk from the top; stop (and leak) on
                   the first failure. */
                uint32_t i;
                for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
                {
                    rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                    if (RT_FAILURE(rc))
                    {
                        LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc.  Will leak %u chunks.\n",
                                rc, pDevExt->MemBalloon.cChunks));
                        break;
                    }
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    pDevExt->MemBalloon.cChunks--;
                }
                VbglGRFree(&pReq->header);
            }
            else
                LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc).  Will leak %u chunks.\n",
                        rc, pDevExt->MemBalloon.cChunks));
            RTMemFree(pDevExt->MemBalloon.paMemObj);
            pDevExt->MemBalloon.paMemObj = NULL;
        }

        pDevExt->MemBalloon.pOwner = NULL;
    }
    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
}
724
725
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieve
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present).  Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension.  Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;
    unsigned i;

#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
    /*
     * Create the release log.
     */
    static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
    PRTLOGGER pRelLogger;
    rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
                     "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups, RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
    if (RT_SUCCESS(rc))
        RTLogRelSetDefaultInstance(pRelLogger);
    /** @todo Add native hook for getting logger config parameters and setting
     *        them.  On linux we should use the module parameter stuff... */
#endif

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;  /* HGCM notifications must never be masked. */
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    RTListInit(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    RTListInit(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    RTListInit(&pDevExt->WakeUpList);
#endif
    RTListInit(&pDevExt->WokenUpList);
    RTListInit(&pDevExt->FreeList);
#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
    pDevExt->fVRDPEnabled = false;
#endif
    pDevExt->fLoggingEnabled = false;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;
    for (i = 0; i < RT_ELEMENTS(pDevExt->acMouseFeatureUsage); ++i)
        pDevExt->acMouseFeatureUsage[i] = 0;
    pDevExt->fMouseStatus = 0;
    pDevExt->MouseNotifyCallback.pfnNotify = NULL;
    pDevExt->MouseNotifyCallback.pvUser = NULL;
    pDevExt->cISR = 0;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (   pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            && pVMMDev->u32Size >= 32
            && pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    pDevExt->u32AcquireModeGuestCaps = 0;
    pDevExt->u32SetModeGuestCaps = 0;
    pDevExt->u32GuestCaps = 0;

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Pre-allocate the IRQ-acknowledge request; the ISR cannot allocate. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = VBoxGuestReportGuestInfo(enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt);

#ifdef DEBUG
                        testSetMouseStatus();  /* Other tests? */
#endif

                        rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
                        if (RT_FAILURE(rc))
                            LogRel(("VBoxGuestInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));

                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }

                    LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
            }
            else
                LogRel(("VBoxGuestInitDevExt: VBoxReportGuestInfo failed, rc=%Rrc\n", rc));
            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Failure: tear down everything created above, in reverse order. */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);

#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
    RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
    RTLogDestroy(RTLogSetDefaultInstance(NULL));
#endif
    return rc; /* (failed) */
}
928
929
930/**
931 * Deletes all the items in a wait chain.
932 * @param pList The head of the chain.
933 */
934static void VBoxGuestDeleteWaitList(PRTLISTNODE pList)
935{
936 while (!RTListIsEmpty(pList))
937 {
938 int rc2;
939 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
940 RTListNodeRemove(&pWait->ListNode);
941
942 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
943 pWait->Event = NIL_RTSEMEVENTMULTI;
944 pWait->pSession = NULL;
945 RTMemFree(pWait);
946 }
947}
948
949
950/**
951 * Destroys the VBoxGuest device extension.
952 *
953 * The native code should call this before the driver is loaded,
954 * but don't call this on shutdown.
955 *
956 * @param pDevExt The device extension.
957 */
958void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
959{
960 int rc2;
961 Log(("VBoxGuestDeleteDevExt:\n"));
962 Log(("VBoxGuest: The additions driver is terminating.\n"));
963
964 /*
965 * Clean up the bits that involves the host first.
966 */
967 vboxGuestTermUnfixGuestMappings(pDevExt);
968 VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
969 vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
970 vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);
971
972 /*
973 * Cleanup all the other resources.
974 */
975 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
976 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
977 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
978
979 VBoxGuestDeleteWaitList(&pDevExt->WaitList);
980#ifdef VBOX_WITH_HGCM
981 VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
982#endif
983#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
984 VBoxGuestDeleteWaitList(&pDevExt->WakeUpList);
985#endif
986 VBoxGuestDeleteWaitList(&pDevExt->WokenUpList);
987 VBoxGuestDeleteWaitList(&pDevExt->FreeList);
988
989 VbglTerminate();
990
991 pDevExt->pVMMDevMemory = NULL;
992
993 pDevExt->IOPortBase = 0;
994 pDevExt->pIrqAckEvents = NULL;
995
996#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
997 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
998 RTLogDestroy(RTLogSetDefaultInstance(NULL));
999#endif
1000
1001}
1002
1003
1004/**
1005 * Creates a VBoxGuest user session.
1006 *
1007 * The native code calls this when a ring-3 client opens the device.
1008 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
1009 *
1010 * @returns VBox status code.
1011 * @param pDevExt The device extension.
1012 * @param ppSession Where to store the session on success.
1013 */
1014int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1015{
1016 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1017 if (RT_UNLIKELY(!pSession))
1018 {
1019 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
1020 return VERR_NO_MEMORY;
1021 }
1022
1023 pSession->Process = RTProcSelf();
1024 pSession->R0Process = RTR0ProcHandleSelf();
1025 pSession->pDevExt = pDevExt;
1026
1027 *ppSession = pSession;
1028 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1029 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1030 return VINF_SUCCESS;
1031}
1032
1033
1034/**
1035 * Creates a VBoxGuest kernel session.
1036 *
1037 * The native code calls this when a ring-0 client connects to the device.
1038 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
1039 *
1040 * @returns VBox status code.
1041 * @param pDevExt The device extension.
1042 * @param ppSession Where to store the session on success.
1043 */
1044int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1045{
1046 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1047 if (RT_UNLIKELY(!pSession))
1048 {
1049 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
1050 return VERR_NO_MEMORY;
1051 }
1052
1053 pSession->Process = NIL_RTPROCESS;
1054 pSession->R0Process = NIL_RTR0PROCESS;
1055 pSession->pDevExt = pDevExt;
1056
1057 *ppSession = pSession;
1058 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1059 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1060 return VINF_SUCCESS;
1061}
1062
1063static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
1064
1065/**
1066 * Closes a VBoxGuest session.
1067 *
1068 * @param pDevExt The device extension.
1069 * @param pSession The session to close (and free).
1070 */
1071void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1072{
1073 unsigned i; NOREF(i);
1074 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1075 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1076
1077 VBoxGuestCommonGuestCapsAcquire(pDevExt, pSession, 0, UINT32_MAX, VBOXGUESTCAPSACQUIRE_FLAGS_NONE);
1078
1079 VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
1080
1081#ifdef VBOX_WITH_HGCM
1082 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1083 if (pSession->aHGCMClientIds[i])
1084 {
1085 VBoxGuestHGCMDisconnectInfo Info;
1086 Info.result = 0;
1087 Info.u32ClientID = pSession->aHGCMClientIds[i];
1088 pSession->aHGCMClientIds[i] = 0;
1089 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
1090 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1091 }
1092#endif
1093
1094 pSession->pDevExt = NULL;
1095 pSession->Process = NIL_RTPROCESS;
1096 pSession->R0Process = NIL_RTR0PROCESS;
1097 vboxGuestCloseMemBalloon(pDevExt, pSession);
1098 /* Reset any mouse status flags which the session may have set. */
1099 VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession, 0);
1100 RTMemFree(pSession);
1101}
1102
1103
1104/**
1105 * Allocates a wait-for-event entry.
1106 *
1107 * @returns The wait-for-event entry.
1108 * @param pDevExt The device extension.
1109 * @param pSession The session that's allocating this. Can be NULL.
1110 */
1111static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1112{
1113 /*
1114 * Allocate it one way or the other.
1115 */
1116 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1117 if (pWait)
1118 {
1119 RTSpinlockAcquire(pDevExt->EventSpinlock);
1120
1121 pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1122 if (pWait)
1123 RTListNodeRemove(&pWait->ListNode);
1124
1125 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1126 }
1127 if (!pWait)
1128 {
1129 static unsigned s_cErrors = 0;
1130 int rc;
1131
1132 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1133 if (!pWait)
1134 {
1135 if (s_cErrors++ < 32)
1136 LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
1137 return NULL;
1138 }
1139
1140 rc = RTSemEventMultiCreate(&pWait->Event);
1141 if (RT_FAILURE(rc))
1142 {
1143 if (s_cErrors++ < 32)
1144 LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1145 RTMemFree(pWait);
1146 return NULL;
1147 }
1148
1149 pWait->ListNode.pNext = NULL;
1150 pWait->ListNode.pPrev = NULL;
1151 }
1152
1153 /*
1154 * Zero members just as an precaution.
1155 */
1156 pWait->fReqEvents = 0;
1157 pWait->fResEvents = 0;
1158#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1159 pWait->fPendingWakeUp = false;
1160 pWait->fFreeMe = false;
1161#endif
1162 pWait->pSession = pSession;
1163#ifdef VBOX_WITH_HGCM
1164 pWait->pHGCMReq = NULL;
1165#endif
1166 RTSemEventMultiReset(pWait->Event);
1167 return pWait;
1168}
1169
1170
1171/**
1172 * Frees the wait-for-event entry.
1173 *
1174 * The caller must own the wait spinlock !
1175 * The entry must be in a list!
1176 *
1177 * @param pDevExt The device extension.
1178 * @param pWait The wait-for-event entry to free.
1179 */
1180static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1181{
1182 pWait->fReqEvents = 0;
1183 pWait->fResEvents = 0;
1184#ifdef VBOX_WITH_HGCM
1185 pWait->pHGCMReq = NULL;
1186#endif
1187#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1188 Assert(!pWait->fFreeMe);
1189 if (pWait->fPendingWakeUp)
1190 pWait->fFreeMe = true;
1191 else
1192#endif
1193 {
1194 RTListNodeRemove(&pWait->ListNode);
1195 RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
1196 }
1197}
1198
1199
1200/**
1201 * Frees the wait-for-event entry.
1202 *
1203 * @param pDevExt The device extension.
1204 * @param pWait The wait-for-event entry to free.
1205 */
1206static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1207{
1208 RTSpinlockAcquire(pDevExt->EventSpinlock);
1209 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1210 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1211}
1212
1213
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
/**
 * Processes the wake-up list.
 *
 * All entries in the wake-up list gets signalled and moved to the woken-up
 * list.
 *
 * The spinlock is dropped around each RTSemEventMultiSignal call;
 * fPendingWakeUp marks the entry so a concurrent free only flags it
 * (fFreeMe) instead of recycling it under our feet.
 *
 * @param pDevExt The device extension.
 */
void VBoxGuestWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
{
    if (!RTListIsEmpty(&pDevExt->WakeUpList))
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        for (;;)
        {
            int rc;
            PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
            if (!pWait)
                break;
            pWait->fPendingWakeUp = true;
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

            /* Signal outside the spinlock - semaphore ops may not be legal while holding it. */
            rc = RTSemEventMultiSignal(pWait->Event);
            AssertRC(rc);

            RTSpinlockAcquire(pDevExt->EventSpinlock);
            pWait->fPendingWakeUp = false;
            if (!pWait->fFreeMe)
            {
                RTListNodeRemove(&pWait->ListNode);
                RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
            }
            else
            {
                /* Someone tried to free the entry while we were signalling it - finish the job. */
                pWait->fFreeMe = false;
                VBoxGuestWaitFreeLocked(pDevExt, pWait);
            }
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
    }
}
#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1257
1258
1259/**
1260 * Modifies the guest capabilities.
1261 *
1262 * Should be called during driver init and termination.
1263 *
1264 * @returns VBox status code.
1265 * @param fOr The Or mask (what to enable).
1266 * @param fNot The Not mask (what to disable).
1267 */
1268int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1269{
1270 VMMDevReqGuestCapabilities2 *pReq;
1271 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1272 if (RT_FAILURE(rc))
1273 {
1274 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1275 sizeof(*pReq), sizeof(*pReq), rc));
1276 return rc;
1277 }
1278
1279 pReq->u32OrMask = fOr;
1280 pReq->u32NotMask = fNot;
1281
1282 rc = VbglGRPerform(&pReq->header);
1283 if (RT_FAILURE(rc))
1284 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
1285
1286 VbglGRFree(&pReq->header);
1287 return rc;
1288}
1289
1290
1291/**
1292 * Implements the fast (no input or output) type of IOCtls.
1293 *
1294 * This is currently just a placeholder stub inherited from the support driver code.
1295 *
1296 * @returns VBox status code.
1297 * @param iFunction The IOCtl function number.
1298 * @param pDevExt The device extension.
1299 * @param pSession The session.
1300 */
1301int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1302{
1303 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1304
1305 NOREF(iFunction);
1306 NOREF(pDevExt);
1307 NOREF(pSession);
1308 return VERR_NOT_SUPPORTED;
1309}
1310
1311
1312/**
1313 * Return the VMM device port.
1314 *
1315 * returns IPRT status code.
1316 * @param pDevExt The device extension.
1317 * @param pInfo The request info.
1318 * @param pcbDataReturned (out) contains the number of bytes to return.
1319 */
1320static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1321{
1322 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1323 pInfo->portAddress = pDevExt->IOPortBase;
1324 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1325 if (pcbDataReturned)
1326 *pcbDataReturned = sizeof(*pInfo);
1327 return VINF_SUCCESS;
1328}
1329
1330
#ifndef RT_OS_WINDOWS
/**
 * Set the callback for the kernel mouse handler.
 *
 * returns IPRT status code.
 * @param pDevExt The device extension.
 * @param pNotify The new callback information.
 * @note This function takes the session spinlock to update the callback
 * information, but the interrupt handler will not do this. To make
 * sure that the interrupt handler sees a consistent structure, we
 * set the function pointer to NULL before updating the data and only
 * set it to the correct value once the data is updated. Since the
 * interrupt handler executes atomically this ensures that the data is
 * valid if the function pointer is non-NULL.
 */
int VBoxGuestCommonIOCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, VBoxGuestMouseSetNotifyCallback *pNotify)
{
    Log(("VBoxGuestCommonIOCtl: SET_MOUSE_NOTIFY_CALLBACK\n"));

    /* Publish the new callback under the event spinlock. */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    pDevExt->MouseNotifyCallback = *pNotify;
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

    /* Make sure no active ISR is referencing the old data - hacky but should be
     * effective. */
    while (pDevExt->cISR > 0)
        ASMNopPause();

    return VINF_SUCCESS;
}
#endif
1362
1363
1364/**
1365 * Worker VBoxGuestCommonIOCtl_WaitEvent.
1366 *
1367 * The caller enters the spinlock, we leave it.
1368 *
1369 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1370 */
1371DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestWaitEventInfo *pInfo,
1372 int iEvent, const uint32_t fReqEvents)
1373{
1374 uint32_t fMatches = VBoxGuestCommonGetAndCleanPendingEventsLocked(pDevExt, pSession, fReqEvents);
1375 if (fMatches || pSession->fPendingCancelWaitEvents)
1376 {
1377 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1378
1379 pInfo->u32EventFlagsOut = fMatches;
1380 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1381 if (fReqEvents & ~((uint32_t)1 << iEvent))
1382 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1383 else
1384 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1385 pSession->fPendingCancelWaitEvents = false;
1386 return VINF_SUCCESS;
1387 }
1388 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1389 return VERR_TIMEOUT;
1390}
1391
1392
/**
 * Handles the WAITEVENT I/O control: blocks until one of the requested
 * host events is pending, the timeout expires, or the wait is cancelled.
 *
 * @returns VBox status code (VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED, ...).
 * @param pDevExt The device extension.
 * @param pSession The calling session.
 * @param pInfo In: event mask and timeout; out: matched events and result code.
 * @param pcbDataReturned Where to store the number of returned bytes. Optional.
 * @param fInterruptible Whether the wait may be interrupted by signals.
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    uint32_t fResEvents;
    int iEvent;
    PVBOXGUESTWAIT pWait;
    int rc;

    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1; /* -1 means the mask was zero */
    if (RT_UNLIKELY(iEvent < 0))
    {
        LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    rc = WaitEventCheckCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
        return rc;

    /* Zero timeout: pure poll, no wait entry needed. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
    rc = WaitEventCheckCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fResEvents = pWait->fResEvents; /* snapshot before the entry is recycled */
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

    /*
     * Now deal with the return code.
     */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX) /* UINT32_MAX marks a cancelled wait */
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT (2)\n"));
    }
    else
    {
        if (RT_SUCCESS(rc))
        {
            /* Woken up successfully but no events recorded - should not happen. */
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1515
1516
/**
 * Handles the CANCEL_ALL_WAITEVENTS I/O control: interrupts every WAITEVENT
 * the given session currently has in progress. If none is in progress, the
 * next WAITEVENT for the session is cancelled immediately instead.
 *
 * @returns VINF_SUCCESS.
 * @param pDevExt The device extension.
 * @param pSession The session whose waits should be cancelled.
 */
static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;
    int rc = 0;
    /* Was as least one WAITEVENT in process for this session? If not we
     * set a flag that the next call should be interrupted immediately. This
     * is needed so that a user thread can reliably interrupt another one in a
     * WAITEVENT loop. */
    bool fCancelledOne = false;

    Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));

    /*
     * Walk the event list and wake up anyone with a matching session.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        if (pWait->pSession == pSession)
        {
            fCancelledOne = true;
            pWait->fResEvents = UINT32_MAX; /* UINT32_MAX = cancelled, see WAITEVENT result handling */
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            rc |= RTSemEventMultiSignal(pWait->Event);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
#endif
        }
    }
    if (!fCancelledOne)
        pSession->fPendingCancelWaitEvents = true;
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
    Assert(rc == 0);
    NOREF(rc);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif

    return VINF_SUCCESS;
}
1561
1562/**
1563 * Checks if the VMM request is allowed in the context of the given session.
1564 *
1565 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
1566 * @param pSession The calling session.
1567 * @param enmType The request type.
1568 * @param pReqHdr The request.
1569 */
1570static int VBoxGuestCheckIfVMMReqAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
1571 VMMDevRequestHeader const *pReqHdr)
1572{
1573 /*
1574 * Categorize the request being made.
1575 */
1576 /** @todo This need quite some more work! */
1577 enum
1578 {
1579 kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
1580 } enmRequired;
1581 switch (enmType)
1582 {
1583 /*
1584 * Deny access to anything we don't know or provide specialized I/O controls for.
1585 */
1586#ifdef VBOX_WITH_HGCM
1587 case VMMDevReq_HGCMConnect:
1588 case VMMDevReq_HGCMDisconnect:
1589# ifdef VBOX_WITH_64_BITS_GUESTS
1590 case VMMDevReq_HGCMCall32:
1591 case VMMDevReq_HGCMCall64:
1592# else
1593 case VMMDevReq_HGCMCall:
1594# endif /* VBOX_WITH_64_BITS_GUESTS */
1595 case VMMDevReq_HGCMCancel:
1596 case VMMDevReq_HGCMCancel2:
1597#endif /* VBOX_WITH_HGCM */
1598 default:
1599 enmRequired = kLevel_NoOne;
1600 break;
1601
1602 /*
1603 * There are a few things only this driver can do (and it doesn't use
1604 * the VMMRequst I/O control route anyway, but whatever).
1605 */
1606 case VMMDevReq_ReportGuestInfo:
1607 case VMMDevReq_ReportGuestInfo2:
1608 case VMMDevReq_GetHypervisorInfo:
1609 case VMMDevReq_SetHypervisorInfo:
1610 case VMMDevReq_RegisterPatchMemory:
1611 case VMMDevReq_DeregisterPatchMemory:
1612 case VMMDevReq_GetMemBalloonChangeRequest:
1613 enmRequired = kLevel_OnlyVBoxGuest;
1614 break;
1615
1616 /*
1617 * Trusted users apps only.
1618 */
1619 case VMMDevReq_QueryCredentials:
1620 case VMMDevReq_ReportCredentialsJudgement:
1621 case VMMDevReq_RegisterSharedModule:
1622 case VMMDevReq_UnregisterSharedModule:
1623 case VMMDevReq_WriteCoreDump:
1624 case VMMDevReq_GetCpuHotPlugRequest:
1625 case VMMDevReq_SetCpuHotPlugStatus:
1626 case VMMDevReq_CheckSharedModules:
1627 case VMMDevReq_GetPageSharingStatus:
1628 case VMMDevReq_DebugIsPageShared:
1629 case VMMDevReq_ReportGuestStats:
1630 case VMMDevReq_ReportGuestUserState:
1631 case VMMDevReq_GetStatisticsChangeRequest:
1632 case VMMDevReq_ChangeMemBalloon:
1633 enmRequired = kLevel_TrustedUsers;
1634 break;
1635
1636 /*
1637 * Anyone. But not for CapsAcquire mode
1638 */
1639 case VMMDevReq_SetGuestCapabilities:
1640 {
1641 VMMDevReqGuestCapabilities2 *pCaps = (VMMDevReqGuestCapabilities2*)pReqHdr;
1642 uint32_t fAcquireCaps = 0;
1643 if (!VBoxGuestCommonGuestCapsModeSet(pDevExt, pCaps->u32OrMask, false, &fAcquireCaps))
1644 {
1645 AssertFailed();
1646 LogRel(("calling caps set for acquired caps %d\n", pCaps->u32OrMask));
1647 enmRequired = kLevel_NoOne;
1648 break;
1649 }
1650 /* hack to adjust the notcaps.
1651 * @todo: move to a better place
1652 * user-mode apps are allowed to pass any mask to the notmask,
1653 * the driver cleans up them accordingly */
1654 pCaps->u32NotMask &= ~fAcquireCaps;
1655 /* do not break, make it fall through to the below enmRequired setting */
1656 }
1657 /*
1658 * Anyone.
1659 */
1660 case VMMDevReq_GetMouseStatus:
1661 case VMMDevReq_SetMouseStatus:
1662 case VMMDevReq_SetPointerShape:
1663 case VMMDevReq_GetHostVersion:
1664 case VMMDevReq_Idle:
1665 case VMMDevReq_GetHostTime:
1666 case VMMDevReq_SetPowerStatus:
1667 case VMMDevReq_AcknowledgeEvents:
1668 case VMMDevReq_CtlGuestFilterMask:
1669 case VMMDevReq_ReportGuestStatus:
1670 case VMMDevReq_GetDisplayChangeRequest:
1671 case VMMDevReq_VideoModeSupported:
1672 case VMMDevReq_GetHeightReduction:
1673 case VMMDevReq_GetDisplayChangeRequest2:
1674 case VMMDevReq_VideoModeSupported2:
1675 case VMMDevReq_VideoAccelEnable:
1676 case VMMDevReq_VideoAccelFlush:
1677 case VMMDevReq_VideoSetVisibleRegion:
1678 case VMMDevReq_GetDisplayChangeRequestEx:
1679 case VMMDevReq_GetSeamlessChangeRequest:
1680 case VMMDevReq_GetVRDPChangeRequest:
1681 case VMMDevReq_LogString:
1682 case VMMDevReq_GetSessionId:
1683 enmRequired = kLevel_AllUsers;
1684 break;
1685
1686 /*
1687 * Depends on the request parameters...
1688 */
1689 /** @todo this have to be changed into an I/O control and the facilities
1690 * tracked in the session so they can automatically be failed when the
1691 * session terminates without reporting the new status.
1692 *
1693 * The information presented by IGuest is not reliable without this! */
1694 case VMMDevReq_ReportGuestCapabilities:
1695 switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
1696 {
1697 case VBoxGuestFacilityType_All:
1698 case VBoxGuestFacilityType_VBoxGuestDriver:
1699 enmRequired = kLevel_OnlyVBoxGuest;
1700 break;
1701 case VBoxGuestFacilityType_VBoxService:
1702 enmRequired = kLevel_TrustedUsers;
1703 break;
1704 case VBoxGuestFacilityType_VBoxTrayClient:
1705 case VBoxGuestFacilityType_Seamless:
1706 case VBoxGuestFacilityType_Graphics:
1707 default:
1708 enmRequired = kLevel_AllUsers;
1709 break;
1710 }
1711 break;
1712 }
1713
1714 /*
1715 * Check against the session.
1716 */
1717 switch (enmRequired)
1718 {
1719 default:
1720 case kLevel_NoOne:
1721 break;
1722 case kLevel_OnlyVBoxGuest:
1723 case kLevel_OnlyKernel:
1724 if (pSession->R0Process == NIL_RTR0PROCESS)
1725 return VINF_SUCCESS;
1726 break;
1727 case kLevel_TrustedUsers:
1728 case kLevel_AllUsers:
1729 return VINF_SUCCESS;
1730 }
1731
1732 return VERR_PERMISSION_DENIED;
1733}
1734
/**
 * Handles the VMMREQUEST I/O control: validates the caller-supplied VMMDev
 * request, copies it to the physical heap, performs it and copies the
 * result back.
 *
 * @returns VBox status code.
 * @param pDevExt The device extension.
 * @param pSession The calling session.
 * @param pReqHdr The caller's request buffer (header + payload).
 * @param cbData Size of the caller's buffer.
 * @param pcbDataReturned Where to store the number of returned bytes. Optional.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    int rc;
    VMMDevRequestHeader *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType = pReqHdr->requestType;
    const uint32_t cbReq = pReqHdr->size;
    const uint32_t cbMinSize = vmmdevGetRequestSize(enmType);

    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    if (cbReq < cbMinSize)
    {
        LogRel(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
                cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        LogRel(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
                cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /* Enforce the per-request-type permission policy for this session. */
    rc = VBoxGuestCheckIfVMMReqAllowed(pDevExt, pSession, enmType, pReqHdr);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the result (including any output payload) back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the request itself failed - propagate the request status. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1818
1819
1820static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1821{
1822 VMMDevCtlGuestFilterMask *pReq;
1823 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1824 if (RT_FAILURE(rc))
1825 {
1826 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1827 sizeof(*pReq), sizeof(*pReq), rc));
1828 return rc;
1829 }
1830
1831 pReq->u32OrMask = pInfo->u32OrMask;
1832 pReq->u32NotMask = pInfo->u32NotMask;
1833 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1834 rc = VbglGRPerform(&pReq->header);
1835 if (RT_FAILURE(rc))
1836 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1837
1838 VbglGRFree(&pReq->header);
1839 return rc;
1840}
1841
1842#ifdef VBOX_WITH_HGCM
1843
1844AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1845
/** Worker for VBoxGuestHGCMAsyncWaitCallback*.
 *
 * Waits until the HGCM request header has VBOX_HGCM_REQ_DONE set, or until
 * the timeout/interruption kicks in.
 *
 * @returns VBox status code.
 * @param pHdr The HGCM request header to wait on.
 * @param pDevExt The device extension.
 * @param fInterruptible Whether the wait may be interrupted by signals.
 * @param cMillies Timeout in milliseconds (RT_INDEFINITE_WAIT for none).
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{
    int rc;

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        RTThreadSleep(1); /* back off briefly and retry the allocation */
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
        return VINF_SUCCESS;
    }
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    if (rc == VERR_SEM_DESTROYED)
        /* Driver is unloading - bail out without touching the (gone) wait entry. */
        return rc;

    /*
     * Unlink, free and return.
     */
    if (    RT_FAILURE(rc)
        &&  rc != VERR_TIMEOUT
        &&  (    !fInterruptible
             ||  rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
    return rc;
}
1914
1915
1916/**
1917 * This is a callback for dealing with async waits.
1918 *
1919 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1920 */
1921static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1922{
1923 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1924 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1925 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1926 pDevExt,
1927 false /* fInterruptible */,
1928 u32User /* cMillies */);
1929}
1930
1931
/**
 * This is a callback for dealing with async waits that may be interrupted by
 * the calling thread (e.g. signals); u32User supplies the timeout in
 * milliseconds.
 *
 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
 */
static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
                                                                     void *pvUser, uint32_t u32User)
{
    PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
    Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
    return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
                                                pDevExt,
                                                true /* fInterruptible */,
                                                u32User /* cMillies */ );

}
1948
1949
1950static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1951 VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
1952{
1953 int rc;
1954
1955 /*
1956 * The VbglHGCMConnect call will invoke the callback if the HGCM
1957 * call is performed in an ASYNC fashion. The function is not able
1958 * to deal with cancelled requests.
1959 */
1960 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
1961 pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
1962 ? pInfo->Loc.u.host.achName : "<not local host>"));
1963
1964 rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1965 if (RT_SUCCESS(rc))
1966 {
1967 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
1968 pInfo->u32ClientID, pInfo->result, rc));
1969 if (RT_SUCCESS(pInfo->result))
1970 {
1971 /*
1972 * Append the client id to the client id table.
1973 * If the table has somehow become filled up, we'll disconnect the session.
1974 */
1975 unsigned i;
1976 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1977 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1978 if (!pSession->aHGCMClientIds[i])
1979 {
1980 pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
1981 break;
1982 }
1983 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1984 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1985 {
1986 static unsigned s_cErrors = 0;
1987 VBoxGuestHGCMDisconnectInfo Info;
1988
1989 if (s_cErrors++ < 32)
1990 LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
1991
1992 Info.result = 0;
1993 Info.u32ClientID = pInfo->u32ClientID;
1994 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1995 return VERR_TOO_MANY_OPEN_FILES;
1996 }
1997 }
1998 if (pcbDataReturned)
1999 *pcbDataReturned = sizeof(*pInfo);
2000 }
2001 return rc;
2002}
2003
2004
2005static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
2006 size_t *pcbDataReturned)
2007{
2008 /*
2009 * Validate the client id and invalidate its entry while we're in the call.
2010 */
2011 int rc;
2012 const uint32_t u32ClientId = pInfo->u32ClientID;
2013 unsigned i;
2014 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2015 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2016 if (pSession->aHGCMClientIds[i] == u32ClientId)
2017 {
2018 pSession->aHGCMClientIds[i] = UINT32_MAX;
2019 break;
2020 }
2021 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2022 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
2023 {
2024 static unsigned s_cErrors = 0;
2025 if (s_cErrors++ > 32)
2026 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
2027 return VERR_INVALID_HANDLE;
2028 }
2029
2030 /*
2031 * The VbglHGCMConnect call will invoke the callback if the HGCM
2032 * call is performed in an ASYNC fashion. The function is not able
2033 * to deal with cancelled requests.
2034 */
2035 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
2036 rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
2037 if (RT_SUCCESS(rc))
2038 {
2039 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
2040 if (pcbDataReturned)
2041 *pcbDataReturned = sizeof(*pInfo);
2042 }
2043
2044 /* Update the client id array according to the result. */
2045 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2046 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
2047 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
2048 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2049
2050 return rc;
2051}
2052
2053
2054static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
2055 PVBOXGUESTSESSION pSession,
2056 VBoxGuestHGCMCallInfo *pInfo,
2057 uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
2058 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
2059{
2060 const uint32_t u32ClientId = pInfo->u32ClientID;
2061 uint32_t fFlags;
2062 size_t cbActual;
2063 unsigned i;
2064 int rc;
2065
2066 /*
2067 * Some more validations.
2068 */
2069 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
2070 {
2071 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
2072 return VERR_INVALID_PARAMETER;
2073 }
2074
2075 cbActual = cbExtra + sizeof(*pInfo);
2076#ifdef RT_ARCH_AMD64
2077 if (f32bit)
2078 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
2079 else
2080#endif
2081 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
2082 if (cbData < cbActual)
2083 {
2084 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
2085 cbData, cbData, cbActual, cbActual));
2086 return VERR_INVALID_PARAMETER;
2087 }
2088
2089 /*
2090 * Validate the client id.
2091 */
2092 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2093 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2094 if (pSession->aHGCMClientIds[i] == u32ClientId)
2095 break;
2096 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2097 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
2098 {
2099 static unsigned s_cErrors = 0;
2100 if (s_cErrors++ > 32)
2101 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
2102 return VERR_INVALID_HANDLE;
2103 }
2104
2105 /*
2106 * The VbglHGCMCall call will invoke the callback if the HGCM
2107 * call is performed in an ASYNC fashion. This function can
2108 * deal with cancelled requests, so we let user more requests
2109 * be interruptible (should add a flag for this later I guess).
2110 */
2111 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
2112 fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
2113#ifdef RT_ARCH_AMD64
2114 if (f32bit)
2115 {
2116 if (fInterruptible)
2117 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2118 else
2119 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
2120 }
2121 else
2122#endif
2123 {
2124 if (fInterruptible)
2125 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2126 else
2127 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
2128 }
2129 if (RT_SUCCESS(rc))
2130 {
2131 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
2132 if (pcbDataReturned)
2133 *pcbDataReturned = cbActual;
2134 }
2135 else
2136 {
2137 if ( rc != VERR_INTERRUPTED
2138 && rc != VERR_TIMEOUT)
2139 {
2140 static unsigned s_cErrors = 0;
2141 if (s_cErrors++ < 32)
2142 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2143 }
2144 else
2145 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2146 }
2147 return rc;
2148}
2149
2150
2151#endif /* VBOX_WITH_HGCM */
2152
/**
 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
 *
 * Ask the host for the size of the balloon and try to set it accordingly.  If
 * this approach fails because it's not supported, return with fHandleInR3 set
 * and let the user land supply memory we can lock via the other ioctl.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   pInfo           The output buffer.
 * @param   pcbDataReturned Where to store the amount of returned data.  Can
 *                          be NULL.
 */
static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                                   VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
{
    VMMDevGetMemBalloonChangeRequest *pReq;
    int rc;

    Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
    /* All balloon state is serialized by this mutex. */
    rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    AssertRCReturn(rc, rc);

    /*
     * The first user trying to query/change the balloon becomes the
     * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
     */
    if (   pDevExt->MemBalloon.pOwner != pSession
        && pDevExt->MemBalloon.pOwner == NULL)
        pDevExt->MemBalloon.pOwner = pSession;

    if (pDevExt->MemBalloon.pOwner == pSession)
    {
        /* Ask the host for the desired balloon size. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
        if (RT_SUCCESS(rc))
        {
            /*
             * This is a response to that event.  Setting this bit means that
             * we request the value from the host and change the guest memory
             * balloon according to this value.
             */
            pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
            rc = VbglGRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                /* cMaxChunks is fixed for the lifetime of the VM; only set it once. */
                Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
                pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;

                pInfo->cBalloonChunks = pReq->cBalloonChunks;
                pInfo->fHandleInR3    = false;

                /* Try to (de)inflate from the kernel; the helper sets fHandleInR3
                   when the platform requires user-land supplied memory instead. */
                rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
                /* Ignore various out of memory failures. */
                if (   rc == VERR_NO_MEMORY
                    || rc == VERR_NO_PHYS_MEMORY
                    || rc == VERR_NO_CONT_MEMORY)
                    rc = VINF_SUCCESS;

                if (pcbDataReturned)
                    *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
            }
            else
                LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
            VbglGRFree(&pReq->header);
        }
    }
    else
        /* Another session already owns the balloon. */
        rc = VERR_PERMISSION_DENIED;

    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
    Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
    return rc;
}
2228
2229
2230/**
2231 * Handle a request for changing the memory balloon.
2232 *
2233 * @returns VBox status code.
2234 *
2235 * @param pDevExt The device extention.
2236 * @param pSession The session.
2237 * @param pInfo The change request structure (input).
2238 * @param pcbDataReturned Where to store the amount of returned data. Can
2239 * be NULL.
2240 */
2241static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2242 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
2243{
2244 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2245 AssertRCReturn(rc, rc);
2246
2247 if (!pDevExt->MemBalloon.fUseKernelAPI)
2248 {
2249 /*
2250 * The first user trying to query/change the balloon becomes the
2251 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2252 */
2253 if ( pDevExt->MemBalloon.pOwner != pSession
2254 && pDevExt->MemBalloon.pOwner == NULL)
2255 pDevExt->MemBalloon.pOwner = pSession;
2256
2257 if (pDevExt->MemBalloon.pOwner == pSession)
2258 {
2259 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, !!pInfo->fInflate);
2260 if (pcbDataReturned)
2261 *pcbDataReturned = 0;
2262 }
2263 else
2264 rc = VERR_PERMISSION_DENIED;
2265 }
2266 else
2267 rc = VERR_PERMISSION_DENIED;
2268
2269 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2270 return rc;
2271}
2272
2273
2274/**
2275 * Handle a request for writing a core dump of the guest on the host.
2276 *
2277 * @returns VBox status code.
2278 *
2279 * @param pDevExt The device extension.
2280 * @param pInfo The output buffer.
2281 */
2282static int VBoxGuestCommonIOCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
2283{
2284 VMMDevReqWriteCoreDump *pReq = NULL;
2285 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2286 if (RT_FAILURE(rc))
2287 {
2288 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2289 sizeof(*pReq), sizeof(*pReq), rc));
2290 return rc;
2291 }
2292
2293 pReq->fFlags = pInfo->fFlags;
2294 rc = VbglGRPerform(&pReq->header);
2295 if (RT_FAILURE(rc))
2296 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: VbglGRPerform failed, rc=%Rrc!\n", rc));
2297
2298 VbglGRFree(&pReq->header);
2299 return rc;
2300}
2301
2302
2303#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2304/**
2305 * Enables the VRDP session and saves its session ID.
2306 *
2307 * @returns VBox status code.
2308 *
2309 * @param pDevExt The device extention.
2310 * @param pSession The session.
2311 */
2312static int VBoxGuestCommonIOCtl_EnableVRDPSession(VBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2313{
2314 /* Nothing to do here right now, since this only is supported on Windows at the moment. */
2315 return VERR_NOT_IMPLEMENTED;
2316}
2317
2318
2319/**
2320 * Disables the VRDP session.
2321 *
2322 * @returns VBox status code.
2323 *
2324 * @param pDevExt The device extention.
2325 * @param pSession The session.
2326 */
2327static int VBoxGuestCommonIOCtl_DisableVRDPSession(VBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2328{
2329 /* Nothing to do here right now, since this only is supported on Windows at the moment. */
2330 return VERR_NOT_IMPLEMENTED;
2331}
2332#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2333
2334#ifdef DEBUG
/** Unit test mode: when set, vboxguestcommonSetMouseStatus records the request
 * instead of really sending it to the host. */
static bool g_test_fSetMouseStatus = false;
/** When unit testing SetMouseStatus, the fake RC for the GR to return. */
static int g_test_SetMouseStatusGRRC;
/** When unit testing SetMouseStatus this will be set to the status passed to
 * the GR. */
static uint32_t g_test_statusSetMouseStatus;
2342#endif
2343
2344static int vboxguestcommonSetMouseStatus(uint32_t fFeatures)
2345{
2346 VMMDevReqMouseStatus *pReq;
2347 int rc;
2348
2349 LogRelFlowFunc(("fFeatures=%u\n", (int) fFeatures));
2350 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
2351 if (RT_SUCCESS(rc))
2352 {
2353 pReq->mouseFeatures = fFeatures;
2354 pReq->pointerXPos = 0;
2355 pReq->pointerYPos = 0;
2356#ifdef DEBUG
2357 if (g_test_fSetMouseStatus)
2358 {
2359 g_test_statusSetMouseStatus = pReq->mouseFeatures;
2360 rc = g_test_SetMouseStatusGRRC;
2361 }
2362 else
2363#endif
2364 rc = VbglGRPerform(&pReq->header);
2365 VbglGRFree(&pReq->header);
2366 }
2367 LogRelFlowFunc(("rc=%Rrc\n", rc));
2368 return rc;
2369}
2370
2371
/**
 * Sets the mouse status features for this session and updates them
 * globally.  We aim to ensure that if several threads call this in
 * parallel the most recent status will always end up being set.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt     The device extention.
 * @param   pSession    The session.
 * @param   fFeatures   New bitmap of enabled features.
 */
static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
{
    uint32_t fNewDevExtStatus = 0;
    unsigned i;
    int rc;
    /* Exit early if nothing has changed - hack to work around the
     * Windows Additions not using the common code. */
    bool fNoAction;

    RTSpinlockAcquire(pDevExt->SessionSpinlock);

    /* For all the bits which the guest is allowed to set, check whether the
     * requested value is different to the current one and adjust the global
     * usage counter and if appropriate the global state if so. */
    for (i = 0; i < sizeof(fFeatures) * 8; i++)
    {
        if (RT_BIT_32(i) & VMMDEV_MOUSE_GUEST_MASK)
        {
            /* Per-bit reference count across sessions: bump when this session
               turns a bit on, drop when it turns one off. */
            if (   (RT_BIT_32(i) & fFeatures)
                && !(RT_BIT_32(i) & pSession->fMouseStatus))
                pDevExt->acMouseFeatureUsage[i]++;
            else if (   !(RT_BIT_32(i) & fFeatures)
                     && (RT_BIT_32(i) & pSession->fMouseStatus))
                pDevExt->acMouseFeatureUsage[i]--;
        }
        /* A bit is globally set while at least one session holds it. */
        if (pDevExt->acMouseFeatureUsage[i] > 0)
            fNewDevExtStatus |= RT_BIT_32(i);
    }

    pSession->fMouseStatus = fFeatures & VMMDEV_MOUSE_GUEST_MASK;
    fNoAction = (pDevExt->fMouseStatus == fNewDevExtStatus);
    pDevExt->fMouseStatus = fNewDevExtStatus;

    RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
    if (fNoAction)
        return VINF_SUCCESS;

    /* Report to the host outside the lock; retry while another thread changed
     * the global status underneath us, so the newest value always wins. */
    do
    {
        fNewDevExtStatus = pDevExt->fMouseStatus;
        rc = vboxguestcommonSetMouseStatus(fNewDevExtStatus);
    } while (   RT_SUCCESS(rc)
             && fNewDevExtStatus != pDevExt->fMouseStatus);

    return rc;
}
2429
2430
2431#ifdef DEBUG
2432/** Unit test for the SET_MOUSE_STATUS IoCtl. Since this is closely tied to
2433 * the code in question it probably makes most sense to keep it next to the
2434 * code. */
2435static void testSetMouseStatus(void)
2436{
2437 uint32_t u32Data;
2438 int rc;
2439 RTSPINLOCK Spinlock;
2440
2441 g_test_fSetMouseStatus = true;
2442 rc = RTSpinlockCreate(&Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestTest");
2443 AssertRCReturnVoid(rc);
2444 {
2445 VBOXGUESTDEVEXT DevExt = { 0 };
2446 VBOXGUESTSESSION Session = { 0 };
2447
2448 g_test_statusSetMouseStatus = ~0;
2449 g_test_SetMouseStatusGRRC = VINF_SUCCESS;
2450 DevExt.SessionSpinlock = Spinlock;
2451 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2452 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2453 &Session, &u32Data, sizeof(u32Data), NULL);
2454 AssertRCSuccess(rc);
2455 AssertMsg( g_test_statusSetMouseStatus
2456 == VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE,
2457 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2458 DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] = 1;
2459 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2460 &Session, &u32Data, sizeof(u32Data), NULL);
2461 AssertRCSuccess(rc);
2462 AssertMsg( g_test_statusSetMouseStatus
2463 == ( VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE
2464 | VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR),
2465 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2466 u32Data = VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE; /* Can't change this */
2467 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2468 &Session, &u32Data, sizeof(u32Data), NULL);
2469 AssertRCSuccess(rc);
2470 AssertMsg( g_test_statusSetMouseStatus
2471 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2472 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2473 u32Data = VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR;
2474 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2475 &Session, &u32Data, sizeof(u32Data), NULL);
2476 AssertRCSuccess(rc);
2477 AssertMsg( g_test_statusSetMouseStatus
2478 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2479 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2480 u32Data = 0;
2481 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2482 &Session, &u32Data, sizeof(u32Data), NULL);
2483 AssertRCSuccess(rc);
2484 AssertMsg( g_test_statusSetMouseStatus
2485 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2486 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2487 AssertMsg(DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] == 1,
2488 ("Actual value: %d\n", DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR)]));
2489 g_test_SetMouseStatusGRRC = VERR_UNRESOLVED_ERROR;
2490 /* This should succeed as the host request should not be made
2491 * since nothing has changed. */
2492 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2493 &Session, &u32Data, sizeof(u32Data), NULL);
2494 AssertRCSuccess(rc);
2495 /* This should fail with VERR_UNRESOLVED_ERROR as set above. */
2496 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2497 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2498 &Session, &u32Data, sizeof(u32Data), NULL);
2499 AssertMsg(rc == VERR_UNRESOLVED_ERROR, ("rc == %Rrc\n", rc));
2500 /* Untested paths: out of memory; race setting status to host */
2501 }
2502 RTSpinlockDestroy(Spinlock);
2503 g_test_fSetMouseStatus = false;
2504}
2505#endif
2506
2507
2508/**
2509 * Guest backdoor logging.
2510 *
2511 * @returns VBox status code.
2512 *
2513 * @param pDevExt The device extension.
2514 * @param pch The log message (need not be NULL terminated).
2515 * @param cbData Size of the buffer.
2516 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2517 */
2518static int VBoxGuestCommonIOCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, size_t *pcbDataReturned)
2519{
2520 NOREF(pch);
2521 NOREF(cbData);
2522 if (pDevExt->fLoggingEnabled)
2523 RTLogBackdoorPrintf("%.*s", cbData, pch);
2524 else
2525 Log(("%.*s", cbData, pch));
2526 if (pcbDataReturned)
2527 *pcbDataReturned = 0;
2528 return VINF_SUCCESS;
2529}
2530
2531static bool VBoxGuestCommonGuestCapsValidateValues(uint32_t fCaps)
2532{
2533 if (fCaps & (~(VMMDEV_GUEST_SUPPORTS_SEAMLESS | VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING | VMMDEV_GUEST_SUPPORTS_GRAPHICS)))
2534 return false;
2535
2536 return true;
2537}
2538
/**
 * Dispatches pending VMMDev events (plus any caller-supplied fake events) to
 * waiters whose request mask matches, removing each satisfied waiter from the
 * wait list and waking it (directly or via the deferred wake-up list).
 *
 * @param   pDevExt         The device extension.
 * @param   pSession        The session generating @a fGenFakeEvents (only that
 *                          session may consume them - see the Assert below).
 * @param   fGenFakeEvents  Additional event bits to inject for this pass.
 */
static void VBoxGuestCommonCheckEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fGenFakeEvents)
{
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    uint32_t fEvents = fGenFakeEvents | pDevExt->f32PendingEvents;
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;

    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        /* Only hand a waiter events its session is currently allowed to see. */
        uint32_t fHandledEvents = VBoxGuestCommonGetHandledEventsLocked(pDevExt, pWait->pSession);
        if (    (pWait->fReqEvents & fEvents & fHandledEvents)
            && !pWait->fResEvents)
        {
            pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
            /* Fake events must only be consumed by the session that generated them. */
            Assert(!(fGenFakeEvents & pWait->fResEvents) || pSession == pWait->pSession);
            /* Each event bit is delivered to at most one waiter. */
            fEvents &= ~pWait->fResEvents;
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
            int rc = RTSemEventMultiSignal(pWait->Event);
            AssertRC(rc);
#endif
            if (!fEvents)
                break;
        }
    }
    /* Whatever was not consumed stays pending. */
    ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);

    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    /* Perform the actual wake-ups outside the spinlock. */
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif
}
2575
2576static int VBoxGuestCommonGuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask, uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags)
2577{
2578 uint32_t fSetCaps = 0;
2579
2580 if (!VBoxGuestCommonGuestCapsValidateValues(fOrMask))
2581 {
2582 LogRel(("VBoxGuestCommonGuestCapsAcquire: pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- invalid fOrMask\n",
2583 pSession, fOrMask, fNotMask, enmFlags));
2584 return VERR_INVALID_PARAMETER;
2585 }
2586
2587 if ( enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE
2588 && enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_NONE)
2589 {
2590 LogRel(("VBoxGuestCommonGuestCapsAcquire: pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- invalid enmFlags %d\n",
2591 pSession, fOrMask, fNotMask, enmFlags));
2592 return VERR_INVALID_PARAMETER;
2593 }
2594
2595 if (!VBoxGuestCommonGuestCapsModeSet(pDevExt, fOrMask, true, &fSetCaps))
2596 {
2597 LogRel(("VBoxGuestCommonGuestCapsAcquire: pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- calling caps acquire for set caps\n",
2598 pSession, fOrMask, fNotMask, enmFlags));
2599 return VERR_INVALID_STATE;
2600 }
2601
2602 if (enmFlags & VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE)
2603 {
2604 Log(("VBoxGuestCommonGuestCapsAcquire: pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- configured acquire caps: 0x%x\n",
2605 pSession, fOrMask, fNotMask, enmFlags));
2606 return VINF_SUCCESS;
2607 }
2608
2609 /* the fNotMask no need to have all values valid,
2610 * invalid ones will simply be ignored */
2611 uint32_t fCurrentOwnedCaps;
2612 uint32_t fSessionNotCaps;
2613 uint32_t fSessionOrCaps;
2614 uint32_t fOtherConflictingCaps;
2615
2616 fNotMask &= ~fOrMask;
2617
2618 RTSpinlockAcquire(pDevExt->EventSpinlock);
2619
2620 fCurrentOwnedCaps = pSession->u32AquiredGuestCaps;
2621 fSessionNotCaps = fCurrentOwnedCaps & fNotMask;
2622 fSessionOrCaps = fOrMask & ~fCurrentOwnedCaps;
2623 fOtherConflictingCaps = pDevExt->u32GuestCaps & ~fCurrentOwnedCaps;
2624 fOtherConflictingCaps &= fSessionOrCaps;
2625
2626 if (!fOtherConflictingCaps)
2627 {
2628 if (fSessionOrCaps)
2629 {
2630 pSession->u32AquiredGuestCaps |= fSessionOrCaps;
2631 pDevExt->u32GuestCaps |= fSessionOrCaps;
2632 }
2633
2634 if (fSessionNotCaps)
2635 {
2636 pSession->u32AquiredGuestCaps &= ~fSessionNotCaps;
2637 pDevExt->u32GuestCaps &= ~fSessionNotCaps;
2638 }
2639 }
2640
2641 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
2642
2643 if (fOtherConflictingCaps)
2644 {
2645 Log(("VBoxGuest: Caps 0x%x were busy\n", fOtherConflictingCaps));
2646 return VERR_RESOURCE_BUSY;
2647 }
2648
2649 /* now do host notification outside the lock */
2650 if (!fSessionOrCaps && !fSessionNotCaps)
2651 {
2652 /* no changes, return */
2653 return VINF_SUCCESS;
2654 }
2655
2656 int rc = VBoxGuestSetGuestCapabilities(fSessionOrCaps, fSessionNotCaps);
2657 if (RT_FAILURE(rc))
2658 {
2659 LogRel(("VBoxGuestCommonGuestCapsAcquire: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
2660
2661 /* Failure branch
2662 * this is generally bad since e.g. failure to release the caps may result in other sessions not being able to use it
2663 * so we are not trying to restore the caps back to their values before the VBoxGuestCommonGuestCapsAcquire call,
2664 * but just pretend everithing is OK.
2665 * @todo: better failure handling mechanism? */
2666 }
2667
2668 /* success! */
2669 uint32_t fGenFakeEvents = 0;
2670
2671 if (fSessionOrCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
2672 {
2673 /* generate the seamless change event so that the r3 app could synch with the seamless state
2674 * although this introduces a false alarming of r3 client, it still solve the problem of
2675 * client state inconsistency in multiuser environment */
2676 fGenFakeEvents |= VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
2677 }
2678
2679 /* since the acquire filter mask has changed, we need to process events in any way to ensure they go from pending events field
2680 * to the proper (un-filtered) entries */
2681 VBoxGuestCommonCheckEvents(pDevExt, pSession, fGenFakeEvents);
2682
2683 return VINF_SUCCESS;
2684}
2685
2686static int VBoxGuestCommonIOCTL_GuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestCapsAquire *pAcquire)
2687{
2688 int rc = VBoxGuestCommonGuestCapsAcquire(pDevExt, pSession, pAcquire->u32OrMask, pAcquire->u32NotMask, pAcquire->enmFlags);
2689 if (RT_FAILURE(rc))
2690 LogRel(("VBoxGuestCommonGuestCapsAcquire: failed rc=%Rrc\n", rc));
2691 pAcquire->rc = rc;
2692 return VINF_SUCCESS;
2693}
2694
2695
2696/**
2697 * Common IOCtl for user to kernel and kernel to kernel communication.
2698 *
2699 * This function only does the basic validation and then invokes
2700 * worker functions that takes care of each specific function.
2701 *
2702 * @returns VBox status code.
2703 *
2704 * @param iFunction The requested function.
2705 * @param pDevExt The device extension.
2706 * @param pSession The client session.
2707 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2708 * @param cbData The max size of the data buffer.
2709 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2710 */
2711int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2712 void *pvData, size_t cbData, size_t *pcbDataReturned)
2713{
2714 int rc;
2715 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2716 iFunction, pDevExt, pSession, pvData, cbData));
2717
2718 /*
2719 * Make sure the returned data size is set to zero.
2720 */
2721 if (pcbDataReturned)
2722 *pcbDataReturned = 0;
2723
2724 /*
2725 * Define some helper macros to simplify validation.
2726 */
2727#define CHECKRET_RING0(mnemonic) \
2728 do { \
2729 if (pSession->R0Process != NIL_RTR0PROCESS) \
2730 { \
2731 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2732 pSession->Process, (uintptr_t)pSession->R0Process)); \
2733 return VERR_PERMISSION_DENIED; \
2734 } \
2735 } while (0)
2736#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2737 do { \
2738 if (cbData < (cbMin)) \
2739 { \
2740 LogFunc((mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2741 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2742 return VERR_BUFFER_OVERFLOW; \
2743 } \
2744 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2745 { \
2746 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2747 return VERR_INVALID_POINTER; \
2748 } \
2749 } while (0)
2750#define CHECKRET_SIZE(mnemonic, cb) \
2751 do { \
2752 if (cbData != (cb)) \
2753 { \
2754 LogFunc((mnemonic ": cbData=%#zx (%zu) expected is %#zx (%zu)\n", \
2755 cbData, cbData, (size_t)(cb), (size_t)(cb))); \
2756 return VERR_BUFFER_OVERFLOW; \
2757 } \
2758 if ((cb) != 0 && !VALID_PTR(pvData)) \
2759 { \
2760 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2761 return VERR_INVALID_POINTER; \
2762 } \
2763 } while (0)
2764
2765
2766 /*
2767 * Deal with variably sized requests first.
2768 */
2769 rc = VINF_SUCCESS;
2770 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2771 {
2772 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2773 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2774 }
2775#ifdef VBOX_WITH_HGCM
2776 /*
2777 * These ones are a bit tricky.
2778 */
2779 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2780 {
2781 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2782 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2783 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2784 fInterruptible, false /*f32bit*/, false /* fUserData */,
2785 0, cbData, pcbDataReturned);
2786 }
2787 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2788 {
2789 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2790 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2791 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2792 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2793 false /*f32bit*/, false /* fUserData */,
2794 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2795 }
2796 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_USERDATA(0)))
2797 {
2798 bool fInterruptible = true;
2799 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2800 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2801 fInterruptible, false /*f32bit*/, true /* fUserData */,
2802 0, cbData, pcbDataReturned);
2803 }
2804# ifdef RT_ARCH_AMD64
2805 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2806 {
2807 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2808 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2809 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2810 fInterruptible, true /*f32bit*/, false /* fUserData */,
2811 0, cbData, pcbDataReturned);
2812 }
2813 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2814 {
2815 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2816 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2817 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2818 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2819 true /*f32bit*/, false /* fUserData */,
2820 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2821 }
2822# endif
2823#endif /* VBOX_WITH_HGCM */
2824 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2825 {
2826 CHECKRET_MIN_SIZE("LOG", 1);
2827 rc = VBoxGuestCommonIOCtl_Log(pDevExt, (char *)pvData, cbData, pcbDataReturned);
2828 }
2829 else
2830 {
2831 switch (iFunction)
2832 {
2833 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2834 CHECKRET_RING0("GETVMMDEVPORT");
2835 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2836 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2837 break;
2838
2839#ifndef RT_OS_WINDOWS /* Windows has its own implementation of this. */
2840 case VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
2841 CHECKRET_RING0("SET_MOUSE_NOTIFY_CALLBACK");
2842 CHECKRET_SIZE("SET_MOUSE_NOTIFY_CALLBACK", sizeof(VBoxGuestMouseSetNotifyCallback));
2843 rc = VBoxGuestCommonIOCtl_SetMouseNotifyCallback(pDevExt, (VBoxGuestMouseSetNotifyCallback *)pvData);
2844 break;
2845#endif
2846
2847 case VBOXGUEST_IOCTL_WAITEVENT:
2848 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2849 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2850 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2851 break;
2852
2853 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2854 if (cbData != 0)
2855 rc = VERR_INVALID_PARAMETER;
2856 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2857 break;
2858
2859 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2860 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2861 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2862 break;
2863
2864#ifdef VBOX_WITH_HGCM
2865 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2866# ifdef RT_ARCH_AMD64
2867 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2868# endif
2869 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2870 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2871 break;
2872
2873 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2874# ifdef RT_ARCH_AMD64
2875 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2876# endif
2877 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2878 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2879 break;
2880#endif /* VBOX_WITH_HGCM */
2881
2882 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2883 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2884 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2885 break;
2886
2887 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2888 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2889 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2890 break;
2891
2892 case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
2893 CHECKRET_MIN_SIZE("WRITE_CORE_DUMP", sizeof(VBoxGuestWriteCoreDump));
2894 rc = VBoxGuestCommonIOCtl_WriteCoreDump(pDevExt, (VBoxGuestWriteCoreDump *)pvData);
2895 break;
2896
2897#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2898 case VBOXGUEST_IOCTL_ENABLE_VRDP_SESSION:
2899 rc = VBoxGuestCommonIOCtl_EnableVRDPSession(pDevExt, pSession);
2900 break;
2901
2902 case VBOXGUEST_IOCTL_DISABLE_VRDP_SESSION:
2903 rc = VBoxGuestCommonIOCtl_DisableVRDPSession(pDevExt, pSession);
2904 break;
2905#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2906 case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
2907 CHECKRET_SIZE("SET_MOUSE_STATUS", sizeof(uint32_t));
2908 rc = VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession,
2909 *(uint32_t *)pvData);
2910 break;
2911
2912#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
2913 case VBOXGUEST_IOCTL_DPC_LATENCY_CHECKER:
2914 CHECKRET_SIZE("DPC_LATENCY_CHECKER", 0);
2915 rc = VbgdNtIOCtl_DpcLatencyChecker();
2916 break;
2917#endif
2918
2919 case VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE:
2920 CHECKRET_SIZE("GUEST_CAPS_ACQUIRE", sizeof(VBoxGuestCapsAquire));
2921 rc = VBoxGuestCommonIOCTL_GuestCapsAcquire(pDevExt, pSession, (VBoxGuestCapsAquire*)pvData);
2922 *pcbDataReturned = sizeof(VBoxGuestCapsAquire);
2923 break;
2924
2925 default:
2926 {
2927 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x stripped size=%#x\n",
2928 iFunction, VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2929 rc = VERR_NOT_SUPPORTED;
2930 break;
2931 }
2932 }
2933 }
2934
2935 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2936 return rc;
2937}
2938
2939
2940
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up thread waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 *
 * @remarks Runs in interrupt context.  Only the event spinlock is taken here;
 *          on Windows the waiter wake-ups cannot be done at this IRQL and are
 *          deferred to a DPC (see the VBOXGUEST_USE_DEFERRED_WAKE_UP section
 *          near the end).
 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
#ifndef RT_OS_WINDOWS
    /* Snapshot of the mouse notify callback; it is copied under the spinlock
       and invoked after the lock is dropped.  (Windows has its own path.) */
    VBoxGuestMouseSetNotifyCallback MouseNotifyCallback = { NULL, NULL };
#endif
    bool fMousePositionChanged = false;
    /* Pre-allocated IRQ acknowledge request shared with the host. */
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    int rc = 0;                         /* accumulates semaphore signal status; asserted 0 at the end */
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock, increase the ISR count and check if it's our IRQ or
     * not.  The VMMDev memory flag tells us whether the device has pending
     * events for us.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    ASMAtomicIncU32(&pDevExt->cISR);
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglGRPerform here as it may take other spinlocks.
         * Instead the physical address of the pre-allocated request is written
         * straight to the VMMDev request port; the host fills in
         * pReq->header.rc and pReq->events before the OUT returns.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events = 0;
        ASMCompilerBarrier();           /* make sure the request is initialized before posting it */
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier(); /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;
            PVBOXGUESTWAIT pWait;
            PVBOXGUESTWAIT pSafe;

            Log3(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             * Remember it for the post-spinlock notification code below and
             * mask it out of the waiter evaluation.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
#ifndef RT_OS_WINDOWS
                MouseNotifyCallback = pDevExt->MouseNotifyCallback;
#endif
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             * Every waiter whose request the host has marked done is moved to
             * the wake-up (or woken-up) list.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
                {
                    if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        RTListNodeRemove(&pWait->ListNode);
# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                        RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
# else
                        RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                        rc |= RTSemEventMultiSignal(pWait->Event);
# endif
                    }
                }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.  Fold in previously undelivered
             * events, hand each waiter the subset it asked for (and that its
             * session is allowed to handle), and stash whatever nobody
             * consumed back into f32PendingEvents.
             */
            fEvents |= pDevExt->f32PendingEvents;
            RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
            {
                uint32_t fHandledEvents = VBoxGuestCommonGetHandledEventsLocked(pDevExt, pWait->pSession);
                if (   (pWait->fReqEvents & fEvents & fHandledEvents)
                    && !pWait->fResEvents)
                {
                    pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
                    fEvents &= ~pWait->fResEvents;
                    RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                    RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
                    RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                    rc |= RTSemEventMultiSignal(pWait->Event);
#endif
                    if (!fEvents)
                        break;      /* all events delivered, stop scanning */
                }
            }
            ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
        }
        else /* something is seriously wrong... */
            Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        LogFlow(("VBoxGuestCommonISR: not ours\n"));

    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_WINDOWS)
    /*
     * Do wake-ups.
     * Note. On Windows this isn't possible at this IRQL, so a DPC will take
     * care of it.
     */
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * (Do this outside the spinlock to prevent some recursive spinlocking.)
     */
    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VBoxGuestNativeISRMousePollEvent(pDevExt);
#ifndef RT_OS_WINDOWS
        if (MouseNotifyCallback.pfnNotify)
            MouseNotifyCallback.pfnNotify(MouseNotifyCallback.pvUser);
#endif
    }

    ASMAtomicDecU32(&pDevExt->cISR);
    Assert(rc == 0);                    /* no semaphore signal is expected to fail */
    NOREF(rc);
    return fOurIrq;
}
3089
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette