VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 50391

Last change on this file since 50391 was 50391, checked in by vboxsync, 11 years ago

Additions/common/VBoxGuest: in-code documentation for capability acquiring.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 112.6 KB
Line 
1/* $Id: VBoxGuest.cpp 50391 2014-02-10 14:55:45Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define LOG_GROUP LOG_GROUP_DEFAULT
32#include "VBoxGuestInternal.h"
33#include "VBoxGuest2.h"
34#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
35#include <VBox/log.h>
36#include <iprt/mem.h>
37#include <iprt/time.h>
38#include <iprt/memobj.h>
39#include <iprt/asm.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <iprt/process.h>
43#include <iprt/assert.h>
44#include <iprt/param.h>
45#ifdef VBOX_WITH_HGCM
46# include <iprt/thread.h>
47#endif
48#include "version-generated.h"
49#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
50# include "revision-generated.h"
51#endif
52#ifdef RT_OS_WINDOWS
53# ifndef CTL_CODE
54# include <Windows.h>
55# endif
56#endif
57#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
58# include <iprt/rand.h>
59#endif
60
61
62/*******************************************************************************
63* Internal Functions *
64*******************************************************************************/
65#ifdef VBOX_WITH_HGCM
66static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
67#endif
68#ifdef DEBUG
69static void testSetMouseStatus(void);
70#endif
71static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures);
72
73static int VBoxGuestCommonGuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask, uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags);
74
75#define VBOXGUEST_ACQUIRE_STYLE_EVENTS (VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST | VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
76
77/** Return the mask of VMM device events that this session is allowed to see,
78 * ergo, all events except those in "acquire" mode which have not been acquired
79 * by this session. */
80DECLINLINE(uint32_t) VBoxGuestCommonGetHandledEventsLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
81{
82 if (!pDevExt->u32AcquireModeGuestCaps)
83 return VMMDEV_EVENT_VALID_EVENT_MASK;
84
85 /** @note VMMDEV_EVENT_VALID_EVENT_MASK should actually be the mask of valid
86 * capabilities, but that doesn't affect this code. */
87 uint32_t u32AllowedGuestCaps = pSession->u32AquiredGuestCaps | (VMMDEV_EVENT_VALID_EVENT_MASK & ~pDevExt->u32AcquireModeGuestCaps);
88 uint32_t u32CleanupEvents = VBOXGUEST_ACQUIRE_STYLE_EVENTS;
89 if (u32AllowedGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)
90 u32CleanupEvents &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
91 if (u32AllowedGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
92 u32CleanupEvents &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
93
94 return VMMDEV_EVENT_VALID_EVENT_MASK & ~u32CleanupEvents;
95}
96
97DECLINLINE(uint32_t) VBoxGuestCommonGetAndCleanPendingEventsLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fReqEvents)
98{
99 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents & VBoxGuestCommonGetHandledEventsLocked(pDevExt, pSession);
100 if (fMatches)
101 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
102 return fMatches;
103}
104
/** Puts a capability in "acquire" or "set" mode and returns the mask of
 * capabilities currently in the other mode.  Once a capability has been put in
 * one of the two modes it can no longer be removed from that mode.
 *
 * @returns true on success, false if any bit in @a fCaps is already assigned
 *          to the opposite mode (a capability's mode cannot be changed).
 * @param   pDevExt         The device extension.
 * @param   fCaps           Mask of capabilities to assign to the given mode.
 * @param   fAcquire        true to put the capabilities into "acquire" mode,
 *                          false for "set" mode.
 * @param   pu32OtherVal    Where to return the mask of capabilities currently
 *                          in the other mode.  Optional (may be NULL).
 */
DECLINLINE(bool) VBoxGuestCommonGuestCapsModeSet(PVBOXGUESTDEVEXT pDevExt, uint32_t fCaps, bool fAcquire, uint32_t *pu32OtherVal)
{
    /* Select the mask we modify and snapshot the opposite-mode mask. */
    uint32_t *pVal = fAcquire ? &pDevExt->u32AcquireModeGuestCaps : &pDevExt->u32SetModeGuestCaps;
    /* NOTE(review): fNotVal is sampled before the spinlock is taken — confirm
     * that a racy read of the opposite-mode mask is acceptable here. */
    const uint32_t fNotVal = !fAcquire ? pDevExt->u32AcquireModeGuestCaps : pDevExt->u32SetModeGuestCaps;
    bool fResult = true;
    RTSpinlockAcquire(pDevExt->EventSpinlock);

    /* Only commit the bits if none of them already belongs to the other mode. */
    if (!(fNotVal & fCaps))
        *pVal |= fCaps;
    else
    {
        AssertMsgFailed(("trying to change caps mode\n"));
        fResult = false;
    }

    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

    if (pu32OtherVal)
        *pu32OtherVal = fNotVal;
    return fResult;
}
129
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Size of a VMMDevChangeMemBalloon request including the full physical page
 *  array covering one balloon chunk. */
static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
134
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on Solaris.
 */
PFNRT g_apfnVBoxGuestIPRTDeps[] =
{
    /* VirtioNet */
    (PFNRT)RTRandBytes,
    /* RTSemMutex* */
    (PFNRT)RTSemMutexCreate,
    (PFNRT)RTSemMutexDestroy,
    (PFNRT)RTSemMutexRequest,
    (PFNRT)RTSemMutexRequestNoResume,
    (PFNRT)RTSemMutexRequestDebug,
    (PFNRT)RTSemMutexRequestNoResumeDebug,
    (PFNRT)RTSemMutexRelease,
    (PFNRT)RTSemMutexIsOwned,
    NULL /* terminator */
};
#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
156
157
158/**
159 * Reserves memory in which the VMM can relocate any guest mappings
160 * that are floating around.
161 *
162 * This operation is a little bit tricky since the VMM might not accept
163 * just any address because of address clashes between the three contexts
164 * it operates in, so use a small stack to perform this operation.
165 *
166 * @returns VBox status code (ignored).
167 * @param pDevExt The device extension.
168 */
169static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
170{
171 /*
172 * Query the required space.
173 */
174 VMMDevReqHypervisorInfo *pReq;
175 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
176 if (RT_FAILURE(rc))
177 return rc;
178 pReq->hypervisorStart = 0;
179 pReq->hypervisorSize = 0;
180 rc = VbglGRPerform(&pReq->header);
181 if (RT_FAILURE(rc)) /* this shouldn't happen! */
182 {
183 VbglGRFree(&pReq->header);
184 return rc;
185 }
186
187 /*
188 * The VMM will report back if there is nothing it wants to map, like for
189 * instance in VT-x and AMD-V mode.
190 */
191 if (pReq->hypervisorSize == 0)
192 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
193 else
194 {
195 /*
196 * We have to try several times since the host can be picky
197 * about certain addresses.
198 */
199 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
200 uint32_t cbHypervisor = pReq->hypervisorSize;
201 RTR0MEMOBJ ahTries[5];
202 uint32_t iTry;
203 bool fBitched = false;
204 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
205 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
206 {
207 /*
208 * Reserve space, or if that isn't supported, create a object for
209 * some fictive physical memory and map that in to kernel space.
210 *
211 * To make the code a bit uglier, most systems cannot help with
212 * 4MB alignment, so we have to deal with that in addition to
213 * having two ways of getting the memory.
214 */
215 uint32_t uAlignment = _4M;
216 RTR0MEMOBJ hObj;
217 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
218 if (rc == VERR_NOT_SUPPORTED)
219 {
220 uAlignment = PAGE_SIZE;
221 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
222 }
223 /*
224 * If both RTR0MemObjReserveKernel calls above failed because either not supported or
225 * not implemented at all at the current platform, try to map the memory object into the
226 * virtual kernel space.
227 */
228 if (rc == VERR_NOT_SUPPORTED)
229 {
230 if (hFictive == NIL_RTR0MEMOBJ)
231 {
232 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
233 if (RT_FAILURE(rc))
234 break;
235 hFictive = hObj;
236 }
237 uAlignment = _4M;
238 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
239 if (rc == VERR_NOT_SUPPORTED)
240 {
241 uAlignment = PAGE_SIZE;
242 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
243 }
244 }
245 if (RT_FAILURE(rc))
246 {
247 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
248 rc, cbHypervisor, uAlignment, iTry));
249 fBitched = true;
250 break;
251 }
252
253 /*
254 * Try set it.
255 */
256 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
257 pReq->header.rc = VERR_INTERNAL_ERROR;
258 pReq->hypervisorSize = cbHypervisor;
259 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
260 if ( uAlignment == PAGE_SIZE
261 && pReq->hypervisorStart & (_4M - 1))
262 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
263 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
264
265 rc = VbglGRPerform(&pReq->header);
266 if (RT_SUCCESS(rc))
267 {
268 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
269 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
270 RTR0MemObjAddress(pDevExt->hGuestMappings),
271 RTR0MemObjSize(pDevExt->hGuestMappings),
272 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
273 break;
274 }
275 ahTries[iTry] = hObj;
276 }
277
278 /*
279 * Cleanup failed attempts.
280 */
281 while (iTry-- > 0)
282 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
283 if ( RT_FAILURE(rc)
284 && hFictive != NIL_RTR0PTR)
285 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
286 if (RT_FAILURE(rc) && !fBitched)
287 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
288 }
289 VbglGRFree(&pReq->header);
290
291 /*
292 * We ignore failed attempts for now.
293 */
294 return VINF_SUCCESS;
295}
296
297
/**
 * Undo what vboxGuestInitFixateGuestMappings did.
 *
 * @param   pDevExt     The device extension.
 */
static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
{
    if (pDevExt->hGuestMappings != NIL_RTR0PTR)
    {
        /*
         * Tell the host that we're going to free the memory we reserved for
         * it, then free it up.  (Leak the memory if anything goes wrong here.)
         */
        VMMDevReqHypervisorInfo *pReq;
        int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
        if (RT_SUCCESS(rc))
        {
            /* A zero start/size pair tells the host to forget the mapping area. */
            pReq->hypervisorStart = 0;
            pReq->hypervisorSize = 0;
            rc = VbglGRPerform(&pReq->header);
            VbglGRFree(&pReq->header);
        }
        if (RT_SUCCESS(rc))
        {
            /* Only free the memory once the host has acknowledged letting go of it. */
            rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
            AssertRC(rc);
        }
        else
            LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));

        pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    }
}
331
332
/**
 * Sets the interrupt filter mask during initialization and termination.
 *
 * This will ASSUME that we're the ones in charge of the mask, so
 * we'll simply clear all bits we don't set.
 *
 * @returns VBox status code (ignored).
 * @param   pDevExt     The device extension.  NOTE(review): currently unused
 *                      in this function.
 * @param   fMask       The new mask.
 */
static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
{
    VMMDevCtlGuestFilterMask *pReq;
    int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
    if (RT_SUCCESS(rc))
    {
        /* Set exactly the bits in fMask and clear everything else. */
        pReq->u32OrMask = fMask;
        pReq->u32NotMask = ~fMask;
        rc = VbglGRPerform(&pReq->header);
        if (RT_FAILURE(rc))
            LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
        VbglGRFree(&pReq->header);
    }
    return rc;
}
358
359
360/**
361 * Inflate the balloon by one chunk represented by an R0 memory object.
362 *
363 * The caller owns the balloon mutex.
364 *
365 * @returns IPRT status code.
366 * @param pMemObj Pointer to the R0 memory object.
367 * @param pReq The pre-allocated request for performing the VMMDev call.
368 */
369static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
370{
371 uint32_t iPage;
372 int rc;
373
374 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
375 {
376 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
377 pReq->aPhysPage[iPage] = phys;
378 }
379
380 pReq->fInflate = true;
381 pReq->header.size = cbChangeMemBalloonReq;
382 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
383
384 rc = VbglGRPerform(&pReq->header);
385 if (RT_FAILURE(rc))
386 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
387 return rc;
388}
389
390
391/**
392 * Deflate the balloon by one chunk - info the host and free the memory object.
393 *
394 * The caller owns the balloon mutex.
395 *
396 * @returns IPRT status code.
397 * @param pMemObj Pointer to the R0 memory object.
398 * The memory object will be freed afterwards.
399 * @param pReq The pre-allocated request for performing the VMMDev call.
400 */
401static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
402{
403 uint32_t iPage;
404 int rc;
405
406 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
407 {
408 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
409 pReq->aPhysPage[iPage] = phys;
410 }
411
412 pReq->fInflate = false;
413 pReq->header.size = cbChangeMemBalloonReq;
414 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
415
416 rc = VbglGRPerform(&pReq->header);
417 if (RT_FAILURE(rc))
418 {
419 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
420 return rc;
421 }
422
423 rc = RTR0MemObjFree(*pMemObj, true);
424 if (RT_FAILURE(rc))
425 {
426 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
427 return rc;
428 }
429
430 *pMemObj = NIL_RTR0MEMOBJ;
431 return VINF_SUCCESS;
432}
433
434
435/**
436 * Inflate/deflate the memory balloon and notify the host.
437 *
438 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
439 * the mutex.
440 *
441 * @returns VBox status code.
442 * @param pDevExt The device extension.
443 * @param pSession The session.
444 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
445 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
446 * (VINF_SUCCESS if set).
447 */
448static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
449{
450 int rc = VINF_SUCCESS;
451
452 if (pDevExt->MemBalloon.fUseKernelAPI)
453 {
454 VMMDevChangeMemBalloon *pReq;
455 uint32_t i;
456
457 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
458 {
459 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
460 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
461 return VERR_INVALID_PARAMETER;
462 }
463
464 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
465 return VINF_SUCCESS; /* nothing to do */
466
467 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
468 && !pDevExt->MemBalloon.paMemObj)
469 {
470 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
471 if (!pDevExt->MemBalloon.paMemObj)
472 {
473 LogRel(("vboxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
474 return VERR_NO_MEMORY;
475 }
476 }
477
478 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
479 if (RT_FAILURE(rc))
480 return rc;
481
482 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
483 {
484 /* inflate */
485 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
486 {
487 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
488 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
489 if (RT_FAILURE(rc))
490 {
491 if (rc == VERR_NOT_SUPPORTED)
492 {
493 /* not supported -- fall back to the R3-allocated memory. */
494 rc = VINF_SUCCESS;
495 pDevExt->MemBalloon.fUseKernelAPI = false;
496 Assert(pDevExt->MemBalloon.cChunks == 0);
497 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
498 }
499 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
500 * cannot allocate more memory => don't try further, just stop here */
501 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
502 break;
503 }
504
505 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
506 if (RT_FAILURE(rc))
507 {
508 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
509 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
510 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
511 break;
512 }
513 pDevExt->MemBalloon.cChunks++;
514 }
515 }
516 else
517 {
518 /* deflate */
519 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
520 {
521 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
522 if (RT_FAILURE(rc))
523 {
524 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
525 break;
526 }
527 pDevExt->MemBalloon.cChunks--;
528 }
529 }
530
531 VbglGRFree(&pReq->header);
532 }
533
534 /*
535 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
536 * the balloon changes via the other API.
537 */
538 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
539
540 return rc;
541}
542
543
544/**
545 * Helper to reinit the VBoxVMM communication after hibernation.
546 *
547 * @returns VBox status code.
548 * @param pDevExt The device extension.
549 * @param enmOSType The OS type.
550 */
551int VBoxGuestReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
552{
553 int rc = VBoxGuestReportGuestInfo(enmOSType);
554 if (RT_SUCCESS(rc))
555 {
556 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
557 if (RT_FAILURE(rc))
558 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
559 }
560 else
561 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
562 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
563 return rc;
564}
565
566
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   u64ChunkAddr    The ring-3 address of the chunk to add to / remove
 *                          from the balloon.
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        /* Reject inflating past capacity or before the size was ever queried. */
        if (   pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily allocate the tracking array on the first inflate. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     */
    /* NOTE(review): RTR0MemObjAddressR3 is called on NIL_RTR0MEMOBJ slots here —
     * presumably it returns NIL_RTR3PTR for a nil handle; confirm against IPRT. */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (   fInflate
            && !pMemObj
            && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Pin the ring-3 chunk so its physical pages can be handed to the host. */
        rc = RTR0MemObjLockUser(pMemObj, (RTR3PTR)u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
683
684
/**
 * Cleanup the memory balloon of a session.
 *
 * Will request the balloon mutex, so it must be valid and the caller must not
 * own it already.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.  Can be NULL at unload.
 */
static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    /* Only the owning session (or a driver unload) may tear the balloon down. */
    if (   pDevExt->MemBalloon.pOwner == pSession
        || pSession == NULL /*unload*/)
    {
        if (pDevExt->MemBalloon.paMemObj)
        {
            VMMDevChangeMemBalloon *pReq;
            int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
            if (RT_SUCCESS(rc))
            {
                /* Deflate from the top down; on failure the remaining chunks leak. */
                uint32_t i;
                for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
                {
                    rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                    if (RT_FAILURE(rc))
                    {
                        LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
                                rc, pDevExt->MemBalloon.cChunks));
                        break;
                    }
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    pDevExt->MemBalloon.cChunks--;
                }
                VbglGRFree(&pReq->header);
            }
            else
                LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
                        rc, pDevExt->MemBalloon.cChunks));
            RTMemFree(pDevExt->MemBalloon.paMemObj);
            pDevExt->MemBalloon.paMemObj = NULL;
        }

        pDevExt->MemBalloon.pOwner = NULL;
    }
    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
}
732
733
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges; this function will take care of
 * mapping the MMIO memory (if present).  Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension.  Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;
    unsigned i;

#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
    /*
     * Create the release log.
     */
    static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
    PRTLOGGER pRelLogger;
    rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
                     "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups, RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
    if (RT_SUCCESS(rc))
        RTLogRelSetDefaultInstance(pRelLogger);
    /** @todo Add native hook for getting logger config parameters and setting
     *        them.  On linux we should use the module parameter stuff... */
#endif

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    RTListInit(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    RTListInit(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    RTListInit(&pDevExt->WakeUpList);
#endif
    RTListInit(&pDevExt->WokenUpList);
    RTListInit(&pDevExt->FreeList);
#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
    pDevExt->fVRDPEnabled = false;
#endif
    pDevExt->fLoggingEnabled = false;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;
    for (i = 0; i < RT_ELEMENTS(pDevExt->acMouseFeatureUsage); ++i)
        pDevExt->acMouseFeatureUsage[i] = 0;
    pDevExt->fMouseStatus = 0;
    pDevExt->MouseNotifyCallback.pfnNotify = NULL;
    pDevExt->MouseNotifyCallback.pvUser = NULL;
    pDevExt->cISR = 0;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (   pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            && pVMMDev->u32Size >= 32
            && pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /* No capabilities are in either mode, and none are reported, initially. */
    pDevExt->u32AcquireModeGuestCaps = 0;
    pDevExt->u32SetModeGuestCaps = 0;
    pDevExt->u32GuestCaps = 0;

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    /* NOTE(review): on this failure path the release logger created above is
     * not destroyed (unlike the common error exit at the bottom) — confirm
     * whether that is intentional. */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Pre-allocate the interrupt-acknowledge request used by the ISR. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = VBoxGuestReportGuestInfo(enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default.  The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt);

#ifdef DEBUG
                        testSetMouseStatus(); /* Other tests? */
#endif

                        rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
                        if (RT_FAILURE(rc))
                            LogRel(("VBoxGuestInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));

                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }

                    LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
            }
            else
                LogRel(("VBoxGuestInitDevExt: VBoxReportGuestInfo failed, rc=%Rrc\n", rc));
            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Common error exit: unwind the synchronization objects created above. */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);

#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
    RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
    RTLogDestroy(RTLogSetDefaultInstance(NULL));
#endif
    return rc; /* (failed) */
}
936
937
938/**
939 * Deletes all the items in a wait chain.
940 * @param pList The head of the chain.
941 */
942static void VBoxGuestDeleteWaitList(PRTLISTNODE pList)
943{
944 while (!RTListIsEmpty(pList))
945 {
946 int rc2;
947 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
948 RTListNodeRemove(&pWait->ListNode);
949
950 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
951 pWait->Event = NIL_RTSEMEVENTMULTI;
952 pWait->pSession = NULL;
953 RTMemFree(pWait);
954 }
955}
956
957
958/**
959 * Destroys the VBoxGuest device extension.
960 *
961 * The native code should call this before the driver is loaded,
962 * but don't call this on shutdown.
963 *
964 * @param pDevExt The device extension.
965 */
966void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
967{
968 int rc2;
969 Log(("VBoxGuestDeleteDevExt:\n"));
970 Log(("VBoxGuest: The additions driver is terminating.\n"));
971
972 /*
973 * Clean up the bits that involves the host first.
974 */
975 vboxGuestTermUnfixGuestMappings(pDevExt);
976 VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
977 vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
978 vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);
979
980 /*
981 * Cleanup all the other resources.
982 */
983 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
984 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
985 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
986
987 VBoxGuestDeleteWaitList(&pDevExt->WaitList);
988#ifdef VBOX_WITH_HGCM
989 VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
990#endif
991#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
992 VBoxGuestDeleteWaitList(&pDevExt->WakeUpList);
993#endif
994 VBoxGuestDeleteWaitList(&pDevExt->WokenUpList);
995 VBoxGuestDeleteWaitList(&pDevExt->FreeList);
996
997 VbglTerminate();
998
999 pDevExt->pVMMDevMemory = NULL;
1000
1001 pDevExt->IOPortBase = 0;
1002 pDevExt->pIrqAckEvents = NULL;
1003
1004#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
1005 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
1006 RTLogDestroy(RTLogSetDefaultInstance(NULL));
1007#endif
1008
1009}
1010
1011
1012/**
1013 * Creates a VBoxGuest user session.
1014 *
1015 * The native code calls this when a ring-3 client opens the device.
1016 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
1017 *
1018 * @returns VBox status code.
1019 * @param pDevExt The device extension.
1020 * @param ppSession Where to store the session on success.
1021 */
1022int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1023{
1024 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1025 if (RT_UNLIKELY(!pSession))
1026 {
1027 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
1028 return VERR_NO_MEMORY;
1029 }
1030
1031 pSession->Process = RTProcSelf();
1032 pSession->R0Process = RTR0ProcHandleSelf();
1033 pSession->pDevExt = pDevExt;
1034
1035 *ppSession = pSession;
1036 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1037 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1038 return VINF_SUCCESS;
1039}
1040
1041
1042/**
1043 * Creates a VBoxGuest kernel session.
1044 *
1045 * The native code calls this when a ring-0 client connects to the device.
1046 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
1047 *
1048 * @returns VBox status code.
1049 * @param pDevExt The device extension.
1050 * @param ppSession Where to store the session on success.
1051 */
1052int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1053{
1054 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1055 if (RT_UNLIKELY(!pSession))
1056 {
1057 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
1058 return VERR_NO_MEMORY;
1059 }
1060
1061 pSession->Process = NIL_RTPROCESS;
1062 pSession->R0Process = NIL_RTR0PROCESS;
1063 pSession->pDevExt = pDevExt;
1064
1065 *ppSession = pSession;
1066 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1067 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1068 return VINF_SUCCESS;
1069}
1070
1071static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
1072
1073/**
1074 * Closes a VBoxGuest session.
1075 *
1076 * @param pDevExt The device extension.
1077 * @param pSession The session to close (and free).
1078 */
1079void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1080{
1081 unsigned i; NOREF(i);
1082 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1083 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1084
1085 VBoxGuestCommonGuestCapsAcquire(pDevExt, pSession, 0, UINT32_MAX, VBOXGUESTCAPSACQUIRE_FLAGS_NONE);
1086
1087 VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
1088
1089#ifdef VBOX_WITH_HGCM
1090 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1091 if (pSession->aHGCMClientIds[i])
1092 {
1093 VBoxGuestHGCMDisconnectInfo Info;
1094 Info.result = 0;
1095 Info.u32ClientID = pSession->aHGCMClientIds[i];
1096 pSession->aHGCMClientIds[i] = 0;
1097 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
1098 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1099 }
1100#endif
1101
1102 pSession->pDevExt = NULL;
1103 pSession->Process = NIL_RTPROCESS;
1104 pSession->R0Process = NIL_RTR0PROCESS;
1105 vboxGuestCloseMemBalloon(pDevExt, pSession);
1106 /* Reset any mouse status flags which the session may have set. */
1107 VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession, 0);
1108 RTMemFree(pSession);
1109}
1110
1111
1112/**
1113 * Allocates a wait-for-event entry.
1114 *
1115 * @returns The wait-for-event entry.
1116 * @param pDevExt The device extension.
1117 * @param pSession The session that's allocating this. Can be NULL.
1118 */
1119static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1120{
1121 /*
1122 * Allocate it one way or the other.
1123 */
1124 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1125 if (pWait)
1126 {
1127 RTSpinlockAcquire(pDevExt->EventSpinlock);
1128
1129 pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1130 if (pWait)
1131 RTListNodeRemove(&pWait->ListNode);
1132
1133 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1134 }
1135 if (!pWait)
1136 {
1137 static unsigned s_cErrors = 0;
1138 int rc;
1139
1140 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1141 if (!pWait)
1142 {
1143 if (s_cErrors++ < 32)
1144 LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
1145 return NULL;
1146 }
1147
1148 rc = RTSemEventMultiCreate(&pWait->Event);
1149 if (RT_FAILURE(rc))
1150 {
1151 if (s_cErrors++ < 32)
1152 LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1153 RTMemFree(pWait);
1154 return NULL;
1155 }
1156
1157 pWait->ListNode.pNext = NULL;
1158 pWait->ListNode.pPrev = NULL;
1159 }
1160
1161 /*
1162 * Zero members just as an precaution.
1163 */
1164 pWait->fReqEvents = 0;
1165 pWait->fResEvents = 0;
1166#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1167 pWait->fPendingWakeUp = false;
1168 pWait->fFreeMe = false;
1169#endif
1170 pWait->pSession = pSession;
1171#ifdef VBOX_WITH_HGCM
1172 pWait->pHGCMReq = NULL;
1173#endif
1174 RTSemEventMultiReset(pWait->Event);
1175 return pWait;
1176}
1177
1178
1179/**
1180 * Frees the wait-for-event entry.
1181 *
1182 * The caller must own the wait spinlock !
1183 * The entry must be in a list!
1184 *
1185 * @param pDevExt The device extension.
1186 * @param pWait The wait-for-event entry to free.
1187 */
1188static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1189{
1190 pWait->fReqEvents = 0;
1191 pWait->fResEvents = 0;
1192#ifdef VBOX_WITH_HGCM
1193 pWait->pHGCMReq = NULL;
1194#endif
1195#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1196 Assert(!pWait->fFreeMe);
1197 if (pWait->fPendingWakeUp)
1198 pWait->fFreeMe = true;
1199 else
1200#endif
1201 {
1202 RTListNodeRemove(&pWait->ListNode);
1203 RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
1204 }
1205}
1206
1207
1208/**
1209 * Frees the wait-for-event entry.
1210 *
1211 * @param pDevExt The device extension.
1212 * @param pWait The wait-for-event entry to free.
1213 */
1214static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1215{
1216 RTSpinlockAcquire(pDevExt->EventSpinlock);
1217 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1218 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1219}
1220
1221
1222#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1223/**
1224 * Processes the wake-up list.
1225 *
1226 * All entries in the wake-up list gets signalled and moved to the woken-up
1227 * list.
1228 *
1229 * @param pDevExt The device extension.
1230 */
1231void VBoxGuestWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
1232{
1233 if (!RTListIsEmpty(&pDevExt->WakeUpList))
1234 {
1235 RTSpinlockAcquire(pDevExt->EventSpinlock);
1236 for (;;)
1237 {
1238 int rc;
1239 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
1240 if (!pWait)
1241 break;
1242 pWait->fPendingWakeUp = true;
1243 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1244
1245 rc = RTSemEventMultiSignal(pWait->Event);
1246 AssertRC(rc);
1247
1248 RTSpinlockAcquire(pDevExt->EventSpinlock);
1249 pWait->fPendingWakeUp = false;
1250 if (!pWait->fFreeMe)
1251 {
1252 RTListNodeRemove(&pWait->ListNode);
1253 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1254 }
1255 else
1256 {
1257 pWait->fFreeMe = false;
1258 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1259 }
1260 }
1261 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1262 }
1263}
1264#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1265
1266
1267/**
1268 * Modifies the guest capabilities.
1269 *
1270 * Should be called during driver init and termination.
1271 *
1272 * @returns VBox status code.
1273 * @param fOr The Or mask (what to enable).
1274 * @param fNot The Not mask (what to disable).
1275 */
1276int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1277{
1278 VMMDevReqGuestCapabilities2 *pReq;
1279 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1280 if (RT_FAILURE(rc))
1281 {
1282 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1283 sizeof(*pReq), sizeof(*pReq), rc));
1284 return rc;
1285 }
1286
1287 pReq->u32OrMask = fOr;
1288 pReq->u32NotMask = fNot;
1289
1290 rc = VbglGRPerform(&pReq->header);
1291 if (RT_FAILURE(rc))
1292 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
1293
1294 VbglGRFree(&pReq->header);
1295 return rc;
1296}
1297
1298
1299/**
1300 * Implements the fast (no input or output) type of IOCtls.
1301 *
1302 * This is currently just a placeholder stub inherited from the support driver code.
1303 *
1304 * @returns VBox status code.
1305 * @param iFunction The IOCtl function number.
1306 * @param pDevExt The device extension.
1307 * @param pSession The session.
1308 */
1309int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1310{
1311 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1312
1313 NOREF(iFunction);
1314 NOREF(pDevExt);
1315 NOREF(pSession);
1316 return VERR_NOT_SUPPORTED;
1317}
1318
1319
1320/**
1321 * Return the VMM device port.
1322 *
1323 * returns IPRT status code.
1324 * @param pDevExt The device extension.
1325 * @param pInfo The request info.
1326 * @param pcbDataReturned (out) contains the number of bytes to return.
1327 */
1328static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1329{
1330 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1331 pInfo->portAddress = pDevExt->IOPortBase;
1332 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1333 if (pcbDataReturned)
1334 *pcbDataReturned = sizeof(*pInfo);
1335 return VINF_SUCCESS;
1336}
1337
1338
1339#ifndef RT_OS_WINDOWS
1340/**
1341 * Set the callback for the kernel mouse handler.
1342 *
1343 * returns IPRT status code.
1344 * @param pDevExt The device extension.
1345 * @param pNotify The new callback information.
1346 * @note This function takes the session spinlock to update the callback
1347 * information, but the interrupt handler will not do this. To make
1348 * sure that the interrupt handler sees a consistent structure, we
1349 * set the function pointer to NULL before updating the data and only
1350 * set it to the correct value once the data is updated. Since the
1351 * interrupt handler executes atomically this ensures that the data is
1352 * valid if the function pointer is non-NULL.
1353 */
1354int VBoxGuestCommonIOCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, VBoxGuestMouseSetNotifyCallback *pNotify)
1355{
1356 Log(("VBoxGuestCommonIOCtl: SET_MOUSE_NOTIFY_CALLBACK\n"));
1357
1358 RTSpinlockAcquire(pDevExt->EventSpinlock);
1359 pDevExt->MouseNotifyCallback = *pNotify;
1360 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1361
1362 /* Make sure no active ISR is referencing the old data - hacky but should be
1363 * effective. */
1364 while (pDevExt->cISR > 0)
1365 ASMNopPause();
1366
1367 return VINF_SUCCESS;
1368}
1369#endif
1370
1371
1372/**
1373 * Worker VBoxGuestCommonIOCtl_WaitEvent.
1374 *
1375 * The caller enters the spinlock, we leave it.
1376 *
1377 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1378 */
1379DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestWaitEventInfo *pInfo,
1380 int iEvent, const uint32_t fReqEvents)
1381{
1382 uint32_t fMatches = VBoxGuestCommonGetAndCleanPendingEventsLocked(pDevExt, pSession, fReqEvents);
1383 if (fMatches || pSession->fPendingCancelWaitEvents)
1384 {
1385 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1386
1387 pInfo->u32EventFlagsOut = fMatches;
1388 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1389 if (fReqEvents & ~((uint32_t)1 << iEvent))
1390 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1391 else
1392 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1393 pSession->fPendingCancelWaitEvents = false;
1394 return VINF_SUCCESS;
1395 }
1396 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1397 return VERR_TIMEOUT;
1398}
1399
1400
/**
 * Handles the WAITEVENT I/O control: waits for one or more of the events in
 * the caller supplied mask to become pending.
 *
 * @returns VBox status code (VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED,
 *          VERR_INVALID_PARAMETER, VERR_NO_MEMORY, VERR_SEM_DESTROYED, ...).
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pInfo               The wait request (in) and the result (out).
 * @param   pcbDataReturned     Where to store the number of bytes returned.
 *                              Optional.
 * @param   fInterruptible      Whether the wait may be interrupted by signals.
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    uint32_t fResEvents;
    int iEvent;
    PVBOXGUESTWAIT pWait;
    int rc;

    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1;  /* -1 when no bit is set */
    if (RT_UNLIKELY(iEvent < 0))
    {
        LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    rc = WaitEventCheckCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents); /* releases the spinlock */
    if (rc == VINF_SUCCESS)
        return rc;

    if (!pInfo->u32TimeoutIn)
    {
        /* Zero timeout means poll only: report timeout right away. */
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
    rc = WaitEventCheckCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents); /* releases the spinlock */
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fResEvents = pWait->fResEvents;
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

    /*
     * Now deal with the return code.
     * Note: fResEvents == UINT32_MAX marks a cancelled wait, see
     *       VBoxGuestCommonIOCtl_CancelAllWaitEvents.
     */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT (2)\n"));
    }
    else
    {
        if (RT_SUCCESS(rc))
        {
            /* Woken up successfully but without any event bits - shouldn't happen. */
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1523
1524
/**
 * Cancels all pending WAITEVENT calls of the given session.
 *
 * Every wait entry of the session is marked cancelled (fResEvents set to
 * UINT32_MAX) and its waiter is woken up.
 *
 * @returns VINF_SUCCESS.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session whose waits should be cancelled.
 */
static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;
    int rc = 0;
    /* Was at least one WAITEVENT in process for this session? If not we
     * set a flag that the next call should be interrupted immediately. This
     * is needed so that a user thread can reliably interrupt another one in a
     * WAITEVENT loop. */
    bool fCancelledOne = false;

    Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));

    /*
     * Walk the event list and wake up anyone with a matching session.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        if (pWait->pSession == pSession)
        {
            fCancelledOne = true;
            pWait->fResEvents = UINT32_MAX;     /* cancellation marker, see WaitEvent */
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            rc |= RTSemEventMultiSignal(pWait->Event);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
#endif
        }
    }
    if (!fCancelledOne)
        pSession->fPendingCancelWaitEvents = true;
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
    Assert(rc == 0);
    NOREF(rc);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    /* Do the actual signalling now that the spinlock has been dropped. */
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif

    return VINF_SUCCESS;
}
1569
1570/**
1571 * Checks if the VMM request is allowed in the context of the given session.
1572 *
1573 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
1574 * @param pSession The calling session.
1575 * @param enmType The request type.
1576 * @param pReqHdr The request.
1577 */
1578static int VBoxGuestCheckIfVMMReqAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
1579 VMMDevRequestHeader const *pReqHdr)
1580{
1581 /*
1582 * Categorize the request being made.
1583 */
1584 /** @todo This need quite some more work! */
1585 enum
1586 {
1587 kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
1588 } enmRequired;
1589 switch (enmType)
1590 {
1591 /*
1592 * Deny access to anything we don't know or provide specialized I/O controls for.
1593 */
1594#ifdef VBOX_WITH_HGCM
1595 case VMMDevReq_HGCMConnect:
1596 case VMMDevReq_HGCMDisconnect:
1597# ifdef VBOX_WITH_64_BITS_GUESTS
1598 case VMMDevReq_HGCMCall32:
1599 case VMMDevReq_HGCMCall64:
1600# else
1601 case VMMDevReq_HGCMCall:
1602# endif /* VBOX_WITH_64_BITS_GUESTS */
1603 case VMMDevReq_HGCMCancel:
1604 case VMMDevReq_HGCMCancel2:
1605#endif /* VBOX_WITH_HGCM */
1606 default:
1607 enmRequired = kLevel_NoOne;
1608 break;
1609
1610 /*
1611 * There are a few things only this driver can do (and it doesn't use
1612 * the VMMRequst I/O control route anyway, but whatever).
1613 */
1614 case VMMDevReq_ReportGuestInfo:
1615 case VMMDevReq_ReportGuestInfo2:
1616 case VMMDevReq_GetHypervisorInfo:
1617 case VMMDevReq_SetHypervisorInfo:
1618 case VMMDevReq_RegisterPatchMemory:
1619 case VMMDevReq_DeregisterPatchMemory:
1620 case VMMDevReq_GetMemBalloonChangeRequest:
1621 enmRequired = kLevel_OnlyVBoxGuest;
1622 break;
1623
1624 /*
1625 * Trusted users apps only.
1626 */
1627 case VMMDevReq_QueryCredentials:
1628 case VMMDevReq_ReportCredentialsJudgement:
1629 case VMMDevReq_RegisterSharedModule:
1630 case VMMDevReq_UnregisterSharedModule:
1631 case VMMDevReq_WriteCoreDump:
1632 case VMMDevReq_GetCpuHotPlugRequest:
1633 case VMMDevReq_SetCpuHotPlugStatus:
1634 case VMMDevReq_CheckSharedModules:
1635 case VMMDevReq_GetPageSharingStatus:
1636 case VMMDevReq_DebugIsPageShared:
1637 case VMMDevReq_ReportGuestStats:
1638 case VMMDevReq_ReportGuestUserState:
1639 case VMMDevReq_GetStatisticsChangeRequest:
1640 case VMMDevReq_ChangeMemBalloon:
1641 enmRequired = kLevel_TrustedUsers;
1642 break;
1643
1644 /*
1645 * Anyone. But not for CapsAcquire mode
1646 */
1647 case VMMDevReq_SetGuestCapabilities:
1648 {
1649 VMMDevReqGuestCapabilities2 *pCaps = (VMMDevReqGuestCapabilities2*)pReqHdr;
1650 uint32_t fAcquireCaps = 0;
1651 if (!VBoxGuestCommonGuestCapsModeSet(pDevExt, pCaps->u32OrMask, false, &fAcquireCaps))
1652 {
1653 AssertFailed();
1654 LogRel(("calling caps set for acquired caps %d\n", pCaps->u32OrMask));
1655 enmRequired = kLevel_NoOne;
1656 break;
1657 }
1658 /* hack to adjust the notcaps.
1659 * @todo: move to a better place
1660 * user-mode apps are allowed to pass any mask to the notmask,
1661 * the driver cleans up them accordingly */
1662 pCaps->u32NotMask &= ~fAcquireCaps;
1663 /* do not break, make it fall through to the below enmRequired setting */
1664 }
1665 /*
1666 * Anyone.
1667 */
1668 case VMMDevReq_GetMouseStatus:
1669 case VMMDevReq_SetMouseStatus:
1670 case VMMDevReq_SetPointerShape:
1671 case VMMDevReq_GetHostVersion:
1672 case VMMDevReq_Idle:
1673 case VMMDevReq_GetHostTime:
1674 case VMMDevReq_SetPowerStatus:
1675 case VMMDevReq_AcknowledgeEvents:
1676 case VMMDevReq_CtlGuestFilterMask:
1677 case VMMDevReq_ReportGuestStatus:
1678 case VMMDevReq_GetDisplayChangeRequest:
1679 case VMMDevReq_VideoModeSupported:
1680 case VMMDevReq_GetHeightReduction:
1681 case VMMDevReq_GetDisplayChangeRequest2:
1682 case VMMDevReq_VideoModeSupported2:
1683 case VMMDevReq_VideoAccelEnable:
1684 case VMMDevReq_VideoAccelFlush:
1685 case VMMDevReq_VideoSetVisibleRegion:
1686 case VMMDevReq_GetDisplayChangeRequestEx:
1687 case VMMDevReq_GetSeamlessChangeRequest:
1688 case VMMDevReq_GetVRDPChangeRequest:
1689 case VMMDevReq_LogString:
1690 case VMMDevReq_GetSessionId:
1691 enmRequired = kLevel_AllUsers;
1692 break;
1693
1694 /*
1695 * Depends on the request parameters...
1696 */
1697 /** @todo this have to be changed into an I/O control and the facilities
1698 * tracked in the session so they can automatically be failed when the
1699 * session terminates without reporting the new status.
1700 *
1701 * The information presented by IGuest is not reliable without this! */
1702 case VMMDevReq_ReportGuestCapabilities:
1703 switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
1704 {
1705 case VBoxGuestFacilityType_All:
1706 case VBoxGuestFacilityType_VBoxGuestDriver:
1707 enmRequired = kLevel_OnlyVBoxGuest;
1708 break;
1709 case VBoxGuestFacilityType_VBoxService:
1710 enmRequired = kLevel_TrustedUsers;
1711 break;
1712 case VBoxGuestFacilityType_VBoxTrayClient:
1713 case VBoxGuestFacilityType_Seamless:
1714 case VBoxGuestFacilityType_Graphics:
1715 default:
1716 enmRequired = kLevel_AllUsers;
1717 break;
1718 }
1719 break;
1720 }
1721
1722 /*
1723 * Check against the session.
1724 */
1725 switch (enmRequired)
1726 {
1727 default:
1728 case kLevel_NoOne:
1729 break;
1730 case kLevel_OnlyVBoxGuest:
1731 case kLevel_OnlyKernel:
1732 if (pSession->R0Process == NIL_RTR0PROCESS)
1733 return VINF_SUCCESS;
1734 break;
1735 case kLevel_TrustedUsers:
1736 case kLevel_AllUsers:
1737 return VINF_SUCCESS;
1738 }
1739
1740 return VERR_PERMISSION_DENIED;
1741}
1742
/**
 * Handles the generic VMM request I/O control.
 *
 * Validates the request header against the buffer, checks that the session is
 * permitted to issue the request type, copies it into a physically contiguous
 * heap buffer, performs it and copies the result back to the caller's buffer.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pReqHdr             The request (also receives the reply).
 * @param   cbData              The size of the buffer pReqHdr points at.
 * @param   pcbDataReturned     Where to store the number of bytes returned.
 *                              Optional.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    int rc;
    VMMDevRequestHeader *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = vmmdevGetRequestSize(enmType);

    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    if (cbReq < cbMinSize)
    {
        LogRel(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
                cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        LogRel(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
                cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    rc = VBoxGuestCheckIfVMMReqAllowed(pDevExt, pSession, enmType, pReqHdr);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        /* Async HGCM execution never comes through this path. */
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the reply back to the caller's buffer. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* The transport succeeded, but the request itself failed on the host. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1826
1827
1828static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1829{
1830 VMMDevCtlGuestFilterMask *pReq;
1831 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1832 if (RT_FAILURE(rc))
1833 {
1834 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1835 sizeof(*pReq), sizeof(*pReq), rc));
1836 return rc;
1837 }
1838
1839 pReq->u32OrMask = pInfo->u32OrMask;
1840 pReq->u32NotMask = pInfo->u32NotMask;
1841 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1842 rc = VbglGRPerform(&pReq->header);
1843 if (RT_FAILURE(rc))
1844 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1845
1846 VbglGRFree(&pReq->header);
1847 return rc;
1848}
1849
1850#ifdef VBOX_WITH_HGCM
1851
1852AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1853
/**
 * Worker for VBoxGuestHGCMAsyncWaitCallback and
 * VBoxGuestHGCMAsyncWaitCallbackInterruptible.
 *
 * Blocks on the HGCM wait list until the host flags the request as done
 * (VBOX_HGCM_REQ_DONE), the timeout expires, or - in interruptible mode -
 * a signal arrives.
 *
 * @returns VBox status code.
 * @param   pHdr            The HGCM request header.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 * @param   cMillies        The timeout, RT_INDEFINITE_WAIT for no timeout.
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{
    int rc;

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        RTThreadSleep(1);   /* back off briefly before retrying the allocation */
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
        return VINF_SUCCESS;
    }
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    if (rc == VERR_SEM_DESTROYED)
        /* Driver is unloading - bail out without touching pWait again. */
        return rc;

    /*
     * Unlink, free and return.
     */
    if (   RT_FAILURE(rc)
        && rc != VERR_TIMEOUT
        && (   !fInterruptible
            || rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
    return rc;
}
1922
1923
1924/**
1925 * This is a callback for dealing with async waits.
1926 *
1927 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1928 */
1929static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1930{
1931 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1932 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1933 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1934 pDevExt,
1935 false /* fInterruptible */,
1936 u32User /* cMillies */);
1937}
1938
1939
1940/**
1941 * This is a callback for dealing with async waits with a timeout.
1942 *
1943 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1944 */
1945static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1946 void *pvUser, uint32_t u32User)
1947{
1948 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1949 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1950 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1951 pDevExt,
1952 true /* fInterruptible */,
1953 u32User /* cMillies */ );
1954
1955}
1956
1957
/**
 * Handle VBOXGUEST_IOCTL_HGCM_CONNECT.
 *
 * Connects to an HGCM service on the host and, on success, records the new
 * client id in the session's client id table.  If the table is full the
 * connection is rolled back with a disconnect.
 *
 * @returns VBox status code; VERR_TOO_MANY_OPEN_FILES when the session's
 *          client id table is full.
 *
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   pInfo           The connection request (input/output).
 * @param   pcbDataReturned Where to store the amount of returned data.  Can be NULL.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                            VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
{
    int rc;

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSpinlockAcquire(pDevExt->SessionSpinlock);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* Table full: undo the connect so the client id isn't leaked. */
                static unsigned s_cErrors = 0;
                VBoxGuestHGCMDisconnectInfo Info;

                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
2011
2012
2013static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
2014 size_t *pcbDataReturned)
2015{
2016 /*
2017 * Validate the client id and invalidate its entry while we're in the call.
2018 */
2019 int rc;
2020 const uint32_t u32ClientId = pInfo->u32ClientID;
2021 unsigned i;
2022 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2023 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2024 if (pSession->aHGCMClientIds[i] == u32ClientId)
2025 {
2026 pSession->aHGCMClientIds[i] = UINT32_MAX;
2027 break;
2028 }
2029 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2030 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
2031 {
2032 static unsigned s_cErrors = 0;
2033 if (s_cErrors++ > 32)
2034 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
2035 return VERR_INVALID_HANDLE;
2036 }
2037
2038 /*
2039 * The VbglHGCMConnect call will invoke the callback if the HGCM
2040 * call is performed in an ASYNC fashion. The function is not able
2041 * to deal with cancelled requests.
2042 */
2043 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
2044 rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
2045 if (RT_SUCCESS(rc))
2046 {
2047 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
2048 if (pcbDataReturned)
2049 *pcbDataReturned = sizeof(*pInfo);
2050 }
2051
2052 /* Update the client id array according to the result. */
2053 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2054 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
2055 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
2056 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2057
2058 return rc;
2059}
2060
2061
2062static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
2063 PVBOXGUESTSESSION pSession,
2064 VBoxGuestHGCMCallInfo *pInfo,
2065 uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
2066 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
2067{
2068 const uint32_t u32ClientId = pInfo->u32ClientID;
2069 uint32_t fFlags;
2070 size_t cbActual;
2071 unsigned i;
2072 int rc;
2073
2074 /*
2075 * Some more validations.
2076 */
2077 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
2078 {
2079 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
2080 return VERR_INVALID_PARAMETER;
2081 }
2082
2083 cbActual = cbExtra + sizeof(*pInfo);
2084#ifdef RT_ARCH_AMD64
2085 if (f32bit)
2086 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
2087 else
2088#endif
2089 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
2090 if (cbData < cbActual)
2091 {
2092 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
2093 cbData, cbData, cbActual, cbActual));
2094 return VERR_INVALID_PARAMETER;
2095 }
2096
2097 /*
2098 * Validate the client id.
2099 */
2100 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2101 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2102 if (pSession->aHGCMClientIds[i] == u32ClientId)
2103 break;
2104 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2105 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
2106 {
2107 static unsigned s_cErrors = 0;
2108 if (s_cErrors++ > 32)
2109 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
2110 return VERR_INVALID_HANDLE;
2111 }
2112
2113 /*
2114 * The VbglHGCMCall call will invoke the callback if the HGCM
2115 * call is performed in an ASYNC fashion. This function can
2116 * deal with cancelled requests, so we let user more requests
2117 * be interruptible (should add a flag for this later I guess).
2118 */
2119 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
2120 fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
2121#ifdef RT_ARCH_AMD64
2122 if (f32bit)
2123 {
2124 if (fInterruptible)
2125 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2126 else
2127 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
2128 }
2129 else
2130#endif
2131 {
2132 if (fInterruptible)
2133 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2134 else
2135 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
2136 }
2137 if (RT_SUCCESS(rc))
2138 {
2139 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
2140 if (pcbDataReturned)
2141 *pcbDataReturned = cbActual;
2142 }
2143 else
2144 {
2145 if ( rc != VERR_INTERRUPTED
2146 && rc != VERR_TIMEOUT)
2147 {
2148 static unsigned s_cErrors = 0;
2149 if (s_cErrors++ < 32)
2150 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2151 }
2152 else
2153 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2154 }
2155 return rc;
2156}
2157
2158
2159#endif /* VBOX_WITH_HGCM */
2160
2161/**
2162 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
2163 *
2164 * Ask the host for the size of the balloon and try to set it accordingly. If
2165 * this approach fails because it's not supported, return with fHandleInR3 set
2166 * and let the user land supply memory we can lock via the other ioctl.
2167 *
2168 * @returns VBox status code.
2169 *
2170 * @param pDevExt The device extension.
2171 * @param pSession The session.
2172 * @param pInfo The output buffer.
2173 * @param pcbDataReturned Where to store the amount of returned data. Can
2174 * be NULL.
2175 */
2176static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2177 VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
2178{
2179 VMMDevGetMemBalloonChangeRequest *pReq;
2180 int rc;
2181
2182 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
2183 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2184 AssertRCReturn(rc, rc);
2185
2186 /*
2187 * The first user trying to query/change the balloon becomes the
2188 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2189 */
2190 if ( pDevExt->MemBalloon.pOwner != pSession
2191 && pDevExt->MemBalloon.pOwner == NULL)
2192 pDevExt->MemBalloon.pOwner = pSession;
2193
2194 if (pDevExt->MemBalloon.pOwner == pSession)
2195 {
2196 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
2197 if (RT_SUCCESS(rc))
2198 {
2199 /*
2200 * This is a response to that event. Setting this bit means that
2201 * we request the value from the host and change the guest memory
2202 * balloon according to this value.
2203 */
2204 pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
2205 rc = VbglGRPerform(&pReq->header);
2206 if (RT_SUCCESS(rc))
2207 {
2208 Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
2209 pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;
2210
2211 pInfo->cBalloonChunks = pReq->cBalloonChunks;
2212 pInfo->fHandleInR3 = false;
2213
2214 rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
2215 /* Ignore various out of memory failures. */
2216 if ( rc == VERR_NO_MEMORY
2217 || rc == VERR_NO_PHYS_MEMORY
2218 || rc == VERR_NO_CONT_MEMORY)
2219 rc = VINF_SUCCESS;
2220
2221 if (pcbDataReturned)
2222 *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
2223 }
2224 else
2225 LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
2226 VbglGRFree(&pReq->header);
2227 }
2228 }
2229 else
2230 rc = VERR_PERMISSION_DENIED;
2231
2232 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2233 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
2234 return rc;
2235}
2236
2237
2238/**
2239 * Handle a request for changing the memory balloon.
2240 *
2241 * @returns VBox status code.
2242 *
2243 * @param pDevExt The device extention.
2244 * @param pSession The session.
2245 * @param pInfo The change request structure (input).
2246 * @param pcbDataReturned Where to store the amount of returned data. Can
2247 * be NULL.
2248 */
2249static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2250 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
2251{
2252 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2253 AssertRCReturn(rc, rc);
2254
2255 if (!pDevExt->MemBalloon.fUseKernelAPI)
2256 {
2257 /*
2258 * The first user trying to query/change the balloon becomes the
2259 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2260 */
2261 if ( pDevExt->MemBalloon.pOwner != pSession
2262 && pDevExt->MemBalloon.pOwner == NULL)
2263 pDevExt->MemBalloon.pOwner = pSession;
2264
2265 if (pDevExt->MemBalloon.pOwner == pSession)
2266 {
2267 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, !!pInfo->fInflate);
2268 if (pcbDataReturned)
2269 *pcbDataReturned = 0;
2270 }
2271 else
2272 rc = VERR_PERMISSION_DENIED;
2273 }
2274 else
2275 rc = VERR_PERMISSION_DENIED;
2276
2277 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2278 return rc;
2279}
2280
2281
2282/**
2283 * Handle a request for writing a core dump of the guest on the host.
2284 *
2285 * @returns VBox status code.
2286 *
2287 * @param pDevExt The device extension.
2288 * @param pInfo The output buffer.
2289 */
2290static int VBoxGuestCommonIOCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
2291{
2292 VMMDevReqWriteCoreDump *pReq = NULL;
2293 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2294 if (RT_FAILURE(rc))
2295 {
2296 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2297 sizeof(*pReq), sizeof(*pReq), rc));
2298 return rc;
2299 }
2300
2301 pReq->fFlags = pInfo->fFlags;
2302 rc = VbglGRPerform(&pReq->header);
2303 if (RT_FAILURE(rc))
2304 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: VbglGRPerform failed, rc=%Rrc!\n", rc));
2305
2306 VbglGRFree(&pReq->header);
2307 return rc;
2308}
2309
2310
2311#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2312/**
2313 * Enables the VRDP session and saves its session ID.
2314 *
2315 * @returns VBox status code.
2316 *
2317 * @param pDevExt The device extention.
2318 * @param pSession The session.
2319 */
2320static int VBoxGuestCommonIOCtl_EnableVRDPSession(VBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2321{
2322 /* Nothing to do here right now, since this only is supported on Windows at the moment. */
2323 return VERR_NOT_IMPLEMENTED;
2324}
2325
2326
2327/**
2328 * Disables the VRDP session.
2329 *
2330 * @returns VBox status code.
2331 *
2332 * @param pDevExt The device extention.
2333 * @param pSession The session.
2334 */
2335static int VBoxGuestCommonIOCtl_DisableVRDPSession(VBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2336{
2337 /* Nothing to do here right now, since this only is supported on Windows at the moment. */
2338 return VERR_NOT_IMPLEMENTED;
2339}
2340#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2341
#ifdef DEBUG
/** Unit test SetMouseStatus instead of really executing the request: when
 * true, vboxguestcommonSetMouseStatus records the request in the g_test_*
 * variables below instead of performing it. */
static bool g_test_fSetMouseStatus = false;
/** When unit testing SetMouseStatus, the fake RC for the GR to return. */
static int g_test_SetMouseStatusGRRC;
/** When unit testing SetMouseStatus this will be set to the status passed to
 * the GR. */
static uint32_t g_test_statusSetMouseStatus;
#endif
2351
/**
 * Reports the given mouse feature mask to the host via a
 * VMMDevReq_SetMouseStatus request (the pointer position is sent as 0,0).
 *
 * In DEBUG builds with g_test_fSetMouseStatus set, the request is captured
 * into the g_test_* variables instead of being performed.
 *
 * @returns VBox status code.
 *
 * @param   fFeatures   The mouse feature mask to report.
 */
static int vboxguestcommonSetMouseStatus(uint32_t fFeatures)
{
    VMMDevReqMouseStatus *pReq;
    int rc;

    LogRelFlowFunc(("fFeatures=%u\n", (int) fFeatures));
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
    if (RT_SUCCESS(rc))
    {
        pReq->mouseFeatures = fFeatures;
        pReq->pointerXPos = 0;
        pReq->pointerYPos = 0;
#ifdef DEBUG
        if (g_test_fSetMouseStatus)
        {
            /* Unit test mode: record the status and return the canned rc. */
            g_test_statusSetMouseStatus = pReq->mouseFeatures;
            rc = g_test_SetMouseStatusGRRC;
        }
        else
#endif
            rc = VbglGRPerform(&pReq->header);
        VbglGRFree(&pReq->header);
    }
    LogRelFlowFunc(("rc=%Rrc\n", rc));
    return rc;
}
2378
2379
2380/**
2381 * Sets the mouse status features for this session and updates them
2382 * globally. We aim to ensure that if several threads call this in
2383 * parallel the most recent status will always end up being set.
2384 *
2385 * @returns VBox status code.
2386 *
2387 * @param pDevExt The device extention.
2388 * @param pSession The session.
2389 * @param fFeatures New bitmap of enabled features.
2390 */
2391static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
2392{
2393 uint32_t fNewDevExtStatus = 0;
2394 unsigned i;
2395 int rc;
2396 /* Exit early if nothing has changed - hack to work around the
2397 * Windows Additions not using the common code. */
2398 bool fNoAction;
2399
2400 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2401
2402 /* For all the bits which the guest is allowed to set, check whether the
2403 * requested value is different to the current one and adjust the global
2404 * usage counter and if appropriate the global state if so. */
2405 for (i = 0; i < sizeof(fFeatures) * 8; i++)
2406 {
2407 if (RT_BIT_32(i) & VMMDEV_MOUSE_GUEST_MASK)
2408 {
2409 if ( (RT_BIT_32(i) & fFeatures)
2410 && !(RT_BIT_32(i) & pSession->fMouseStatus))
2411 pDevExt->acMouseFeatureUsage[i]++;
2412 else if ( !(RT_BIT_32(i) & fFeatures)
2413 && (RT_BIT_32(i) & pSession->fMouseStatus))
2414 pDevExt->acMouseFeatureUsage[i]--;
2415 }
2416 if (pDevExt->acMouseFeatureUsage[i] > 0)
2417 fNewDevExtStatus |= RT_BIT_32(i);
2418 }
2419
2420 pSession->fMouseStatus = fFeatures & VMMDEV_MOUSE_GUEST_MASK;
2421 fNoAction = (pDevExt->fMouseStatus == fNewDevExtStatus);
2422 pDevExt->fMouseStatus = fNewDevExtStatus;
2423
2424 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2425 if (fNoAction)
2426 return VINF_SUCCESS;
2427
2428 do
2429 {
2430 fNewDevExtStatus = pDevExt->fMouseStatus;
2431 rc = vboxguestcommonSetMouseStatus(fNewDevExtStatus);
2432 } while ( RT_SUCCESS(rc)
2433 && fNewDevExtStatus != pDevExt->fMouseStatus);
2434
2435 return rc;
2436}
2437
2438
2439#ifdef DEBUG
2440/** Unit test for the SET_MOUSE_STATUS IoCtl. Since this is closely tied to
2441 * the code in question it probably makes most sense to keep it next to the
2442 * code. */
2443static void testSetMouseStatus(void)
2444{
2445 uint32_t u32Data;
2446 int rc;
2447 RTSPINLOCK Spinlock;
2448
2449 g_test_fSetMouseStatus = true;
2450 rc = RTSpinlockCreate(&Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestTest");
2451 AssertRCReturnVoid(rc);
2452 {
2453 VBOXGUESTDEVEXT DevExt = { 0 };
2454 VBOXGUESTSESSION Session = { 0 };
2455
2456 g_test_statusSetMouseStatus = ~0;
2457 g_test_SetMouseStatusGRRC = VINF_SUCCESS;
2458 DevExt.SessionSpinlock = Spinlock;
2459 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2460 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2461 &Session, &u32Data, sizeof(u32Data), NULL);
2462 AssertRCSuccess(rc);
2463 AssertMsg( g_test_statusSetMouseStatus
2464 == VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE,
2465 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2466 DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] = 1;
2467 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2468 &Session, &u32Data, sizeof(u32Data), NULL);
2469 AssertRCSuccess(rc);
2470 AssertMsg( g_test_statusSetMouseStatus
2471 == ( VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE
2472 | VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR),
2473 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2474 u32Data = VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE; /* Can't change this */
2475 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2476 &Session, &u32Data, sizeof(u32Data), NULL);
2477 AssertRCSuccess(rc);
2478 AssertMsg( g_test_statusSetMouseStatus
2479 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2480 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2481 u32Data = VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR;
2482 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2483 &Session, &u32Data, sizeof(u32Data), NULL);
2484 AssertRCSuccess(rc);
2485 AssertMsg( g_test_statusSetMouseStatus
2486 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2487 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2488 u32Data = 0;
2489 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2490 &Session, &u32Data, sizeof(u32Data), NULL);
2491 AssertRCSuccess(rc);
2492 AssertMsg( g_test_statusSetMouseStatus
2493 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2494 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2495 AssertMsg(DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] == 1,
2496 ("Actual value: %d\n", DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR)]));
2497 g_test_SetMouseStatusGRRC = VERR_UNRESOLVED_ERROR;
2498 /* This should succeed as the host request should not be made
2499 * since nothing has changed. */
2500 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2501 &Session, &u32Data, sizeof(u32Data), NULL);
2502 AssertRCSuccess(rc);
2503 /* This should fail with VERR_UNRESOLVED_ERROR as set above. */
2504 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2505 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2506 &Session, &u32Data, sizeof(u32Data), NULL);
2507 AssertMsg(rc == VERR_UNRESOLVED_ERROR, ("rc == %Rrc\n", rc));
2508 /* Untested paths: out of memory; race setting status to host */
2509 }
2510 RTSpinlockDestroy(Spinlock);
2511 g_test_fSetMouseStatus = false;
2512}
2513#endif
2514
2515
2516/**
2517 * Guest backdoor logging.
2518 *
2519 * @returns VBox status code.
2520 *
2521 * @param pDevExt The device extension.
2522 * @param pch The log message (need not be NULL terminated).
2523 * @param cbData Size of the buffer.
2524 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2525 */
2526static int VBoxGuestCommonIOCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, size_t *pcbDataReturned)
2527{
2528 NOREF(pch);
2529 NOREF(cbData);
2530 if (pDevExt->fLoggingEnabled)
2531 RTLogBackdoorPrintf("%.*s", cbData, pch);
2532 else
2533 Log(("%.*s", cbData, pch));
2534 if (pcbDataReturned)
2535 *pcbDataReturned = 0;
2536 return VINF_SUCCESS;
2537}
2538
2539static bool VBoxGuestCommonGuestCapsValidateValues(uint32_t fCaps)
2540{
2541 if (fCaps & (~(VMMDEV_GUEST_SUPPORTS_SEAMLESS | VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING | VMMDEV_GUEST_SUPPORTS_GRAPHICS)))
2542 return false;
2543
2544 return true;
2545}
2546
/** Check whether any unreported VMM device events should be reported to any of
 * the currently listening sessions.  In addition, report any events in
 * @a fGenFakeEvents.
 * @note This is called by GUEST_CAPS_ACQUIRE in case any pending events can now
 *       be dispatched to the session which acquired capabilities.  The fake
 *       events are a hack to wake up threads in that session which would not
 *       otherwise be woken.
 * @todo Why not just use CANCEL_ALL_WAITEVENTS to do the waking up rather than
 *       adding additional code to the driver?
 * @todo Why does acquiring capabilities block and unblock events?  Capabilities
 *       are supposed to control what is reported to the host, we already have
 *       separate requests for blocking and unblocking events.
 *
 * @param   pDevExt         The device extension.
 * @param   pSession        The session which triggered the check (only used in
 *                          the fake-event assertion).
 * @param   fGenFakeEvents  Additional event bits to dispatch as if pending.
 */
static void VBoxGuestCommonCheckEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fGenFakeEvents)
{
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    uint32_t fEvents = fGenFakeEvents | pDevExt->f32PendingEvents;
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;

    /* Hand each pending event to the first waiter whose session may see it. */
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        uint32_t fHandledEvents = VBoxGuestCommonGetHandledEventsLocked(pDevExt, pWait->pSession);
        if (   (pWait->fReqEvents & fEvents & fHandledEvents)
            && !pWait->fResEvents)
        {
            pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
            Assert(!(fGenFakeEvents & pWait->fResEvents) || pSession == pWait->pSession);
            fEvents &= ~pWait->fResEvents; /* Each event bit is delivered to one waiter only. */
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
            int rc = RTSemEventMultiSignal(pWait->Event);
            AssertRC(rc);
#endif
            if (!fEvents)
                break;
        }
    }
    /* Whatever wasn't consumed stays pending. */
    ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);

    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    /* Perform the actual wake-ups now that the spinlock is released. */
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif
}
2595
/** Switch the capabilities in @a fOrMask to "acquire" mode if they are not
 * already in "set" mode.  If @a enmFlags is not set to
 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE, also try to acquire those
 * capabilities for the current session and release those in @a fNotMask.
 *
 * @returns VINF_SUCCESS on success; VERR_INVALID_PARAMETER on a bad mask or
 *          flag value; VERR_INVALID_STATE if a capability is already in "set"
 *          mode; VERR_RESOURCE_BUSY if another session owns a requested
 *          capability.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session acquiring/releasing capabilities.
 * @param   fOrMask     Capabilities to acquire (must validate).
 * @param   fNotMask    Capabilities to release (invalid bits are ignored).
 * @param   enmFlags    VBOXGUESTCAPSACQUIRE_FLAGS_NONE or
 *                      VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE.
 */
static int VBoxGuestCommonGuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask, uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags)
{
    uint32_t fSetCaps = 0;

    if (!VBoxGuestCommonGuestCapsValidateValues(fOrMask))
    {
        LogRel(("VBoxGuestCommonGuestCapsAcquire: pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- invalid fOrMask\n",
                pSession, fOrMask, fNotMask, enmFlags));
        return VERR_INVALID_PARAMETER;
    }

    if (   enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE
        && enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_NONE)
    {
        LogRel(("VBoxGuestCommonGuestCapsAcquire: pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- invalid enmFlags %d\n",
                pSession, fOrMask, fNotMask, enmFlags));
        return VERR_INVALID_PARAMETER;
    }

    /* A capability cannot be acquired while it is in "set" mode. */
    if (!VBoxGuestCommonGuestCapsModeSet(pDevExt, fOrMask, true, &fSetCaps))
    {
        LogRel(("VBoxGuestCommonGuestCapsAcquire: pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- calling caps acquire for set caps\n",
                pSession, fOrMask, fNotMask, enmFlags));
        return VERR_INVALID_STATE;
    }

    /* Configure-only mode: switch the mode but do not touch session ownership. */
    if (enmFlags & VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE)
    {
        Log(("VBoxGuestCommonGuestCapsAcquire: pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- configured acquire caps: 0x%x\n",
             pSession, fOrMask, fNotMask, enmFlags));
        return VINF_SUCCESS;
    }

    /* the fNotMask no need to have all values valid,
     * invalid ones will simply be ignored */
    uint32_t fCurrentOwnedCaps;
    uint32_t fSessionNotCaps;
    uint32_t fSessionOrCaps;
    uint32_t fOtherConflictingCaps;

    fNotMask &= ~fOrMask;

    RTSpinlockAcquire(pDevExt->EventSpinlock);

    fCurrentOwnedCaps      = pSession->u32AquiredGuestCaps;
    fSessionNotCaps        = fCurrentOwnedCaps & fNotMask;      /* Caps this session releases. */
    fSessionOrCaps         = fOrMask & ~fCurrentOwnedCaps;      /* Caps this session newly acquires. */
    fOtherConflictingCaps  = pDevExt->u32GuestCaps & ~fCurrentOwnedCaps; /* Caps held by other sessions... */
    fOtherConflictingCaps &= fSessionOrCaps;                    /* ...which we are trying to acquire. */

    if (!fOtherConflictingCaps)
    {
        if (fSessionOrCaps)
        {
            pSession->u32AquiredGuestCaps |= fSessionOrCaps;
            pDevExt->u32GuestCaps |= fSessionOrCaps;
        }

        if (fSessionNotCaps)
        {
            pSession->u32AquiredGuestCaps &= ~fSessionNotCaps;
            pDevExt->u32GuestCaps &= ~fSessionNotCaps;
        }
    }

    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

    if (fOtherConflictingCaps)
    {
        Log(("VBoxGuest: Caps 0x%x were busy\n", fOtherConflictingCaps));
        return VERR_RESOURCE_BUSY;
    }

    /* now do host notification outside the lock */
    if (!fSessionOrCaps && !fSessionNotCaps)
    {
        /* no changes, return */
        return VINF_SUCCESS;
    }

    int rc = VBoxGuestSetGuestCapabilities(fSessionOrCaps, fSessionNotCaps);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestCommonGuestCapsAcquire: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));

        /* Failure branch
         * this is generally bad since e.g. failure to release the caps may result in other sessions not being able to use it
         * so we are not trying to restore the caps back to their values before the VBoxGuestCommonGuestCapsAcquire call,
         * but just pretend everything is OK.
         * @todo: better failure handling mechanism? */
    }

    /* success! */
    uint32_t fGenFakeEvents = 0;

    if (fSessionOrCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
    {
        /* generate the seamless change event so that the r3 app could synch with the seamless state
         * although this introduces a false alarming of r3 client, it still solve the problem of
         * client state inconsistency in multiuser environment */
        fGenFakeEvents |= VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
    }

    /* since the acquire filter mask has changed, we need to process events in any way to ensure they go from pending events field
     * to the proper (un-filtered) entries */
    VBoxGuestCommonCheckEvents(pDevExt, pSession, fGenFakeEvents);

    return VINF_SUCCESS;
}
2709
2710static int VBoxGuestCommonIOCTL_GuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestCapsAquire *pAcquire)
2711{
2712 int rc = VBoxGuestCommonGuestCapsAcquire(pDevExt, pSession, pAcquire->u32OrMask, pAcquire->u32NotMask, pAcquire->enmFlags);
2713 if (RT_FAILURE(rc))
2714 LogRel(("VBoxGuestCommonGuestCapsAcquire: failed rc=%Rrc\n", rc));
2715 pAcquire->rc = rc;
2716 return VINF_SUCCESS;
2717}
2718
2719
2720/**
2721 * Common IOCtl for user to kernel and kernel to kernel communication.
2722 *
2723 * This function only does the basic validation and then invokes
2724 * worker functions that takes care of each specific function.
2725 *
2726 * @returns VBox status code.
2727 *
2728 * @param iFunction The requested function.
2729 * @param pDevExt The device extension.
2730 * @param pSession The client session.
2731 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2732 * @param cbData The max size of the data buffer.
2733 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2734 */
2735int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2736 void *pvData, size_t cbData, size_t *pcbDataReturned)
2737{
2738 int rc;
2739 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2740 iFunction, pDevExt, pSession, pvData, cbData));
2741
2742 /*
2743 * Make sure the returned data size is set to zero.
2744 */
2745 if (pcbDataReturned)
2746 *pcbDataReturned = 0;
2747
2748 /*
2749 * Define some helper macros to simplify validation.
2750 */
2751#define CHECKRET_RING0(mnemonic) \
2752 do { \
2753 if (pSession->R0Process != NIL_RTR0PROCESS) \
2754 { \
2755 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2756 pSession->Process, (uintptr_t)pSession->R0Process)); \
2757 return VERR_PERMISSION_DENIED; \
2758 } \
2759 } while (0)
2760#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2761 do { \
2762 if (cbData < (cbMin)) \
2763 { \
2764 LogFunc((mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2765 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2766 return VERR_BUFFER_OVERFLOW; \
2767 } \
2768 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2769 { \
2770 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2771 return VERR_INVALID_POINTER; \
2772 } \
2773 } while (0)
2774#define CHECKRET_SIZE(mnemonic, cb) \
2775 do { \
2776 if (cbData != (cb)) \
2777 { \
2778 LogFunc((mnemonic ": cbData=%#zx (%zu) expected is %#zx (%zu)\n", \
2779 cbData, cbData, (size_t)(cb), (size_t)(cb))); \
2780 return VERR_BUFFER_OVERFLOW; \
2781 } \
2782 if ((cb) != 0 && !VALID_PTR(pvData)) \
2783 { \
2784 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2785 return VERR_INVALID_POINTER; \
2786 } \
2787 } while (0)
2788
2789
2790 /*
2791 * Deal with variably sized requests first.
2792 */
2793 rc = VINF_SUCCESS;
2794 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2795 {
2796 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2797 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2798 }
2799#ifdef VBOX_WITH_HGCM
2800 /*
2801 * These ones are a bit tricky.
2802 */
2803 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2804 {
2805 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2806 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2807 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2808 fInterruptible, false /*f32bit*/, false /* fUserData */,
2809 0, cbData, pcbDataReturned);
2810 }
2811 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2812 {
2813 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2814 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2815 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2816 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2817 false /*f32bit*/, false /* fUserData */,
2818 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2819 }
2820 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_USERDATA(0)))
2821 {
2822 bool fInterruptible = true;
2823 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2824 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2825 fInterruptible, false /*f32bit*/, true /* fUserData */,
2826 0, cbData, pcbDataReturned);
2827 }
2828# ifdef RT_ARCH_AMD64
2829 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2830 {
2831 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2832 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2833 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2834 fInterruptible, true /*f32bit*/, false /* fUserData */,
2835 0, cbData, pcbDataReturned);
2836 }
2837 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2838 {
2839 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2840 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2841 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2842 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2843 true /*f32bit*/, false /* fUserData */,
2844 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2845 }
2846# endif
2847#endif /* VBOX_WITH_HGCM */
2848 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2849 {
2850 CHECKRET_MIN_SIZE("LOG", 1);
2851 rc = VBoxGuestCommonIOCtl_Log(pDevExt, (char *)pvData, cbData, pcbDataReturned);
2852 }
2853 else
2854 {
2855 switch (iFunction)
2856 {
2857 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2858 CHECKRET_RING0("GETVMMDEVPORT");
2859 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2860 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2861 break;
2862
2863#ifndef RT_OS_WINDOWS /* Windows has its own implementation of this. */
2864 case VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
2865 CHECKRET_RING0("SET_MOUSE_NOTIFY_CALLBACK");
2866 CHECKRET_SIZE("SET_MOUSE_NOTIFY_CALLBACK", sizeof(VBoxGuestMouseSetNotifyCallback));
2867 rc = VBoxGuestCommonIOCtl_SetMouseNotifyCallback(pDevExt, (VBoxGuestMouseSetNotifyCallback *)pvData);
2868 break;
2869#endif
2870
2871 case VBOXGUEST_IOCTL_WAITEVENT:
2872 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2873 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2874 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2875 break;
2876
2877 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2878 if (cbData != 0)
2879 rc = VERR_INVALID_PARAMETER;
2880 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2881 break;
2882
2883 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2884 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2885 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2886 break;
2887
2888#ifdef VBOX_WITH_HGCM
2889 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2890# ifdef RT_ARCH_AMD64
2891 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2892# endif
2893 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2894 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2895 break;
2896
2897 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2898# ifdef RT_ARCH_AMD64
2899 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2900# endif
2901 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2902 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2903 break;
2904#endif /* VBOX_WITH_HGCM */
2905
2906 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2907 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2908 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2909 break;
2910
2911 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2912 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2913 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2914 break;
2915
2916 case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
2917 CHECKRET_MIN_SIZE("WRITE_CORE_DUMP", sizeof(VBoxGuestWriteCoreDump));
2918 rc = VBoxGuestCommonIOCtl_WriteCoreDump(pDevExt, (VBoxGuestWriteCoreDump *)pvData);
2919 break;
2920
2921#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2922 case VBOXGUEST_IOCTL_ENABLE_VRDP_SESSION:
2923 rc = VBoxGuestCommonIOCtl_EnableVRDPSession(pDevExt, pSession);
2924 break;
2925
2926 case VBOXGUEST_IOCTL_DISABLE_VRDP_SESSION:
2927 rc = VBoxGuestCommonIOCtl_DisableVRDPSession(pDevExt, pSession);
2928 break;
2929#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2930 case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
2931 CHECKRET_SIZE("SET_MOUSE_STATUS", sizeof(uint32_t));
2932 rc = VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession,
2933 *(uint32_t *)pvData);
2934 break;
2935
2936#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
2937 case VBOXGUEST_IOCTL_DPC_LATENCY_CHECKER:
2938 CHECKRET_SIZE("DPC_LATENCY_CHECKER", 0);
2939 rc = VbgdNtIOCtl_DpcLatencyChecker();
2940 break;
2941#endif
2942
2943 case VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE:
2944 CHECKRET_SIZE("GUEST_CAPS_ACQUIRE", sizeof(VBoxGuestCapsAquire));
2945 rc = VBoxGuestCommonIOCTL_GuestCapsAcquire(pDevExt, pSession, (VBoxGuestCapsAquire*)pvData);
2946 *pcbDataReturned = sizeof(VBoxGuestCapsAquire);
2947 break;
2948
2949 default:
2950 {
2951 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x stripped size=%#x\n",
2952 iFunction, VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2953 rc = VERR_NOT_SUPPORTED;
2954 break;
2955 }
2956 }
2957 }
2958
2959 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2960 return rc;
2961}
2962
2963
2964
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up thread waiting for those events.
 *
 * Called from the platform-specific interrupt handlers; runs in interrupt
 * context under pDevExt->EventSpinlock for the event processing part.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
#ifndef RT_OS_WINDOWS
    /* Snapshot of the registered mouse-notify callback, taken while holding
       the spinlock and invoked afterwards (outside the lock) if needed. */
    VBoxGuestMouseSetNotifyCallback MouseNotifyCallback = { NULL, NULL };
#endif
    bool fMousePositionChanged = false;
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    int rc = 0;
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock, increase the ISR count and check if it's our IRQ or
     * not.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    ASMAtomicIncU32(&pDevExt->cISR);
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglGRPerform here as it may take other spinlocks.
         * The host fills in pReq (rc + events) in response to the port write;
         * the compiler barriers keep the request setup / result reads from
         * being reordered around the I/O.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events = 0;
        ASMCompilerBarrier();
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier(); /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;
            PVBOXGUESTWAIT pWait;
            PVBOXGUESTWAIT pSafe;

            Log3(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             * Remember it and handle it after the spinlock is released.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
#ifndef RT_OS_WINDOWS
                MouseNotifyCallback = pDevExt->MouseNotifyCallback;
#endif
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             * Each completed request is moved to a wake-up list (deferred or
             * immediate, depending on the build configuration).
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
                {
                    if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        RTListNodeRemove(&pWait->ListNode);
# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                        RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
# else
                        RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                        rc |= RTSemEventMultiSignal(pWait->Event);
# endif
                    }
                }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.
             * Each matching waiter consumes its events; stop early once all
             * pending events have been claimed.
             */
            fEvents |= pDevExt->f32PendingEvents;
            RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
            {
                uint32_t fHandledEvents = VBoxGuestCommonGetHandledEventsLocked(pDevExt, pWait->pSession);
                if (   (pWait->fReqEvents & fEvents & fHandledEvents)
                    && !pWait->fResEvents)
                {
                    pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
                    fEvents &= ~pWait->fResEvents;
                    RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                    RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
                    RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                    rc |= RTSemEventMultiSignal(pWait->Event);
#endif
                    if (!fEvents)
                        break;
                }
            }
            /* Unclaimed events are left pending for future waiters. */
            ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
        }
        else /* something is seriously wrong... */
            Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        LogFlow(("VBoxGuestCommonISR: not ours\n"));

    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_WINDOWS)
    /*
     * Do wake-ups.
     * Note. On Windows this isn't possible at this IRQL, so a DPC will take
     * care of it.
     */
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * (Do this outside the spinlock to prevent some recursive spinlocking.)
     */
    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VBoxGuestNativeISRMousePollEvent(pDevExt);
#ifndef RT_OS_WINDOWS
        if (MouseNotifyCallback.pfnNotify)
            MouseNotifyCallback.pfnNotify(MouseNotifyCallback.pvUser);
#endif
    }

    ASMAtomicDecU32(&pDevExt->cISR);
    /* rc accumulates RTSemEventMultiSignal statuses; all are expected to succeed. */
    Assert(rc == 0);
    NOREF(rc);
    return fOurIrq;
}
3113
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette