VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@50418

Last change on this file since 50418 was 50149, checked in by vboxsync, 11 years ago

crOpenGL: bugfixes

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 61.2 KB
1/** @file
2 * Video DMA (VDMA) support.
3 */
4
5/*
6 * Copyright (C) 2006-2012 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 */
16//#include <VBox/VMMDev.h>
17#include <VBox/vmm/pdmdev.h>
18#include <VBox/VBoxVideo.h>
19#include <iprt/semaphore.h>
20#include <iprt/thread.h>
21#include <iprt/mem.h>
22#include <iprt/asm.h>
23
24#include "DevVGA.h"
25#include "HGSMI/SHGSMIHost.h"
26#include "HGSMI/HGSMIHostHlp.h"
27
28#include <VBox/VBoxVideo3D.h>
29
30#ifdef DEBUG_misha
31#define WARN_BP() do { AssertFailed(); } while (0)
32#else
33#define WARN_BP() do { } while (0)
34#endif
35#define WARN(_msg) do { \
36 LogRel(_msg); \
37 WARN_BP(); \
38 } while (0)
39
40#ifdef VBOX_VDMA_WITH_WORKERTHREAD
41typedef enum
42{
43 VBOXVDMAPIPE_STATE_CLOSED = 0,
44 VBOXVDMAPIPE_STATE_CREATED = 1,
45 VBOXVDMAPIPE_STATE_OPENNED = 2,
46 VBOXVDMAPIPE_STATE_CLOSING = 3
47} VBOXVDMAPIPE_STATE;
48
49typedef struct VBOXVDMAPIPE
50{
51 RTSEMEVENT hEvent;
52 /* critical section for accessing pipe properties */
53 RTCRITSECT hCritSect;
54 VBOXVDMAPIPE_STATE enmState;
55 /* true iff the other end needs Event notification */
56 bool bNeedNotify;
57} VBOXVDMAPIPE, *PVBOXVDMAPIPE;
58
59typedef enum
60{
61 VBOXVDMAPIPE_CMD_TYPE_UNDEFINED = 0,
62 VBOXVDMAPIPE_CMD_TYPE_DMACMD = 1,
63 VBOXVDMAPIPE_CMD_TYPE_DMACTL = 2
64} VBOXVDMAPIPE_CMD_TYPE;
65
66typedef struct VBOXVDMAPIPE_CMD_BODY
67{
68 VBOXVDMAPIPE_CMD_TYPE enmType;
69 union
70 {
71 PVBOXVDMACBUF_DR pDr;
72 PVBOXVDMA_CTL pCtl;
73 void *pvCmd;
74 } u;
75}VBOXVDMAPIPE_CMD_BODY, *PVBOXVDMAPIPE_CMD_BODY;
76
77typedef struct VBOXVDMAPIPE_CMD
78{
79 HGSMILISTENTRY Entry;
80 VBOXVDMAPIPE_CMD_BODY Cmd;
81} VBOXVDMAPIPE_CMD, *PVBOXVDMAPIPE_CMD;
82
83#define VBOXVDMAPIPE_CMD_FROM_ENTRY(_pE) ( (PVBOXVDMAPIPE_CMD)((uint8_t *)(_pE) - RT_OFFSETOF(VBOXVDMAPIPE_CMD, Entry)) )
84
85typedef struct VBOXVDMAPIPE_CMD_POOL
86{
87 HGSMILIST List;
88 uint32_t cCmds;
89 VBOXVDMAPIPE_CMD aCmds[1];
90} VBOXVDMAPIPE_CMD_POOL, *PVBOXVDMAPIPE_CMD_POOL;
91#endif
92
93
94/* state transformations:
95 *
96 *   submitter   |   processor
97 *
98 *     STOPPED
99 *        |
100 *        v
101 *    LISTENING  <--->  PROCESSING
102 *        ^                /
103 *        |               /
104 *        |              /
105 *        |             /
106 *        |            /
107 *        |           /
108 *        |          v
109 *        +------- PAUSED
110 *
111 * (a pause request moves LISTENING or PROCESSING to PAUSED; resume returns PAUSED to LISTENING)
112 * */
113#define VBVAEXHOSTCONTEXT_STATE_STOPPED 0
114#define VBVAEXHOSTCONTEXT_STATE_LISTENING 1
115#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 2
116#define VBVAEXHOSTCONTEXT_STATE_PAUSED 3
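/* A rough sketch of how these states are driven by the functions below (see the function bodies for
 * the authoritative logic):
 *   VBoxVBVAExHSEnable():            STOPPED    -> LISTENING
 *   vboxVBVAExHSProcessorAcquire():  LISTENING  -> PROCESSING
 *   vboxVBVAExHPProcessorRelease():  PROCESSING -> LISTENING (or PAUSED if a pause was requested)
 *   vboxVBVAExHSPause()/Resume():    LISTENING  <-> PAUSED
 *   VBoxVBVAExHSDisable()/Term():    back to STOPPED (the context is zeroed) */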
117
118typedef struct VBVAEXHOSTCONTEXT
119{
120 VBVABUFFER *pVBVA;
121 uint32_t cbCurData;
122 volatile uint32_t u32State;
123 volatile uint32_t u32Pause;
124 volatile uint32_t u32cOtherCommands;
125} VBVAEXHOSTCONTEXT;
126
127/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
128 * but can be called concurrently with the VBoxVBVAExHS** (submitter) functions, except Init/Start/Term apparently.
129 * They may only be called by the processor, i.e. the entity that acquired the processor state by a direct or indirect call to VBoxVBVAExHSCheckCommands.
130 * See the more detailed comments at the function definitions. */
131static bool VBoxVBVAExHPCmdCheckRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva);
132static int VBoxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
133
134/* VBoxVBVAExHS**, i.e. submitter functions, can be called concurrently with themselves
135 * as well as with the other VBoxVBVAEx** functions, except Init/Start/Term apparently. */
136static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
137
138static void VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
139static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
140static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
141static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
142static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
143static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
144
145typedef struct VBOXVDMAHOST
146{
147 PHGSMIINSTANCE pHgsmi;
148 PVGASTATE pVGAState;
149 VBVAEXHOSTCONTEXT CmdVbva;
150#ifdef VBOX_VDMA_WITH_WATCHDOG
151 PTMTIMERR3 WatchDogTimer;
152#endif
153#ifdef VBOX_VDMA_WITH_WORKERTHREAD
154 VBOXVDMAPIPE Pipe;
155 HGSMILIST PendingList;
156 RTTHREAD hWorkerThread;
157 VBOXVDMAPIPE_CMD_POOL CmdPool;
158#endif
159} VBOXVDMAHOST, *PVBOXVDMAHOST;
160
161
162#ifdef VBOX_WITH_CRHGSMI
163
164typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
165typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
166
167typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
168{
169 uint32_t cRefs;
170 int32_t rc;
171 PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;
172 void *pvCompletion;
173 VBOXVDMACMD_CHROMIUM_CTL Cmd;
174} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
175
176#define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
177
178static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
179{
180 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
181 Assert(pHdr);
182 if (pHdr)
183 {
184 pHdr->cRefs = 1;
185 pHdr->rc = VERR_NOT_IMPLEMENTED;
186 pHdr->Cmd.enmType = enmCmd;
187 pHdr->Cmd.cbCmd = cbCmd;
188 return &pHdr->Cmd;
189 }
190
191 return NULL;
192}
193
194DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
195{
196 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
197 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
198 if(!cRefs)
199 {
200 RTMemFree(pHdr);
201 }
202}
203
204DECLINLINE(void) vboxVDMACrCtlRetain (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
205{
206 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
207 ASMAtomicIncU32(&pHdr->cRefs);
208}
209
210DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
211{
212 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
213 return pHdr->rc;
214}
215
216static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
217{
218 RTSemEventSignal((RTSEMEVENT)pvContext);
219}
220
221static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
222{
223 vboxVDMACrCtlRelease(pCmd);
224}
225
226
227static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
228{
229 if ( pVGAState->pDrv
230 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
231 {
232 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
233 pHdr->pfnCompletion = pfnCompletion;
234 pHdr->pvCompletion = pvCompletion;
235 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
236 return VINF_SUCCESS;
237 }
238#ifdef DEBUG_misha
239 Assert(0);
240#endif
241 return VERR_NOT_SUPPORTED;
242}
243
244static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
245{
246 RTSEMEVENT hComplEvent;
247 int rc = RTSemEventCreate(&hComplEvent);
248 AssertRC(rc);
249 if(RT_SUCCESS(rc))
250 {
251 rc = vboxVDMACrCtlPostAsync (pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
252#ifdef DEBUG_misha
253 AssertRC(rc);
254#endif
255 if (RT_SUCCESS(rc))
256 {
257 rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
258 AssertRC(rc);
259 if(RT_SUCCESS(rc))
260 {
261 RTSemEventDestroy(hComplEvent);
262 }
263 }
264 else
265 {
266 /* the command is completed */
267 RTSemEventDestroy(hComplEvent);
268 }
269 }
270 return rc;
271}
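/* A sketch of the typical synchronous usage of the helpers above (vboxVDMACrCtlHgsmiSetup() and the
 * save-state helpers near the end of this file are the authoritative in-tree examples; <type> and
 * <size> below are placeholders):
 *
 *   PVBOXVDMACMD_CHROMIUM_CTL pCtl = vboxVDMACrCtlCreate(<type>, <size>);
 *   if (pCtl)
 *   {
 *       int rc = vboxVDMACrCtlPost(pVGAState, pCtl, <size>);  // blocks until the backend completes the command
 *       if (RT_SUCCESS(rc))
 *           rc = vboxVDMACrCtlGetRc(pCtl);                    // result stored by vboxVDMACrHgsmiControlCompleteAsync()
 *       vboxVDMACrCtlRelease(pCtl);                           // drops the reference taken by vboxVDMACrCtlCreate()
 *   }
 */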
272
273static void vboxVDMACrCmdNotifyPerform(struct VBOXVDMAHOST *pVdma)
274{
275 PVGASTATE pVGAState = pVdma->pVGAState;
276 pVGAState->pDrv->pfnCrCmdNotifyCmds(pVGAState->pDrv);
277}
278
279/*
280 * @returns VINF_SUCCESS if the command must be passed on for regular processing,
281 * VINF_EOF if the command was fully handled here (a NOP or an already cancelled command).
282 */
283static int vboxVDMACrCmdPreprocess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
284{
285 if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
286 return VINF_EOF;
287
288 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
289
290 /* check if the command is cancelled */
291 if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
292 {
293 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
294 return VINF_EOF;
295 }
296
297 /* some commands can be handled right away */
298 switch (pCmd->u8OpCode)
299 {
300 case VBOXCMDVBVA_OPTYPE_NOPCMD:
301 pCmd->i8Result = 0;
302 return VINF_EOF;
303 default:
304 return VINF_SUCCESS;
305 }
306}
307
308static DECLCALLBACK(int) vboxVDMACrCmdCltCmdGet(HVBOXCRCMDCLT hClt, PVBOXCMDVBVA_HDR *ppNextCmd, uint32_t *pcbNextCmd)
309{
310 struct VBOXVDMAHOST *pVdma = hClt;
311
312 VBoxVBVAExHPCmdCheckRelease(&pVdma->CmdVbva);
313
314 uint32_t cbCmd;
315 uint8_t *pu8Cmd;
316
317 for(;;)
318 {
319 int rc = VBoxVBVAExHPCmdGet(&pVdma->CmdVbva, &pu8Cmd, &cbCmd);
320 switch (rc)
321 {
322 case VINF_SUCCESS:
323 {
324 rc = vboxVDMACrCmdPreprocess(pVdma, pu8Cmd, cbCmd);
325 switch (rc)
326 {
327 case VINF_SUCCESS:
328 *ppNextCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
329 *pcbNextCmd = cbCmd;
330 return VINF_SUCCESS;
331 case VINF_EOF:
332 continue;
333 default:
334 Assert(!RT_FAILURE(rc));
335 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
336 }
337 break;
338 }
339 case VINF_EOF:
340 return VINF_EOF;
341 case VINF_PERMISSION_DENIED:
342 /* processing was paused, processing state was released, only VBoxVBVAExHS*** calls are now allowed */
343 return VINF_EOF;
344 case VINF_INTERRUPTED:
345 /* command processing was interrupted, processor state remains set. client can process any commands */
346 vboxVDMACrCmdNotifyPerform(pVdma);
347 return VINF_EOF;
348 default:
349 Assert(!RT_FAILURE(rc));
350 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
351 }
352 }
353
354 WARN(("Warning: vboxVDMACrCmdCltCmdGet unexpected state\n"));
355 return VERR_INTERNAL_ERROR;
356}
357
358static DECLCALLBACK(int) vboxVDMACrCmdCltDmGet(HVBOXCRCMDCLT hClt, uint32_t idScreen, struct VBVAINFOSCREEN *pScreen, void **ppvVram)
359{
360 struct VBOXVDMAHOST *pVdma = hClt;
361 PVGASTATE pVGAState = pVdma->pVGAState;
362
363 return VBVAGetScreenInfo(pVGAState, idScreen, pScreen, ppvVram);
364}
365
366static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
367{
368 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd;
369 pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP) vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP,
370 sizeof (*pCmd));
371 if (pCmd)
372 {
373 VBOXCRCMD_CLTINFO CltInfo;
374 CltInfo.hClient = pVdma;
375 CltInfo.pfnCmdGet = vboxVDMACrCmdCltCmdGet;
376 CltInfo.pfnDmGet = vboxVDMACrCmdCltDmGet;
377 PVGASTATE pVGAState = pVdma->pVGAState;
378 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
379 pCmd->cbVRam = pVGAState->vram_size;
380 pCmd->pCrCmdClientInfo = &CltInfo;
381 int rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
382 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
383 if (RT_SUCCESS(rc))
384 {
385 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
386 }
387 vboxVDMACrCtlRelease(&pCmd->Hdr);
388 return rc;
389 }
390 return VERR_NO_MEMORY;
391}
392
393static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
394
395/* check if this is an external command to be passed to the chromium backend */
396static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
397{
398 PVBOXVDMACMD pDmaCmd = NULL;
399 uint32_t cbDmaCmd = 0;
400 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
401 int rc = VINF_NOT_SUPPORTED;
402
403 cbDmaCmd = pCmdDr->cbBuf;
404
405 if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
406 {
407 if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
408 {
409 AssertMsgFailed(("invalid buffer data!"));
410 return VERR_INVALID_PARAMETER;
411 }
412
413 if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
414 {
415 AssertMsgFailed(("invalid command buffer data!"));
416 return VERR_INVALID_PARAMETER;
417 }
418
419 pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
420 }
421 else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
422 {
423 VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
424 if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
425 {
426 AssertMsgFailed(("invalid command buffer data from offset!"));
427 return VERR_INVALID_PARAMETER;
428 }
429 pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
430 }
431
432 if (pDmaCmd)
433 {
434 Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
435 uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);
436
437 switch (pDmaCmd->enmType)
438 {
439 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
440 {
441 PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
442 if (cbBody < sizeof (*pCrCmd))
443 {
444 AssertMsgFailed(("invalid chromium command buffer size!"));
445 return VERR_INVALID_PARAMETER;
446 }
447 PVGASTATE pVGAState = pVdma->pVGAState;
448 rc = VINF_SUCCESS;
449 if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
450 {
451 VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
452 pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
453 break;
454 }
455 else
456 {
457 Assert(0);
458 }
459
460 int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
461 AssertRC(tmpRc);
462 break;
463 }
464 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
465 {
466 PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
467 if (cbBody < sizeof (*pTransfer))
468 {
469 AssertMsgFailed(("invalid bpb transfer buffer size!"));
470 return VERR_INVALID_PARAMETER;
471 }
472
473 rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
474 AssertRC(rc);
475 if (RT_SUCCESS(rc))
476 {
477 pCmdDr->rc = VINF_SUCCESS;
478 rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
479 AssertRC(rc);
480 rc = VINF_SUCCESS;
481 }
482 break;
483 }
484 default:
485 break;
486 }
487 }
488 return rc;
489}
490
491int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
492{
493 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
494 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
495 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
496 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
497 AssertRC(rc);
498 pDr->rc = rc;
499
500 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
501 rc = VBoxSHGSMICommandComplete(pIns, pDr);
502 AssertRC(rc);
503 return rc;
504}
505
506int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
507{
508 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
509 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
510 pCmdPrivate->rc = rc;
511 if (pCmdPrivate->pfnCompletion)
512 {
513 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
514 }
515 return VINF_SUCCESS;
516}
517
518#endif
519
520#ifdef VBOX_VDMA_WITH_WORKERTHREAD
521/* to simplify things and to avoid extra backend interface modifications we assume the VBOXVDMA_RECTL layout is the same as VBVACMDHDR */
522AssertCompile(sizeof(VBOXVDMA_RECTL) == sizeof(VBVACMDHDR));
523AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, left) == RT_SIZEOFMEMB(VBVACMDHDR, x));
524AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, top) == RT_SIZEOFMEMB(VBVACMDHDR, y));
525AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, width) == RT_SIZEOFMEMB(VBVACMDHDR, w));
526AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, height) == RT_SIZEOFMEMB(VBVACMDHDR, h));
527AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, left) == RT_OFFSETOF(VBVACMDHDR, x));
528AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, top) == RT_OFFSETOF(VBVACMDHDR, y));
529AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, width) == RT_OFFSETOF(VBVACMDHDR, w));
530AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, height) == RT_OFFSETOF(VBVACMDHDR, h));
531
532static int vboxVDMANotifyPrimaryUpdate (PVGASTATE pVGAState, unsigned uScreenId, const VBOXVDMA_RECTL * pRectl)
533{
534 pVGAState->pDrv->pfnVBVAUpdateBegin (pVGAState->pDrv, uScreenId);
535
536 /* Updates the rectangle and sends the command to the VRDP server. */
537 pVGAState->pDrv->pfnVBVAUpdateProcess (pVGAState->pDrv, uScreenId,
538 (const PVBVACMDHDR)pRectl /* <- see above AssertCompile's and comments */,
539 sizeof (VBOXVDMA_RECTL));
540
541 pVGAState->pDrv->pfnVBVAUpdateEnd (pVGAState->pDrv, uScreenId, pRectl->left, pRectl->top,
542 pRectl->width, pRectl->height);
543
544 return VINF_SUCCESS;
545}
546#endif
547
548static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma,
549 uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
550 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
551 const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
552{
553 /* we do not support color conversion */
554 Assert(pDstDesc->format == pSrcDesc->format);
555 /* we do not support stretching */
556 Assert(pDstRectl->height == pSrcRectl->height);
557 Assert(pDstRectl->width == pSrcRectl->width);
558 if (pDstDesc->format != pSrcDesc->format)
559 return VERR_INVALID_FUNCTION;
560 if (pDstDesc->width == pDstRectl->width
561 && pSrcDesc->width == pSrcRectl->width
562 && pSrcDesc->width == pDstDesc->width)
563 {
564 Assert(!pDstRectl->left);
565 Assert(!pSrcRectl->left);
566 uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
567 uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
568 memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
569 }
570 else
571 {
572 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
573 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
574 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
575 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
576 Assert(cbDstLine <= pDstDesc->pitch);
577 uint32_t cbDstSkip = pDstDesc->pitch;
578 uint8_t * pvDstStart = pvDstSurf + offDstStart;
579
580 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
581 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
582 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
583 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
584 Assert(cbSrcLine <= pSrcDesc->pitch);
585 uint32_t cbSrcSkip = pSrcDesc->pitch;
586 const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;
587
588 Assert(cbDstLine == cbSrcLine);
589
590 for (uint32_t i = 0; ; ++i)
591 {
592 memcpy (pvDstStart, pvSrcStart, cbDstLine);
593 if (i == pDstRectl->height)
594 break;
595 pvDstStart += cbDstSkip;
596 pvSrcStart += cbSrcSkip;
597 }
598 }
599 return VINF_SUCCESS;
600}
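/* A worked example of the per-line arithmetic above (illustrative values, not taken from real guest
 * data): with bpp = 32, pitch = 5120 and a destination sub-rectangle at left = 10, top = 7,
 * width = 100, the code computes
 *   offDstLineStart = (10 * 32) >> 3                               = 40
 *   offDstLineEnd   = ((10 * 32 + 7) >> 3) + ((32 * 100 + 7) >> 3) = 40 + 400 = 440
 *   cbDstLine       = 440 - 40                                     = 400 bytes copied per line
 *   offDstStart     = 5120 * 7 + 40                                = 35880 bytes into the surface */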
601
602static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
603{
604 if (!pRectl1->width)
605 *pRectl1 = *pRectl2;
606 else
607 {
608 int16_t x21 = pRectl1->left + pRectl1->width;
609 int16_t x22 = pRectl2->left + pRectl2->width;
610 if (pRectl1->left > pRectl2->left)
611 {
612 pRectl1->left = pRectl2->left;
613 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
614 }
615 else if (x21 < x22)
616 pRectl1->width = x22 - pRectl1->left;
617
618 x21 = pRectl1->top + pRectl1->height;
619 x22 = pRectl2->top + pRectl2->height;
620 if (pRectl1->top > pRectl2->top)
621 {
622 pRectl1->top = pRectl2->top;
623 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
624 }
625 else if (x21 < x22)
626 pRectl1->height = x22 - pRectl1->top;
627 }
628}
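/* Example (illustrative): uniting { left = 10, top = 10, width = 20, height = 20 } with
 * { left = 25, top = 5, width = 10, height = 10 } yields the bounding rectangle
 * { left = 10, top = 5, width = 25, height = 25 }, i.e. x in [10..35) and y in [5..30). */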
629
630/*
631 * @return the number of bytes the command contained on success, otherwise a VERR_xxx error code
632 */
633static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
634{
635 const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
636 Assert(cbBlt <= cbBuffer);
637 if (cbBuffer < cbBlt)
638 return VERR_INVALID_FUNCTION;
639
640 /* we do not support stretching for now */
641 Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
642 Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
643 if (pBlt->srcRectl.width != pBlt->dstRectl.width)
644 return VERR_INVALID_FUNCTION;
645 if (pBlt->srcRectl.height != pBlt->dstRectl.height)
646 return VERR_INVALID_FUNCTION;
647 Assert(pBlt->cDstSubRects);
648
649 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
650 VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};
651
652 if (pBlt->cDstSubRects)
653 {
654 VBOXVDMA_RECTL dstRectl, srcRectl;
655 const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
656 for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
657 {
658 pDstRectl = &pBlt->aDstSubRects[i];
659 if (pBlt->dstRectl.left || pBlt->dstRectl.top)
660 {
661 dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
662 dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
663 dstRectl.width = pDstRectl->width;
664 dstRectl.height = pDstRectl->height;
665 pDstRectl = &dstRectl;
666 }
667
668 pSrcRectl = &pBlt->aDstSubRects[i];
669 if (pBlt->srcRectl.left || pBlt->srcRectl.top)
670 {
671 srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
672 srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
673 srcRectl.width = pSrcRectl->width;
674 srcRectl.height = pSrcRectl->height;
675 pSrcRectl = &srcRectl;
676 }
677
678 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
679 &pBlt->dstDesc, &pBlt->srcDesc,
680 pDstRectl,
681 pSrcRectl);
682 AssertRC(rc);
683 if (!RT_SUCCESS(rc))
684 return rc;
685
686 vboxVDMARectlUnite(&updateRectl, pDstRectl);
687 }
688 }
689 else
690 {
691 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
692 &pBlt->dstDesc, &pBlt->srcDesc,
693 &pBlt->dstRectl,
694 &pBlt->srcRectl);
695 AssertRC(rc);
696 if (!RT_SUCCESS(rc))
697 return rc;
698
699 vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
700 }
701
702#ifdef VBOX_VDMA_WITH_WORKERTHREAD
703 int iView = 0;
704 /* @todo: fixme: check if update is needed and get iView */
705 vboxVDMANotifyPrimaryUpdate (pVdma->pVGAState, iView, &updateRectl);
706#endif
707
708 return cbBlt;
709}
710
711static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
712{
713 if (cbBuffer < sizeof (*pTransfer))
714 return VERR_INVALID_PARAMETER;
715
716 PVGASTATE pVGAState = pVdma->pVGAState;
717 uint8_t * pvRam = pVGAState->vram_ptrR3;
718 PGMPAGEMAPLOCK SrcLock;
719 PGMPAGEMAPLOCK DstLock;
720 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
721 const void * pvSrc;
722 void * pvDst;
723 int rc = VINF_SUCCESS;
724 uint32_t cbTransfer = pTransfer->cbTransferSize;
725 uint32_t cbTransfered = 0;
726 bool bSrcLocked = false;
727 bool bDstLocked = false;
728 do
729 {
730 uint32_t cbSubTransfer = cbTransfer;
731 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
732 {
733 pvSrc = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
734 }
735 else
736 {
737 RTGCPHYS phPage = pTransfer->Src.phBuf;
738 phPage += cbTransfered;
739 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
740 AssertRC(rc);
741 if (RT_SUCCESS(rc))
742 {
743 bSrcLocked = true;
744 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
745 }
746 else
747 {
748 break;
749 }
750 }
751
752 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
753 {
754 pvDst = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
755 }
756 else
757 {
758 RTGCPHYS phPage = pTransfer->Dst.phBuf;
759 phPage += cbTransfered;
760 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
761 AssertRC(rc);
762 if (RT_SUCCESS(rc))
763 {
764 bDstLocked = true;
765 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
766 }
767 else
768 {
769 break;
770 }
771 }
772
773 if (RT_SUCCESS(rc))
774 {
775 memcpy(pvDst, pvSrc, cbSubTransfer);
776 cbTransfer -= cbSubTransfer;
777 cbTransfered += cbSubTransfer;
778 }
779 else
780 {
781 cbTransfer = 0; /* to break */
782 }
783
784 if (bSrcLocked)
785 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
786 if (bDstLocked)
787 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
788 } while (cbTransfer);
789
790 if (RT_SUCCESS(rc))
791 return sizeof (*pTransfer);
792 return rc;
793}
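/* Note on the transfer loop above: whenever either end of the transfer lives in guest physical memory,
 * each iteration maps a single guest page, so the copy proceeds in chunks of at most 0x1000 bytes and
 * the page mapping lock is released after every chunk. For example (illustrative), a 10000-byte
 * transfer is split into chunks of 4096 + 4096 + 1808 bytes. */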
794
795static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
796{
797 do
798 {
799 Assert(pvBuffer);
800 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
801
802 if (!pvBuffer)
803 return VERR_INVALID_PARAMETER;
804 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
805 return VERR_INVALID_PARAMETER;
806
807 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
808 uint32_t cbCmd = 0;
809 switch (pCmd->enmType)
810 {
811 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
812 {
813#ifdef VBOXWDDM_TEST_UHGSMI
814 static int count = 0;
815 static uint64_t start, end;
816 if (count==0)
817 {
818 start = RTTimeNanoTS();
819 }
820 ++count;
821 if (count==100000)
822 {
823 end = RTTimeNanoTS();
824 float ems = (end-start)/1000000.f;
825 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
826 }
827#endif
828 /* todo: post the buffer to chromium */
829 return VINF_SUCCESS;
830 }
831 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
832 {
833 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
834 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
835 Assert(cbBlt >= 0);
836 Assert((uint32_t)cbBlt <= cbBuffer);
837 if (cbBlt >= 0)
838 {
839 if ((uint32_t)cbBlt == cbBuffer)
840 return VINF_SUCCESS;
841 else
842 {
843 cbBuffer -= (uint32_t)cbBlt;
844 pvBuffer -= cbBlt;
845 }
846 }
847 else
848 return cbBlt; /* error */
849 break;
850 }
851 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
852 {
853 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
854 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
855 Assert(cbTransfer >= 0);
856 Assert((uint32_t)cbTransfer <= cbBuffer);
857 if (cbTransfer >= 0)
858 {
859 if ((uint32_t)cbTransfer == cbBuffer)
860 return VINF_SUCCESS;
861 else
862 {
863 cbBuffer -= (uint32_t)cbTransfer;
864 pvBuffer -= cbTransfer;
865 }
866 }
867 else
868 return cbTransfer; /* error */
869 break;
870 }
871 case VBOXVDMACMD_TYPE_DMA_NOP:
872 return VINF_SUCCESS;
873 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
874 return VINF_SUCCESS;
875 default:
876 AssertBreakpoint();
877 return VERR_INVALID_FUNCTION;
878 }
879 } while (1);
880
881 /* we should not be here */
882 AssertBreakpoint();
883 return VERR_INVALID_STATE;
884}
885
886#ifdef VBOX_VDMA_WITH_WORKERTHREAD
887
888int vboxVDMAPipeConstruct(PVBOXVDMAPIPE pPipe)
889{
890 int rc = RTSemEventCreate(&pPipe->hEvent);
891 AssertRC(rc);
892 if (RT_SUCCESS(rc))
893 {
894 rc = RTCritSectInit(&pPipe->hCritSect);
895 AssertRC(rc);
896 if (RT_SUCCESS(rc))
897 {
898 pPipe->enmState = VBOXVDMAPIPE_STATE_CREATED;
899 pPipe->bNeedNotify = true;
900 return VINF_SUCCESS;
901// RTCritSectDelete(pPipe->hCritSect);
902 }
903 RTSemEventDestroy(pPipe->hEvent);
904 }
905 return rc;
906}
907
908int vboxVDMAPipeOpenServer(PVBOXVDMAPIPE pPipe)
909{
910 int rc = RTCritSectEnter(&pPipe->hCritSect);
911 AssertRC(rc);
912 if (RT_SUCCESS(rc))
913 {
914 Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_CREATED);
915 switch (pPipe->enmState)
916 {
917 case VBOXVDMAPIPE_STATE_CREATED:
918 pPipe->enmState = VBOXVDMAPIPE_STATE_OPENNED;
919 pPipe->bNeedNotify = false;
920 rc = VINF_SUCCESS;
921 break;
922 case VBOXVDMAPIPE_STATE_OPENNED:
923 pPipe->bNeedNotify = false;
924 rc = VINF_ALREADY_INITIALIZED;
925 break;
926 default:
927 AssertBreakpoint();
928 rc = VERR_INVALID_STATE;
929 break;
930 }
931
932 RTCritSectLeave(&pPipe->hCritSect);
933 }
934 return rc;
935}
936
937int vboxVDMAPipeCloseServer(PVBOXVDMAPIPE pPipe)
938{
939 int rc = RTCritSectEnter(&pPipe->hCritSect);
940 AssertRC(rc);
941 if (RT_SUCCESS(rc))
942 {
943 Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSED
944 || pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSING);
945 switch (pPipe->enmState)
946 {
947 case VBOXVDMAPIPE_STATE_CLOSING:
948 pPipe->enmState = VBOXVDMAPIPE_STATE_CLOSED;
949 rc = VINF_SUCCESS;
950 break;
951 case VBOXVDMAPIPE_STATE_CLOSED:
952 rc = VINF_ALREADY_INITIALIZED;
953 break;
954 default:
955 AssertBreakpoint();
956 rc = VERR_INVALID_STATE;
957 break;
958 }
959
960 RTCritSectLeave(&pPipe->hCritSect);
961 }
962 return rc;
963}
964
965int vboxVDMAPipeCloseClient(PVBOXVDMAPIPE pPipe)
966{
967 int rc = RTCritSectEnter(&pPipe->hCritSect);
968 AssertRC(rc);
969 if (RT_SUCCESS(rc))
970 {
971 bool bNeedNotify = false;
972 Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_OPENNED
973 || pPipe->enmState == VBOXVDMAPIPE_STATE_CREATED
974 || pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSED);
975 switch (pPipe->enmState)
976 {
977 case VBOXVDMAPIPE_STATE_OPENNED:
978 pPipe->enmState = VBOXVDMAPIPE_STATE_CLOSING;
979 bNeedNotify = pPipe->bNeedNotify;
980 pPipe->bNeedNotify = false;
981 break;
982 case VBOXVDMAPIPE_STATE_CREATED:
983 pPipe->enmState = VBOXVDMAPIPE_STATE_CLOSED;
984 pPipe->bNeedNotify = false;
985 break;
986 case VBOXVDMAPIPE_STATE_CLOSED:
987 rc = VINF_ALREADY_INITIALIZED;
988 break;
989 default:
990 AssertBreakpoint();
991 rc = VERR_INVALID_STATE;
992 break;
993 }
994
995 RTCritSectLeave(&pPipe->hCritSect);
996
997 if (bNeedNotify)
998 {
999 rc = RTSemEventSignal(pPipe->hEvent);
1000 AssertRC(rc);
1001 }
1002 }
1003 return rc;
1004}
1005
1006
1007typedef DECLCALLBACK(bool) FNHVBOXVDMARWCB(PVBOXVDMAPIPE pPipe, void *pvCallback);
1008typedef FNHVBOXVDMARWCB *PFNHVBOXVDMARWCB;
1009
1010int vboxVDMAPipeModifyServer(PVBOXVDMAPIPE pPipe, PFNHVBOXVDMARWCB pfnCallback, void * pvCallback)
1011{
1012 int rc = RTCritSectEnter(&pPipe->hCritSect);
1013 AssertRC(rc);
1014 if (RT_SUCCESS(rc))
1015 {
1016 do
1017 {
1018 Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_OPENNED
1019 || pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSING);
1020
1021 if (pPipe->enmState >= VBOXVDMAPIPE_STATE_OPENNED)
1022 {
1023 bool bProcessing = pfnCallback(pPipe, pvCallback);
1024 pPipe->bNeedNotify = !bProcessing;
1025 if (bProcessing)
1026 {
1027 RTCritSectLeave(&pPipe->hCritSect);
1028 rc = VINF_SUCCESS;
1029 break;
1030 }
1031 else if (pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSING)
1032 {
1033 pPipe->enmState = VBOXVDMAPIPE_STATE_CLOSED;
1034 RTCritSectLeave(&pPipe->hCritSect);
1035 rc = VINF_EOF;
1036 break;
1037 }
1038 }
1039 else
1040 {
1041 AssertBreakpoint();
1042 rc = VERR_INVALID_STATE;
1043 RTCritSectLeave(&pPipe->hCritSect);
1044 break;
1045 }
1046
1047 RTCritSectLeave(&pPipe->hCritSect);
1048
1049 rc = RTSemEventWait(pPipe->hEvent, RT_INDEFINITE_WAIT);
1050 AssertRC(rc);
1051 if (!RT_SUCCESS(rc))
1052 break;
1053
1054 rc = RTCritSectEnter(&pPipe->hCritSect);
1055 AssertRC(rc);
1056 if (!RT_SUCCESS(rc))
1057 break;
1058 } while (1);
1059 }
1060
1061 return rc;
1062}
1063
1064int vboxVDMAPipeModifyClient(PVBOXVDMAPIPE pPipe, PFNHVBOXVDMARWCB pfnCallback, void * pvCallback)
1065{
1066 int rc = RTCritSectEnter(&pPipe->hCritSect);
1067 AssertRC(rc);
1068 if (RT_SUCCESS(rc))
1069 {
1070 bool bNeedNotify = false;
1071 Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_OPENNED);
1072 if (pPipe->enmState == VBOXVDMAPIPE_STATE_OPENNED)
1073 {
1074 bool bModified = pfnCallback(pPipe, pvCallback);
1075 if (bModified)
1076 {
1077 bNeedNotify = pPipe->bNeedNotify;
1078 pPipe->bNeedNotify = false;
1079 }
1080 }
1081 else
1082 rc = VERR_INVALID_STATE;
1083
1084 RTCritSectLeave(&pPipe->hCritSect);
1085
1086 if (bNeedNotify)
1087 {
1088 rc = RTSemEventSignal(pPipe->hEvent);
1089 AssertRC(rc);
1090 }
1091 }
1092 return rc;
1093}
1094
1095int vboxVDMAPipeDestruct(PVBOXVDMAPIPE pPipe)
1096{
1097 Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSED
1098 || pPipe->enmState == VBOXVDMAPIPE_STATE_CREATED);
1099 /* ensure the pipe is closed */
1100 vboxVDMAPipeCloseClient(pPipe);
1101
1102 Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSED);
1103
1104 if (pPipe->enmState != VBOXVDMAPIPE_STATE_CLOSED)
1105 return VERR_INVALID_STATE;
1106
1107 int rc = RTCritSectDelete(&pPipe->hCritSect);
1108 AssertRC(rc);
1109
1110 rc = RTSemEventDestroy(pPipe->hEvent);
1111 AssertRC(rc);
1112
1113 return VINF_SUCCESS;
1114}
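/* A rough usage sketch of the pipe above (vboxVDMAWorkerThread() and vboxVDMACommand() below are the
 * authoritative code): the worker thread calls vboxVDMAPipeOpenServer(), then loops on
 * vboxVDMAPipeModifyServer() with a callback that dequeues pending commands, blocking on hEvent while
 * the queue is empty, and finally calls vboxVDMAPipeCloseServer(). The EMT side queues work through
 * vboxVDMAPipeModifyClient() with a callback that appends a command to the pending list; the pipe then
 * signals hEvent only when the worker asked for a wake-up (bNeedNotify). */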
1115#endif
1116
1117static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
1118{
1119 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
1120 const uint8_t * pvBuf;
1121 PGMPAGEMAPLOCK Lock;
1122 int rc;
1123 bool bReleaseLocked = false;
1124
1125 do
1126 {
1127 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
1128
1129 if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
1130 pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
1131 else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
1132 {
1133 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
1134 pvBuf = pvRam + pCmd->Location.offVramBuf;
1135 }
1136 else
1137 {
1138 RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
1139 uint32_t offset = pCmd->Location.phBuf & 0xfff;
1140 Assert(offset + pCmd->cbBuf <= 0x1000);
1141 if (offset + pCmd->cbBuf > 0x1000)
1142 {
1143 /* @todo: more advanced mechanism of command buffer proc is actually needed */
1144 rc = VERR_INVALID_PARAMETER;
1145 break;
1146 }
1147
1148 const void * pvPageBuf;
1149 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
1150 AssertRC(rc);
1151 if (!RT_SUCCESS(rc))
1152 {
1153 /* @todo: if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
1154 break;
1155 }
1156
1157 pvBuf = (const uint8_t *)pvPageBuf;
1158 pvBuf += offset;
1159
1160 bReleaseLocked = true;
1161 }
1162
1163 rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
1164 AssertRC(rc);
1165
1166 if (bReleaseLocked)
1167 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1168 } while (0);
1169
1170 pCmd->rc = rc;
1171
1172 rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
1173 AssertRC(rc);
1174}
1175
1176static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
1177{
1178 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
1179 pCmd->i32Result = VINF_SUCCESS;
1180 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
1181 AssertRC(rc);
1182}
1183
1184#ifdef VBOX_VDMA_WITH_WORKERTHREAD
1185typedef struct
1186{
1187 struct VBOXVDMAHOST *pVdma;
1188 VBOXVDMAPIPE_CMD_BODY Cmd;
1189 bool bHasCmd;
1190} VBOXVDMACMD_PROCESS_CONTEXT, *PVBOXVDMACMD_PROCESS_CONTEXT;
1191
1192static DECLCALLBACK(bool) vboxVDMACommandProcessCb(PVBOXVDMAPIPE pPipe, void *pvCallback)
1193{
1194 PVBOXVDMACMD_PROCESS_CONTEXT pContext = (PVBOXVDMACMD_PROCESS_CONTEXT)pvCallback;
1195 struct VBOXVDMAHOST *pVdma = pContext->pVdma;
1196 HGSMILISTENTRY *pEntry = hgsmiListRemoveHead(&pVdma->PendingList);
1197 if (pEntry)
1198 {
1199 PVBOXVDMAPIPE_CMD pPipeCmd = VBOXVDMAPIPE_CMD_FROM_ENTRY(pEntry);
1200 Assert(pPipeCmd);
1201 pContext->Cmd = pPipeCmd->Cmd;
1202 hgsmiListPrepend(&pVdma->CmdPool.List, pEntry);
1203 pContext->bHasCmd = true;
1204 return true;
1205 }
1206
1207 pContext->bHasCmd = false;
1208 return false;
1209}
1210
1211static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD ThreadSelf, void *pvUser)
1212{
1213 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
1214 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
1215 VBOXVDMACMD_PROCESS_CONTEXT Context;
1216 Context.pVdma = pVdma;
1217
1218 int rc = vboxVDMAPipeOpenServer(&pVdma->Pipe);
1219 AssertRC(rc);
1220 if (RT_SUCCESS(rc))
1221 {
1222 do
1223 {
1224 rc = vboxVDMAPipeModifyServer(&pVdma->Pipe, vboxVDMACommandProcessCb, &Context);
1225 AssertRC(rc);
1226 if (RT_SUCCESS(rc))
1227 {
1228 switch (Context.Cmd.enmType)
1229 {
1230 case VBOXVDMAPIPE_CMD_TYPE_DMACMD:
1231 {
1232 PVBOXVDMACBUF_DR pDr = Context.Cmd.u.pDr;
1233 vboxVDMACommandProcess(pVdma, pDr);
1234 break;
1235 }
1236 case VBOXVDMAPIPE_CMD_TYPE_DMACTL:
1237 {
1238 PVBOXVDMA_CTL pCtl = Context.Cmd.u.pCtl;
1239 vboxVDMAControlProcess(pVdma, pCtl);
1240 break;
1241 }
1242 default:
1243 AssertBreakpoint();
1244 break;
1245 }
1246
1247 if (rc == VINF_EOF)
1248 {
1249 rc = VINF_SUCCESS;
1250 break;
1251 }
1252 }
1253 else
1254 break;
1255 } while (1);
1256 }
1257
1258 /* always try to close the pipe to make sure the client side is notified */
1259 int tmpRc = vboxVDMAPipeCloseServer(&pVdma->Pipe);
1260 AssertRC(tmpRc);
1261 return rc;
1262}
1263#endif
1264
1265#ifdef VBOX_VDMA_WITH_WATCHDOG
1266static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
1267{
1268 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
1269 PVGASTATE pVGAState = pVdma->pVGAState;
1270 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
1271}
1272
1273static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
1274{
1275 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
1276 if (cMillis)
1277 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
1278 else
1279 TMTimerStop(pVdma->WatchDogTimer);
1280 return VINF_SUCCESS;
1281}
1282#endif
1283
1284int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
1285{
1286 int rc;
1287#ifdef VBOX_VDMA_WITH_WORKERTHREAD
1288 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(RT_OFFSETOF(VBOXVDMAHOST, CmdPool.aCmds[cPipeElements]));
1289#else
1290 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
1291#endif
1292 Assert(pVdma);
1293 if (pVdma)
1294 {
1295 pVdma->pHgsmi = pVGAState->pHGSMI;
1296 pVdma->pVGAState = pVGAState;
1297
1298#ifdef VBOX_VDMA_WITH_WATCHDOG
1299 rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
1300 pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
1301 "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
1302 AssertRC(rc);
1303#endif
1304#ifdef VBOX_VDMA_WITH_WORKERTHREAD
1305 hgsmiListInit(&pVdma->PendingList);
1306 rc = vboxVDMAPipeConstruct(&pVdma->Pipe);
1307 AssertRC(rc);
1308 if (RT_SUCCESS(rc))
1309 {
1310 rc = RTThreadCreate(&pVdma->hWorkerThread, vboxVDMAWorkerThread, pVdma, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
1311 AssertRC(rc);
1312 if (RT_SUCCESS(rc))
1313 {
1314 hgsmiListInit(&pVdma->CmdPool.List);
1315 pVdma->CmdPool.cCmds = cPipeElements;
1316 for (uint32_t i = 0; i < cPipeElements; ++i)
1317 {
1318 hgsmiListAppend(&pVdma->CmdPool.List, &pVdma->CmdPool.aCmds[i].Entry);
1319 }
1320# if 0 //def VBOX_WITH_CRHGSMI
1321 int tmpRc = vboxVDMACrCtlHgsmiSetup(pVdma);
1322# endif
1323#endif
1324 pVGAState->pVdma = pVdma;
1325 VBoxVBVAExHSInit(&pVdma->CmdVbva);
1326#ifdef VBOX_WITH_CRHGSMI
1327 int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
1328#endif
1329 return VINF_SUCCESS;
1330#ifdef VBOX_VDMA_WITH_WORKERTHREAD
1331 }
1332
1333 int tmpRc = vboxVDMAPipeDestruct(&pVdma->Pipe);
1334 AssertRC(tmpRc);
1335 }
1336
1337 RTMemFree(pVdma);
1338#endif
1339 }
1340 else
1341 rc = VERR_OUT_OF_RESOURCES;
1342
1343 return rc;
1344}
1345
1346int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
1347{
1348#ifdef VBOX_VDMA_WITH_WORKERTHREAD
1349 /* @todo: implement*/
1350 AssertBreakpoint();
1351#endif
1352 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
1353 RTMemFree(pVdma);
1354 return VINF_SUCCESS;
1355}
1356
1357#ifdef VBOX_VDMA_WITH_WORKERTHREAD
1358typedef struct
1359{
1360 struct VBOXVDMAHOST *pVdma;
1361 VBOXVDMAPIPE_CMD_BODY Cmd;
1362 bool bQueued;
1363} VBOXVDMACMD_SUBMIT_CONTEXT, *PVBOXVDMACMD_SUBMIT_CONTEXT;
1364
1365DECLCALLBACK(bool) vboxVDMACommandSubmitCb(PVBOXVDMAPIPE pPipe, void *pvCallback)
1366{
1367 PVBOXVDMACMD_SUBMIT_CONTEXT pContext = (PVBOXVDMACMD_SUBMIT_CONTEXT)pvCallback;
1368 struct VBOXVDMAHOST *pVdma = pContext->pVdma;
1369 HGSMILISTENTRY *pEntry = hgsmiListRemoveHead(&pVdma->CmdPool.List);
1370 Assert(pEntry);
1371 if (pEntry)
1372 {
1373 PVBOXVDMAPIPE_CMD pPipeCmd = VBOXVDMAPIPE_CMD_FROM_ENTRY(pEntry);
1374 pPipeCmd->Cmd = pContext->Cmd;
1375 VBoxSHGSMICommandMarkAsynchCompletion(pContext->Cmd.u.pvCmd);
1376 pContext->bQueued = true;
1377 hgsmiListAppend(&pVdma->PendingList, pEntry);
1378 return true;
1379 }
1380
1381 /* @todo: should we try to flush some commands here? */
1382 pContext->bQueued = false;
1383 return false;
1384}
1385#endif
1386
1387int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
1388{
1389#ifdef VBOX_WITH_CRHGSMI
1390 PVGASTATE pVGAState = pVdma->pVGAState;
1391 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
1392 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
1393 Assert(pCmd);
1394 if (pCmd)
1395 {
1396 int rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
1397 AssertRC(rc);
1398 if (RT_SUCCESS(rc))
1399 {
1400 rc = vboxVDMACrCtlGetRc(pCmd);
1401 }
1402 vboxVDMACrCtlRelease(pCmd);
1403 return rc;
1404 }
1405 return VERR_NO_MEMORY;
1406#else
1407 return VINF_SUCCESS;
1408#endif
1409}
1410
1411int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
1412{
1413#ifdef VBOX_WITH_CRHGSMI
1414 PVGASTATE pVGAState = pVdma->pVGAState;
1415 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
1416 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
1417 Assert(pCmd);
1418 if (pCmd)
1419 {
1420 int rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
1421 AssertRC(rc);
1422 if (RT_SUCCESS(rc))
1423 {
1424 rc = vboxVDMACrCtlGetRc(pCmd);
1425 }
1426 vboxVDMACrCtlRelease(pCmd);
1427 return rc;
1428 }
1429 return VERR_NO_MEMORY;
1430#else
1431 return VINF_SUCCESS;
1432#endif
1433}
1434
1435void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
1436{
1437#if 1
1438 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
1439
1440 switch (pCmd->enmCtl)
1441 {
1442 case VBOXVDMA_CTL_TYPE_ENABLE:
1443 pCmd->i32Result = VINF_SUCCESS;
1444 break;
1445 case VBOXVDMA_CTL_TYPE_DISABLE:
1446 pCmd->i32Result = VINF_SUCCESS;
1447 break;
1448 case VBOXVDMA_CTL_TYPE_FLUSH:
1449 pCmd->i32Result = VINF_SUCCESS;
1450 break;
1451#ifdef VBOX_VDMA_WITH_WATCHDOG
1452 case VBOXVDMA_CTL_TYPE_WATCHDOG:
1453 pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
1454 break;
1455#endif
1456 default:
1457 AssertBreakpoint();
1458 pCmd->i32Result = VERR_NOT_SUPPORTED;
1459 }
1460
1461 int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
1462 AssertRC(rc);
1463#else
1464 /* test asynchronous completion */
1465 VBOXVDMACMD_SUBMIT_CONTEXT Context;
1466 Context.pVdma = pVdma;
1467 Context.Cmd.enmType = VBOXVDMAPIPE_CMD_TYPE_DMACTL;
1468 Context.Cmd.u.pCtl = pCmd;
1469
1470 int rc = vboxVDMAPipeModifyClient(&pVdma->Pipe, vboxVDMACommandSubmitCb, &Context);
1471 AssertRC(rc);
1472 if (RT_SUCCESS(rc))
1473 {
1474 Assert(Context.bQueued);
1475 if (Context.bQueued)
1476 {
1477 /* success */
1478 return;
1479 }
1480 rc = VERR_OUT_OF_RESOURCES;
1481 }
1482
1483 /* failure */
1484 Assert(RT_FAILURE(rc));
1485 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
1486 pCmd->i32Result = rc;
1487 int tmpRc = VBoxSHGSMICommandComplete (pIns, pCmd);
1488 AssertRC(tmpRc);
1489
1490#endif
1491}
1492
1493void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
1494{
1495 int rc = VERR_NOT_IMPLEMENTED;
1496
1497#ifdef VBOX_WITH_CRHGSMI
1498 /* chromium commands are processed by the chromium HGCM thread independently from our internal command processing pipeline,
1499 * which is why we process them specially */
1500 rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
1501 if (rc == VINF_SUCCESS)
1502 return;
1503
1504 if (RT_FAILURE(rc))
1505 {
1506 pCmd->rc = rc;
1507 rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
1508 AssertRC(rc);
1509 return;
1510 }
1511#endif
1512
1513#ifndef VBOX_VDMA_WITH_WORKERTHREAD
1514 vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
1515#else
1516
1517# ifdef DEBUG_misha
1518 Assert(0);
1519# endif
1520
1521 VBOXVDMACMD_SUBMIT_CONTEXT Context;
1522 Context.pVdma = pVdma;
1523 Context.Cmd.enmType = VBOXVDMAPIPE_CMD_TYPE_DMACMD;
1524 Context.Cmd.u.pDr = pCmd;
1525
1526 rc = vboxVDMAPipeModifyClient(&pVdma->Pipe, vboxVDMACommandSubmitCb, &Context);
1527 AssertRC(rc);
1528 if (RT_SUCCESS(rc))
1529 {
1530 Assert(Context.bQueued);
1531 if (Context.bQueued)
1532 {
1533 /* success */
1534 return;
1535 }
1536 rc = VERR_OUT_OF_RESOURCES;
1537 }
1538 /* failure */
1539 Assert(RT_FAILURE(rc));
1540 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
1541 pCmd->rc = rc;
1542 int tmpRc = VBoxSHGSMICommandComplete (pIns, pCmd);
1543 AssertRC(tmpRc);
1544#endif
1545}
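/* Overall flow of a guest DMA command, as implemented by the functions above (a rough summary):
 * the guest places a VBOXVDMACBUF_DR in HGSMI memory and vboxVDMACommand() is invoked;
 * vboxVDMACmdCheckCrCmd() gets the first chance to handle it (chromium commands are handed to the
 * HGCM backend and completed asynchronously via vboxVDMACrHgsmiCommandCompleteAsync()); anything it
 * does not handle goes to vboxVDMACommandProcess(), which resolves the command buffer (DR tail,
 * VRAM offset or guest physical page), runs vboxVDMACmdExec() and completes the request through
 * VBoxSHGSMICommandComplete(). */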
1546
1547/**/
1548static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1549{
1550 Assert(pCmdVbva->u32State != VBVAEXHOSTCONTEXT_STATE_STOPPED);
1551
1552 uint32_t oldState;
1553 if (!ASMAtomicReadU32(&pCmdVbva->u32Pause))
1554 {
1555 if (ASMAtomicCmpXchgExU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING, &oldState))
1556 return VINF_SUCCESS;
1557 return oldState == VBVAEXHOSTCONTEXT_STATE_PROCESSING ? VERR_SEM_BUSY : VERR_INVALID_STATE;
1558 }
1559 return VERR_INVALID_STATE;
1560}
1561
1562static bool vboxVBVAExHPCheckPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1563{
1564 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1565
1566 if (!ASMAtomicReadU32(&pCmdVbva->u32Pause))
1567 return false;
1568
1569 ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_PAUSED);
1570 return true;
1571}
1572
1573static bool vboxVBVAExHPCheckOtherCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1574{
1575 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1576
1577 return !!ASMAtomicUoReadU32(&pCmdVbva->u32cOtherCommands);
1578}
1579
1580static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1581{
1582 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1583
1584 if (!vboxVBVAExHPCheckPause(pCmdVbva))
1585 ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
1586 else
1587 ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_PAUSED);
1588}
1589
1590static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1591{
1592 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1593
1594 ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
1595}
1596
1597static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1598{
1599 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1600
1601 ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
1602}
1603
1604static bool vboxVBVAExHPCmdCheckRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1605{
1606 if (!pCmdVbva->cbCurData)
1607 return false;
1608
1609 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
1610 pVBVA->off32Data = (pVBVA->off32Data + pCmdVbva->cbCurData) % pVBVA->cbData;
1611
1612 pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
1613
1614 pCmdVbva->cbCurData = 0;
1615
1616 return true;
1617}
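/* Example (illustrative): with cbData = 0x10000, off32Data = 0xFD00 and a completed command of
 * cbCurData = 0x300 bytes, the release above advances off32Data to (0xFD00 + 0x300) % 0x10000 = 0,
 * i.e. back to the start of the ring, and moves indexRecordFirst to the next record slot. Commands
 * themselves never span the wrap point; vboxVBVAExHPCmdGet() below rejects cross-boundary records. */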
1618
1619static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
1620{
1621 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1622
1623 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
1624
1625 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
1626 uint32_t indexRecordFree = pVBVA->indexRecordFree;
1627
1628 Log(("first = %d, free = %d\n",
1629 indexRecordFirst, indexRecordFree));
1630
1631 if (indexRecordFirst == indexRecordFree)
1632 {
1633 /* No records to process. Return without assigning output variables. */
1634 return VINF_EOF;
1635 }
1636
1637 uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);
1638
1639 /* A new record needs to be processed. */
1640 if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
1641 {
1642 /* the record is being recorded, try again */
1643 return VINF_TRY_AGAIN;
1644 }
1645
1646 uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;
1647
1648 if (!cbRecord)
1649 {
1650 /* the record is being recorded, try again */
1651 return VINF_TRY_AGAIN;
1652 }
1653
1654 /* we should not get partial commands here actually */
1655 Assert(cbRecord);
1656
1657 /* The size of the largest contiguous chunk in the ring buffer. */
1658 uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;
1659
1660 /* The pointer to data in the ring buffer. */
1661 uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];
1662
1663 /* Return a pointer to the data, provided the command does not cross the ring buffer boundary. */
1664 if (u32BytesTillBoundary >= cbRecord)
1665 {
1666 /* The command does not cross buffer boundary. Return address in the buffer. */
1667 *ppCmd = pSrc;
1668 *pcbCmd = cbRecord;
1669 pCmdVbva->cbCurData = cbRecord;
1670 return VINF_SUCCESS;
1671 }
1672
1673 LogRel(("CmdVbva: cross-bound writes unsupported\n"));
1674 return VERR_INVALID_STATE;
1675}
1676
1677/* Resumes command processing
1678 * @returns - same as VBoxVBVAExHSCheckCommands
1679 */
1680static int vboxVBVAExHSResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1681{
1682 Assert(pCmdVbva->u32State != VBVAEXHOSTCONTEXT_STATE_STOPPED);
1683
1684 ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
1685
1686 return VBoxVBVAExHSCheckCommands(pCmdVbva);
1687}
1688
1689/* Pauses command processing. This makes the processor stop processing commands and release the processing state.
1690 * To resume command processing, vboxVBVAExHSResume must be called. */
1691static void vboxVBVAExHSPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1692{
1693 Assert(pCmdVbva->u32State != VBVAEXHOSTCONTEXT_STATE_STOPPED);
1694
1695 Assert(!pCmdVbva->u32Pause);
1696
1697 ASMAtomicWriteU32(&pCmdVbva->u32Pause, 1);
1698
1699 for(;;)
1700 {
1701 if (ASMAtomicCmpXchgU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_PAUSED, VBVAEXHOSTCONTEXT_STATE_LISTENING))
1702 break;
1703
1704 if (ASMAtomicReadU32(&pCmdVbva->u32State) == VBVAEXHOSTCONTEXT_STATE_PAUSED)
1705 break;
1706
1707 RTThreadSleep(2);
1708 }
1709
1710 pCmdVbva->u32Pause = 0;
1711}
1712
1713/* Releases (completes) the command previously acquired by VBoxVBVAExHPCmdGet.
1714 * For convenience it can be called when no command is currently acquired;
1715 * in that case it does nothing and returns false.
1716 * Returns true if a completion notification is needed. */
1717static bool VBoxVBVAExHPCmdCheckRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1718{
1719 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1720
1721 return vboxVBVAExHPCmdCheckRelease(pCmdVbva);
1722}
1723
1724/*
1725 * @returns
1726 * VINF_SUCCESS - new command is obtained
1727 * VINF_EOF - the processor has completed all commands and released the processing state, only VBoxVBVAExHS*** calls are now allowed
1728 * VINF_PERMISSION_DENIED - processing was paused, the processing state was released, only VBoxVBVAExHS*** calls are now allowed
1729 * VINF_INTERRUPTED - command processing was interrupted, the processor state remains set; the client can process other commands
1730 * and call VBoxVBVAExHPCmdGet again for further processing
1731 * VERR_** - an error happened, most likely the guest corrupted the VBVA data
1732 *
1733 */
1734static int VBoxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
1735{
1736 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1737
1738 for(;;)
1739 {
1740 if (vboxVBVAExHPCheckPause(pCmdVbva))
1741 return VINF_PERMISSION_DENIED;
1742 if (vboxVBVAExHPCheckOtherCommands(pCmdVbva))
1743 return VINF_INTERRUPTED;
1744
1745 int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
1746 switch (rc)
1747 {
1748 case VINF_SUCCESS:
1749 return VINF_SUCCESS;
1750 case VINF_EOF:
1751 vboxVBVAExHPHgEventClear(pCmdVbva);
1752 vboxVBVAExHPProcessorRelease(pCmdVbva);
1753 /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
1754 * 1. we check the queue -> and it is empty
1755 * 2. submitter adds command to the queue
1756 * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
1757 * 4. we clear the "processing" state
1758 * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
1759 * 6. if the queue appears to be not-empty set the "processing" state back to "true"
1760 **/
1761 if (VBoxVBVAExHSCheckCommands(pCmdVbva) == VINF_SUCCESS)
1762 continue;
1763 return VINF_EOF;
1764 case VINF_TRY_AGAIN:
1765 RTThreadSleep(1);
1766 continue;
1767 default:
1768 /* this is something really unexpected, i.e. most likely the guest has written something incorrect to the VBVA buffer */
1769 if (RT_FAILURE(rc))
1770 return rc;
1771
1772 WARN(("Warning: vboxVBVAExHCmdGet returned unexpected success status %d\n", rc));
1773 return VERR_INTERNAL_ERROR;
1774 }
1775 }
1776
1777 WARN(("Warning: VBoxVBVAExHCmdGet unexpected state\n"));
1778 return VERR_INTERNAL_ERROR;
1779}
1780
1781/* Checks whether new commands are ready for processing
1782 * @returns
1783 * VINF_SUCCESS - there are commands in the queue, and the calling thread is now the processor (i.e. typically it would delegate processing to a worker thread)
1784 * VINF_EOF - no commands in the queue
1785 * VINF_ALREADY_INITIALIZED - another thread is already processing the commands
1786 * VERR_INVALID_STATE - the VBVA is paused or pausing */
1787static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1788{
1789 if (ASMAtomicUoReadU32(&pCmdVbva->u32State) == VBVAEXHOSTCONTEXT_STATE_STOPPED)
1790 return VINF_EOF;
1791
1792 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
1793 if (RT_SUCCESS(rc))
1794 {
1795 /* we are the processor now */
1796 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
1797
1798 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
1799 uint32_t indexRecordFree = pVBVA->indexRecordFree;
1800
1801 if (indexRecordFirst != indexRecordFree)
1802 {
1803 vboxVBVAExHPHgEventSet(pCmdVbva);
1804 return VINF_SUCCESS;
1805 }
1806
1807 vboxVBVAExHPProcessorRelease(pCmdVbva);
1808 return VINF_EOF;
1809 }
1810 if (rc == VERR_SEM_BUSY)
1811 return VINF_ALREADY_INITIALIZED;
1812 Assert(rc == VERR_INVALID_STATE);
1813 return VERR_INVALID_STATE;
1814}
1815
1816static void VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1817{
1818 memset(pCmdVbva, 0, sizeof (*pCmdVbva));
1819}
1820
1821static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
1822{
1823 if (ASMAtomicUoReadU32(&pCmdVbva->u32State) != VBVAEXHOSTCONTEXT_STATE_STOPPED)
1824 return VINF_ALREADY_INITIALIZED;
1825
1826 pCmdVbva->pVBVA = pVBVA;
1827 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
1828 ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
1829 return VINF_SUCCESS;
1830}
1831
1832static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1833{
1834 if (ASMAtomicUoReadU32(&pCmdVbva->u32State) == VBVAEXHOSTCONTEXT_STATE_STOPPED)
1835 return VINF_SUCCESS;
1836
1837 /* ensure no commands are pending and that no one tries to submit new ones */
1838 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
1839 if (RT_SUCCESS(rc))
1840 {
1841 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
1842 memset(pCmdVbva, 0, sizeof (*pCmdVbva));
1843 return VINF_SUCCESS;
1844 }
1845 return VERR_INVALID_STATE;
1846}
1847
1848static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1849{
1850 /* ensure the processor is stopped */
1851 if (ASMAtomicUoReadU32(&pCmdVbva->u32State) == VBVAEXHOSTCONTEXT_STATE_STOPPED)
1852 return;
1853
1854 /* ensure no one tries to submit the command */
1855 vboxVBVAExHSPause(pCmdVbva);
1856 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
1857 memset(pCmdVbva, 0, sizeof (*pCmdVbva));
1858}
1859
1860/* Saves the state
1861 * @returns - same as VBoxVBVAExHSCheckCommands, or a failure status if saving the state fails
1862 */
1863static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
1864{
1865 int rc;
1866 if (ASMAtomicUoReadU32(&pCmdVbva->u32State) != VBVAEXHOSTCONTEXT_STATE_STOPPED)
1867 {
1868 vboxVBVAExHSPause(pCmdVbva);
1869 rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pCmdVbva->pVBVA) - pu8VramBase));
1870 AssertRCReturn(rc, rc);
1871 return vboxVBVAExHSResume(pCmdVbva);
1872 }
1873
1874 rc = SSMR3PutU32(pSSM, 0xffffffff);
1875 AssertRCReturn(rc, rc);
1876
1877 return VINF_EOF;
1878}
1879
1880/* Loads the state
1881 * @returns - same as VBoxVBVAExHSCheckCommands, or a failure status if loading the state fails
1882 */
1883static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
1884{
1885 uint32_t u32;
1886 int rc = SSMR3GetU32(pSSM, &u32);
1887 AssertRCReturn(rc, rc);
1888 if (u32 != 0xffffffff)
1889 {
1890 VBVABUFFER *pVBVA = (VBVABUFFER*)(pu8VramBase + u32); /* u32 is the byte offset saved by VBoxVBVAExHSSaveState */
1891 rc = VBoxVBVAExHSEnable(pCmdVbva, pVBVA);
1892 AssertRCReturn(rc, rc);
1893 return VBoxVBVAExHSCheckCommands(pCmdVbva);
1894 }
1895
1896 return VINF_EOF;
1897}
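/* Saved-state encoding used by the two functions above (sketch): a single uint32_t holding the byte
 * offset of the guest VBVABUFFER within VRAM, or 0xffffffff when command VBVA was not enabled.
 * E.g. a buffer mapped at VRAM offset 0x7f000 is saved as 0x0007f000 and re-enabled at
 * pu8VramBase + 0x7f000 on load. */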
1898
1899int vboxCmdVBVAEnable(PVGASTATE pVGAState, VBVABUFFER *pVBVA)
1900{
1901 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
1902 return VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1903}
1904
1905int vboxCmdVBVADisable(PVGASTATE pVGAState)
1906{
1907 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
1908 return VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1909}
1910
1911static int vboxCmdVBVACmdSubmitPerform(PVGASTATE pVGAState)
1912{
1913 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
1914 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
1915 switch (rc)
1916 {
1917 case VINF_SUCCESS:
1918 return pVGAState->pDrv->pfnCrCmdNotifyCmds(pVGAState->pDrv);
1919 case VINF_ALREADY_INITIALIZED:
1920 case VINF_EOF:
1921 case VERR_INVALID_STATE:
1922 return VINF_SUCCESS;
1923 default:
1924 Assert(!RT_FAILURE(rc));
1925 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
1926 }
1927}
1928
1929int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
1930{
1931 return vboxCmdVBVACmdSubmitPerform(pVGAState);
1932}
1933
1934int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
1935{
1936 return vboxCmdVBVACmdSubmitPerform(pVGAState);
1937}
1938
1939void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
1940{
1941 vboxCmdVBVACmdSubmitPerform(pVGAState);
1942}