VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 20110

Last change on this file since 20110 was 20089, checked in by vboxsync, 16 years ago

TM: The critsect assertions.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 59.7 KB
1/* $Id: TMAll.cpp 20089 2009-05-27 14:53:47Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_TM
27#include <VBox/tm.h>
28#include <VBox/mm.h>
29#ifdef IN_RING3
30# include <VBox/rem.h>
31#endif
32#include "TMInternal.h"
33#include <VBox/vm.h>
34
35#include <VBox/param.h>
36#include <VBox/err.h>
37#include <VBox/log.h>
38#include <VBox/sup.h>
39#include <iprt/time.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#ifdef IN_RING3
43# include <iprt/thread.h>
44#endif
45
46
47/*******************************************************************************
48* Defined Constants And Macros *
49*******************************************************************************/
50/** @def TMTIMER_ASSERT_CRITSECT
51 * Checks that the caller owns the critical section if one is associated with
52 * the timer. */
53#ifdef VBOX_STRICT
54# define TMTIMER_ASSERT_CRITSECT(pTimer) \
55 do { \
56 if ((pTimer)->pCritSect) \
57 { \
58 PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
59 Assert(pCritSect && PDMCritSectIsOwner(pCritSect)); \
60 } \
61 } while (0)
62#else
63# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
64#endif
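/*
 * A minimal sketch of the usage pattern the assertion above enforces: a device
 * that associated a critical section with its timer owns that section before
 * touching the timer. PMYDEVSTATE and its members are hypothetical.
 */
#if 0 /* example only */
static void myDevArmTimer(PMYDEVSTATE pThis)
{
    PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);  /* own the timer's critsect first */
    TMTimerSetMillies(pThis->pTimer, 10 /* ms */);      /* TMTIMER_ASSERT_CRITSECT now holds */
    PDMCritSectLeave(&pThis->CritSect);
}
#endif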
65
66
67#ifndef tmLock
68
69/**
70 * Try to take the EMT/TM lock, waiting in ring-3 and returning VERR_SEM_BUSY in R0/RC.
71 *
72 * @retval VINF_SUCCESS on success (always in ring-3).
73 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
74 *
75 * @param pVM The VM handle.
76 */
77int tmLock(PVM pVM)
78{
79 VM_ASSERT_EMT(pVM);
80 int rc = PDMCritSectEnter(&pVM->tm.s.EmtLock, VERR_SEM_BUSY);
81 return rc;
82}
83
84
85/**
86 * Try to take the EMT/TM lock without waiting.
87 *
88 * @retval VINF_SUCCESS on success.
89 * @retval VERR_SEM_BUSY if busy.
90 *
91 * @param pVM The VM handle.
92 */
93int tmTryLock(PVM pVM)
94{
95 VM_ASSERT_EMT(pVM);
96 int rc = PDMCritSectTryEnter(&pVM->tm.s.EmtLock);
97 return rc;
98}
99
100
101/**
102 * Release the EMT/TM lock.
103 *
104 * @param pVM The VM handle.
105 */
106void tmUnlock(PVM pVM)
107{
108 PDMCritSectLeave(&pVM->tm.s.EmtLock);
109}
110
111
112/**
113 * Try to take the VirtualSync lock, waiting in ring-3 and returning VERR_SEM_BUSY in R0/RC.
114 *
115 * @retval VINF_SUCCESS on success (always in ring-3).
116 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
117 *
118 * @param pVM The VM handle.
119 */
120int tmVirtualSyncLock(PVM pVM)
121{
122 VM_ASSERT_EMT(pVM);
123 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_SEM_BUSY);
124 return rc;
125}
126
127
128/**
129 * Try to take the VirtualSync lock without waiting.
130 *
131 * @retval VINF_SUCCESS on success.
132 * @retval VERR_SEM_BUSY if busy.
133 *
134 * @param pVM The VM handle.
135 */
136int tmVirtualSyncTryLock(PVM pVM)
137{
138 VM_ASSERT_EMT(pVM);
139 int rc = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
140 return rc;
141}
142
143
144/**
145 * Release the VirtualSync lock.
146 *
147 * @param pVM The VM handle.
148 */
149void tmVirtualSyncUnlock(PVM pVM)
150{
151 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
152}
153
154#endif /* ! macros */
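/*
 * A minimal sketch of the intended calling pattern for these helpers in R0/RC,
 * where the lock cannot be waited on and VERR_SEM_BUSY has to be handled by
 * deferring the work; the deferral shown is just a placeholder.
 */
#if 0 /* example only */
int rc = tmLock(pVM);
if (rc == VERR_SEM_BUSY)
    return VINF_SUCCESS;    /* e.g. leave the work to ring-3 via the timer FF */
AssertRC(rc);
/* ... work on the timer queues under the EMT/TM lock ... */
tmUnlock(pVM);
#endif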
155
156/**
157 * Notification that execution is about to start.
158 *
159 * This call must always be paired with a TMNotifyEndOfExecution call.
160 *
161 * The function may, depending on the configuration, resume the TSC and future
162 * clocks that only tick when we're executing guest code.
163 *
164 * @param pVCpu The VMCPU to operate on.
165 */
166VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
167{
168 PVM pVM = pVCpu->CTX_SUFF(pVM);
169
170 if (pVM->tm.s.fTSCTiedToExecution)
171 tmCpuTickResume(pVM, pVCpu);
172}
173
174
175/**
176 * Notification that execution has ended.
177 *
178 * This call must always be paired with a TMNotifyStartOfExecution call.
179 *
180 * The function may, depending on the configuration, suspend the TSC and future
182 * clocks that only tick when we're executing guest code.
182 *
183 * @param pVCpu The VMCPU to operate on.
184 */
185VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
186{
187 PVM pVM = pVCpu->CTX_SUFF(pVM);
188
189 if (pVM->tm.s.fTSCTiedToExecution)
190 tmCpuTickPause(pVM, pVCpu);
191}
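/*
 * A minimal sketch of the required pairing of the two notifications around a
 * burst of guest execution; runGuestCodeSomehow is a placeholder.
 */
#if 0 /* example only */
TMNotifyStartOfExecution(pVCpu);        /* resumes the TSC if fTSCTiedToExecution */
rc = runGuestCodeSomehow(pVM, pVCpu);   /* hypothetical */
TMNotifyEndOfExecution(pVCpu);          /* pauses it again */
#endif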
192
193
194/**
195 * Notification that the CPU is entering the halt state.
196 *
197 * This call must always be paired with a TMNotifyEndOfHalt call.
198 *
199 * The function may, depending on the configuration, resume the TSC and future
200 * clocks that only tick when we're halted.
201 *
202 * @param pVCpu The VMCPU to operate on.
203 */
204VMMDECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
205{
206 PVM pVM = pVCpu->CTX_SUFF(pVM);
207
208 if ( pVM->tm.s.fTSCTiedToExecution
209 && !pVM->tm.s.fTSCNotTiedToHalt)
210 tmCpuTickResume(pVM, pVCpu);
211}
212
213
214/**
215 * Notification that the CPU is leaving the halt state.
216 *
217 * This call must always be paired with a TMNotifyStartOfHalt call.
218 *
219 * The function may, depending on the configuration, suspend the TSC and future
220 * clocks that only tick when we're halted.
221 *
222 * @param pVCpu The VMCPU to operate on.
223 */
224VMMDECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
225{
226 PVM pVM = pVCpu->CTX_SUFF(pVM);
227
228 if ( pVM->tm.s.fTSCTiedToExecution
229 && !pVM->tm.s.fTSCNotTiedToHalt)
230 tmCpuTickPause(pVM, pVCpu);
231}
232
233
234/**
235 * Raise the timer force action flag and notify the dedicated timer EMT.
236 *
237 * @param pVM The VM handle.
238 */
239DECLINLINE(void) tmScheduleNotify(PVM pVM)
240{
241 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
242 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
243 {
244 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
245 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
246#ifdef IN_RING3
247 REMR3NotifyTimerPending(pVM, pVCpuDst);
248 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
249#endif
250 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
251 }
252}
253
254
255/**
256 * Schedule the queue which was changed.
257 */
258DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
259{
260 PVM pVM = pTimer->CTX_SUFF(pVM);
261 if ( VM_IS_EMT(pVM)
262 && RT_SUCCESS(tmTryLock(pVM)))
263 {
264 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
265 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
266 tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
267#ifdef VBOX_STRICT
268 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
269#endif
270 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
271 tmUnlock(pVM);
272 }
273 else
274 {
275 TMTIMERSTATE enmState = pTimer->enmState;
276 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
277 tmScheduleNotify(pVM);
278 }
279}
280
281
282/**
283 * Try to change the state to enmStateNew from enmStateOld. Unlike
284 * tmTimerTryWithLink, this does not link the timer onto the scheduling queue.
285 *
286 * @returns Success indicator.
287 * @param pTimer Timer in question.
288 * @param enmStateNew The new timer state.
289 * @param enmStateOld The old timer state.
290 */
291DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
292{
293 /*
294 * Attempt state change.
295 */
296 bool fRc;
297 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
298 return fRc;
299}
300
301
302/**
303 * Links the timer onto the scheduling queue.
304 *
305 * @param pQueue The timer queue the timer belongs to.
306 * @param pTimer The timer.
307 *
308 * @todo FIXME: Look into potential race with the thread running the queues
309 * and stuff.
310 */
311DECLINLINE(void) tmTimerLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
312{
313 Assert(!pTimer->offScheduleNext);
314 const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
315 int32_t offHead;
316 do
317 {
318 offHead = pQueue->offSchedule;
319 if (offHead)
320 pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
321 else
322 pTimer->offScheduleNext = 0;
323 } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
324}
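/*
 * tmTimerLink above is a lock-free LIFO push written with self-relative
 * offsets so the list is valid in R3, R0 and RC alike. The same pattern in
 * plain-pointer form, assuming hypothetical pScheduleHead/pScheduleNext
 * members:
 */
#if 0 /* example only */
PTMTIMER pOldHead;
do
{
    pOldHead = pQueue->pScheduleHead;   /* read the current head */
    pTimer->pScheduleNext = pOldHead;   /* point the new node at it */
    /* publish the new head only if nobody pushed in between */
} while (!ASMAtomicCmpXchgPtr((void * volatile *)&pQueue->pScheduleHead, pTimer, pOldHead));
#endif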
325
326
327/**
328 * Try to change the state to enmStateNew from enmStateOld and, on success,
329 * link the timer onto the scheduling queue.
330 *
331 * @returns Success indicator.
332 * @param pTimer Timer in question.
333 * @param enmStateNew The new timer state.
334 * @param enmStateOld The old timer state.
335 */
336DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
337{
338 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
339 {
340 tmTimerLink(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
341 return true;
342 }
343 return false;
344}
345
346
347#ifdef VBOX_HIGH_RES_TIMERS_HACK
348
349/**
350 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
351 * EMT is polling.
352 *
353 * @returns See tmTimerPollInternal.
354 * @param pVM Pointer to the shared VM structure.
355 * @param u64Now Current virtual clock timestamp.
356 * @param u64Delta The delta to the next event in ticks of the
357 * virtual clock.
358 * @param pu64Delta Where to return the delta.
360 */
361DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
362{
363 Assert(!(u64Delta & RT_BIT_64(63)));
364
365 if (!pVM->tm.s.fVirtualWarpDrive)
366 {
367 *pu64Delta = u64Delta;
368 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
369 }
370
371 /*
372 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
373 */
374 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
375 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
376
377 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
378 u64GipTime -= u64Start; /* the start is GIP time. */
379 if (u64GipTime >= u64Delta)
380 {
381 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
382 ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
383 }
384 else
385 {
386 u64Delta -= u64GipTime;
387 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
388 u64Delta += u64GipTime;
389 }
390 *pu64Delta = u64Delta;
391 u64GipTime += u64Start;
392 return u64GipTime;
393}
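/*
 * A worked example of the warp drive adjustment above: with u32Pct = 200
 * (guest time running at twice real time), a delta of 1000000 virtual-clock
 * ticks to the next event corresponds to 1000000 * 100 / 200 = 500000 ns of
 * GIP (host) time, which is what ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct)
 * computes.
 */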
394
395
396/**
397 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
398 * than the one dedicated to timer work.
399 *
400 * @returns See tmTimerPollInternal.
401 * @param pVM Pointer to the shared VM structure.
402 * @param u64Now Current virtual clock timestamp.
403 * @param pu64Delta Where to return the delta.
404 */
405DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
406{
407 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
408 *pu64Delta = s_u64OtherRet;
409 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
410}
411
412
413/**
414 * Worker for tmTimerPollInternal.
415 *
416 * @returns See tmTimerPollInternal.
417 * @param pVM Pointer to the shared VM structure.
418 * @param pVCpu Pointer to the shared VMCPU structure of the
419 * caller.
420 * @param pVCpuDst Pointer to the shared VMCPU structure of the
421 * dedicated timer EMT.
422 * @param u64Now Current virtual clock timestamp.
423 * @param pu64Delta Where to return the delta.
424 * @param pCounter The statistics counter to update.
425 */
426DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
427 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
428{
429 STAM_COUNTER_INC(pCounter);
430 if (pVCpuDst != pVCpu)
431 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
432 *pu64Delta = 0;
433 return 0;
434}
435
436/**
437 * Common worker for TMTimerPollGIP and TMTimerPoll.
438 *
439 * This function is called before FFs are checked in the inner execution EM loops.
440 *
441 * @returns The GIP timestamp of the next event.
442 * 0 if the next event has already expired.
443 *
444 * @param pVM Pointer to the shared VM structure.
445 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
446 * @param pu64Delta Where to store the delta.
447 *
448 * @thread The emulation thread.
449 *
450 * @remarks GIP uses ns ticks.
451 */
452DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
453{
454 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
455 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
456 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
457
458 /*
459 * Return straight away if the timer FF is already set ...
460 */
461 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
462 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
463
464 /*
465 * ... or if timers are being run.
466 */
467 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
468 {
469 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
470 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
471 }
472
473 /*
474 * Check for TMCLOCK_VIRTUAL expiration.
475 */
476 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
477 const int64_t i64Delta1 = u64Expire1 - u64Now;
478 if (i64Delta1 <= 0)
479 {
480 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
481 {
482 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
483 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
484#ifdef IN_RING3
485 REMR3NotifyTimerPending(pVM, pVCpuDst);
486#endif
487 }
488 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
489 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
490 }
491
492 /*
493 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
494 * This isn't quite as straightforward when in a catch-up: not only do
495 * we have to adjust the 'now', but we have to adjust the delta as well.
496 */
497
498 /*
499 * Optimistic lockless approach.
500 */
501 uint64_t u64VirtualSyncNow;
502 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
503 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
504 {
505 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
506 {
507 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
508 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
509 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
510 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
511 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
512 {
513 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
514 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
515 if (i64Delta2 > 0)
516 {
517 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
518 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
519
520 if (pVCpu == pVCpuDst)
521 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
522 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
523 }
524
525 if ( !pVM->tm.s.fRunningQueues
526 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
527 {
528 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
529 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
530#ifdef IN_RING3
531 REMR3NotifyTimerPending(pVM, pVCpuDst);
532#endif
533 }
534
535 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
536 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
537 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
538 }
539 }
540 }
541 else
542 {
543 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
544 LogFlow(("TMTimerPoll: stopped\n"));
545 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
546 }
547
548 /*
549 * Complicated lockless approach.
550 */
551 uint64_t off;
552 uint32_t u32Pct = 0;
553 bool fCatchUp;
554 int cOuterTries = 42;
555 for (;; cOuterTries--)
556 {
557 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
558 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
559 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
560 if (fCatchUp)
561 {
562 /* No changes allowed, try to get a consistent set of parameters. */
563 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
564 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
565 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
566 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
567 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
568 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
569 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
570 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
571 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
572 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
573 || cOuterTries <= 0)
574 {
575 uint64_t u64Delta = u64Now - u64Prev;
576 if (RT_LIKELY(!(u64Delta >> 32)))
577 {
578 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
579 if (off > u64Sub + offGivenUp)
580 off -= u64Sub;
581 else /* we've completely caught up. */
582 off = offGivenUp;
583 }
584 else
585 /* More than 4 seconds since last time (or negative), ignore it. */
586 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
587
588 /* Check that we're still running and in catch up. */
589 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
590 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
591 break;
592 }
593 }
594 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
595 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
596 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
597 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
598 break; /* Got a consistent offset. */
599
600 /* Repeat the initial checks before iterating. */
601 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
602 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
603 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
604 {
605 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
606 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
607 }
608 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
609 {
610 LogFlow(("TMTimerPoll: stopped\n"));
611 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
612 }
613 if (cOuterTries <= 0)
614 break; /* that's enough */
615 }
616 if (cOuterTries <= 0)
617 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
618 u64VirtualSyncNow = u64Now - off;
619
620 /* Calc delta and see if we've got a virtual sync hit. */
621 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
622 if (i64Delta2 <= 0)
623 {
624 if ( !pVM->tm.s.fRunningQueues
625 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
626 {
627 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
628 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
629#ifdef IN_RING3
630 REMR3NotifyTimerPending(pVM, pVCpuDst);
631#endif
632 }
633 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
634 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
635 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
636 }
637
638 /*
639 * Return the time left to the next event.
640 */
641 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
642 if (pVCpu == pVCpuDst)
643 {
644 if (fCatchUp)
645 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
646 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
647 }
648 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
649}
650
651
652/**
653 * Set FF if we've passed the next virtual event.
654 *
655 * This function is called before FFs are checked in the inner execution EM loops.
656 *
657 * @returns true if timers are pending, false if not.
658 *
659 * @param pVM Pointer to the shared VM structure.
660 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
661 * @thread The emulation thread.
662 */
663VMMDECL(bool) TMTimerPollBool(PVM pVM, PVMCPU pVCpu)
664{
665 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
666 uint64_t off = 0;
667 tmTimerPollInternal(pVM, pVCpu, &off);
668 return off == 0;
669}
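/*
 * A minimal sketch of how an inner execution loop might use TMTimerPollBool
 * before checking force-action flags; the loop body is a placeholder.
 */
#if 0 /* example only */
for (;;)
{
    if (TMTimerPollBool(pVM, pVCpu))    /* also sets VMCPU_FF_TIMER on the timer EMT */
        break;                          /* go service the expired timers */
    /* ... execute another chunk of guest code ... */
}
#endif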
670
671
672/**
673 * Set FF if we've passed the next virtual event.
674 *
675 * This function is called before FFs are checked in the inner execution EM loops.
676 *
677 * @param pVM Pointer to the shared VM structure.
678 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
679 * @thread The emulation thread.
680 */
681VMMDECL(void) TMTimerPollVoid(PVM pVM, PVMCPU pVCpu)
682{
683 uint64_t off;
684 tmTimerPollInternal(pVM, pVCpu, &off);
685}
686
687
688/**
689 * Set FF if we've passed the next virtual event.
690 *
691 * This function is called before FFs are checked in the inner execution EM loops.
692 *
693 * @returns The GIP timestamp of the next event.
694 * 0 if the next event has already expired.
695 * @param pVM Pointer to the shared VM structure.
696 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
697 * @param pu64Delta Where to store the delta.
698 * @thread The emulation thread.
699 */
700VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
701{
702 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
703}
704
705#endif /* VBOX_HIGH_RES_TIMERS_HACK */
706
707/**
708 * Gets the host context ring-3 pointer of the timer.
709 *
710 * @returns HC R3 pointer.
711 * @param pTimer Timer handle as returned by one of the create functions.
712 */
713VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
714{
715 return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
716}
717
718
719/**
720 * Gets the host context ring-0 pointer of the timer.
721 *
722 * @returns HC R0 pointer.
723 * @param pTimer Timer handle as returned by one of the create functions.
724 */
725VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
726{
727 return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
728}
729
730
731/**
732 * Gets the RC pointer of the timer.
733 *
734 * @returns RC pointer.
735 * @param pTimer Timer handle as returned by one of the create functions.
736 */
737VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
738{
739 return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
740}
741
742
743/**
744 * Arm a timer with a (new) expire time.
745 *
746 * @returns VBox status.
747 * @param pTimer Timer handle as returned by one of the create functions.
748 * @param u64Expire New expire time.
749 */
750VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
751{
752 STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTXALLSUFF(StatTimerSet), a);
753 TMTIMER_ASSERT_CRITSECT(pTimer);
754
755 /** @todo find the most frequently used paths and make them skip tmSchedule and tmTimerTryWithLink. */
756 int cRetries = 1000;
757 do
758 {
759 /*
760 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
761 */
762 TMTIMERSTATE enmState = pTimer->enmState;
763 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
764 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
765 switch (enmState)
766 {
767 case TMTIMERSTATE_EXPIRED_DELIVER:
768 case TMTIMERSTATE_STOPPED:
769 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
770 {
771 Assert(!pTimer->offPrev);
772 Assert(!pTimer->offNext);
773 AssertMsg( pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
774 || pTimer->CTX_SUFF(pVM)->tm.s.fVirtualSyncTicking
775 || u64Expire >= pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync,
776 ("%'RU64 < %'RU64 %s\n", u64Expire, pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
777 pTimer->u64Expire = u64Expire;
778 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
779 tmSchedule(pTimer);
780 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
781 return VINF_SUCCESS;
782 }
783 break;
784
785 case TMTIMERSTATE_PENDING_SCHEDULE:
786 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
787 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
788 {
789 pTimer->u64Expire = u64Expire;
790 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
791 tmSchedule(pTimer);
792 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
793 return VINF_SUCCESS;
794 }
795 break;
796
797
798 case TMTIMERSTATE_ACTIVE:
799 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
800 {
801 pTimer->u64Expire = u64Expire;
802 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
803 tmSchedule(pTimer);
804 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
805 return VINF_SUCCESS;
806 }
807 break;
808
809 case TMTIMERSTATE_PENDING_RESCHEDULE:
810 case TMTIMERSTATE_PENDING_STOP:
811 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
812 {
813 pTimer->u64Expire = u64Expire;
814 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
815 tmSchedule(pTimer);
816 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
817 return VINF_SUCCESS;
818 }
819 break;
820
821
822 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
823 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
824 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
825#ifdef IN_RING3
826 if (!RTThreadYield())
827 RTThreadSleep(1);
828#else
829/** @todo call host context and yield after a couple of iterations */
830#endif
831 break;
832
833 /*
834 * Invalid states.
835 */
836 case TMTIMERSTATE_DESTROY:
837 case TMTIMERSTATE_FREE:
838 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
839 return VERR_TM_INVALID_STATE;
840 default:
841 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
842 return VERR_TM_UNKNOWN_STATE;
843 }
844 } while (cRetries-- > 0);
845
846 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
847 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
848 return VERR_INTERNAL_ERROR;
849}
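/*
 * A minimal sketch of re-arming a periodic timer from its ring-3 callback with
 * an absolute expire time, avoiding the drift that repeated relative arming
 * accumulates; PMYDEVSTATE, its members and the callback wiring are
 * hypothetical.
 */
#if 0 /* example only */
static DECLCALLBACK(void) myDevTimerCallback(PPDMDEVINS pDevIns, PTMTIMER pTimer)
{
    PMYDEVSTATE pThis = PDMINS_2_DATA(pDevIns, PMYDEVSTATE);
    pThis->u64NextTick += TMTimerFromMilli(pTimer, 10);     /* advance by the period */
    TMTimerSet(pTimer, pThis->u64NextTick);                 /* absolute re-arm, no drift */
}
#endif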
850
851
852/**
853 * Arm a timer with a (new) expire time relative to current time.
854 *
855 * @returns VBox status.
856 * @param pTimer Timer handle as returned by one of the create functions.
857 * @param cMilliesToNext Number of milliseconds to the next tick.
858 */
859VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
860{
861 PVM pVM = pTimer->CTX_SUFF(pVM);
862 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
863
864 switch (pTimer->enmClock)
865 {
866 case TMCLOCK_VIRTUAL:
867 return TMTimerSet(pTimer, cMilliesToNext * (uint64_t)TMCLOCK_FREQ_VIRTUAL / 1000 + TMVirtualGet(pVM));
868 case TMCLOCK_VIRTUAL_SYNC:
869 return TMTimerSet(pTimer, cMilliesToNext * (uint64_t)TMCLOCK_FREQ_VIRTUAL / 1000 + TMVirtualSyncGet(pVM));
870 case TMCLOCK_REAL:
871 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
872 return TMTimerSet(pTimer, cMilliesToNext + TMRealGet(pVM));
873 case TMCLOCK_TSC:
874 return TMTimerSet(pTimer, cMilliesToNext * pVM->tm.s.cTSCTicksPerSecond / 1000 + TMCpuTickGet(pVCpu));
875
876 default:
877 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
878 return VERR_INTERNAL_ERROR;
879 }
880}
881
882
883/**
884 * Arm a timer with a (new) expire time relative to current time.
885 *
886 * @returns VBox status.
887 * @param pTimer Timer handle as returned by one of the create functions.
888 * @param cMicrosToNext Number of microseconds to the next tick.
889 */
890VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
891{
892 PVM pVM = pTimer->CTX_SUFF(pVM);
893 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
894
895 switch (pTimer->enmClock)
896 {
897 case TMCLOCK_VIRTUAL:
898 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
899 return TMTimerSet(pTimer, cMicrosToNext * 1000 + TMVirtualGet(pVM));
900
901 case TMCLOCK_VIRTUAL_SYNC:
902 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
903 return TMTimerSet(pTimer, cMicrosToNext * 1000 + TMVirtualSyncGet(pVM));
904
905 case TMCLOCK_REAL:
906 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
907 return TMTimerSet(pTimer, cMicrosToNext / 1000 + TMRealGet(pVM));
908
909 case TMCLOCK_TSC:
910 return TMTimerSet(pTimer, TMTimerFromMicro(pTimer, cMicrosToNext) + TMCpuTickGet(pVCpu));
911
912 default:
913 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
914 return VERR_INTERNAL_ERROR;
915 }
916}
917
918
919/**
920 * Arm a timer with a (new) expire time relative to current time.
921 *
922 * @returns VBox status.
923 * @param pTimer Timer handle as returned by one of the create functions.
924 * @param cNanosToNext Number of nanoseconds to the next tick.
925 */
926VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
927{
928 PVM pVM = pTimer->CTX_SUFF(pVM);
929 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
930
931 switch (pTimer->enmClock)
932 {
933 case TMCLOCK_VIRTUAL:
934 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
935 return TMTimerSet(pTimer, cNanosToNext + TMVirtualGet(pVM));
936
937 case TMCLOCK_VIRTUAL_SYNC:
938 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
939 return TMTimerSet(pTimer, cNanosToNext + TMVirtualSyncGet(pVM));
940
941 case TMCLOCK_REAL:
942 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
943 return TMTimerSet(pTimer, cNanosToNext / 1000000 + TMRealGet(pVM));
944
945 case TMCLOCK_TSC:
946 return TMTimerSet(pTimer, TMTimerFromNano(pTimer, cNanosToNext) + TMCpuTickGet(pVCpu));
947
948 default:
949 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
950 return VERR_INTERNAL_ERROR;
951 }
952}
953
954
955/**
956 * Stop the timer.
957 * Use TMR3TimerArm() to "un-stop" the timer.
958 *
959 * @returns VBox status.
960 * @param pTimer Timer handle as returned by one of the create functions.
961 */
962VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
963{
964 STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTXALLSUFF(StatTimerStop), a);
965 TMTIMER_ASSERT_CRITSECT(pTimer);
966
967 /** @todo see if this function needs optimizing. */
968 int cRetries = 1000;
969 do
970 {
971 /*
972 * Change to one of the PENDING_STOP states if valid and then schedule the timer.
973 */
974 TMTIMERSTATE enmState = pTimer->enmState;
975 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
976 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
977 switch (enmState)
978 {
979 case TMTIMERSTATE_EXPIRED_DELIVER:
980 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
981 return VERR_INVALID_PARAMETER;
982
983 case TMTIMERSTATE_STOPPED:
984 case TMTIMERSTATE_PENDING_STOP:
985 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
986 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
987 return VINF_SUCCESS;
988
989 case TMTIMERSTATE_PENDING_SCHEDULE:
990 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
991 {
992 tmSchedule(pTimer);
993 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
994 return VINF_SUCCESS;
995 }
996
997 case TMTIMERSTATE_PENDING_RESCHEDULE:
998 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
999 {
1000 tmSchedule(pTimer);
1001 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1002 return VINF_SUCCESS;
1003 }
1004 break;
1005
1006 case TMTIMERSTATE_ACTIVE:
1007 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1008 {
1009 tmSchedule(pTimer);
1010 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1011 return VINF_SUCCESS;
1012 }
1013 break;
1014
1015 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1016 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1017 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1018#ifdef IN_RING3
1019 if (!RTThreadYield())
1020 RTThreadSleep(1);
1021#else
1022/**@todo call host and yield cpu after a while. */
1023#endif
1024 break;
1025
1026 /*
1027 * Invalid states.
1028 */
1029 case TMTIMERSTATE_DESTROY:
1030 case TMTIMERSTATE_FREE:
1031 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1032 return VERR_TM_INVALID_STATE;
1033 default:
1034 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1035 return VERR_TM_UNKNOWN_STATE;
1036 }
1037 } while (cRetries-- > 0);
1038
1039 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1040 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1041 return VERR_INTERNAL_ERROR;
1042}
1043
1044
1045/**
1046 * Get the current clock time.
1047 * Handy for calculating the new expire time.
1048 *
1049 * @returns Current clock time.
1050 * @param pTimer Timer handle as returned by one of the create functions.
1051 */
1052VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
1053{
1054 uint64_t u64;
1055 PVM pVM = pTimer->CTX_SUFF(pVM);
1056
1057 switch (pTimer->enmClock)
1058 {
1059 case TMCLOCK_VIRTUAL:
1060 u64 = TMVirtualGet(pVM);
1061 break;
1062 case TMCLOCK_VIRTUAL_SYNC:
1063 u64 = TMVirtualSyncGet(pVM);
1064 break;
1065 case TMCLOCK_REAL:
1066 u64 = TMRealGet(pVM);
1067 break;
1068 case TMCLOCK_TSC:
1069 {
1070 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1071 u64 = TMCpuTickGet(pVCpu);
1072 break;
1073 }
1074 default:
1075 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1076 return ~(uint64_t)0;
1077 }
1078 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1079 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1080 return u64;
1081}
1082
1083
1084/**
1085 * Get the frequency of the timer clock.
1086 *
1087 * @returns Clock frequency (as Hz of course).
1088 * @param pTimer Timer handle as returned by one of the create functions.
1089 */
1090VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
1091{
1092 switch (pTimer->enmClock)
1093 {
1094 case TMCLOCK_VIRTUAL:
1095 case TMCLOCK_VIRTUAL_SYNC:
1096 return TMCLOCK_FREQ_VIRTUAL;
1097
1098 case TMCLOCK_REAL:
1099 return TMCLOCK_FREQ_REAL;
1100
1101 case TMCLOCK_TSC:
1102 return TMCpuTicksPerSecond(pTimer->CTX_SUFF(pVM));
1103
1104 default:
1105 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1106 return 0;
1107 }
1108}
1109
1110
1111/**
1112 * Get the current clock time as nanoseconds.
1113 *
1114 * @returns The timer clock as nanoseconds.
1115 * @param pTimer Timer handle as returned by one of the create functions.
1116 */
1117VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
1118{
1119 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
1120}
1121
1122
1123/**
1124 * Get the current clock time as microseconds.
1125 *
1126 * @returns The timer clock as microseconds.
1127 * @param pTimer Timer handle as returned by one of the create functions.
1128 */
1129VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
1130{
1131 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
1132}
1133
1134
1135/**
1136 * Get the current clock time as milliseconds.
1137 *
1138 * @returns The timer clock as milliseconds.
1139 * @param pTimer Timer handle as returned by one of the create functions.
1140 */
1141VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
1142{
1143 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
1144}
1145
1146
1147/**
1148 * Converts the specified timer clock time to nanoseconds.
1149 *
1150 * @returns nanoseconds.
1151 * @param pTimer Timer handle as returned by one of the create functions.
1152 * @param u64Ticks The clock ticks.
1153 * @remark There could be rounding errors here. We just do a simple integer divide
1154 * without any adjustments.
1155 */
1156VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
1157{
1158 switch (pTimer->enmClock)
1159 {
1160 case TMCLOCK_VIRTUAL:
1161 case TMCLOCK_VIRTUAL_SYNC:
1162 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1163 return u64Ticks;
1164
1165 case TMCLOCK_REAL:
1166 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1167 return u64Ticks * 1000000;
1168
1169 case TMCLOCK_TSC:
1170 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1171 return 0;
1172
1173 default:
1174 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1175 return 0;
1176 }
1177}
1178
1179
1180/**
1181 * Converts the specified timer clock time to microseconds.
1182 *
1183 * @returns microseconds.
1184 * @param pTimer Timer handle as returned by one of the create functions.
1185 * @param u64Ticks The clock ticks.
1186 * @remark There could be rounding errors here. We just do a simple integer divide
1187 * without any adjustments.
1188 */
1189VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
1190{
1191 switch (pTimer->enmClock)
1192 {
1193 case TMCLOCK_VIRTUAL:
1194 case TMCLOCK_VIRTUAL_SYNC:
1195 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1196 return u64Ticks / 1000;
1197
1198 case TMCLOCK_REAL:
1199 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1200 return u64Ticks * 1000;
1201
1202 case TMCLOCK_TSC:
1203 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1204 return 0;
1205
1206 default:
1207 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1208 return 0;
1209 }
1210}
1211
1212
1213/**
1214 * Converts the specified timer clock time to milliseconds.
1215 *
1216 * @returns milliseconds.
1217 * @param pTimer Timer handle as returned by one of the create functions.
1218 * @param u64Ticks The clock ticks.
1219 * @remark There could be rounding errors here. We just do a simple integer divide
1220 * without any adjustments.
1221 */
1222VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
1223{
1224 switch (pTimer->enmClock)
1225 {
1226 case TMCLOCK_VIRTUAL:
1227 case TMCLOCK_VIRTUAL_SYNC:
1228 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1229 return u64Ticks / 1000000;
1230
1231 case TMCLOCK_REAL:
1232 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1233 return u64Ticks;
1234
1235 case TMCLOCK_TSC:
1236 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1237 return 0;
1238
1239 default:
1240 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1241 return 0;
1242 }
1243}
1244
1245
1246/**
1247 * Converts the specified nanosecond timestamp to timer clock ticks.
1248 *
1249 * @returns timer clock ticks.
1250 * @param pTimer Timer handle as returned by one of the create functions.
1251 * @param u64NanoTS The nanosecond timestamp to convert.
1252 * @remark There could be rounding and overflow errors here.
1253 */
1254VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t u64NanoTS)
1255{
1256 switch (pTimer->enmClock)
1257 {
1258 case TMCLOCK_VIRTUAL:
1259 case TMCLOCK_VIRTUAL_SYNC:
1260 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1261 return u64NanoTS;
1262
1263 case TMCLOCK_REAL:
1264 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1265 return u64NanoTS / 1000000;
1266
1267 case TMCLOCK_TSC:
1268 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1269 return 0;
1270
1271 default:
1272 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1273 return 0;
1274 }
1275}
1276
1277
1278/**
1279 * Converts the specified microsecond timestamp to timer clock ticks.
1280 *
1281 * @returns timer clock ticks.
1282 * @param pTimer Timer handle as returned by one of the create functions.
1283 * @param u64MicroTS The microsecond timestamp to convert.
1284 * @remark There could be rounding and overflow errors here.
1285 */
1286VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t u64MicroTS)
1287{
1288 switch (pTimer->enmClock)
1289 {
1290 case TMCLOCK_VIRTUAL:
1291 case TMCLOCK_VIRTUAL_SYNC:
1292 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1293 return u64MicroTS * 1000;
1294
1295 case TMCLOCK_REAL:
1296 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1297 return u64MicroTS / 1000;
1298
1299 case TMCLOCK_TSC:
1300 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1301 return 0;
1302
1303 default:
1304 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1305 return 0;
1306 }
1307}
1308
1309
1310/**
1311 * Converts the specified millisecond timestamp to timer clock ticks.
1312 *
1313 * @returns timer clock ticks.
1314 * @param pTimer Timer handle as returned by one of the create functions.
1315 * @param u64MilliTS The millisecond timestamp to convert.
1316 * @remark There could be rounding and overflow errors here.
1317 */
1318VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t u64MilliTS)
1319{
1320 switch (pTimer->enmClock)
1321 {
1322 case TMCLOCK_VIRTUAL:
1323 case TMCLOCK_VIRTUAL_SYNC:
1324 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1325 return u64MilliTS * 1000000;
1326
1327 case TMCLOCK_REAL:
1328 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1329 return u64MilliTS;
1330
1331 case TMCLOCK_TSC:
1332 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1333 return 0;
1334
1335 default:
1336 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1337 return 0;
1338 }
1339}
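/*
 * A worked note on the conversion helpers above: the virtual clocks run at
 * 1 GHz, so their ticks are nanoseconds and e.g. TMTimerFromMilli(pTimer, 10)
 * yields 10 * 1000000 = 10000000 ticks for a TMCLOCK_VIRTUAL timer, but just
 * 10 ticks for a TMCLOCK_REAL (1 kHz) timer; TSC conversions are deliberately
 * unimplemented and assert.
 */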
1340
1341
1342/**
1343 * Get the expire time of the timer.
1344 * Only valid for active timers.
1345 *
1346 * @returns Expire time of the timer.
1347 * @param pTimer Timer handle as returned by one of the create functions.
1348 */
1349VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
1350{
1351 TMTIMER_ASSERT_CRITSECT(pTimer);
1352 int cRetries = 1000;
1353 do
1354 {
1355 TMTIMERSTATE enmState = pTimer->enmState;
1356 switch (enmState)
1357 {
1358 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1359 case TMTIMERSTATE_EXPIRED_DELIVER:
1360 case TMTIMERSTATE_STOPPED:
1361 case TMTIMERSTATE_PENDING_STOP:
1362 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1363 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1364 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1365 return ~(uint64_t)0;
1366
1367 case TMTIMERSTATE_ACTIVE:
1368 case TMTIMERSTATE_PENDING_RESCHEDULE:
1369 case TMTIMERSTATE_PENDING_SCHEDULE:
1370 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1371 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1372 return pTimer->u64Expire;
1373
1374 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1375 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1376#ifdef IN_RING3
1377 if (!RTThreadYield())
1378 RTThreadSleep(1);
1379#endif
1380 break;
1381
1382 /*
1383 * Invalid states.
1384 */
1385 case TMTIMERSTATE_DESTROY:
1386 case TMTIMERSTATE_FREE:
1387 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1388 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1389 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1390 return ~(uint64_t)0;
1391 default:
1392 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1393 return ~(uint64_t)0;
1394 }
1395 } while (cRetries-- > 0);
1396
1397 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1398 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1399 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1400 return ~(uint64_t)0;
1401}
1402
1403
1404/**
1405 * Checks if a timer is active or not.
1406 *
1407 * @returns True if active.
1408 * @returns False if not active.
1409 * @param pTimer Timer handle as returned by one of the create functions.
1410 */
1411VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
1412{
1413 TMTIMERSTATE enmState = pTimer->enmState;
1414 switch (enmState)
1415 {
1416 case TMTIMERSTATE_STOPPED:
1417 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1418 case TMTIMERSTATE_EXPIRED_DELIVER:
1419 case TMTIMERSTATE_PENDING_STOP:
1420 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1421 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1422 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1423 return false;
1424
1425 case TMTIMERSTATE_ACTIVE:
1426 case TMTIMERSTATE_PENDING_RESCHEDULE:
1427 case TMTIMERSTATE_PENDING_SCHEDULE:
1428 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1429 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1430 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1431 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1432 return true;
1433
1434 /*
1435 * Invalid states.
1436 */
1437 case TMTIMERSTATE_DESTROY:
1438 case TMTIMERSTATE_FREE:
1439 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1440 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1441 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1442 return false;
1443 default:
1444 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1445 return false;
1446 }
1447}
1448
1449
1450/**
1451 * Convert state to string.
1452 *
1453 * @returns Read-only state name.
1454 * @param enmState State.
1455 */
1456const char *tmTimerState(TMTIMERSTATE enmState)
1457{
1458 switch (enmState)
1459 {
1460#define CASE(num, state) \
1461 case TMTIMERSTATE_##state: \
1462 AssertCompile(TMTIMERSTATE_##state == (num)); \
1463 return #num "-" #state
1464 CASE( 1,STOPPED);
1465 CASE( 2,ACTIVE);
1466 CASE( 3,EXPIRED_GET_UNLINK);
1467 CASE( 4,EXPIRED_DELIVER);
1468 CASE( 5,PENDING_STOP);
1469 CASE( 6,PENDING_STOP_SCHEDULE);
1470 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
1471 CASE( 8,PENDING_SCHEDULE);
1472 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
1473 CASE(10,PENDING_RESCHEDULE);
1474 CASE(11,DESTROY);
1475 CASE(12,FREE);
1476 default:
1477 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
1478 return "Invalid state!";
1479#undef CASE
1480 }
1481}
1482
1483
1484/**
1485 * Schedules the given timer on the given queue.
1486 *
1487 * @param pQueue The timer queue.
1488 * @param pTimer The timer that needs scheduling.
1489 *
1490 * @remarks Called while owning the lock.
1491 */
1492DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
1493{
1494 /*
1495 * Processing.
1496 */
1497 unsigned cRetries = 2;
1498 do
1499 {
1500 TMTIMERSTATE enmState = pTimer->enmState;
1501 switch (enmState)
1502 {
1503 /*
1504 * Reschedule timer (in the active list).
1505 */
1506 case TMTIMERSTATE_PENDING_RESCHEDULE:
1507 {
1508 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
1509 break; /* retry */
1510
1511 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
1512 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
1513 if (pPrev)
1514 TMTIMER_SET_NEXT(pPrev, pNext);
1515 else
1516 {
1517 TMTIMER_SET_HEAD(pQueue, pNext);
1518 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
1519 }
1520 if (pNext)
1521 TMTIMER_SET_PREV(pNext, pPrev);
1522 pTimer->offNext = 0;
1523 pTimer->offPrev = 0;
1524 /* fall thru */
1525 }
1526
1527 /*
1528 * Schedule timer (insert into the active list).
1529 */
1530 case TMTIMERSTATE_PENDING_SCHEDULE:
1531 {
1532 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
1533 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
1534 break; /* retry */
1535
1536 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
1537 if (pCur)
1538 {
1539 const uint64_t u64Expire = pTimer->u64Expire;
1540 for (;; pCur = TMTIMER_GET_NEXT(pCur))
1541 {
1542 if (pCur->u64Expire > u64Expire)
1543 {
1544 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
1545 TMTIMER_SET_NEXT(pTimer, pCur);
1546 TMTIMER_SET_PREV(pTimer, pPrev);
1547 if (pPrev)
1548 TMTIMER_SET_NEXT(pPrev, pTimer);
1549 else
1550 {
1551 TMTIMER_SET_HEAD(pQueue, pTimer);
1552 pQueue->u64Expire = u64Expire;
1553 }
1554 TMTIMER_SET_PREV(pCur, pTimer);
1555 return;
1556 }
1557 if (!pCur->offNext)
1558 {
1559 TMTIMER_SET_NEXT(pCur, pTimer);
1560 TMTIMER_SET_PREV(pTimer, pCur);
1561 return;
1562 }
1563 }
1564 }
1565 else
1566 {
1567 TMTIMER_SET_HEAD(pQueue, pTimer);
1568 pQueue->u64Expire = pTimer->u64Expire;
1569 }
1570 return;
1571 }
1572
1573 /*
1574 * Stop the timer in active list.
1575 */
1576 case TMTIMERSTATE_PENDING_STOP:
1577 {
1578 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
1579 break; /* retry */
1580
1581 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
1582 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
1583 if (pPrev)
1584 TMTIMER_SET_NEXT(pPrev, pNext);
1585 else
1586 {
1587 TMTIMER_SET_HEAD(pQueue, pNext);
1588 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
1589 }
1590 if (pNext)
1591 TMTIMER_SET_PREV(pNext, pPrev);
1592 pTimer->offNext = 0;
1593 pTimer->offPrev = 0;
1594 /* fall thru */
1595 }
1596
1597 /*
1598 * Stop the timer (not on the active list).
1599 */
1600 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1601 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
1602 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
1603 break;
1604 return;
1605
1606 /*
1607 * The timer is pending destruction by TMR3TimerDestroy, our caller.
1608 * Nothing to do here.
1609 */
1610 case TMTIMERSTATE_DESTROY:
1611 break;
1612
1613 /*
1614 * Postpone these until they get into the right state.
1615 */
1616 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1617 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1618 tmTimerLink(pQueue, pTimer);
1619 STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
1620 return;
1621
1622 /*
1623 * None of these can be in the schedule.
1624 */
1625 case TMTIMERSTATE_FREE:
1626 case TMTIMERSTATE_STOPPED:
1627 case TMTIMERSTATE_ACTIVE:
1628 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1629 case TMTIMERSTATE_EXPIRED_DELIVER:
1630 default:
1631 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
1632 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
1633 return;
1634 }
1635 } while (cRetries-- > 0);
1636}
1637
1638
1639/**
1640 * Schedules the specified timer queue.
1641 *
1642 * @param pVM The VM to run the timers for.
1643 * @param pQueue The queue to schedule.
1644 *
1645 * @remarks Called while owning the lock.
1646 */
1647void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
1648{
1649 TM_ASSERT_EMT_LOCK(pVM);
1650
1651 /*
1652 * Dequeue the scheduling list and iterate it.
1653 */
1654 int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
1655 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
1656 if (!offNext)
1657 return;
1658 PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
1659 while (pNext)
1660 {
1661 /*
1662 * Unlink the head timer and find the next one.
1663 */
1664 PTMTIMER pTimer = pNext;
1665 pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
1666 pTimer->offScheduleNext = 0;
1667
1668 /*
1669 * Do the scheduling.
1670 */
1671 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
1672 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
1673 tmTimerQueueScheduleOne(pQueue, pTimer);
1674 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
1675 } /* foreach timer in current schedule batch. */
1676 Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
1677}
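/*
 * The ASMAtomicXchgS32 of offSchedule above atomically claims the whole batch
 * pushed by tmTimerLink; this is the consumer half of the lock-free LIFO. The
 * same idea in plain-pointer form, assuming hypothetical
 * pScheduleHead/pScheduleNext members and a per-timer worker:
 */
#if 0 /* example only */
PTMTIMER pBatch = (PTMTIMER)ASMAtomicXchgPtr((void * volatile *)&pQueue->pScheduleHead, NULL);
while (pBatch)
{
    PTMTIMER pNext = pBatch->pScheduleNext;     /* fetch before the node is relinked */
    processOneTimer(pQueue, pBatch);            /* hypothetical */
    pBatch = pNext;
}
#endif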
1678
1679
1680#ifdef VBOX_STRICT
1681/**
1682 * Checks that the timer queues are sane.
1683 *
1684 * @param pVM VM handle.
1685 *
1686 * @remarks Called while owning the lock.
1687 */
1688void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
1689{
1690 TM_ASSERT_EMT_LOCK(pVM);
1691
1692 /*
1693 * Check the linking of the active lists.
1694 */
1695 for (int i = 0; i < TMCLOCK_MAX; i++)
1696 {
1697 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
1698 Assert((int)pQueue->enmClock == i);
1699 PTMTIMER pPrev = NULL;
1700 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
1701 {
1702 AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
1703 AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
1704 TMTIMERSTATE enmState = pCur->enmState;
1705 switch (enmState)
1706 {
1707 case TMTIMERSTATE_ACTIVE:
1708 AssertMsg( !pCur->offScheduleNext
1709 || pCur->enmState != TMTIMERSTATE_ACTIVE,
1710 ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
1711 break;
1712 case TMTIMERSTATE_PENDING_STOP:
1713 case TMTIMERSTATE_PENDING_RESCHEDULE:
1714 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1715 break;
1716 default:
1717 AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
1718 break;
1719 }
1720 }
1721 }
1722
1723
1724# ifdef IN_RING3
1725 /*
1726 * Do the big list and check that active timers all are in the active lists.
1727 */
1728 PTMTIMERR3 pPrev = NULL;
1729 for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
1730 {
1731 Assert(pCur->pBigPrev == pPrev);
1732 Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);
1733
1734 TMTIMERSTATE enmState = pCur->enmState;
1735 switch (enmState)
1736 {
1737 case TMTIMERSTATE_ACTIVE:
1738 case TMTIMERSTATE_PENDING_STOP:
1739 case TMTIMERSTATE_PENDING_RESCHEDULE:
1740 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1741 {
1742 PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
1743 Assert(pCur->offPrev || pCur == pCurAct);
1744 while (pCurAct && pCurAct != pCur)
1745 pCurAct = TMTIMER_GET_NEXT(pCurAct);
1746 Assert(pCurAct == pCur);
1747 break;
1748 }
1749
1750 case TMTIMERSTATE_PENDING_SCHEDULE:
1751 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1752 case TMTIMERSTATE_STOPPED:
1753 case TMTIMERSTATE_EXPIRED_DELIVER:
1754 {
1755 Assert(!pCur->offNext);
1756 Assert(!pCur->offPrev);
1757 for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
1758 pCurAct;
1759 pCurAct = TMTIMER_GET_NEXT(pCurAct))
1760 {
1761 Assert(pCurAct != pCur);
1762 Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
1763 Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
1764 }
1765 break;
1766 }
1767
1768 /* ignore */
1769 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1770 break;
1771
1772 /* shouldn't get here! */
1773 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1774 case TMTIMERSTATE_DESTROY:
1775 default:
1776 AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
1777 break;
1778 }
1779 }
1780# endif /* IN_RING3 */
1781}
1782#endif /* VBOX_STRICT */
1783
1784
1785/**
1786 * Gets the current warp drive percent.
1787 *
1788 * @returns The warp drive percent.
1789 * @param pVM The VM handle.
1790 */
1791VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
1792{
1793 return pVM->tm.s.u32VirtualWarpDrivePercentage;
1794}
1795