VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@19810

Last change on this file since 19810 was 19810, checked in by vboxsync, 16 years ago

TM: TMTimerPoll hacking.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 58.7 KB
1/* $Id: TMAll.cpp 19810 2009-05-19 09:59:20Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_TM
27#include <VBox/tm.h>
28#include <VBox/mm.h>
29#ifdef IN_RING3
30# include <VBox/rem.h>
31#endif
32#include "TMInternal.h"
33#include <VBox/vm.h>
34
35#include <VBox/param.h>
36#include <VBox/err.h>
37#include <VBox/log.h>
38#include <VBox/sup.h>
39#include <iprt/time.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#ifdef IN_RING3
43# include <iprt/thread.h>
44#endif
45
46
47#ifndef tmLock
48
49/**
50 * Try to take the EMT/TM lock, waiting in ring-3 and returning VERR_SEM_BUSY in R0/RC.
51 *
52 * @retval VINF_SUCCESS on success (always in ring-3).
53 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
54 *
55 * @param pVM The VM handle.
56 */
57int tmLock(PVM pVM)
58{
59 VM_ASSERT_EMT(pVM);
60 int rc = PDMCritSectEnter(&pVM->tm.s.EmtLock, VERR_SEM_BUSY);
61 return rc;
62}
63
64
65/**
66 * Try to take the EMT/TM lock, no waiting.
67 *
68 * @retval VINF_SUCCESS on success.
69 * @retval VERR_SEM_BUSY if busy.
70 *
71 * @param pVM The VM handle.
72 */
73int tmTryLock(PVM pVM)
74{
75 VM_ASSERT_EMT(pVM);
76 int rc = PDMCritSectTryEnter(&pVM->tm.s.EmtLock);
77 return rc;
78}
79
80
81/**
82 * Release the EMT/TM lock.
83 *
84 * @param pVM The VM handle.
85 */
86void tmUnlock(PVM pVM)
87{
88 PDMCritSectLeave(&pVM->tm.s.EmtLock);
89}
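/*
 * Usage sketch (illustrative only, not part of the original sources): the
 * helpers above follow the usual R0/RC pattern -- take the lock when we can,
 * otherwise fail fast with VERR_SEM_BUSY so the caller can defer to ring-3.
 *
 *      int rc = tmLock(pVM);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... manipulate the timer queues ...
 *          tmUnlock(pVM);
 *      }
 *      // else: rc == VERR_SEM_BUSY (R0/RC only); push the work to ring-3.
 */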
90
91
92/**
93 * Try to take the VirtualSync lock, waiting in ring-3 and returning VERR_SEM_BUSY in R0/RC.
94 *
95 * @retval VINF_SUCCESS on success (always in ring-3).
96 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
97 *
98 * @param pVM The VM handle.
99 */
100int tmVirtualSyncLock(PVM pVM)
101{
102 VM_ASSERT_EMT(pVM);
103 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_SEM_BUSY);
104 return rc;
105}
106
107
108/**
109 * Try to take the VirtualSync lock, no waiting.
110 *
111 * @retval VINF_SUCCESS on success.
112 * @retval VERR_SEM_BUSY if busy.
113 *
114 * @param pVM The VM handle.
115 */
116int tmVirtualSyncTryLock(PVM pVM)
117{
118 VM_ASSERT_EMT(pVM);
119 int rc = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
120 return rc;
121}
122
123
124/**
125 * Release the VirtualSync lock.
126 *
127 * @param pVM The VM handle.
128 */
129void tmVirtualSyncUnlock(PVM pVM)
130{
131 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
132}
133
134#endif /* ! macros */
135
136/**
137 * Notification that execution is about to start.
138 *
139 * This call must always be paired with a TMNotifyEndOfExecution call.
140 *
141 * The function may, depending on the configuration, resume the TSC and future
142 * clocks that only tick when we're executing guest code.
143 *
144 * @param pVCpu The VMCPU to operate on.
145 */
146VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
147{
148 PVM pVM = pVCpu->CTX_SUFF(pVM);
149
150 if (pVM->tm.s.fTSCTiedToExecution)
151 tmCpuTickResume(pVM, pVCpu);
152}
153
154
155/**
156 * Notification that execution has ended.
157 *
158 * This call must always be paired with a TMNotifyStartOfExecution call.
159 *
160 * The function may, depending on the configuration, suspend the TSC and future
161 * clocks that only tick when we're executing guest code.
162 *
163 * @param pVCpu The VMCPU to operate on.
164 */
165VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
166{
167 PVM pVM = pVCpu->CTX_SUFF(pVM);
168
169 if (pVM->tm.s.fTSCTiedToExecution)
170 tmCpuTickPause(pVM, pVCpu);
171}
172
173
174/**
175 * Notification that the CPU is entering the halt state.
176 *
177 * This call must always be paired with a TMNotifyEndOfHalt call.
178 *
179 * The function may, depending on the configuration, resume the TSC and future
180 * clocks that only tick when we're halted.
181 *
182 * @param pVCpu The VMCPU to operate on.
183 */
184VMMDECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
185{
186 PVM pVM = pVCpu->CTX_SUFF(pVM);
187
188 if ( pVM->tm.s.fTSCTiedToExecution
189 && !pVM->tm.s.fTSCNotTiedToHalt)
190 tmCpuTickResume(pVM, pVCpu);
191}
192
193
194/**
195 * Notification that the CPU is leaving the halt state.
196 *
197 * This call must always be paired with a TMNotifyStartOfHalt call.
198 *
199 * The function may, depending on the configuration, suspend the TSC and future
200 * clocks that only tick when we're halted.
201 *
202 * @param pVCpu The VMCPU to operate on.
203 */
204VMMDECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
205{
206 PVM pVM = pVCpu->CTX_SUFF(pVM);
207
208 if ( pVM->tm.s.fTSCTiedToExecution
209 && !pVM->tm.s.fTSCNotTiedToHalt)
210 tmCpuTickPause(pVM, pVCpu);
211}
212
213
214/**
215 * Raise the timer force action flag and notify the dedicated timer EMT.
216 *
217 * @param pVM The VM handle.
218 */
219DECLINLINE(void) tmScheduleNotify(PVM pVM)
220{
221 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
222 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
223 {
224 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
225 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
226#ifdef IN_RING3
227 REMR3NotifyTimerPending(pVM, pVCpuDst);
228 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
229#endif
230 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
231 }
232}
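/*
 * Consumer-side sketch (illustrative; the real dispatch lives in the EM/VMM
 * loops): the force-action flag raised above is eventually serviced by the
 * dedicated timer EMT in ring-3, roughly like this:
 *
 *      if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
 *      {
 *          VMCPU_FF_CLEAR(pVCpuDst, VMCPU_FF_TIMER);
 *          TMR3TimerQueuesDo(pVM);
 *      }
 */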
233
234
235/**
236 * Schedule the queue which was changed.
237 */
238DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
239{
240 PVM pVM = pTimer->CTX_SUFF(pVM);
241 if ( VM_IS_EMT(pVM)
242 && RT_SUCCESS(tmTryLock(pVM)))
243 {
244 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
245 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
246 tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
247#ifdef VBOX_STRICT
248 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
249#endif
250 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
251 tmUnlock(pVM);
252 }
253 else
254 {
255 TMTIMERSTATE enmState = pTimer->enmState;
256 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
257 tmScheduleNotify(pVM);
258 }
259}
260
261
262/**
263 * Try to change the state to enmStateNew from enmStateOld (the queue
264 * linking is left to tmTimerTryWithLink).
265 *
266 * @returns Success indicator.
267 * @param pTimer Timer in question.
268 * @param enmStateNew The new timer state.
269 * @param enmStateOld The old timer state.
270 */
271DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
272{
273 /*
274 * Attempt state change.
275 */
276 bool fRc;
277 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
278 return fRc;
279}
280
281
282/**
283 * Links the timer onto the scheduling queue.
284 *
285 * @param pQueue The timer queue the timer belongs to.
286 * @param pTimer The timer.
287 *
288 * @todo FIXME: Look into potential race with the thread running the queues
289 * and stuff.
290 */
291DECLINLINE(void) tmTimerLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
292{
293 Assert(!pTimer->offScheduleNext);
294 const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
295 int32_t offHead;
296 do
297 {
298 offHead = pQueue->offSchedule;
299 if (offHead)
300 pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
301 else
302 pTimer->offScheduleNext = 0;
303 } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
304}
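/*
 * The schedule list is a lockless LIFO in which the links are stored as
 * self-relative byte offsets instead of pointers, so the same values work
 * across the R3, R0 and RC mappings. With plain pointers the push above
 * would read (sketch only; pSchedule/pScheduleNext are hypothetical fields):
 *
 *      PTMTIMER pHead;
 *      do
 *      {
 *          pHead = pQueue->pSchedule;
 *          pTimer->pScheduleNext = pHead;
 *      } while (!ASMAtomicCmpXchgPtr((void * volatile *)&pQueue->pSchedule, pTimer, pHead));
 */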
305
306
307/**
308 * Try to change the state to enmStateNew from enmStateOld
309 * and link the timer into the scheduling queue.
310 *
311 * @returns Success indicator.
312 * @param pTimer Timer in question.
313 * @param enmStateNew The new timer state.
314 * @param enmStateOld The old timer state.
315 */
316DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
317{
318 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
319 {
320 tmTimerLink(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
321 return true;
322 }
323 return false;
324}
325
326
327#ifdef VBOX_HIGH_RES_TIMERS_HACK
328/**
329 * Set FF if we've passed the next virtual event.
330 *
331 * This function is called before FFs are checked in the inner execution EM loops.
332 *
333 * @returns Virtual timer ticks to the next event. (I.e. 0 means that a timer
334 * has expired or some important rescheduling is pending.)
335 * @param pVM Pointer to the shared VM structure.
336 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
337 * @thread The emulation thread.
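 *
 * Caller-side sketch (illustrative):
 * @code
 *      uint64_t cTicksToNext = TMTimerPoll(pVM, pVCpu);
 *      if (!cTicksToNext || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TIMER))
 *          ...leave the inner loop and service the timers...;
 * @endcode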
338 */
339VMMDECL(uint64_t) TMTimerPoll(PVM pVM, PVMCPU pVCpu)
340{
341 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
342 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
343 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
344
345 /*
346 * Return straight away if the timer FF is already set ...
347 */
348 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
349 {
350 STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
351 return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
352 }
353
354 /*
355 * ... or if timers are being run.
356 */
357 if (pVM->tm.s.fRunningQueues)
358 {
359 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
360 return s_u64OtherRet;
361 }
362
363 /*
364 * Get current time and check the expire times of the two relevant queues.
365 */
366 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
367
368 /*
369 * TMCLOCK_VIRTUAL
370 */
371 const uint64_t u64Expire1 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
372 const int64_t i64Delta1 = u64Expire1 - u64Now;
373 if (i64Delta1 <= 0)
374 {
375 LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
376 if ( !pVM->tm.s.fRunningQueues
377 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
378 {
379 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
380 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
381#ifdef IN_RING3
382 REMR3NotifyTimerPending(pVM, pVCpuDst);
383#endif
384 }
385 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtual);
386 return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
387 }
388
389 /*
390 * TMCLOCK_VIRTUAL_SYNC
391 * This isn't quite as straightforward if we're in a catch-up: not only do
392 * we have to adjust the 'now', but we have to adjust the delta as well.
393 */
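/*
 * Catch-up in a nutshell (illustrative numbers): with a catch-up percentage
 * of 25 the virtual sync clock runs at 125% speed, so the accumulated offset
 * shrinks by elapsed * 25 / 100 until it reaches the given-up offset, i.e.
 *
 *      off -= ASMMultU64ByU32DivByU32(u64Now - u64Prev, u32Pct, 100);
 */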
394
395 /*
396 * Optimistic lockless approach.
397 */
398 uint64_t u64VirtualSyncNow;
399 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
400 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
401 {
402 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
403 {
404 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
405 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
406 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
407 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
408 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
409 {
410 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
411 if (u64VirtualSyncNow < u64Expire2)
412 {
413 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
414 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
415 return pVCpu == pVCpuDst
416 ? RT_MIN(i64Delta1, (int64_t)(u64Expire2 - u64VirtualSyncNow))
417 : s_u64OtherRet;
418 }
419
420 if ( !pVM->tm.s.fRunningQueues
421 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
422 {
423 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
424 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
425#ifdef IN_RING3
426 REMR3NotifyTimerPending(pVM, pVCpuDst);
427#endif
428 }
429
430 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
431 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
432 LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
433 return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
434 }
435 }
436 }
437 else
438 {
439 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
440 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
441 LogFlow(("TMTimerPoll: stopped\n"));
442 return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
443 }
444
445 /*
446 * Complicated lockless approach.
447 */
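/*
 * (The loop below takes an optimistic snapshot of the catch-up parameters,
 * re-reads them to check that nothing changed in between, and retries up to
 * 42 times before settling for a possibly slightly stale offset.)
 */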
448 uint64_t off;
449 uint32_t u32Pct = 0;
450 bool fCatchUp;
451 int cOuterTries = 42;
452 for (;; cOuterTries--)
453 {
454 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
455 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
456 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
457 if (fCatchUp)
458 {
459 /* No changes allowed, try get a consistent set of parameters. */
460 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
461 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
462 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
463 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
464 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
465 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
466 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
467 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
468 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
469 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
470 || cOuterTries <= 0)
471 {
472 uint64_t u64Delta = u64Now - u64Prev;
473 if (RT_LIKELY(!(u64Delta >> 32)))
474 {
475 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
476 if (off > u64Sub + offGivenUp)
477 off -= u64Sub;
478 else /* we've completely caught up. */
479 off = offGivenUp;
480 }
481 else
482 /* More than 4 seconds since last time (or negative), ignore it. */
483 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
484
485 /* Check that we're still running and in catch up. */
486 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
487 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
488 break;
489 }
490 }
491 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
492 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
493 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
494 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
495 break; /* Got a consistent offset */
496
497 /* Repeat the initial checks before iterating. */
498 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
499 {
500 STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
501 return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
502 }
503 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
504 {
505 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
506 return s_u64OtherRet;
507 }
508 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
509 {
510 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
511 LogFlow(("TMTimerPoll: stopped\n"));
512 return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
513 }
514 if (cOuterTries <= 0)
515 break; /* that's enough */
516 }
517 if (cOuterTries <= 0)
518 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
519 u64VirtualSyncNow = u64Now - off;
520
521 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
522 if (i64Delta2 <= 0)
523 {
524 if ( !pVM->tm.s.fRunningQueues
525 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
526 {
527 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
528 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
529#ifdef IN_RING3
530 REMR3NotifyTimerPending(pVM, pVCpuDst);
531#endif
532 }
533 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
534 LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
535 return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
536 }
537
538 /*
539 * Return the time left to the next event.
540 */
541 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
542 if (pVCpu == pVCpuDst)
543 {
544 if (fCatchUp)
545 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
546 return RT_MIN(i64Delta1, i64Delta2);
547 }
548 return s_u64OtherRet;
549}
550
551
552/**
553 * Set FF if we've passed the next virtual event.
554 *
555 * This function is called before FFs are checked in the inner execution EM loops.
556 *
557 * @returns The GIP timestamp of the next event.
558 * 0 if the next event has already expired.
559 * @param pVM Pointer to the shared VM structure.
560 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
561 * @param pu64Delta Where to store the delta.
562 * @thread The emulation thread.
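 *
 * Caller-side sketch (illustrative; the exact halt API depends on context):
 * @code
 *      uint64_t cTicksDelta;
 *      uint64_t u64GipExpire = TMTimerPollGIP(pVM, pVCpu, &cTicksDelta);
 *      if (!cTicksDelta)
 *          ...run the timer queues...;
 *      else
 *          ...block until the GIP clock reaches u64GipExpire...;
 * @endcode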
563 */
564VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
565{
566 static const uint64_t s_u64OtherRet = 500000000; /* 500 million GIP ticks for non-timer EMTs. */
567 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
568 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
569 STAM_COUNTER_INC(&pVM->tm.s.StatPollGIP);
570
571 /*
572 * Return straight away if the timer FF is already set ...
573 */
574 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
575 {
576 STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPAlreadySet);
577 if (pVCpuDst == pVCpu)
578 {
579 *pu64Delta = 0;
580 return 0;
581 }
582 *pu64Delta = s_u64OtherRet;
583 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
584 }
585
586 /*
587 * ... or if timers are being run.
588 */
589 if (pVM->tm.s.fRunningQueues)
590 {
591 STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPRunning);
592 *pu64Delta = s_u64OtherRet;
593 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
594 }
595
596 /*
597 * Check for TMCLOCK_VIRTUAL expiration.
598 */
599 const uint64_t u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
600 const int64_t i64Delta1 = u64Expire1 - u64Now;
601 if (i64Delta1 <= 0)
602 {
603 STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPVirtual);
604 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
605 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
606#ifdef IN_RING3
607 REMR3NotifyTimerPending(pVM, pVCpuDst);
608#endif
609 LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
610 if (pVCpuDst == pVCpu)
611 {
612 *pu64Delta = 0;
613 return 0;
614 }
615 *pu64Delta = s_u64OtherRet;
616 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
617 }
618
619 /*
620 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
621 * This isn't quite as straightforward if we're in a catch-up: not only do
622 * we have to adjust the 'now', but we have to adjust the delta as well.
623 */
624 int rc = tmVirtualSyncLock(pVM); /** @todo FIXME: Stop playing it safe... */
625
626 const uint64_t u64Expire2 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
627 uint64_t u64VirtualSyncNow;
628 if (!pVM->tm.s.fVirtualSyncTicking)
629 u64VirtualSyncNow = pVM->tm.s.u64VirtualSync;
630 else
631 {
632 if (!pVM->tm.s.fVirtualSyncCatchUp)
633 u64VirtualSyncNow = u64Now - pVM->tm.s.offVirtualSync;
634 else
635 {
636 uint64_t off = pVM->tm.s.offVirtualSync;
637 uint64_t u64Delta = u64Now - pVM->tm.s.u64VirtualSyncCatchUpPrev;
638 if (RT_LIKELY(!(u64Delta >> 32)))
639 {
640 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
641 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
642 off -= u64Sub;
643 else
644 off = pVM->tm.s.offVirtualSyncGivenUp;
645 }
646 u64VirtualSyncNow = u64Now - off;
647 }
648 }
649
650 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
651 if (i64Delta2 <= 0)
652 {
653 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
654 {
655 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
656 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER /** @todo poke */);
657#ifdef IN_RING3
658 REMR3NotifyTimerPending(pVM, pVCpuDst);
659#endif
660 }
661 STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPVirtualSync);
662
663#ifndef IN_RING3
664 if (RT_SUCCESS(rc))
665#endif
666 tmVirtualSyncUnlock(pVM);
667 LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
668 if (pVCpuDst == pVCpu)
669 {
670 *pu64Delta = 0;
671 return 0;
672 }
673 *pu64Delta = s_u64OtherRet;
674 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
675 }
676 if (pVM->tm.s.fVirtualSyncCatchUp)
677 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
678
679 uint64_t u64GipTime;
680 if (pVCpuDst == pVCpu)
681 {
682 /*
683 * Return the GIP time of the next event.
684 * This is the reverse of what tmVirtualGetRaw is doing.
685 */
686 STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPMiss);
687 u64GipTime = RT_MIN(i64Delta1, i64Delta2);
688 *pu64Delta = u64GipTime;
689 u64GipTime += u64Now + pVM->tm.s.u64VirtualOffset;
690 if (RT_UNLIKELY(pVM->tm.s.fVirtualWarpDrive))
691 {
692 u64GipTime -= pVM->tm.s.u64VirtualWarpDriveStart; /* the start is GIP time. */
693 u64GipTime *= 100;
694 u64GipTime /= pVM->tm.s.u32VirtualWarpDrivePercentage;
695 u64GipTime += pVM->tm.s.u64VirtualWarpDriveStart;
696 }
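/*
 * Reference (illustrative): the warp drive maps raw GIP time as
 *      warped = (raw - start) * pct / 100 + start
 * so the block above applies the inverse,
 *      raw = (warped - start) * 100 / pct + start
 * to translate the expiry back into GIP time.
 */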
697 }
698 else
699 {
700 *pu64Delta = s_u64OtherRet;
701 u64GipTime = u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
702 }
703#ifndef IN_RING3
704 if (RT_SUCCESS(rc))
705#endif
706 tmVirtualSyncUnlock(pVM);
707 return u64GipTime;
708}
709#endif
710
711
712/**
713 * Gets the host context ring-3 pointer of the timer.
714 *
715 * @returns HC R3 pointer.
716 * @param pTimer Timer handle as returned by one of the create functions.
717 */
718VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
719{
720 return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
721}
722
723
724/**
725 * Gets the host context ring-0 pointer of the timer.
726 *
727 * @returns HC R0 pointer.
728 * @param pTimer Timer handle as returned by one of the create functions.
729 */
730VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
731{
732 return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
733}
734
735
736/**
737 * Gets the RC pointer of the timer.
738 *
739 * @returns RC pointer.
740 * @param pTimer Timer handle as returned by one of the create functions.
741 */
742VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
743{
744 return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
745}
746
747
748/**
749 * Arm a timer with a (new) expire time.
750 *
751 * @returns VBox status.
752 * @param pTimer Timer handle as returned by one of the create functions.
753 * @param u64Expire New expire time.
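 *
 * Typical use together with the getters/converters below (illustrative):
 * @code
 *      TMTimerSet(pTimer, TMTimerGet(pTimer) + TMTimerFromMilli(pTimer, 5));
 * @endcode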
754 */
755VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
756{
757 STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTXALLSUFF(StatTimerSet), a);
758
759 /** @todo find the most frequently used paths and make them skip tmSchedule and tmTimerTryWithLink. */
760 int cRetries = 1000;
761 do
762 {
763 /*
764 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
765 */
766 TMTIMERSTATE enmState = pTimer->enmState;
767 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%llu\n",
768 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
769 switch (enmState)
770 {
771 case TMTIMERSTATE_EXPIRED:
772 case TMTIMERSTATE_STOPPED:
773 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
774 {
775 Assert(!pTimer->offPrev);
776 Assert(!pTimer->offNext);
777 AssertMsg( pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
778 || pTimer->CTX_SUFF(pVM)->tm.s.fVirtualSyncTicking
779 || u64Expire >= pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync,
780 ("%RU64 < %RU64 %s\n", u64Expire, pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
781 pTimer->u64Expire = u64Expire;
782 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
783 tmSchedule(pTimer);
784 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
785 return VINF_SUCCESS;
786 }
787 break;
788
789 case TMTIMERSTATE_PENDING_SCHEDULE:
790 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
791 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
792 {
793 pTimer->u64Expire = u64Expire;
794 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
795 tmSchedule(pTimer);
796 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
797 return VINF_SUCCESS;
798 }
799 break;
800
801
802 case TMTIMERSTATE_ACTIVE:
803 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
804 {
805 pTimer->u64Expire = u64Expire;
806 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
807 tmSchedule(pTimer);
808 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
809 return VINF_SUCCESS;
810 }
811 break;
812
813 case TMTIMERSTATE_PENDING_RESCHEDULE:
814 case TMTIMERSTATE_PENDING_STOP:
815 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
816 {
817 pTimer->u64Expire = u64Expire;
818 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
819 tmSchedule(pTimer);
820 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
821 return VINF_SUCCESS;
822 }
823 break;
824
825
826 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
827 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
828#ifdef IN_RING3
829 if (!RTThreadYield())
830 RTThreadSleep(1);
831#else
832/** @todo call host context and yield after a couple of iterations */
833#endif
834 break;
835
836 /*
837 * Invalid states.
838 */
839 case TMTIMERSTATE_DESTROY:
840 case TMTIMERSTATE_FREE:
841 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
842 return VERR_TM_INVALID_STATE;
843 default:
844 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
845 return VERR_TM_UNKNOWN_STATE;
846 }
847 } while (cRetries-- > 0);
848
849 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
850 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
851 return VERR_INTERNAL_ERROR;
852}
853
854
855/**
856 * Arm a timer with a (new) expire time relative to current time.
857 *
858 * @returns VBox status.
859 * @param pTimer Timer handle as returned by one of the create functions.
860 * @param cMilliesToNext Number of milliseconds to the next tick.
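 *
 * For example (illustrative): on a TMCLOCK_VIRTUAL timer (1 GHz) a value of
 * 10 maps to 10 * 1000000000 / 1000 = 10000000 ticks past TMVirtualGet().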
861 */
862VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
863{
864 PVM pVM = pTimer->CTX_SUFF(pVM);
865 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
866
867 switch (pTimer->enmClock)
868 {
869 case TMCLOCK_VIRTUAL:
870 return TMTimerSet(pTimer, cMilliesToNext * (uint64_t)TMCLOCK_FREQ_VIRTUAL / 1000 + TMVirtualGet(pVM));
871 case TMCLOCK_VIRTUAL_SYNC:
872 return TMTimerSet(pTimer, cMilliesToNext * (uint64_t)TMCLOCK_FREQ_VIRTUAL / 1000 + TMVirtualSyncGet(pVM));
873 case TMCLOCK_REAL:
874 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
875 return TMTimerSet(pTimer, cMilliesToNext + TMRealGet(pVM));
876 case TMCLOCK_TSC:
877 return TMTimerSet(pTimer, cMilliesToNext * pVM->tm.s.cTSCTicksPerSecond / 1000 + TMCpuTickGet(pVCpu));
878
879 default:
880 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
881 return VERR_INTERNAL_ERROR;
882 }
883}
884
885
886/**
887 * Arm a timer with a (new) expire time relative to current time.
888 *
889 * @returns VBox status.
890 * @param pTimer Timer handle as returned by one of the create functions.
891 * @param cMicrosToNext Number of microseconds to the next tick.
892 */
893VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
894{
895 PVM pVM = pTimer->CTX_SUFF(pVM);
896 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
897
898 switch (pTimer->enmClock)
899 {
900 case TMCLOCK_VIRTUAL:
901 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
902 return TMTimerSet(pTimer, cMicrosToNext * 1000 + TMVirtualGet(pVM));
903
904 case TMCLOCK_VIRTUAL_SYNC:
905 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
906 return TMTimerSet(pTimer, cMicrosToNext * 1000 + TMVirtualSyncGet(pVM));
907
908 case TMCLOCK_REAL:
909 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
910 return TMTimerSet(pTimer, cMicrosToNext / 1000 + TMRealGet(pVM));
911
912 case TMCLOCK_TSC:
913 return TMTimerSet(pTimer, TMTimerFromMicro(pTimer, cMicrosToNext) + TMCpuTickGet(pVCpu));
914
915 default:
916 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
917 return VERR_INTERNAL_ERROR;
918 }
919}
920
921
922/**
923 * Arm a timer with a (new) expire time relative to current time.
924 *
925 * @returns VBox status.
926 * @param pTimer Timer handle as returned by one of the create functions.
927 * @param cNanosToNext Number of nanoseconds to the next tick.
928 */
929VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
930{
931 PVM pVM = pTimer->CTX_SUFF(pVM);
932 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
933
934 switch (pTimer->enmClock)
935 {
936 case TMCLOCK_VIRTUAL:
937 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
938 return TMTimerSet(pTimer, cNanosToNext + TMVirtualGet(pVM));
939
940 case TMCLOCK_VIRTUAL_SYNC:
941 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
942 return TMTimerSet(pTimer, cNanosToNext + TMVirtualSyncGet(pVM));
943
944 case TMCLOCK_REAL:
945 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
946 return TMTimerSet(pTimer, cNanosToNext / 1000000 + TMRealGet(pVM));
947
948 case TMCLOCK_TSC:
949 return TMTimerSet(pTimer, TMTimerFromNano(pTimer, cNanosToNext) + TMCpuTickGet(pVCpu));
950
951 default:
952 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
953 return VERR_INTERNAL_ERROR;
954 }
955}
956
957
958/**
959 * Stop the timer.
960 * Use TMR3TimerArm() to "un-stop" the timer.
961 *
962 * @returns VBox status.
963 * @param pTimer Timer handle as returned by one of the create functions.
964 */
965VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
966{
967 STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTXALLSUFF(StatTimerStop), a);
968 /** @todo see if this function needs optimizing. */
969 int cRetries = 1000;
970 do
971 {
972 /*
973 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
974 */
975 TMTIMERSTATE enmState = pTimer->enmState;
976 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
977 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
978 switch (enmState)
979 {
980 case TMTIMERSTATE_EXPIRED:
981 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
982 return VERR_INVALID_PARAMETER;
983
984 case TMTIMERSTATE_STOPPED:
985 case TMTIMERSTATE_PENDING_STOP:
986 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
987 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
988 return VINF_SUCCESS;
989
990 case TMTIMERSTATE_PENDING_SCHEDULE:
991 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
992 {
993 tmSchedule(pTimer);
994 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
995 return VINF_SUCCESS;
996 }
997 break;
998 case TMTIMERSTATE_PENDING_RESCHEDULE:
999 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1000 {
1001 tmSchedule(pTimer);
1002 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1003 return VINF_SUCCESS;
1004 }
1005 break;
1006
1007 case TMTIMERSTATE_ACTIVE:
1008 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1009 {
1010 tmSchedule(pTimer);
1011 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1012 return VINF_SUCCESS;
1013 }
1014 break;
1015
1016 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1017 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1018#ifdef IN_RING3
1019 if (!RTThreadYield())
1020 RTThreadSleep(1);
1021#else
1022/** @todo call host and yield the CPU after a while. */
1023#endif
1024 break;
1025
1026 /*
1027 * Invalid states.
1028 */
1029 case TMTIMERSTATE_DESTROY:
1030 case TMTIMERSTATE_FREE:
1031 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1032 return VERR_TM_INVALID_STATE;
1033 default:
1034 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1035 return VERR_TM_UNKNOWN_STATE;
1036 }
1037 } while (cRetries-- > 0);
1038
1039 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1040 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1041 return VERR_INTERNAL_ERROR;
1042}
1043
1044
1045/**
1046 * Get the current clock time.
1047 * Handy for calculating the new expire time.
1048 *
1049 * @returns Current clock time.
1050 * @param pTimer Timer handle as returned by one of the create functions.
1051 */
1052VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
1053{
1054 uint64_t u64;
1055 PVM pVM = pTimer->CTX_SUFF(pVM);
1056
1057 switch (pTimer->enmClock)
1058 {
1059 case TMCLOCK_VIRTUAL:
1060 u64 = TMVirtualGet(pVM);
1061 break;
1062 case TMCLOCK_VIRTUAL_SYNC:
1063 u64 = TMVirtualSyncGet(pVM);
1064 break;
1065 case TMCLOCK_REAL:
1066 u64 = TMRealGet(pVM);
1067 break;
1068 case TMCLOCK_TSC:
1069 {
1070 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1071 u64 = TMCpuTickGet(pVCpu);
1072 break;
1073 }
1074 default:
1075 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1076 return ~(uint64_t)0;
1077 }
1078 //Log2(("TMTimerGet: returns %llu (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1079 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1080 return u64;
1081}
1082
1083
1084/**
1085 * Get the frequency of the timer clock.
1086 *
1087 * @returns Clock frequency (as Hz of course).
1088 * @param pTimer Timer handle as returned by one of the create functions.
1089 */
1090VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
1091{
1092 switch (pTimer->enmClock)
1093 {
1094 case TMCLOCK_VIRTUAL:
1095 case TMCLOCK_VIRTUAL_SYNC:
1096 return TMCLOCK_FREQ_VIRTUAL;
1097
1098 case TMCLOCK_REAL:
1099 return TMCLOCK_FREQ_REAL;
1100
1101 case TMCLOCK_TSC:
1102 return TMCpuTicksPerSecond(pTimer->CTX_SUFF(pVM));
1103
1104 default:
1105 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1106 return 0;
1107 }
1108}
1109
1110
1111/**
1112 * Get the current clock time as nanoseconds.
1113 *
1114 * @returns The timer clock as nanoseconds.
1115 * @param pTimer Timer handle as returned by one of the create functions.
1116 */
1117VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
1118{
1119 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
1120}
1121
1122
1123/**
1124 * Get the current clock time as microseconds.
1125 *
1126 * @returns The timer clock as microseconds.
1127 * @param pTimer Timer handle as returned by one of the create functions.
1128 */
1129VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
1130{
1131 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
1132}
1133
1134
1135/**
1136 * Get the current clock time as milliseconds.
1137 *
1138 * @returns The timer clock as milliseconds.
1139 * @param pTimer Timer handle as returned by one of the create functions.
1140 */
1141VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
1142{
1143 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
1144}
1145
1146
1147/**
1148 * Converts the specified timer clock time to nanoseconds.
1149 *
1150 * @returns nanoseconds.
1151 * @param pTimer Timer handle as returned by one of the create functions.
1152 * @param u64Ticks The clock ticks.
1153 * @remark There could be rounding errors here. We just do a simple integer divide
1154 * without any adjustments.
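 *
 * For example (illustrative): 1500 ticks are 1500 ns on the virtual clocks
 * (1 GHz) and 1500 * 1000000 = 1500000000 ns on the real clock (1 kHz).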
1155 */
1156VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
1157{
1158 switch (pTimer->enmClock)
1159 {
1160 case TMCLOCK_VIRTUAL:
1161 case TMCLOCK_VIRTUAL_SYNC:
1162 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1163 return u64Ticks;
1164
1165 case TMCLOCK_REAL:
1166 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1167 return u64Ticks * 1000000;
1168
1169 case TMCLOCK_TSC:
1170 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1171 return 0;
1172
1173 default:
1174 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1175 return 0;
1176 }
1177}
1178
1179
1180/**
1181 * Converts the specified timer clock time to microseconds.
1182 *
1183 * @returns microseconds.
1184 * @param pTimer Timer handle as returned by one of the create functions.
1185 * @param u64Ticks The clock ticks.
1186 * @remark There could be rounding errors here. We just do a simple integer divide
1187 * without any adjustments.
1188 */
1189VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
1190{
1191 switch (pTimer->enmClock)
1192 {
1193 case TMCLOCK_VIRTUAL:
1194 case TMCLOCK_VIRTUAL_SYNC:
1195 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1196 return u64Ticks / 1000;
1197
1198 case TMCLOCK_REAL:
1199 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1200 return u64Ticks * 1000;
1201
1202 case TMCLOCK_TSC:
1203 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1204 return 0;
1205
1206 default:
1207 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1208 return 0;
1209 }
1210}
1211
1212
1213/**
1214 * Converts the specified timer clock time to milliseconds.
1215 *
1216 * @returns milliseconds.
1217 * @param pTimer Timer handle as returned by one of the create functions.
1218 * @param u64Ticks The clock ticks.
1219 * @remark There could be rounding errors here. We just do a simple integer divide
1220 * without any adjustments.
1221 */
1222VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
1223{
1224 switch (pTimer->enmClock)
1225 {
1226 case TMCLOCK_VIRTUAL:
1227 case TMCLOCK_VIRTUAL_SYNC:
1228 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1229 return u64Ticks / 1000000;
1230
1231 case TMCLOCK_REAL:
1232 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1233 return u64Ticks;
1234
1235 case TMCLOCK_TSC:
1236 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1237 return 0;
1238
1239 default:
1240 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1241 return 0;
1242 }
1243}
1244
1245
1246/**
1247 * Converts the specified nanosecond timestamp to timer clock ticks.
1248 *
1249 * @returns timer clock ticks.
1250 * @param pTimer Timer handle as returned by one of the create functions.
1251 * @param u64NanoTS The nanosecond value to convert.
1252 * @remark There could be rounding and overflow errors here.
1253 */
1254VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t u64NanoTS)
1255{
1256 switch (pTimer->enmClock)
1257 {
1258 case TMCLOCK_VIRTUAL:
1259 case TMCLOCK_VIRTUAL_SYNC:
1260 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1261 return u64NanoTS;
1262
1263 case TMCLOCK_REAL:
1264 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1265 return u64NanoTS / 1000000;
1266
1267 case TMCLOCK_TSC:
1268 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1269 return 0;
1270
1271 default:
1272 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1273 return 0;
1274 }
1275}
1276
1277
1278/**
1279 * Converts the specified microsecond timestamp to timer clock ticks.
1280 *
1281 * @returns timer clock ticks.
1282 * @param pTimer Timer handle as returned by one of the create functions.
1283 * @param u64MicroTS The microsecond value to convert.
1284 * @remark There could be rounding and overflow errors here.
1285 */
1286VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t u64MicroTS)
1287{
1288 switch (pTimer->enmClock)
1289 {
1290 case TMCLOCK_VIRTUAL:
1291 case TMCLOCK_VIRTUAL_SYNC:
1292 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1293 return u64MicroTS * 1000;
1294
1295 case TMCLOCK_REAL:
1296 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1297 return u64MicroTS / 1000;
1298
1299 case TMCLOCK_TSC:
1300 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1301 return 0;
1302
1303 default:
1304 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1305 return 0;
1306 }
1307}
1308
1309
1310/**
1311 * Converts the specified millisecond timestamp to timer clock ticks.
1312 *
1313 * @returns timer clock ticks.
1314 * @param pTimer Timer handle as returned by one of the create functions.
1315 * @param u64MilliTS The millisecond value to convert.
1316 * @remark There could be rounding and overflow errors here.
1317 */
1318VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t u64MilliTS)
1319{
1320 switch (pTimer->enmClock)
1321 {
1322 case TMCLOCK_VIRTUAL:
1323 case TMCLOCK_VIRTUAL_SYNC:
1324 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1325 return u64MilliTS * 1000000;
1326
1327 case TMCLOCK_REAL:
1328 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1329 return u64MilliTS;
1330
1331 case TMCLOCK_TSC:
1332 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1333 return 0;
1334
1335 default:
1336 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1337 return 0;
1338 }
1339}
1340
1341
1342/**
1343 * Get the expire time of the timer.
1344 * Only valid for active timers.
1345 *
1346 * @returns Expire time of the timer.
1347 * @param pTimer Timer handle as returned by one of the create functions.
1348 */
1349VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
1350{
1351 int cRetries = 1000;
1352 do
1353 {
1354 TMTIMERSTATE enmState = pTimer->enmState;
1355 switch (enmState)
1356 {
1357 case TMTIMERSTATE_EXPIRED:
1358 case TMTIMERSTATE_STOPPED:
1359 case TMTIMERSTATE_PENDING_STOP:
1360 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1361 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1362 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1363 return ~(uint64_t)0;
1364
1365 case TMTIMERSTATE_ACTIVE:
1366 case TMTIMERSTATE_PENDING_RESCHEDULE:
1367 case TMTIMERSTATE_PENDING_SCHEDULE:
1368 Log2(("TMTimerGetExpire: returns %llu (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1369 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1370 return pTimer->u64Expire;
1371
1372 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1373 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1374#ifdef IN_RING3
1375 if (!RTThreadYield())
1376 RTThreadSleep(1);
1377#endif
1378 break;
1379
1380 /*
1381 * Invalid states.
1382 */
1383 case TMTIMERSTATE_DESTROY:
1384 case TMTIMERSTATE_FREE:
1385 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1386 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1387 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1388 return ~(uint64_t)0;
1389 default:
1390 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1391 return ~(uint64_t)0;
1392 }
1393 } while (cRetries-- > 0);
1394
1395 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1396 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1397 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1398 return ~(uint64_t)0;
1399}
1400
1401
1402/**
1403 * Checks if a timer is active or not.
1404 *
1405 * @returns True if active.
1406 * @returns False if not active.
1407 * @param pTimer Timer handle as returned by one of the create functions.
1408 */
1409VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
1410{
1411 TMTIMERSTATE enmState = pTimer->enmState;
1412 switch (enmState)
1413 {
1414 case TMTIMERSTATE_STOPPED:
1415 case TMTIMERSTATE_EXPIRED:
1416 case TMTIMERSTATE_PENDING_STOP:
1417 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1418 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1419 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1420 return false;
1421
1422 case TMTIMERSTATE_ACTIVE:
1423 case TMTIMERSTATE_PENDING_RESCHEDULE:
1424 case TMTIMERSTATE_PENDING_SCHEDULE:
1425 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1426 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1427 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1428 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1429 return true;
1430
1431 /*
1432 * Invalid states.
1433 */
1434 case TMTIMERSTATE_DESTROY:
1435 case TMTIMERSTATE_FREE:
1436 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1437 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1438 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1439 return false;
1440 default:
1441 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1442 return false;
1443 }
1444}
1445
1446
1447/**
1448 * Convert state to string.
1449 *
1450 * @returns Read-only state name.
1451 * @param enmState State.
1452 */
1453const char *tmTimerState(TMTIMERSTATE enmState)
1454{
1455 switch (enmState)
1456 {
1457#define CASE(num, state) \
1458 case TMTIMERSTATE_##state: \
1459 AssertCompile(TMTIMERSTATE_##state == (num)); \
1460 return #num "-" #state
1461 CASE( 1,STOPPED);
1462 CASE( 2,ACTIVE);
1463 CASE( 3,EXPIRED);
1464 CASE( 4,PENDING_STOP);
1465 CASE( 5,PENDING_STOP_SCHEDULE);
1466 CASE( 6,PENDING_SCHEDULE_SET_EXPIRE);
1467 CASE( 7,PENDING_SCHEDULE);
1468 CASE( 8,PENDING_RESCHEDULE_SET_EXPIRE);
1469 CASE( 9,PENDING_RESCHEDULE);
1470 CASE(10,DESTROY);
1471 CASE(11,FREE);
1472 default:
1473 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
1474 return "Invalid state!";
1475#undef CASE
1476 }
1477}
1478
1479
1480/**
1481 * Schedules the given timer on the given queue.
1482 *
1483 * @param pQueue The timer queue.
1484 * @param pTimer The timer that needs scheduling.
1485 *
1486 * @remarks Called while owning the lock.
1487 */
1488DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
1489{
1490 /*
1491 * Processing.
1492 */
1493 unsigned cRetries = 2;
1494 do
1495 {
1496 TMTIMERSTATE enmState = pTimer->enmState;
1497 switch (enmState)
1498 {
1499 /*
1500 * Reschedule timer (in the active list).
1501 */
1502 case TMTIMERSTATE_PENDING_RESCHEDULE:
1503 {
1504 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
1505 break; /* retry */
1506
1507 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
1508 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
1509 if (pPrev)
1510 TMTIMER_SET_NEXT(pPrev, pNext);
1511 else
1512 {
1513 TMTIMER_SET_HEAD(pQueue, pNext);
1514 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
1515 }
1516 if (pNext)
1517 TMTIMER_SET_PREV(pNext, pPrev);
1518 pTimer->offNext = 0;
1519 pTimer->offPrev = 0;
1520 /* fall thru */
1521 }
1522
1523 /*
1524 * Schedule timer (insert into the active list).
1525 */
1526 case TMTIMERSTATE_PENDING_SCHEDULE:
1527 {
1528 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
1529 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
1530 break; /* retry */
1531
1532 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
1533 if (pCur)
1534 {
1535 const uint64_t u64Expire = pTimer->u64Expire;
1536 for (;; pCur = TMTIMER_GET_NEXT(pCur))
1537 {
1538 if (pCur->u64Expire > u64Expire)
1539 {
1540 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
1541 TMTIMER_SET_NEXT(pTimer, pCur);
1542 TMTIMER_SET_PREV(pTimer, pPrev);
1543 if (pPrev)
1544 TMTIMER_SET_NEXT(pPrev, pTimer);
1545 else
1546 {
1547 TMTIMER_SET_HEAD(pQueue, pTimer);
1548 pQueue->u64Expire = u64Expire;
1549 }
1550 TMTIMER_SET_PREV(pCur, pTimer);
1551 return;
1552 }
1553 if (!pCur->offNext)
1554 {
1555 TMTIMER_SET_NEXT(pCur, pTimer);
1556 TMTIMER_SET_PREV(pTimer, pCur);
1557 return;
1558 }
1559 }
1560 }
1561 else
1562 {
1563 TMTIMER_SET_HEAD(pQueue, pTimer);
1564 pQueue->u64Expire = pTimer->u64Expire;
1565 }
1566 return;
1567 }
1568
1569 /*
1570 * Stop the timer in active list.
1571 */
1572 case TMTIMERSTATE_PENDING_STOP:
1573 {
1574 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
1575 break; /* retry */
1576
1577 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
1578 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
1579 if (pPrev)
1580 TMTIMER_SET_NEXT(pPrev, pNext);
1581 else
1582 {
1583 TMTIMER_SET_HEAD(pQueue, pNext);
1584 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
1585 }
1586 if (pNext)
1587 TMTIMER_SET_PREV(pNext, pPrev);
1588 pTimer->offNext = 0;
1589 pTimer->offPrev = 0;
1590 /* fall thru */
1591 }
1592
1593 /*
1594 * Stop the timer (not on the active list).
1595 */
1596 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1597 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
1598 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
1599 break;
1600 return;
1601
1602 /*
1603 * The timer is pending destruction by TMR3TimerDestroy, our caller.
1604 * Nothing to do here.
1605 */
1606 case TMTIMERSTATE_DESTROY:
1607 break;
1608
1609 /*
1610 * Postpone these until they get into the right state.
1611 */
1612 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1613 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1614 tmTimerLink(pQueue, pTimer);
1615 STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
1616 return;
1617
1618 /*
1619 * None of these can be in the schedule.
1620 */
1621 case TMTIMERSTATE_FREE:
1622 case TMTIMERSTATE_STOPPED:
1623 case TMTIMERSTATE_ACTIVE:
1624 case TMTIMERSTATE_EXPIRED:
1625 default:
1626 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
1627 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
1628 return;
1629 }
1630 } while (cRetries-- > 0);
1631}
1632
1633
1634/**
1635 * Schedules the specified timer queue.
1636 *
1637 * @param pVM The VM to run the timers for.
1638 * @param pQueue The queue to schedule.
1639 *
1640 * @remarks Called while owning the lock.
1641 */
1642void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
1643{
1644 TM_ASSERT_EMT_LOCK(pVM);
1645
1646 /*
1647 * Dequeue the scheduling list and iterate it.
1648 */
1649 int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
1650 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32}\n", pQueue, pQueue->enmClock, offNext));
1651 if (!offNext)
1652 return;
1653 PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
1654 while (pNext)
1655 {
1656 /*
1657 * Unlink the head timer and find the next one.
1658 */
1659 PTMTIMER pTimer = pNext;
1660 pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
1661 pTimer->offScheduleNext = 0;
1662
1663 /*
1664 * Do the scheduling.
1665 */
1666 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
1667 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
1668 tmTimerQueueScheduleOne(pQueue, pTimer);
1669 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
1670 } /* foreach timer in current schedule batch. */
1671}
1672
1673
1674#ifdef VBOX_STRICT
1675/**
1676 * Checks that the timer queues are sane.
1677 *
1678 * @param pVM VM handle.
 * @param pszWhere Caller location tag used in the assertion messages.
1679 *
1680 * @remarks Called while owning the lock.
1681 */
1682void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
1683{
1684 TM_ASSERT_EMT_LOCK(pVM);
1685
1686 /*
1687 * Check the linking of the active lists.
1688 */
1689 for (int i = 0; i < TMCLOCK_MAX; i++)
1690 {
1691 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
1692 Assert((int)pQueue->enmClock == i);
1693 PTMTIMER pPrev = NULL;
1694 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
1695 {
1696 AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
1697 AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
1698 TMTIMERSTATE enmState = pCur->enmState;
1699 switch (enmState)
1700 {
1701 case TMTIMERSTATE_ACTIVE:
1702 AssertMsg( !pCur->offScheduleNext
1703 || pCur->enmState != TMTIMERSTATE_ACTIVE,
1704 ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
1705 break;
1706 case TMTIMERSTATE_PENDING_STOP:
1707 case TMTIMERSTATE_PENDING_RESCHEDULE:
1708 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1709 break;
1710 default:
1711 AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
1712 break;
1713 }
1714 }
1715 }
1716
1717
1718# ifdef IN_RING3
1719 /*
1720 * Do the big list and check that active timers all are in the active lists.
1721 */
1722 PTMTIMERR3 pPrev = NULL;
1723 for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
1724 {
1725 Assert(pCur->pBigPrev == pPrev);
1726 Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);
1727
1728 TMTIMERSTATE enmState = pCur->enmState;
1729 switch (enmState)
1730 {
1731 case TMTIMERSTATE_ACTIVE:
1732 case TMTIMERSTATE_PENDING_STOP:
1733 case TMTIMERSTATE_PENDING_RESCHEDULE:
1734 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1735 {
1736 PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
1737 Assert(pCur->offPrev || pCur == pCurAct);
1738 while (pCurAct && pCurAct != pCur)
1739 pCurAct = TMTIMER_GET_NEXT(pCurAct);
1740 Assert(pCurAct == pCur);
1741 break;
1742 }
1743
1744 case TMTIMERSTATE_PENDING_SCHEDULE:
1745 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1746 case TMTIMERSTATE_STOPPED:
1747 case TMTIMERSTATE_EXPIRED:
1748 {
1749 Assert(!pCur->offNext);
1750 Assert(!pCur->offPrev);
1751 for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
1752 pCurAct;
1753 pCurAct = TMTIMER_GET_NEXT(pCurAct))
1754 {
1755 Assert(pCurAct != pCur);
1756 Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
1757 Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
1758 }
1759 break;
1760 }
1761
1762 /* ignore */
1763 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1764 break;
1765
1766 /* shouldn't get here! */
1767 case TMTIMERSTATE_DESTROY:
1768 default:
1769 AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
1770 break;
1771 }
1772 }
1773# endif /* IN_RING3 */
1774}
1775#endif /* VBOX_STRICT */
1776
1777
1778/**
1779 * Gets the current warp drive percent.
1780 *
1781 * @returns The warp drive percent.
1782 * @param pVM The VM handle.
1783 */
1784VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
1785{
1786 return pVM->tm.s.u32VirtualWarpDrivePercentage;
1787}
1788