/* $Id$ */
/** @file
* TM - Timeout Manager, Virtual Time, All Contexts.
*/
/*
* Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#ifdef IN_RING3
# ifdef VBOX_WITH_REM
# endif
#endif
#include "TMInternal.h"
#include <iprt/asm-math.h>
/**
* @interface_method_impl{RTTIMENANOTSDATA, pfnBadPrev}
*/
DECLEXPORT(void) tmVirtualNanoTSBadPrev(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev,
                                        uint64_t u64PrevNanoTS)
{
    /*
     * Called by the RTTimeNanoTS worker when the previous timestamp is ahead of
     * the new reading (negative delta) or otherwise suspicious.  The source was
     * truncated here: the final parameter, the pVM recovery and the Log argument
     * lines were missing and have been restored.
     */
    PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
    pData->cBadPrev++;
    if ((int64_t)u64DeltaPrev < 0)
        /* Time went backwards — always worth a release-log entry. */
        LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p\n",
                u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
    else
        /* Large positive delta; typically someone halted the process in a debugger. */
        Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p (debugging?)\n",
             u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
}
/**
* @interface_method_impl{RTTIMENANOTSDATA, pfnRediscover}
*
* This is the initial worker, so the first call in each context ends up here.
* It is also used should the delta rating of the host CPUs change or if the
* fGetGipCpu feature the current worker relies upon becomes unavailable. The
* last two events may occur as CPUs are taken online.
*/
{
/* NOTE(review): this is the pfnRediscover worker body (presumably
   tmVirtualNanoTSRediscover); its signature line and most of the
   worker-selection code (GIP lookup, fLFence detection, the switch
   statement head, pfnWorker assignments) are missing from this chunk,
   leaving the case labels and #else/#endif fragments below unbalanced.
   Restore from upstream before relying on this code. */
/*
 * We require a valid GIP for the selection below. Invalid GIP is fatal.
 */
AssertFatalMsg(pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC, ("pVM=%p pGip=%p u32Magic=%#x\n", pVM, pGip, pGip->u32Magic));
/*
 * Determine the new worker: picks an RTTimeNanoTS* implementation based on
 * the GIP TSC mode and CPU-id detection feature available in this context.
 */
{
case SUPGIPMODE_SYNC_TSC:
case SUPGIPMODE_INVARIANT_TSC:
else
#else
/* NOTE(review): the #if branch and the assignment heads for these
   conditional-expression tails are missing. */
: fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseIdtrLim : RTTimeNanoTSLegacySyncInvarWithDeltaUseIdtrLim;
: fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseRdtscp : RTTimeNanoTSLegacySyncInvarWithDeltaUseRdtscp;
else
: fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicId : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicId;
#endif
break;
case SUPGIPMODE_ASYNC_TSC:
#else
else
#endif
break;
default:
}
/*
 * Update the pfnVirtualGetRaw pointer and call the worker we selected.
 */
ASMAtomicWritePtr((void * volatile *)&CTX_SUFF(pVM->tm.s.pfnVirtualGetRaw), (void *)(uintptr_t)pfnWorker);
}
/**
* @interface_method_impl{RTTIMENANOTSDATA, pfnBadGipIndex}
*/
DECLEXPORT(uint64_t) tmVirtualNanoTSBadCpuIndex(PRTTIMENANOTSDATA pData, uint16_t idApic, uint16_t iCpuSet, uint16_t iGipCpu)
{
    /*
     * Called when the RTTimeNanoTS worker gets an impossible CPU index from
     * the GIP; this is an internal processing error and therefore fatal.
     * The pVM recovery line was missing from the truncated source and has
     * been restored so the assertion message below compiles.
     */
    PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
    AssertFatalMsgFailed(("pVM=%p idApic=%#x iCpuSet=%#x iGipCpu=%#x\n", pVM, idApic, iCpuSet, iGipCpu));
#ifndef _MSC_VER
    return UINT64_MAX; /* unreachable; silences missing-return warnings on non-MSC compilers */
#endif
}
/**
* Wrapper around the IPRT GIP time methods.
*/
{
/* NOTE(review): signature missing — presumably tmVirtualGetRawNanoTS(PVM).
   The per-context calls that assign u64 (ring-3 vs. raw-mode/ring-0 GIP
   time readers) have been stripped from the #ifdef branches below. */
# ifdef IN_RING3
# else /* !IN_RING3 */
# endif /* !IN_RING3 */
/*DBGFTRACE_POS_U64(pVM, u64);*/
return u64;
}
/**
* Get the time when we're not running at 100%
*
* @returns The timestamp.
* @param pVM Pointer to the VM.
*/
{
/* NOTE(review): signature missing — presumably tmVirtualGetRawNonNormal(PVM).
   The statements that compute u64 (warp-drive start offset and percentage
   multiply) before the divide below are absent from this chunk. */
/*
 * Recalculate the RTTimeNanoTS() value for the period where
 * warp drive has been enabled.
 */
u64 /= 100;
/*
 * Now we apply the virtual time offset.
 * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
 * machine started if it had been running continuously without any suspends.)
 */
return u64;
}
/**
* Get the raw virtual time.
*
* @returns The current time stamp.
* @param pVM Pointer to the VM.
*/
{
/* NOTE(review): signature missing — presumably tmVirtualGetRaw(PVM).
   The fast path for the normal 100%-speed case is absent; only the
   warp-drive fallback call remains. */
return tmVirtualGetRawNonNormal(pVM);
}
/**
* Inlined version of tmVirtualGetEx.
*/
{
/* NOTE(review): signature missing — presumably the inlined tmVirtualGet
   worker taking (pVM, fCheckTimers).  The clock read into u64, the
   ticking check, the expiry-condition heads and the FF-raising/REM
   notification statements have been stripped; the braces and dangling
   ')' lines below are unbalanced as a result. */
{
/*
 * Use the chance to check for expired timers.
 */
if (fCheckTimers)
{
&& pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
)
)
)
{
#ifdef IN_RING3
# ifdef VBOX_WITH_REM
# endif
#endif
}
}
}
else
return u64;
}
/**
* Gets the current TMCLOCK_VIRTUAL time
*
* @returns The timestamp.
* @param pVM Pointer to the VM.
*
* @remark While the flow of time will never go backwards, the speed of the
* progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
* influenced by power saving (SpeedStep, PowerNow!), while the former
* makes use of TSC and kernel timers.
*/
{
/* NOTE(review): body stripped — presumably TMVirtualGet(PVM) forwarding
   to the inlined getter with timer checking enabled.  Restore from
   upstream; as written this returns nothing. */
}
/**
* Gets the current TMCLOCK_VIRTUAL time without checking
* timers or anything.
*
* Meaning, this has no side effect on FFs like TMVirtualGet may have.
*
* @returns The timestamp.
* @param pVM Pointer to the VM.
*
* @remarks See TMVirtualGet.
*/
{
/* NOTE(review): body stripped — presumably TMVirtualGetNoCheck(PVM)
   forwarding to the inlined getter with timer checking disabled. */
}
/**
 * Converts the deadline interval from TMCLOCK_VIRTUAL to host nano seconds.
*
* @returns Host nano second count.
* @param pVM Pointer to the VM.
* @param cVirtTicksToDeadline The TMCLOCK_VIRTUAL interval.
*/
{
/* NOTE(review): signature missing.  The warp-drive scaling branch appears
   to have been stripped; what remains is the identity conversion, which is
   consistent with a 1ns-per-tick virtual clock at 100% speed — confirm
   against upstream. */
return cVirtTicksToDeadline;
}
/**
* tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
*
* @returns The timestamp.
* @param pVM Pointer to the VM.
* @param u64 raw virtual time.
* @param off offVirtualSync.
* @param pcNsToDeadline Where to return the number of nano seconds to
* the next virtual sync timer deadline. Can be
* NULL.
*/
DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVM pVM, uint64_t u64, uint64_t off, uint64_t *pcNsToDeadline)
{
/* NOTE(review): large parts of this body are missing — the u64Delta
   computation, the catch-up interval guard, the off/u64 adjustments and
   the statements under the fUpdate*/fStop/pcNsToDeadline conditions.
   The control-flow skeleton below is unbalanced; restore from upstream. */
/*
 * Don't make updates until we've checked the timer queue.
 */
bool fUpdatePrev = true;
bool fUpdateOff = true;
bool fStop = false;
{
uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
{
Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
}
else
{
/* We've completely caught up. */
fStop = true;
}
}
else
{
/* More than 4 seconds since last time (or negative), ignore it. */
fUpdateOff = false;
}
/*
 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
 * approach is to never pass the head timer. So, when we do stop the clock and
 * set the timer pending flag.
 */
{
}
uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
{
if (fUpdateOff)
if (fStop)
if (fUpdatePrev)
if (pcNsToDeadline)
{
}
}
else
{
Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
if (pcNsToDeadline)
*pcNsToDeadline = 0;
#ifdef IN_RING3
# ifdef VBOX_WITH_REM
# endif
#endif
}
return u64;
}
/**
* tmVirtualSyncGetEx worker for when we get the lock.
*
 * @returns timestamp.
* @param pVM Pointer to the VM.
* @param u64 The virtual clock timestamp.
* @param pcNsToDeadline Where to return the number of nano seconds to
* the next virtual sync timer deadline. Can be
* NULL.
*/
{
/* NOTE(review): signature missing — presumably tmVirtualSyncGetLocked
   (pVM, u64, pcNsToDeadline).  The ticking test, the catch-up dispatch to
   tmVirtualSyncGetHandleCatchUpLocked, the off/u64 clamping and the
   deadline computation have been stripped; braces are unbalanced. */
/*
 * Not ticking?
 */
{
if (pcNsToDeadline)
*pcNsToDeadline = 0;
return u64;
}
/*
 * Handle catch up in a separate function.
 */
/*
 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
 * approach is to never pass the head timer. So, when we do stop the clock and
 * set the timer pending flag.
 */
{
}
uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
{
if (pcNsToDeadline)
}
else
{
Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, !!VMCPU_FF_IS_PENDING(pVCpuDst, VMCPU_FF_TIMER)));
Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
#ifdef IN_RING3
# ifdef VBOX_WITH_REM
# endif
#endif
if (pcNsToDeadline)
*pcNsToDeadline = 0;
}
return u64;
}
/**
* Gets the current TMCLOCK_VIRTUAL_SYNC time.
*
* @returns The timestamp.
* @param pVM Pointer to the VM.
* @param fCheckTimers Check timers or not
* @param pcNsToDeadline Where to return the number of nano seconds to
* the next virtual sync timer deadline. Can be
* NULL.
* @thread EMT.
*/
{
/* NOTE(review): signature missing — presumably tmVirtualSyncGetEx
   (pVM, fCheckTimers, pcNsToDeadline).  This is the lock-free/locked
   virtual-sync getter; the clock reads, lock attempts, flag tests and
   most condition heads have been stripped, leaving the skeleton below
   unbalanced.  Restore from upstream before relying on it. */
{
if (pcNsToDeadline)
*pcNsToDeadline = 0;
return u64;
}
/*
 * Query the virtual clock and do the usual expired timer check.
 */
if (fCheckTimers)
{
{
#ifdef IN_RING3
# ifdef VBOX_WITH_REM
# endif
#endif
}
}
/*
 * If we can get the lock, get it. The result is much more reliable.
 *
 * Note! This is where all clock source devices branch off because they
 * will be owning the lock already. The 'else' is taken by code
 * which is less picky or hasn't been adjusted yet
 */
/*
 * When the clock is ticking, not doing catch ups and not running into an
 * expired time, we can get away without locking. Try this first.
 */
{
{
{
uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
{
if (pcNsToDeadline)
return off;
}
}
}
}
else
{
{
if (pcNsToDeadline)
*pcNsToDeadline = 0;
return off;
}
}
/*
 * Read the offset and adjust if we're playing catch-up.
 *
 * The catch-up adjusting works by us decrementing the offset by a percentage of
 * the time elapsed since the previous TMVirtualGetSync call.
 *
 * It's possible to get a very long or even negative interval between two read
 * for the following reasons:
 * - Someone might have suspended the process execution, frequently the case when
 * debugging the process.
 * - We might be on a different CPU which TSC isn't quite in sync with the
 * other CPUs in the system.
 * - Another thread is racing us and we might have been preempted while inside
 * this function.
 *
 * Assuming nano second virtual time, we can simply ignore any intervals which has
 * any of the upper 32 bits set.
 */
for (;; cOuterTries--)
{
/* Try grab the lock, things get simpler when owning the lock. */
if (RT_SUCCESS_NP(rcLock))
/* Re-check the ticking flag. */
{
&& cOuterTries > 0)
continue;
if (pcNsToDeadline)
*pcNsToDeadline = 0;
return off;
}
{
/* No changes allowed, try get a consistent set of parameters. */
|| cOuterTries <= 0)
{
{
{
Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
}
else
{
/* We've completely caught up. */
off = offGivenUp;
}
}
else
/* More than 4 seconds since last time (or negative), ignore it. */
/* Check that we're still running and in catch up. */
break;
if (cOuterTries <= 0)
break; /* enough */
}
}
break; /* Got a consistent offset */
else if (cOuterTries <= 0)
break; /* enough */
}
if (cOuterTries <= 0)
/*
 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
 * approach is to never pass the head timer. So, when we do stop the clock and
 * set the timer pending flag.
 */
/** @todo u64VirtualSyncLast */
uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
{
{
Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_IS_PENDING(pVCpuDst, VMCPU_FF_TIMER)));
#ifdef IN_RING3
# ifdef VBOX_WITH_REM
# endif
#endif
Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
}
else
Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
if (pcNsToDeadline)
*pcNsToDeadline = 0;
}
else if (pcNsToDeadline)
{
}
return u64;
}
/**
* Gets the current TMCLOCK_VIRTUAL_SYNC time.
*
* @returns The timestamp.
* @param pVM Pointer to the VM.
* @thread EMT.
* @remarks May set the timer and virtual sync FFs.
*/
{
/* NOTE(review): body stripped — presumably TMVirtualSyncGet(PVM)
   forwarding to tmVirtualSyncGetEx with timer checking enabled. */
}
/**
* Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
* TMCLOCK_VIRTUAL.
*
* @returns The timestamp.
* @param pVM Pointer to the VM.
* @thread EMT.
* @remarks May set the timer and virtual sync FFs.
*/
{
/* NOTE(review): body stripped — presumably TMVirtualSyncGetNoCheck(PVM)
   forwarding to tmVirtualSyncGetEx with timer checking disabled. */
}
/**
* Gets the current TMCLOCK_VIRTUAL_SYNC time.
*
* @returns The timestamp.
* @param pVM Pointer to the VM.
* @param fCheckTimers Check timers on the virtual clock or not.
* @thread EMT.
* @remarks May set the timer and virtual sync FFs.
*/
{
/* NOTE(review): body stripped — presumably TMVirtualSyncGetEx(PVM,
   fCheckTimers) forwarding the flag to the internal worker. */
}
/**
* Gets the current TMCLOCK_VIRTUAL_SYNC time and ticks to the next deadline
* without checking timers running on TMCLOCK_VIRTUAL.
*
* @returns The timestamp.
* @param pVM Pointer to the VM.
* @param pcNsToDeadline Where to return the number of nano seconds to
* the next virtual sync timer deadline.
* @thread EMT.
* @remarks May set the timer and virtual sync FFs.
*/
{
/* NOTE(review): signature missing and the call assigning u64Now is
   stripped — presumably invokes the internal getter with fCheckTimers
   false and pcNsToDeadline forwarded. */
return u64Now;
}
/**
* Gets the number of nano seconds to the next virtual sync deadline.
*
* @returns The number of TMCLOCK_VIRTUAL ticks.
* @param pVM Pointer to the VM.
* @thread EMT.
* @remarks May set the timer and virtual sync FFs.
*/
{
/* NOTE(review): signature missing and the statements computing
   cNsToDeadline (virtual-sync read plus tick-to-ns conversion) are
   stripped from this chunk. */
return cNsToDeadline;
}
/**
* Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
*
* @return The current lag.
* @param pVM Pointer to the VM.
*/
{
/* NOTE(review): body stripped — presumably returns the current
   virtual-sync offset minus the given-up offset.  Restore from upstream. */
}
/**
* Get the current catch-up percent.
*
 * @return The current catch-up percent. 0 means running at the same speed as the virtual clock.
* @param pVM Pointer to the VM.
*/
{
/* NOTE(review): the branch returning u32VirtualSyncCatchUpPercentage when
   catching up appears to be stripped; only the not-catching-up path (0)
   remains. */
return 0;
}
/**
* Gets the current TMCLOCK_VIRTUAL frequency.
*
* @returns The frequency.
* @param pVM Pointer to the VM.
*/
{
/* NOTE(review): signature missing — presumably TMVirtualGetFreq(PVM);
   the frequency is the compile-time constant below. */
return TMCLOCK_FREQ_VIRTUAL;
}
/**
* Worker for TMR3PauseClocks.
*
* @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
* @param pVM Pointer to the VM.
*/
{
/* NOTE(review): signature missing and the decrement assigning c (ticking
   reference count) plus the stop-the-clock statements in the c==0 branch
   are stripped.  Assertion paths returning VERR_TM_VIRTUAL_TICKING_IPE
   are also absent. */
if (c == 0)
{
}
return VINF_SUCCESS;
}
/**
* Worker for TMR3ResumeClocks.
*
* @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
* @param pVM Pointer to the VM.
*/
{
/* NOTE(review): signature missing and the increment assigning c plus the
   restart-the-clock statements in the c==1 branch are stripped.  Assertion
   paths returning VERR_TM_VIRTUAL_TICKING_IPE are also absent. */
if (c == 1)
{
}
return VINF_SUCCESS;
}
/**
* Converts from virtual ticks to nanoseconds.
*
* @returns nanoseconds.
* @param pVM Pointer to the VM.
* @param u64VirtualTicks The virtual ticks to convert.
* @remark There could be rounding errors here. We just do a simple integer divide
* without any adjustments.
*/
{
/* NOTE(review): signature missing.  Identity conversion — consistent with
   a virtual clock tick of 1 ns (see TMCLOCK_FREQ_VIRTUAL); confirm the
   upstream body carries the same assumption. */
return u64VirtualTicks;
}
/**
* Converts from virtual ticks to microseconds.
*
* @returns microseconds.
* @param pVM Pointer to the VM.
* @param u64VirtualTicks The virtual ticks to convert.
* @remark There could be rounding errors here. We just do a simple integer divide
* without any adjustments.
*/
{
/* NOTE(review): signature missing.  Divide by 1000, consistent with a
   1 ns virtual tick; truncating integer division as documented above. */
return u64VirtualTicks / 1000;
}
/**
* Converts from virtual ticks to milliseconds.
*
* @returns milliseconds.
* @param pVM Pointer to the VM.
* @param u64VirtualTicks The virtual ticks to convert.
* @remark There could be rounding errors here. We just do a simple integer divide
* without any adjustments.
*/
{
/* NOTE(review): signature missing.  Divide by 1e6, consistent with a
   1 ns virtual tick; truncating integer division as documented above. */
return u64VirtualTicks / 1000000;
}
/**
* Converts from nanoseconds to virtual ticks.
*
* @returns virtual ticks.
* @param pVM Pointer to the VM.
* @param u64NanoTS The nanosecond value ticks to convert.
* @remark There could be rounding and overflow errors here.
*/
{
/* NOTE(review): signature missing.  Identity conversion — consistent with
   a 1 ns virtual tick. */
return u64NanoTS;
}
/**
* Converts from microseconds to virtual ticks.
*
* @returns virtual ticks.
* @param pVM Pointer to the VM.
* @param u64MicroTS The microsecond value ticks to convert.
* @remark There could be rounding and overflow errors here.
*/
{
/* NOTE(review): signature missing.  Multiply by 1000 (us -> 1 ns ticks);
   may overflow for very large inputs as documented above. */
return u64MicroTS * 1000;
}
/**
* Converts from milliseconds to virtual ticks.
*
* @returns virtual ticks.
* @param pVM Pointer to the VM.
* @param u64MilliTS The millisecond value ticks to convert.
* @remark There could be rounding and overflow errors here.
*/
{
/* NOTE(review): signature missing.  Multiply by 1e6 (ms -> 1 ns ticks);
   may overflow for very large inputs as documented above. */
return u64MilliTS * 1000000;
}