/* TMAllVirtual.cpp revision 94c892d12226640309d953c5d3e16bb7d01c7040 */
/* $Id$ */
/** @file
* TM - Timeout Manager, Virtual Time, All Contexts.
*/
/*
* Copyright (C) 2006-2007 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#ifdef IN_RING3
#endif
#include "TMInternal.h"
/**
 * Helper function that's used by the assembly routines when something goes bust.
 *
 * @param   pData           Pointer to the data structure.
 * @param   u64NanoTS       The calculated nano ts.
 * @param   u64DeltaPrev    The delta relative to the previously returned timestamp.
 * @param   u64PrevNanoTS   The previously returned timestamp (as it was read it).
 */
DECLEXPORT(void) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev, uint64_t u64PrevNanoTS)
{
    //PVM pVM = (PVM)((uint8_t *)pData - RT_OFFSETOF(VM, CTXALLSUFF(s.tm.VirtualGetRawData)));
    /* NOTE(review): the extract had both log statements truncated mid-call; the
       argument lists below are reconstructed from the format specifiers, which
       name exactly these three parameters.  Any statistics-counter update that
       may have existed between the braces was lost — confirm against the full
       source. */
    if ((int64_t)u64DeltaPrev < 0)
        /* Negative delta: time appeared to go backwards — serious, log at release level. */
        LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64\n",
                u64DeltaPrev, u64PrevNanoTS, u64NanoTS));
    else
        /* Non-negative delta: merely suspicious (debugger pause etc.), debug log only. */
        Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 (debugging?)\n",
             u64DeltaPrev, u64PrevNanoTS, u64NanoTS));
}
/**
 * Called the first time somebody asks for the time or when the GIP
 * becomes unusable (sentence truncated in this extract — confirm wording
 * against the full source).
 *
 * This should never ever happen.
 */
/* NOTE(review): the function signature and all executable statements are
   missing from this extract; only the body braces and a commented-out pVM
   back-computation remain.  Not compilable as-is. */
{
//PVM pVM = (PVM)((uint8_t *)pData - RT_OFFSETOF(VM, CTXALLSUFF(s.tm.VirtualGetRawData)));
}
#if 1
/**
 * Wrapper around the IPRT GIP time methods.
 */
/* NOTE(review): incomplete extract — the signature, the IN_RING3 branch body
   and the declaration/assignment of 'u64' are missing.  The '# else' / '# endif'
   indentation suggests these belonged to a nested '# ifdef IN_RING3' whose
   opening form differed from the bare '#ifdef' seen below — confirm against
   the full source. */
{
#ifdef IN_RING3
# else /* !IN_RING3 */
return u64;
# endif /* !IN_RING3 */
}
#else
/**
 * This is (mostly) the same as rtTimeNanoTSInternal() except
 * for the two globals which live in TM.
 *
 * @returns Nanosecond timestamp.
 * @param pVM The VM handle.
 */
/* NOTE(review): this whole branch is preprocessed out by the '#if 1' above,
   and the extract is missing the signature plus the majority of the body
   (GIP transaction-id reads, TSC delta math, u64NanoTS computation, the
   ASMAtomicCmpXchgU64 update loop body, etc.).  The orphaned 'else' blocks,
   dangling '|| (u32TransactionId & 1)))' condition tails and undeclared
   locals (u64Delta, u32UpdateIntervalTSC, u64DeltaPrev, u64PrevNanoTS,
   u64NanoTS) all belong to those missing lines.  Kept verbatim; do not
   enable without restoring the full source. */
{
/*
* Read the GIP data and the previous value.
*/
for (;;)
{
#ifdef IN_RING3
return RTTimeSystemNanoTS();
#endif
{
#ifdef RT_OS_L4
#endif
u64Delta = ASMReadTSC();
|| (u32TransactionId & 1)))
continue;
}
else
{
/* SUPGIPMODE_ASYNC_TSC */
else
{
}
#ifdef RT_OS_L4
#endif
u64Delta = ASMReadTSC();
#ifdef IN_RC
#else
continue;
|| (u32TransactionId & 1)))
continue;
#endif
}
break;
}
/*
* Calc NanoTS delta.
*/
if (u64Delta > u32UpdateIntervalTSC)
{
/*
* We've expired the interval, cap it. If we're here for the 2nd
* time without any GIP update in between, the checks against
* pVM->tm.s.u64VirtualRawPrev below will force 1ns stepping.
*/
}
#if !defined(_MSC_VER) || defined(RT_ARCH_AMD64) /* GCC makes very pretty code from these two inline calls, while MSC cannot. */
#else
{
}
#endif
/*
* Calculate the time and compare it with the previously returned value.
*
* Since this function is called *very* frequently when the VM is running
* and then mostly on EMT, we can restrict the valid range of the delta
*/
/* frequent - less than 1s since last call. */;
else if ( (int64_t)u64DeltaPrev < 0
{
/* occasional - u64NanoTS is in the 'past' relative to previous returns. */
#ifndef IN_RING3
#endif
}
else if (u64PrevNanoTS)
{
/* Something has gone bust, if negative offset it's real bad. */
if ((int64_t)u64DeltaPrev < 0)
LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
else
Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64 (debugging?)\n",
#ifdef DEBUG_bird
/** @todo there are some hiccups during boot and reset that can cause 2-5 seconds delays. Investigate... */
("u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
#endif
}
/* else: We're resuming (see TMVirtualResume). */
return u64NanoTS;
/*
* Attempt updating the previous value, provided we're still ahead of it.
*
* There is no point in recalculating u64NanoTS because we got preempted or if
* we raced somebody while the GIP was updated, since these are events
* that might occur at any point in the return path as well.
*/
for (int cTries = 50;;)
{
if (u64PrevNanoTS >= u64NanoTS)
break;
break;
/* NOTE(review): '--cTries <= 0' asserts/breaks on the very first retry, which
   looks inverted for a 50-try loop — but missing lines above make the intent
   unverifiable here; confirm against the full source. */
AssertBreak(--cTries <= 0);
break;
}
return u64NanoTS;
}
#endif
/**
 * Get the time when we're not running at 100%
 *
 * @returns The timestamp.
 * @param pVM The VM handle.
 */
/* NOTE(review): signature and most statements missing from this extract.
   'u64' is used but never declared here; the surviving 'u64 /= 100' is
   presumably the divide step of the warp-drive percentage scaling — confirm
   against the full source. */
{
/*
* Recalculate the RTTimeNanoTS() value for the period where
* warp drive has been enabled.
*/
u64 /= 100;
/*
* Now we apply the virtual time offset.
* (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
* machine started if it had been running continuously without any suspends.)
*/
return u64;
}
/**
 * Get the raw virtual time.
 *
 * @returns The current time stamp.
 * @param pVM The VM handle.
 */
/* NOTE(review): signature missing; only the slow-path fallback to the
   warp-drive-aware getter survives.  The fast path (normal 100% speed case)
   was presumably above the surviving line — confirm against the full source. */
{
return tmVirtualGetRawNonNormal(pVM);
}
/**
 * Inlined version of tmVirtualGetEx.
 */
/* NOTE(review): signature and most statements missing.  The dangling '&&'
   condition line and the orphaned closing parens belong to a larger if()
   that checked for expired virtual-sync timers; undeclared 'u64' and
   'fCheckTimers' come from the missing lines. */
{
{
/*
* Use the chance to check for expired timers.
*/
if (fCheckTimers)
{
&& pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
)
)
)
{
#ifdef IN_RING3
#endif
}
}
}
else
return u64;
}
/**
 * Gets the current TMCLOCK_VIRTUAL time
 *
 * @returns The timestamp.
 * @param pVM VM handle.
 *
 * @remark While the flow of time will never go backwards, the speed of the
 * progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
 * influenced by power saving (SpeedStep, PowerNow!), while the former
 * makes use of TSC and kernel timers.
 */
/* NOTE(review): signature and body statement missing; presumably a one-line
   forward to the inlined getter with timer checking enabled — confirm. */
{
}
/**
 * Gets the current TMCLOCK_VIRTUAL time without checking
 * timers or anything.
 *
 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
 *
 * @returns The timestamp.
 * @param pVM VM handle.
 *
 * @remarks See TMVirtualGet.
 */
/* NOTE(review): signature and body statement missing; presumably a one-line
   forward to the inlined getter with timer checking disabled — confirm. */
{
}
/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
 *
 * @returns The timestamp.
 * @param pVM VM handle.
 * @param fCheckTimers Check timers or not
 * @thread EMT.
 */
/* NOTE(review): signature and a large fraction of the body are missing from
   this extract (the lock acquisition that sets 'rcLock', the reads producing
   'u64', 'off', 'u64Delta', 'offGivenUp', the ticking-flag checks, and the
   conditions owning the many orphaned '{' / 'else' lines).  The surviving
   skeleton shows the overall shape: optional timer check, a retry loop that
   reads/adjusts the catch-up offset (locked fast path vs. lock-free
   consistent-read path), then clamping against the head timer's expiry and
   raising the timer FF when passed.  Kept verbatim; not compilable as-is. */
{
/*
* Query the virtual clock and do the usual expired timer check.
*/
if (fCheckTimers)
{
{
#ifdef IN_RING3
#endif
}
}
/*
* Read the offset and adjust if we're playing catch-up.
*
* The catch-up adjusting work by us decrementing the offset by a percentage of
* the time elapsed since the previous TMVirtualGetSync call.
*
* It's possible to get a very long or even negative interval between two read
* for the following reasons:
* - Someone might have suspended the process execution, frequently the case when
* debugging the process.
* - We might be on a different CPU which TSC isn't quite in sync with the
* other CPUs in the system.
* - Another thread is racing us and we might have been preempted while inside
* this function.
*
* Assuming nano second virtual time, we can simply ignore any intervals which has
* any of the upper 32 bits set.
*/
int cOuterTries = 42;
for (;; cOuterTries--)
{
/* Re-check the ticking flag. */
{
if (RT_SUCCESS(rcLock))
}
{
/* adjust the offset. */
if (RT_FAILURE(rcLock))
if (RT_SUCCESS(rcLock))
{
/* We own the lock and may make updates. */
{
uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
{
Log4(("TM: %RU64/%RU64: sub %RU32\n", u64 - off, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
}
else
{
/* we've completely caught up. */
}
}
else
{
/* More than 4 seconds since last time (or negative), ignore it. */
}
break;
}
/* No changes allowed, try get a consistent set of parameters. */
|| cOuterTries <= 0)
{
{
{
Log4(("TM: %RU64/%RU64: sub %RU32 (NoLock)\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
}
else
{
/* we've completely caught up. */
off = offGivenUp;
}
}
else
/* More than 4 seconds since last time (or negative), ignore it. */
/* Check that we're still running and in catch up. */
break;
if (cOuterTries <= 0)
break;
}
}
break; /* Got a consistent offset */
}
if (cOuterTries <= 0)
/*
* Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
* approach is to never pass the head timer. So, when we do stop the clock and
* set the timer pending flag.
*/
const uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
{
if (RT_FAILURE(rcLock))
if (RT_SUCCESS(rcLock))
{
#ifdef IN_RING3
#endif
Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
}
{
Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
#ifdef IN_RING3
#endif
Log4(("TM: %RU64/%RU64: exp tmr=>ff (NoLock)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
}
else
Log4(("TM: %RU64/%RU64: exp tmr (NoLock)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
}
else if (RT_SUCCESS(rcLock))
{
}
return u64;
}
/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
 *
 * @returns The timestamp.
 * @param pVM VM handle.
 * @thread EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
/* NOTE(review): signature and body statement missing; presumably forwards to
   the sync-clock worker with timer checking enabled — confirm. */
{
}
/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
 * TMCLOCK_VIRTUAL.
 *
 * @returns The timestamp.
 * @param pVM VM handle.
 * @thread EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
/* NOTE(review): signature and body statement missing; presumably forwards to
   the sync-clock worker with timer checking disabled — confirm. */
{
}
/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
 *
 * @returns The timestamp.
 * @param pVM VM handle.
 * @param fCheckTimers Check timers on the virtual clock or not.
 * @thread EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
/* NOTE(review): signature and body statement missing; presumably forwards the
   caller-supplied fCheckTimers flag to the sync-clock worker — confirm. */
{
}
/**
 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
 *
 * @return The current lag.
 * @param pVM VM handle.
 */
/* NOTE(review): signature and body statement missing from this extract. */
{
}
/**
 * Get the current catch-up percent.
 *
 * @return The current catch-up percent. 0 means running at the same speed as the virtual clock.
 * @param pVM VM handle.
 */
/* NOTE(review): signature missing, and only the not-catching-up return path
   survives; the branch returning the actual percentage when catch-up is
   active is presumably among the missing lines — confirm. */
{
return 0;
}
/**
 * Gets the current TMCLOCK_VIRTUAL frequency.
 *
 * @returns The frequency.
 * @param pVM VM handle.
 */
/* NOTE(review): signature missing; the body is a constant-return of the
   virtual clock frequency. */
{
return TMCLOCK_FREQ_VIRTUAL;
}
/**
 * Worker for TMR3PauseClocks.
 *
 * @returns VINF_SUCCESS or VERR_INTERNAL_ERROR (asserted).
 * @param pVM The VM handle.
 */
/* NOTE(review): signature and body statements missing; 'c' is undeclared here
   — presumably a ticking/active counter decremented before this check, with
   the clock actually stopped inside the empty braces — confirm. */
{
if (c == 0)
{
}
return VINF_SUCCESS;
}
/**
 * Worker for TMR3ResumeClocks.
 *
 * @returns VINF_SUCCESS or VERR_INTERNAL_ERROR (asserted).
 * @param pVM The VM handle.
 */
/* NOTE(review): signature and body statements missing; 'c' is undeclared here
   — presumably a ticking/active counter incremented before this check, with
   the clock restarted inside the empty braces on the 0->1 transition —
   confirm. */
{
if (c == 1)
{
}
return VINF_SUCCESS;
}
/**
 * Converts from virtual ticks to nanoseconds.
 *
 * @returns nanoseconds.
 * @param pVM The VM handle.
 * @param u64VirtualTicks The virtual ticks to convert.
 * @remark There could be rounding errors here. We just do a simple integer divide
 * without any adjustments.
 */
/* NOTE(review): signature missing.  Identity conversion — consistent with the
   virtual clock running at nanosecond resolution (TMCLOCK_FREQ_VIRTUAL). */
{
return u64VirtualTicks;
}
/**
 * Converts from virtual ticks to microseconds.
 *
 * @returns microseconds.
 * @param pVM The VM handle.
 * @param u64VirtualTicks The virtual ticks to convert.
 * @remark There could be rounding errors here. We just do a simple integer divide
 * without any adjustments.
 */
/* NOTE(review): signature missing.  Truncating divide: 1 us = 1000 ns-ticks. */
{
return u64VirtualTicks / 1000;
}
/**
 * Converts from virtual ticks to milliseconds.
 *
 * @returns milliseconds.
 * @param pVM The VM handle.
 * @param u64VirtualTicks The virtual ticks to convert.
 * @remark There could be rounding errors here. We just do a simple integer divide
 * without any adjustments.
 */
/* NOTE(review): signature missing.  Truncating divide: 1 ms = 1000000 ns-ticks. */
{
return u64VirtualTicks / 1000000;
}
/**
 * Converts from nanoseconds to virtual ticks.
 *
 * @returns virtual ticks.
 * @param pVM The VM handle.
 * @param u64NanoTS The nanosecond value ticks to convert.
 * @remark There could be rounding and overflow errors here.
 */
/* NOTE(review): signature missing.  Identity conversion (1 tick = 1 ns). */
{
return u64NanoTS;
}
/**
 * Converts from microseconds to virtual ticks.
 *
 * @returns virtual ticks.
 * @param pVM The VM handle.
 * @param u64MicroTS The microsecond value ticks to convert.
 * @remark There could be rounding and overflow errors here.
 */
/* NOTE(review): signature missing.  Multiply may overflow for inputs above
   ~2^44 us (~584 years) — acceptable per the remark above. */
{
return u64MicroTS * 1000;
}
/**
 * Converts from milliseconds to virtual ticks.
 *
 * @returns virtual ticks.
 * @param pVM The VM handle.
 * @param u64MilliTS The millisecond value ticks to convert.
 * @remark There could be rounding and overflow errors here.
 */
/* NOTE(review): signature missing.  Multiply may overflow for very large
   inputs — acceptable per the remark above. */
{
return u64MilliTS * 1000000;
}