/* TMAllVirtual.cpp revision 10ecdac9fbdfe8d3a17e3debc35307f648e3f65b */
/* $Id$ */
/** @file
 * TM - Timeout Manager, Virtual Time, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

af062818b47340eef15700d2f0211576ba3506eevboxsync/*******************************************************************************
af062818b47340eef15700d2f0211576ba3506eevboxsync* Header Files *
af062818b47340eef15700d2f0211576ba3506eevboxsync*******************************************************************************/
af062818b47340eef15700d2f0211576ba3506eevboxsync#define LOG_GROUP LOG_GROUP_TM
af062818b47340eef15700d2f0211576ba3506eevboxsync#include <VBox/tm.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync#ifdef IN_RING3
af062818b47340eef15700d2f0211576ba3506eevboxsync# include <VBox/rem.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync# include <iprt/thread.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync#endif
af062818b47340eef15700d2f0211576ba3506eevboxsync#include "TMInternal.h"
af062818b47340eef15700d2f0211576ba3506eevboxsync#include <VBox/vm.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync#include <VBox/vmm.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync#include <VBox/err.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync#include <VBox/log.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync#include <VBox/sup.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync#include <iprt/time.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync#include <iprt/assert.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync#include <iprt/asm.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync/**
af062818b47340eef15700d2f0211576ba3506eevboxsync * Helper function that's used by the assembly routines when something goes bust.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param pData Pointer to the data structure.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param u64NanoTS The calculated nano ts.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param u64DeltaPrev The delta relative to the previously returned timestamp.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param u64PrevNanoTS The previously returned timestamp (as it was read it).
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsyncDECLEXPORT(void) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev, uint64_t u64PrevNanoTS)
af062818b47340eef15700d2f0211576ba3506eevboxsync{
af062818b47340eef15700d2f0211576ba3506eevboxsync //PVM pVM = (PVM)((uint8_t *)pData - RT_OFFSETOF(VM, CTXALLSUFF(s.tm.VirtualGetRawData)));
af062818b47340eef15700d2f0211576ba3506eevboxsync pData->cBadPrev++;
af062818b47340eef15700d2f0211576ba3506eevboxsync if ((int64_t)u64DeltaPrev < 0)
af062818b47340eef15700d2f0211576ba3506eevboxsync LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64\n",
af062818b47340eef15700d2f0211576ba3506eevboxsync u64DeltaPrev, u64PrevNanoTS, u64NanoTS));
af062818b47340eef15700d2f0211576ba3506eevboxsync else
af062818b47340eef15700d2f0211576ba3506eevboxsync Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 (debugging?)\n",
af062818b47340eef15700d2f0211576ba3506eevboxsync u64DeltaPrev, u64PrevNanoTS, u64NanoTS));
af062818b47340eef15700d2f0211576ba3506eevboxsync}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
/**
 * Called the first time somebody asks for the time or when the GIP
 * is mapped/unmapped.
 *
 * This should never ever happen.
 */
DECLEXPORT(uint64_t) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData)
{
    //PVM pVM = (PVM)((uint8_t *)pData - RT_OFFSETOF(VM, CTXALLSUFF(s.tm.VirtualGetRawData)));
    /* Capture the global info page pointer so it shows up in the assertion message. */
    PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
    /* Unconditionally fatal: dump the GIP pointer and (when valid) its magic for diagnosis.
       Never returns, which is why no return value is produced here. */
    AssertFatalMsgFailed(("pGip=%p u32Magic=%#x\n", pGip, VALID_PTR(pGip) ? pGip->u32Magic : 0));
}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync#if 1
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync/**
af062818b47340eef15700d2f0211576ba3506eevboxsync * Wrapper around the IPRT GIP time methods.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsyncDECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVM pVM)
af062818b47340eef15700d2f0211576ba3506eevboxsync{
af062818b47340eef15700d2f0211576ba3506eevboxsync#ifdef IN_RING3
af062818b47340eef15700d2f0211576ba3506eevboxsync return CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData));
af062818b47340eef15700d2f0211576ba3506eevboxsync# else /* !IN_RING3 */
af062818b47340eef15700d2f0211576ba3506eevboxsync uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
af062818b47340eef15700d2f0211576ba3506eevboxsync uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData));
af062818b47340eef15700d2f0211576ba3506eevboxsync if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
af062818b47340eef15700d2f0211576ba3506eevboxsync VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
af062818b47340eef15700d2f0211576ba3506eevboxsync return u64;
af062818b47340eef15700d2f0211576ba3506eevboxsync# endif /* !IN_RING3 */
af062818b47340eef15700d2f0211576ba3506eevboxsync}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync#else
af062818b47340eef15700d2f0211576ba3506eevboxsync
/**
 * This is (mostly) the same as rtTimeNanoTSInternal() except
 * for the two globals which live in TM.
 *
 * NOTE(review): this is the compiled-out variant (see the #if 1 above); the
 * active build uses the thin IPRT wrapper instead.
 *
 * @returns Nanosecond timestamp.
 * @param pVM     The VM handle.
 */
static uint64_t tmVirtualGetRawNanoTS(PVM pVM)
{
    uint64_t u64Delta;
    uint32_t u32NanoTSFactor0;
    uint64_t u64TSC;
    uint64_t u64NanoTS;
    uint32_t u32UpdateIntervalTSC;
    uint64_t u64PrevNanoTS;

    /*
     * Read the GIP data and the previous value.
     */
    for (;;)
    {
        uint32_t u32TransactionId;
        PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
#ifdef IN_RING3
        /* Ring-3 can fall back on the system time source when no GIP is mapped. */
        if (RT_UNLIKELY(!pGip || pGip->u32Magic != SUPGLOBALINFOPAGE_MAGIC))
            return RTTimeSystemNanoTS();
#endif

        if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
        {
            /* Synchronous TSC mode: CPU 0's entry is valid for everyone.
               Seqlock-style read: sample the transaction id, read the fields,
               then re-check the id below to detect a concurrent GIP update. */
            u32TransactionId = pGip->aCPUs[0].u32TransactionId;
#ifdef RT_OS_L4
            Assert((u32TransactionId & 1) == 0);
#endif
            u32UpdateIntervalTSC = pGip->aCPUs[0].u32UpdateIntervalTSC;
            u64NanoTS = pGip->aCPUs[0].u64NanoTS;
            u64TSC = pGip->aCPUs[0].u64TSC;
            u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
            u64Delta = ASMReadTSC();
            u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
            /* Odd transaction id means an update was in flight; retry. */
            if (RT_UNLIKELY( pGip->aCPUs[0].u32TransactionId != u32TransactionId
                          || (u32TransactionId & 1)))
                continue;
        }
        else
        {
            /* SUPGIPMODE_ASYNC_TSC */
            PSUPGIPCPU pGipCpu;

            /* Per-CPU TSCs: pick the entry for the CPU we are currently on. */
            uint8_t u8ApicId = ASMGetApicId();
            if (RT_LIKELY(u8ApicId < RT_ELEMENTS(pGip->aCPUs)))
                pGipCpu = &pGip->aCPUs[u8ApicId];
            else
            {
                AssertMsgFailed(("%x\n", u8ApicId));
                pGipCpu = &pGip->aCPUs[0];
            }

            u32TransactionId = pGipCpu->u32TransactionId;
#ifdef RT_OS_L4
            Assert((u32TransactionId & 1) == 0);
#endif
            u32UpdateIntervalTSC = pGipCpu->u32UpdateIntervalTSC;
            u64NanoTS = pGipCpu->u64NanoTS;
            u64TSC = pGipCpu->u64TSC;
            u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
            u64Delta = ASMReadTSC();
            u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
#ifdef IN_RC
            /* In raw-mode context interrupts are disabled, so we cannot have
               migrated to another CPU mid-read; no re-check needed. */
            Assert(!(ASMGetFlags() & X86_EFL_IF));
#else
            /* If we migrated CPUs or raced a GIP update, start over. */
            if (RT_UNLIKELY(u8ApicId != ASMGetApicId()))
                continue;
            if (RT_UNLIKELY( pGipCpu->u32TransactionId != u32TransactionId
                          || (u32TransactionId & 1)))
                continue;
#endif
        }
        break;
    }

    /*
     * Calc NanoTS delta.
     */
    u64Delta -= u64TSC;
    if (u64Delta > u32UpdateIntervalTSC)
    {
        /*
         * We've expired the interval, cap it. If we're here for the 2nd
         * time without any GIP update inbetween, the checks against
         * pVM->tm.s.u64VirtualRawPrev below will force 1ns stepping.
         */
        u64Delta = u32UpdateIntervalTSC;
    }
#if !defined(_MSC_VER) || defined(RT_ARCH_AMD64) /* GCC makes very pretty code from these two inline calls, while MSC cannot. */
    /* delta_ns = delta_tsc * interval_ns / interval_tsc (32x32->64 mul, 64/32 div). */
    u64Delta = ASMMult2xU32RetU64((uint32_t)u64Delta, u32NanoTSFactor0);
    u64Delta = ASMDivU64ByU32RetU32(u64Delta, u32UpdateIntervalTSC);
#else
    /* 32-bit MSC fallback: same mul/div sequence hand-coded; the high dword
       of u64Delta is explicitly zeroed since only eax holds the quotient. */
    __asm
    {
        mov eax, dword ptr [u64Delta]
        mul dword ptr [u32NanoTSFactor0]
        div dword ptr [u32UpdateIntervalTSC]
        mov dword ptr [u64Delta], eax
        xor edx, edx
        mov dword ptr [u64Delta + 4], edx
    }
#endif

    /*
     * Calculate the time and compare it with the previously returned value.
     *
     * Since this function is called *very* frequently when the VM is running
     * and then mostly on EMT, we can restrict the valid range of the delta
     * (-1s to 2*GipUpdates) and simplify/optimize the default path.
     */
    u64NanoTS += u64Delta;
    uint64_t u64DeltaPrev = u64NanoTS - u64PrevNanoTS;
    if (RT_LIKELY(u64DeltaPrev < 1000000000 /* 1s */))
        /* frequent - less than 1s since last call. */;
    else if (   (int64_t)u64DeltaPrev < 0
             && (int64_t)u64DeltaPrev + u32NanoTSFactor0 * 2 > 0)
    {
        /* occasional - u64NanoTS is in the 'past' relative to previous returns. */
        ASMAtomicIncU32(&pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps);
        u64NanoTS = u64PrevNanoTS + 1;
#ifndef IN_RING3
        VM_FF_SET(pVM, VM_FF_TO_R3); /* S10 hack */
#endif
    }
    else if (u64PrevNanoTS)
    {
        /* Something has gone bust, if negative offset it's real bad. */
        ASMAtomicIncU32(&pVM->tm.s.CTX_SUFF(VirtualGetRawData).cBadPrev);
        if ((int64_t)u64DeltaPrev < 0)
            LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
                    u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
        else
            Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64 (debugging?)\n",
                 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
#ifdef DEBUG_bird
        /** @todo there are some hickups during boot and reset that can cause 2-5 seconds delays. Investigate... */
        AssertMsg(u64PrevNanoTS > UINT64_C(100000000000) /* 100s */,
                  ("u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
                   u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
#endif
    }
    /* else: We're resuming (see TMVirtualResume). */
    if (RT_LIKELY(ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS)))
        return u64NanoTS;

    /*
     * Attempt updating the previous value, provided we're still ahead of it.
     *
     * There is no point in recalculating u64NanoTS because we got preemted or if
     * we raced somebody while the GIP was updated, since these are events
     * that might occure at any point in the return path as well.
     */
    for (int cTries = 50;;)
    {
        u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
        if (u64PrevNanoTS >= u64NanoTS)
            break;
        if (ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS))
            break;
        /* NOTE(review): the condition looks inverted — with cTries starting at 50,
           --cTries <= 0 is false on the first pass, which (with AssertBreak's
           assert-and-break-on-false semantics) exits immediately. Presumably this
           was meant to bound the retries; verify against iprt/assert.h. */
        AssertBreak(--cTries <= 0);
        if (cTries < 25 && !VM_IS_EMT(pVM)) /* give up early */
            break;
    }

    return u64NanoTS;
}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync#endif
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync/**
af062818b47340eef15700d2f0211576ba3506eevboxsync * Get the time when we're not running at 100%
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @returns The timestamp.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param pVM The VM handle.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsyncstatic uint64_t tmVirtualGetRawNonNormal(PVM pVM)
af062818b47340eef15700d2f0211576ba3506eevboxsync{
af062818b47340eef15700d2f0211576ba3506eevboxsync /*
af062818b47340eef15700d2f0211576ba3506eevboxsync * Recalculate the RTTimeNanoTS() value for the period where
af062818b47340eef15700d2f0211576ba3506eevboxsync * warp drive has been enabled.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsync uint64_t u64 = tmVirtualGetRawNanoTS(pVM);
af062818b47340eef15700d2f0211576ba3506eevboxsync u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
af062818b47340eef15700d2f0211576ba3506eevboxsync u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
af062818b47340eef15700d2f0211576ba3506eevboxsync u64 /= 100;
af062818b47340eef15700d2f0211576ba3506eevboxsync u64 += pVM->tm.s.u64VirtualWarpDriveStart;
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync /*
af062818b47340eef15700d2f0211576ba3506eevboxsync * Now we apply the virtual time offset.
af062818b47340eef15700d2f0211576ba3506eevboxsync * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
af062818b47340eef15700d2f0211576ba3506eevboxsync * machine started if it had been running continuously without any suspends.)
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsync u64 -= pVM->tm.s.u64VirtualOffset;
af062818b47340eef15700d2f0211576ba3506eevboxsync return u64;
af062818b47340eef15700d2f0211576ba3506eevboxsync}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync/**
af062818b47340eef15700d2f0211576ba3506eevboxsync * Get the raw virtual time.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @returns The current time stamp.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param pVM The VM handle.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsyncDECLINLINE(uint64_t) tmVirtualGetRaw(PVM pVM)
af062818b47340eef15700d2f0211576ba3506eevboxsync{
af062818b47340eef15700d2f0211576ba3506eevboxsync if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
af062818b47340eef15700d2f0211576ba3506eevboxsync return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
af062818b47340eef15700d2f0211576ba3506eevboxsync return tmVirtualGetRawNonNormal(pVM);
af062818b47340eef15700d2f0211576ba3506eevboxsync}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync/**
af062818b47340eef15700d2f0211576ba3506eevboxsync * Inlined version of tmVirtualGetEx.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsyncDECLINLINE(uint64_t) tmVirtualGet(PVM pVM, bool fCheckTimers)
af062818b47340eef15700d2f0211576ba3506eevboxsync{
af062818b47340eef15700d2f0211576ba3506eevboxsync uint64_t u64;
af062818b47340eef15700d2f0211576ba3506eevboxsync if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
af062818b47340eef15700d2f0211576ba3506eevboxsync {
af062818b47340eef15700d2f0211576ba3506eevboxsync STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
af062818b47340eef15700d2f0211576ba3506eevboxsync u64 = tmVirtualGetRaw(pVM);
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync /*
af062818b47340eef15700d2f0211576ba3506eevboxsync * Use the chance to check for expired timers.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsync if (fCheckTimers)
af062818b47340eef15700d2f0211576ba3506eevboxsync {
af062818b47340eef15700d2f0211576ba3506eevboxsync PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
af062818b47340eef15700d2f0211576ba3506eevboxsync if ( !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
af062818b47340eef15700d2f0211576ba3506eevboxsync && !pVM->tm.s.fRunningQueues
af062818b47340eef15700d2f0211576ba3506eevboxsync && ( pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
af062818b47340eef15700d2f0211576ba3506eevboxsync || ( pVM->tm.s.fVirtualSyncTicking
af062818b47340eef15700d2f0211576ba3506eevboxsync && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
af062818b47340eef15700d2f0211576ba3506eevboxsync )
af062818b47340eef15700d2f0211576ba3506eevboxsync )
af062818b47340eef15700d2f0211576ba3506eevboxsync && !pVM->tm.s.fRunningQueues
af062818b47340eef15700d2f0211576ba3506eevboxsync )
af062818b47340eef15700d2f0211576ba3506eevboxsync {
af062818b47340eef15700d2f0211576ba3506eevboxsync STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
af062818b47340eef15700d2f0211576ba3506eevboxsync Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
af062818b47340eef15700d2f0211576ba3506eevboxsync VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
af062818b47340eef15700d2f0211576ba3506eevboxsync#ifdef IN_RING3
af062818b47340eef15700d2f0211576ba3506eevboxsync REMR3NotifyTimerPending(pVM, pVCpuDst);
af062818b47340eef15700d2f0211576ba3506eevboxsync VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
af062818b47340eef15700d2f0211576ba3506eevboxsync#endif
af062818b47340eef15700d2f0211576ba3506eevboxsync }
af062818b47340eef15700d2f0211576ba3506eevboxsync }
af062818b47340eef15700d2f0211576ba3506eevboxsync }
af062818b47340eef15700d2f0211576ba3506eevboxsync else
af062818b47340eef15700d2f0211576ba3506eevboxsync u64 = pVM->tm.s.u64Virtual;
af062818b47340eef15700d2f0211576ba3506eevboxsync return u64;
af062818b47340eef15700d2f0211576ba3506eevboxsync}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync/**
af062818b47340eef15700d2f0211576ba3506eevboxsync * Gets the current TMCLOCK_VIRTUAL time
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @returns The timestamp.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param pVM VM handle.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @remark While the flow of time will never go backwards, the speed of the
af062818b47340eef15700d2f0211576ba3506eevboxsync * progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
af062818b47340eef15700d2f0211576ba3506eevboxsync * influenced by power saving (SpeedStep, PowerNow!), while the former
af062818b47340eef15700d2f0211576ba3506eevboxsync * makes use of TSC and kernel timers.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsyncVMMDECL(uint64_t) TMVirtualGet(PVM pVM)
af062818b47340eef15700d2f0211576ba3506eevboxsync{
af062818b47340eef15700d2f0211576ba3506eevboxsync return tmVirtualGet(pVM, true /* check timers */);
af062818b47340eef15700d2f0211576ba3506eevboxsync}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync/**
af062818b47340eef15700d2f0211576ba3506eevboxsync * Gets the current TMCLOCK_VIRTUAL time without checking
af062818b47340eef15700d2f0211576ba3506eevboxsync * timers or anything.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * Meaning, this has no side effect on FFs like TMVirtualGet may have.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @returns The timestamp.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param pVM VM handle.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @remarks See TMVirtualGet.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsyncVMMDECL(uint64_t) TMVirtualGetNoCheck(PVM pVM)
af062818b47340eef15700d2f0211576ba3506eevboxsync{
af062818b47340eef15700d2f0211576ba3506eevboxsync return tmVirtualGet(pVM, false /*fCheckTimers*/);
af062818b47340eef15700d2f0211576ba3506eevboxsync}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync/**
af062818b47340eef15700d2f0211576ba3506eevboxsync * Gets the current TMCLOCK_VIRTUAL_SYNC time.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @returns The timestamp.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param pVM VM handle.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param fCheckTimers Check timers or not
af062818b47340eef15700d2f0211576ba3506eevboxsync * @thread EMT.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsyncDECLINLINE(uint64_t) tmVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
af062818b47340eef15700d2f0211576ba3506eevboxsync{
af062818b47340eef15700d2f0211576ba3506eevboxsync STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSync);
af062818b47340eef15700d2f0211576ba3506eevboxsync uint64_t u64;
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync if (pVM->tm.s.fVirtualSyncTicking)
af062818b47340eef15700d2f0211576ba3506eevboxsync {
af062818b47340eef15700d2f0211576ba3506eevboxsync PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync /*
af062818b47340eef15700d2f0211576ba3506eevboxsync * Query the virtual clock and do the usual expired timer check.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsync Assert(pVM->tm.s.cVirtualTicking);
af062818b47340eef15700d2f0211576ba3506eevboxsync u64 = tmVirtualGetRaw(pVM);
af062818b47340eef15700d2f0211576ba3506eevboxsync if (fCheckTimers)
af062818b47340eef15700d2f0211576ba3506eevboxsync {
af062818b47340eef15700d2f0211576ba3506eevboxsync if ( !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
af062818b47340eef15700d2f0211576ba3506eevboxsync && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
af062818b47340eef15700d2f0211576ba3506eevboxsync {
af062818b47340eef15700d2f0211576ba3506eevboxsync Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
af062818b47340eef15700d2f0211576ba3506eevboxsync VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
af062818b47340eef15700d2f0211576ba3506eevboxsync#ifdef IN_RING3
af062818b47340eef15700d2f0211576ba3506eevboxsync REMR3NotifyTimerPending(pVM, pVCpuDst);
af062818b47340eef15700d2f0211576ba3506eevboxsync VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
af062818b47340eef15700d2f0211576ba3506eevboxsync#endif
af062818b47340eef15700d2f0211576ba3506eevboxsync STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
af062818b47340eef15700d2f0211576ba3506eevboxsync }
af062818b47340eef15700d2f0211576ba3506eevboxsync }
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync /*
af062818b47340eef15700d2f0211576ba3506eevboxsync * Read the offset and adjust if we're playing catch-up.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * The catch-up adjusting work by us decrementing the offset by a percentage of
af062818b47340eef15700d2f0211576ba3506eevboxsync * the time elapsed since the previous TMVirtualGetSync call.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * It's possible to get a very long or even negative interval between two read
af062818b47340eef15700d2f0211576ba3506eevboxsync * for the following reasons:
af062818b47340eef15700d2f0211576ba3506eevboxsync * - Someone might have suspended the process execution, frequently the case when
af062818b47340eef15700d2f0211576ba3506eevboxsync * debugging the process.
* - We might be on a different CPU which TSC isn't quite in sync with the
* other CPUs in the system.
* - Another thread is racing us and we might have been preemnted while inside
* this function.
*
* Assuming nano second virtual time, we can simply ignore any intervals which has
* any of the upper 32 bits set.
*/
AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
uint64_t off = pVM->tm.s.offVirtualSync;
if (pVM->tm.s.fVirtualSyncCatchUp)
{
int rc = tmVirtualSyncTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. */
const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
uint64_t u64Delta = u64 - u64Prev;
if (RT_LIKELY(!(u64Delta >> 32)))
{
uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
{
off -= u64Sub;
ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
Log4(("TM: %RU64/%RU64: sub %RU32\n", u64 - off, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
}
else
{
/* we've completely caught up. */
STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
off = pVM->tm.s.offVirtualSyncGivenUp;
ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
Log4(("TM: %RU64/0: caught up\n", u64));
}
}
else
{
/* More than 4 seconds since last time (or negative), ignore it. */
if (!(u64Delta & RT_BIT_64(63)))
pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
}
if (RT_SUCCESS(rc))
tmVirtualSyncUnlock(pVM);
}
/*
* Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
* approach is to never pass the head timer. So, when we do stop the clock and
* set the timer pending flag.
*/
u64 -= off;
const uint64_t u64Expire = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
if (u64 >= u64Expire)
{
u64 = u64Expire;
int rc = tmVirtualSyncTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. FIXME */
if (RT_SUCCESS(rc))
{
ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64);
ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
tmVirtualSyncUnlock(pVM);
}
if ( fCheckTimers
&& !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
{
Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
REMR3NotifyTimerPending(pVM, pVCpuDst);
VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
}
else
Log4(("TM: %RU64/%RU64: exp tmr\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
}
}
else
{
u64 = pVM->tm.s.u64VirtualSync;
}
return u64;
}
/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time, also checking whether any
 * timers on the virtual clock need servicing.
 *
 * @returns The timestamp.
 * @param   pVM     VM handle.
 * @thread  EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
VMMDECL(uint64_t) TMVirtualSyncGet(PVM pVM)
{
    /* Delegate to the common worker with timer checking enabled. */
    uint64_t const u64Now = tmVirtualSyncGetEx(pVM, true /* fCheckTimers */);
    return u64Now;
}
/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time, skipping the check for timers
 * pending on TMCLOCK_VIRTUAL.
 *
 * @returns The timestamp.
 * @param   pVM     VM handle.
 * @thread  EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
VMMDECL(uint64_t) TMVirtualSyncGetNoCheck(PVM pVM)
{
    /* Delegate to the common worker with timer checking disabled. */
    uint64_t const u64Now = tmVirtualSyncGetEx(pVM, false /* fCheckTimers */);
    return u64Now;
}
/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time, letting the caller decide
 * whether timers on the virtual clock should be checked.
 *
 * @returns The timestamp.
 * @param   pVM             VM handle.
 * @param   fCheckTimers    Check timers on the virtual clock or not.
 * @thread  EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
VMMDECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
{
    /* Straight pass-through to the internal worker. */
    uint64_t const u64Now = tmVirtualSyncGetEx(pVM, fCheckTimers);
    return u64Now;
}
/**
 * Gets the current lag of the synchronous virtual clock relative to the
 * virtual clock, excluding the part of the lag we've given up on.
 *
 * @return The current lag.
 * @param  pVM      VM handle.
 */
VMMDECL(uint64_t) TMVirtualSyncGetLag(PVM pVM)
{
    uint64_t const offSync    = pVM->tm.s.offVirtualSync;
    uint64_t const offGivenUp = pVM->tm.s.offVirtualSyncGivenUp;
    return offSync - offGivenUp;
}
/**
 * Gets the current catch-up percent.
 *
 * @return The current catch-up percent; 0 means running at the same speed
 *         as the virtual clock (i.e. not catching up).
 * @param  pVM      VM handle.
 */
VMMDECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVM pVM)
{
    /* Only report the percentage while catch-up mode is actually active. */
    return pVM->tm.s.fVirtualSyncCatchUp
         ? pVM->tm.s.u32VirtualSyncCatchUpPercentage
         : 0;
}
/**
 * Gets the TMCLOCK_VIRTUAL clock frequency.
 *
 * @returns The frequency (ticks per second).
 * @param   pVM     VM handle (unused; the frequency is a build-time constant).
 */
VMMDECL(uint64_t) TMVirtualGetFreq(PVM pVM)
{
    uint64_t const uFreq = TMCLOCK_FREQ_VIRTUAL;
    return uFreq;
}
/**
 * Worker for TMR3PauseClocks.
 *
 * Decrements the running-CPU reference count and, when the last virtual CPU
 * pauses, latches the current virtual time and stops the virtual sync clock.
 *
 * @returns VINF_SUCCESS or VERR_INTERNAL_ERROR (asserted).
 * @param   pVM     The VM handle.
 */
int tmVirtualPauseLocked(PVM pVM)
{
    /* Drop one ticking reference; the post-decrement value must stay below the
       CPU count, otherwise someone paused more often than resumed (underflow). */
    uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
    AssertMsgReturn(c < pVM->cCPUs, ("%u vs %u\n", c, pVM->cCPUs), VERR_INTERNAL_ERROR);
    if (c == 0)
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
        /* Latch the current raw virtual time so the clock can be resumed from
           exactly this point later (see tmVirtualResumeLocked). */
        pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
        /* NOTE(review): only the virtual-sync ticking flag is cleared here;
           a plain virtual-clock ticking flag (if this revision has one) is not
           touched -- confirm that is intentional. */
        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
    }
    return VINF_SUCCESS;
}
/**
 * Worker for TMR3ResumeClocks.
 *
 * Increments the running-CPU reference count and, when the first virtual CPU
 * resumes, re-bases the virtual clock on the time latched at pause and
 * restarts the virtual sync clock.
 *
 * @returns VINF_SUCCESS or VERR_INTERNAL_ERROR (asserted).
 * @param   pVM     The VM handle.
 */
int tmVirtualResumeLocked(PVM pVM)
{
    /* Add one ticking reference; the post-increment value must not exceed the
       CPU count, otherwise someone resumed more often than paused. */
    uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
    AssertMsgReturn(c <= pVM->cCPUs, ("%u vs %u\n", c, pVM->cCPUs), VERR_INTERNAL_ERROR);
    if (c == 1)
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
        /* Reset the raw-timestamp monotonicity guard before taking a fresh reading. */
        pVM->tm.s.u64VirtualRawPrev         = 0;
        /* Re-base: offset maps the current raw nano timestamp back onto the
           virtual time latched when the clock was paused. */
        pVM->tm.s.u64VirtualWarpDriveStart  = tmVirtualGetRawNanoTS(pVM);
        pVM->tm.s.u64VirtualOffset          = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
        /* NOTE(review): mirrors tmVirtualPauseLocked -- only the virtual-sync
           ticking flag is set; confirm no plain virtual-clock flag needs setting. */
        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
    }
    return VINF_SUCCESS;
}
/**
 * Converts virtual ticks to nanoseconds.
 *
 * @returns nanoseconds.
 * @param   pVM             The VM handle (unused).
 * @param   u64VirtualTicks The virtual ticks to convert.
 * @remark  The virtual clock ticks at exactly 1 GHz, so the tick count
 *          already equals the nanosecond count (compile-time asserted).
 */
VMMDECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t const cNanoSecs = u64VirtualTicks;
    return cNanoSecs;
}
/**
 * Converts virtual ticks to microseconds.
 *
 * @returns microseconds.
 * @param   pVM             The VM handle (unused).
 * @param   u64VirtualTicks The virtual ticks to convert.
 * @remark  Plain integer division; the sub-microsecond remainder is discarded.
 */
VMMDECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t const cMicroSecs = u64VirtualTicks / 1000;
    return cMicroSecs;
}
/**
 * Converts virtual ticks to milliseconds.
 *
 * @returns milliseconds.
 * @param   pVM             The VM handle (unused).
 * @param   u64VirtualTicks The virtual ticks to convert.
 * @remark  Plain integer division; the sub-millisecond remainder is discarded.
 */
VMMDECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t const cMilliSecs = u64VirtualTicks / 1000000;
    return cMilliSecs;
}
/**
 * Converts nanoseconds to virtual ticks.
 *
 * @returns virtual ticks.
 * @param   pVM         The VM handle (unused).
 * @param   u64NanoTS   The nanosecond value to convert.
 * @remark  The virtual clock ticks at exactly 1 GHz, so the nanosecond count
 *          already equals the tick count (compile-time asserted).
 */
VMMDECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t const cTicks = u64NanoTS;
    return cTicks;
}
/**
 * Converts microseconds to virtual ticks.
 *
 * @returns virtual ticks.
 * @param   pVM         The VM handle (unused).
 * @param   u64MicroTS  The microsecond value to convert.
 * @remark  Plain integer multiply; very large inputs can overflow 64 bits.
 */
VMMDECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t const cTicks = u64MicroTS * 1000;
    return cTicks;
}
/**
 * Converts milliseconds to virtual ticks.
 *
 * @returns virtual ticks.
 * @param   pVM         The VM handle (unused).
 * @param   u64MilliTS  The millisecond value to convert.
 * @remark  Plain integer multiply; very large inputs can overflow 64 bits.
 */
VMMDECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t const cTicks = u64MilliTS * 1000000;
    return cTicks;
}