GVMMR0.cpp revision 5f03542cd4b877bd29f439da4ee1442c6bacd08e
/* $Id$ */
/** @file
* GVMM - Global VM Manager.
*/
/*
* Copyright (C) 2007-2010 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/** @page pg_gvmm GVMM - The Global VM Manager
*
* The Global VM Manager lives in ring-0. Its main function at the moment is
* to manage a list of all running VMs, keep a ring-0 only structure (GVM) for
* each of them, and assign them unique identifiers (so GMM can track page
* owners). The GVMM also manages some of the host CPU resources, like the
* periodic preemption timer.
*
* The GVMM will create a ring-0 object for each VM when it is registered, this
* is both for session cleanup purposes and for having a point where it is
* possible to implement usage policies later (in SUPR0ObjRegister).
*
*
* @section sec_gvmm_ppt Periodic Preemption Timer (PPT)
*
* On systems that sport a high resolution kernel timer API, we use per-cpu
* timers to generate interrupts that preempt VT-x, AMD-V and raw-mode guest
* execution. The timer frequency is calculated by taking the max
* TMCalcHostTimerFrequency for all VMs running on a CPU for the last ~160 ms
* (RT_ELEMENTS((PGVMMHOSTCPU)0, Ppt.aHzHistory) *
* GVMMHOSTCPU_PPT_HIST_INTERVAL_NS).
*
* The TMCalcHostTimerFrequency() part of the things works by taking the max
* TMTimerSetFrequencyHint() value and adjusts by the current catch-up percent,
* warp drive percent and some fudge factors. VMMR0.cpp reports the result via
* GVMMR0SchedUpdatePeriodicPreemptionTimer() before switching to the VT-x,
* AMD-V and raw-mode execution environments.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_GVMM
#include "GVMMR0Internal.h"
#include <iprt/asm-amd64-x86.h>
#include <iprt/semaphore.h>
#include <iprt/spinlock.h>
/*******************************************************************************
* Defined Constants And Macros *
*******************************************************************************/
#if defined(RT_OS_LINUX) || defined(DOXYGEN_RUNNING)
/** Define this to enable the periodic preemption timer. */
# define GVMM_SCHED_WITH_PPT
#endif
/*******************************************************************************
* Structures and Typedefs *
*******************************************************************************/
/**
* Global VM handle.
*/
typedef struct GVMHANDLE
{
/** The index of the next handle in the list (free or used). (0 is nil.) */
/** Our own index / handle value. */
/** The pointer to the ring-0 only (aka global) VM structure. */
/** The ring-0 mapping of the shared VM instance data. */
/** The virtual machine object. */
void *pvObj;
/** The session this VM is associated with. */
/** The ring-0 handle of the EMT0 thread.
* This is used for ownership checks as well as looking up a VM handle by thread
* at times like assertions. */
/** The process ID of the handle owner.
* This is used for access checks. */
} GVMHANDLE;
/** Pointer to a global VM handle. */
typedef GVMHANDLE *PGVMHANDLE;
/** Number of GVM handles (including the NIL handle). */
#if HC_ARCH_BITS == 64
# define GVMM_MAX_HANDLES 1024
#else
# define GVMM_MAX_HANDLES 128
#endif
/**
 * Per host CPU GVMM data.
 */
/* NOTE(review): most member declarations are missing from this revision of
   the struct -- only the volatile PPT fields below survive. */
typedef struct GVMMHOSTCPU
{
/** Magic number (GVMMHOSTCPU_MAGIC). */
/** The CPU ID. */
/** The CPU set index. */
#ifdef GVMM_SCHED_WITH_PPT
/** Periodic preemption timer data. */
struct
{
/** The handle to the periodic preemption timer. */
/** Spinlock protecting the data below. */
/** The smallest Hz that we need to care about. (static) */
/** The number of ticks between each historization. */
/** The current historization tick (counting up to
* cTicksHistoriziationInterval and then resetting). */
/** The current timer interval. This is set to 0 when inactive. */
/** The current timer frequency. This is set to 0 when inactive. */
/** The current max frequency reported by the EMTs.
* This gets historicized and reset by the timer callback. This is
* read without holding the spinlock, so needs atomic updating. */
uint32_t volatile uDesiredHz;
/** Whether the timer was started or not. */
bool volatile fStarted;
/** Set if we're starting timer. */
bool volatile fStarting;
/** The index of the next history entry (mod it). */
/** Historicized uDesiredHz values. The array wraps around, new entries
* are added at iHzHistory. This is updated approximately every
* GVMMHOSTCPU_PPT_HIST_INTERVAL_NS by the timer callback. */
/** Statistics counter for recording the number of interval changes. */
/** Statistics counter for recording the number of timer starts. */
} Ppt;
#endif /* GVMM_SCHED_WITH_PPT */
} GVMMHOSTCPU;
/** Pointer to the per host CPU GVMM data. */
typedef GVMMHOSTCPU *PGVMMHOSTCPU;
/** The GVMMHOSTCPU::u32Magic value (Petra, Tanya & Rachel Haden). */
/** The interval each history entry should cover (approximately), given in
 * nanoseconds. */
/**
 * The GVMM instance data.
 */
/* NOTE(review): most member declarations are missing from this revision of
   the struct -- only cHaltedEMTs survives. */
typedef struct GVMM
{
/** Eyecatcher / magic. */
/** The index of the head of the free handle chain. (0 is nil.) */
/** The index of the head of the active handle chain. (0 is nil.) */
/** The number of VMs. */
/** Alignment padding. */
/** The number of EMTs. */
/** The number of EMTs that have halted in GVMMR0SchedHalt. */
uint32_t volatile cHaltedEMTs;
/** Alignment padding. */
/** When the next halted or sleeping EMT will wake up.
* This is set to 0 when it needs recalculating and to UINT64_MAX when
* there are no halted or sleeping EMTs in the GVMM. */
/** The lock used to serialize VM creation, destruction and associated events that
* isn't performance critical. Owners may acquire the list lock. */
/** The lock used to serialize used list updates and accesses.
* This indirectly includes scheduling since the scheduler will have to walk the
* used list to examine running VMs. Owners may not acquire any other locks. */
/** The handle array.
* The size of this array defines the maximum number of currently running VMs.
* The first entry is unused as it represents the NIL handle. */
/** @gcfgm{/GVMM/cEMTsMeansCompany, 32-bit, 0, UINT32_MAX, 1}
* The number of EMTs that means we no longer consider ourselves alone on a
* CPU/Core.
*/
/** @gcfgm{/GVMM/MinSleepAlone,32-bit, 0, 100000000, 750000, ns}
* The minimum sleep time for when we're alone, in nano seconds.
*/
/** @gcfgm{/GVMM/MinSleepCompany,32-bit,0, 100000000, 15000, ns}
* The minimum sleep time for when we've got company, in nano seconds.
*/
/** @gcfgm{/GVMM/EarlyWakeUp1, 32-bit, 0, 100000000, 25000, ns}
* The limit for the first round of early wakeups, given in nano seconds.
*/
/** @gcfgm{/GVMM/EarlyWakeUp2, 32-bit, 0, 100000000, 50000, ns}
* The limit for the second round of early wakeups, given in nano seconds.
*/
/** The number of entries in the host CPU array (aHostCpus). */
/** Per host CPU data (variable length). */
} GVMM;
/** Pointer to the GVMM instance data. */
/** The GVMM::u32Magic value (Charlie Haden). */
#define GVMM_MAGIC 0x19370806
/*******************************************************************************
* Global Variables *
*******************************************************************************/
/** Pointer to the GVMM instance data.
* (Just my general dislike for global variables.) */
/** Macro for obtaining and validating the g_pGVMM pointer.
* On failure it will return from the invoking function with the specified return value.
*
* @param pGVMM The name of the pGVMM variable.
* @param rc The return value on failure. Use VERR_INTERNAL_ERROR for
* VBox status codes.
*/
do { \
AssertMsgReturn((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic), (rc)); \
} while (0)
/** Macro for obtaining and validating the g_pGVMM pointer, void function variant.
* On failure it will return from the invoking function.
*
* @param pGVMM The name of the pGVMM variable.
*/
#define GVMM_GET_VALID_INSTANCE_VOID(pGVMM) \
do { \
AssertPtrReturnVoid((pGVMM)); \
} while (0)
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
#ifdef GVMM_SCHED_WITH_PPT
static DECLCALLBACK(void) gvmmR0SchedPeriodicPreemptionTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
#endif
/**
 * Initializes the GVMM.
 *
 * This is called while owning the loader semaphore (see supdrvIOCtl_LdrLoad()).
 *
 * @returns VBox status code.
 */
GVMMR0DECL(int) GVMMR0Init(void)
{
/* NOTE(review): most statements of this function are missing in this
   revision; the control-flow skeleton below is not compilable as-is. */
LogFlow(("GVMMR0Init:\n"));
/*
 * Allocate and initialize the instance data.
 */
if (!pGVMM)
return VERR_NO_MEMORY;
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
{
/* the nil handle */
/* the tail */
/* the rest */
while (i-- > 1)
{
}
/* The default configuration values. */
pGVMM->cEMTsMeansCompany = 1; /** @todo should be adjusted relative to the cpu count or something... */
{
pGVMM->nsMinSleepAlone = 750000 /* ns (0.750 ms) */; /** @todo this should be adjusted to be 75% (or something) of the scheduler granularity... */
}
else if (cNsResolution > RT_NS_100US)
{
pGVMM->nsEarlyWakeUp1 = 0;
pGVMM->nsEarlyWakeUp2 = 0;
}
else
{
pGVMM->nsEarlyWakeUp1 = 0;
pGVMM->nsEarlyWakeUp2 = 0;
}
/* The host CPU data. */
while (iCpu-- > 0)
{
#ifdef GVMM_SCHED_WITH_PPT
pGVMM->aHostCpus[iCpu].Ppt.uMinHz = 5; /** @todo Add some API which figures this one out. (not *that* important) */
//pGVMM->aHostCpus[iCpu].Ppt.iTickHistorization = 0;
//pGVMM->aHostCpus[iCpu].Ppt.cNsInterval = 0;
//pGVMM->aHostCpus[iCpu].Ppt.uTimerHz = 0;
//pGVMM->aHostCpus[iCpu].Ppt.uDesiredHz = 0;
//pGVMM->aHostCpus[iCpu].Ppt.fStarted = false;
//pGVMM->aHostCpus[iCpu].Ppt.fStarting = false;
//pGVMM->aHostCpus[iCpu].Ppt.iHzHistory = 0;
//pGVMM->aHostCpus[iCpu].Ppt.aHzHistory = {0};
#endif
{
#ifdef GVMM_SCHED_WITH_PPT
50*1000*1000 /* whatever */,
if (RT_SUCCESS(rc))
if (RT_FAILURE(rc))
{
{
iCpu++;
}
break;
}
#endif
}
else
{
}
}
if (RT_SUCCESS(rc))
{
return VINF_SUCCESS;
}
/* bail out. */
}
}
return rc;
}
/**
 * Terminates the GVM.
 *
 * This is called while owning the loader semaphore (see supdrvLdrFree()).
 * And unless something is wrong, there should be absolutely no VMs
 * registered at this point.
 */
GVMMR0DECL(void) GVMMR0Term(void)
{
/* NOTE(review): body is truncated in this revision; statements missing. */
LogFlow(("GVMMR0Term:\n"));
{
return;
}
/*
 * First of all, stop all active timers.
 */
uint32_t cActiveTimers = 0;
while (iCpu-- > 0)
{
#ifdef GVMM_SCHED_WITH_PPT
#endif
}
if (cActiveTimers)
/*
 * Invalidate the instance data and free resources.
 */
{
SUPR0Printf("GVMMR0Term: iUsedHead=%#x! (cVMs=%#x cEMTs=%#x)\n", pGVMM->iUsedHead, pGVMM->cVMs, pGVMM->cEMTs);
}
#ifdef GVMM_SCHED_WITH_PPT
while (iCpu-- > 0)
{
}
#endif
}
/**
 * A quick hack for setting global config values.
 *
 * @returns VBox status code.
 *
 * @param pSession The session handle. Used for authentication.
 * @param pszName The variable name.
 * @param u64Value The new value.
 */
/* NOTE(review): the GVMMR0SetConfig signature line is missing in this
   revision, and most statements have been stripped from the body. */
{
/*
 * Validate input.
 */
/*
 * String switch time!
 */
return VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
int rc = VINF_SUCCESS;
{
if (u64Value <= UINT32_MAX)
else
}
{
if (u64Value <= RT_NS_100MS)
else
}
{
if (u64Value <= RT_NS_100MS)
else
}
{
if (u64Value <= RT_NS_100MS)
else
}
{
if (u64Value <= RT_NS_100MS)
else
}
else
return rc;
}
/**
 * A quick hack for getting global config values.
 *
 * @returns VBox status code.
 *
 * @param pSession The session handle. Used for authentication.
 * @param pszName The variable name.
 * @param pu64Value Where to store the value.
 */
GVMMR0DECL(int) GVMMR0QueryConfig(PSUPDRVSESSION pSession, const char *pszName, uint64_t *pu64Value)
{
/* NOTE(review): body is truncated in this revision; statements missing. */
/*
 * Validate input.
 */
/*
 * String switch time!
 */
return VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
int rc = VINF_SUCCESS;
else
return rc;
}
/**
 * Try acquire the 'used' lock.
 *
 * @returns IPRT status code, see RTSemFastMutexRequest.
 * @param pGVMM The GVMM instance data.
 */
/* NOTE(review): the function signature line and body are missing in this
   revision. */
{
return rc;
}
/**
 * Release the 'used' lock.
 *
 * @returns IPRT status code, see RTSemFastMutexRelease.
 * @param pGVMM The GVMM instance data.
 */
/* NOTE(review): the function signature line and body are missing in this
   revision. */
{
return rc;
}
/**
 * Try acquire the 'create &amp; destroy' lock.
 *
 * @returns IPRT status code, see RTSemFastMutexRequest.
 * @param pGVMM The GVMM instance data.
 */
/* NOTE(review): the function signature line and body are missing in this
   revision. */
{
return rc;
}
/**
 * Release the 'create &amp; destroy' lock.
 *
 * @returns IPRT status code, see RTSemFastMutexRelease.
 * @param pGVMM The GVMM instance data.
 */
/* NOTE(review): the function signature line and body are missing in this
   revision. */
{
return rc;
}
/**
 * Request wrapper for the GVMMR0CreateVM API.
 *
 * @returns VBox status code.
 * @param pReq The request buffer.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Validate the request.
 */
return VERR_INVALID_POINTER;
return VERR_INVALID_PARAMETER;
return VERR_INVALID_POINTER;
/*
 * Execute it.
 */
if (RT_SUCCESS(rc))
{
}
return rc;
}
/**
 * Allocates the VM structure and registers it with GVM.
 *
 * The caller will become the VM owner and thereby the EMT.
 *
 * @returns VBox status code.
 * @param pSession The support driver session.
 * @param cCpus Number of virtual CPUs for the new VM.
 * @param ppVM Where to store the pointer to the VM structure.
 *
 * @thread EMT.
 */
/* NOTE(review): the GVMMR0CreateVM signature line is missing and most
   statements have been stripped from the body in this revision. */
{
if ( cCpus == 0
|| cCpus > VMM_MAX_CPU_COUNT)
return VERR_INVALID_PARAMETER;
/*
 * The whole allocation process is protected by the lock.
 */
/*
 * Allocate a handle first so we don't waste resources unnecessarily.
 */
if (iHandle)
{
/* consistency checks, a bit paranoid as always. */
{
pHandle->pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, gvmmR0HandleObjDestructor, pGVMM, pHandle);
{
/*
 * Move the handle from the free to used list and perform permission checks.
 */
if (RT_SUCCESS(rc))
{
/*
 * Allocate the global VM structure (GVM) and initialize it.
 */
if (pGVM)
{
/*
 * Allocate the shared VM structure and associated page array.
 */
#ifdef RT_OS_DARWIN /** @todo Figure out why this is broken. Is it only on snow leopard? */
rc = RTR0MemObjAllocLow(&pGVM->gvmm.s.VMMemObj, (cPages + 1) << PAGE_SHIFT, false /* fExecutable */);
#else
#endif
if (RT_SUCCESS(rc))
{
rc = RTR0MemObjAllocPage(&pGVM->gvmm.s.VMPagesMemObj, cPages * sizeof(SUPPAGE), false /* fExecutable */);
if (RT_SUCCESS(rc))
{
{
}
/*
 * Map them into ring-3.
 */
if (RT_SUCCESS(rc))
{
/* Initialize all the VM pointers. */
{
}
if (RT_SUCCESS(rc))
{
/* complete the handle - take the UsedLock sem just to be careful. */
return VINF_SUCCESS;
}
}
}
}
}
}
/* else: The user wasn't permitted to create this VM. */
/*
 * The handle will be freed by gvmmR0HandleObjDestructor as we release the
 * object reference here. A little extra mess because of non-recursive lock.
 */
return rc;
}
rc = VERR_NO_MEMORY;
}
else
}
else
return rc;
}
/**
 * Initializes the per VM data belonging to GVMM.
 *
 * @param pGVM Pointer to the global VM structure.
 */
/* NOTE(review): the function signature line and body are missing in this
   revision. */
{
{
}
}
/**
 * Does the VM initialization.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the shared VM structure.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Validate the VM structure, state and handle.
 */
if (RT_SUCCESS(rc))
{
{
{
if (RT_FAILURE(rc))
{
break;
}
}
}
else
}
return rc;
}
/**
 * Indicates that we're done with the ring-0 initialization
 * of the VM.
 *
 * @param pVM Pointer to the shared VM structure.
 * @thread EMT(0)
 */
/* NOTE(review): the function signature line and body are missing in this
   revision. */
{
/* Validate the VM structure, state and handle. */
/* Set the indicator. */
}
/**
 * Indicates that we're doing the ring-0 termination of the VM.
 *
 * @returns true if termination hasn't been done already, false if it has.
 * @param pVM Pointer to the shared VM structure.
 * @param pGVM Pointer to the global VM structure. Optional.
 * @thread EMT(0)
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/* Validate the VM structure, state and handle. */
AssertPtrNullReturn(pGVM, false);
if (!pGVM)
{
AssertRCReturn(rc, false);
}
/* Set the indicator. */
return false;
return true;
}
/**
 * Destroys the VM, freeing all associated resources (the ring-0 ones anyway).
 *
 * This is called from vmR3DestroyFinalBit and from an error path in VMR3Create,
 * and the caller is not the EMT thread, unfortunately. For security reasons, it
 * would've been nice if the caller was actually the EMT thread or that we somehow
 * could've associated the calling thread with the VM up front.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the shared VM structure.
 *
 * @thread EMT(0) if it's associated with the VM, otherwise any thread.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Validate the VM structure, state and caller.
 */
AssertMsgReturn(pVM->enmVMState >= VMSTATE_CREATING && pVM->enmVMState <= VMSTATE_TERMINATED, ("%d\n", pVM->enmVMState), VERR_WRONG_ORDER);
/*
 * Lookup the handle and destroy the object.
 * Since the lock isn't recursive and we'll have to leave it before dereferencing the
 * object, we take some precautions against racing callers just in case...
 */
/* be careful here because we might theoretically be racing someone else cleaning up. */
{
}
else
{
SUPR0Printf("GVMMR0DestroyVM: pHandle=%p:{.pVM=%p, .hEMT0=%p, .ProcId=%u, .pvObj=%p} pVM=%p hSelf=%p\n",
}
return rc;
}
/**
 * Performs VM cleanup task as part of object destruction.
 *
 * @param pGVM The GVM pointer.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
{
{
LogFlow(("gvmmR0CleanupVM: Calling VMMR0TermVM\n"));
}
else
}
}
/**
 * Handle destructor.
 *
 * @param pvGVMM The GVM instance pointer.
 * @param pvHandle The handle pointer.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Some quick, paranoid, input validation.
 */
if ( !iHandle
{
return;
}
/*
 * This is a tad slow but a doubly linked list is too much hassle.
 */
{
return;
}
else
{
while (iPrev)
{
{
return;
}
if (RT_UNLIKELY(c-- <= 0))
{
iPrev = 0;
break;
}
break;
}
if (!iPrev)
{
return;
}
}
/*
 * Do the global cleanup round.
 */
{
/*
 * Do the GVMM cleanup - must be done last.
 */
/* The VM and VM pages mappings/allocations. */
{
}
{
}
{
}
{
}
{
{
}
}
/* the GVM structure itself. */
/* Re-acquire the UsedLock before freeing the handle since we're updating handle fields. */
}
/* else: GVMMR0CreateVM cleanup. */
/*
 * Free the handle.
 */
LogFlow(("gvmmR0HandleObjDestructor: returns\n"));
}
/**
 * Registers the calling thread as the EMT of a Virtual CPU.
 *
 * Note that VCPU 0 is automatically registered during VM creation.
 *
 * @returns VBox status code
 * @param pVM The shared VM structure (the ring-0 mapping).
 * @param idCpu VCPU id.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Validate the VM structure, state and handle.
 */
if (RT_FAILURE(rc))
return rc;
return VINF_SUCCESS;
}
/**
 * Lookup a GVM structure by its handle.
 *
 * @returns The GVM pointer on success, NULL on failure.
 * @param hGVM The global VM handle. Asserts on bad handle.
 */
/* NOTE(review): the function signature line and body are missing in this
   revision. */
{
/*
 * Validate.
 */
/*
 * Look it up.
 */
}
/**
 * Lookup a GVM structure by the shared VM structure.
 *
 * The calling thread must be in the same process as the VM. All current lookups
 * are by threads inside the same process, so this will not be an issue.
 *
 * @returns VBox status code.
 * @param pVM The shared VM structure (the ring-0 mapping).
 * @param ppGVM Where to store the GVM pointer.
 * @param ppGVMM Where to store the pointer to the GVMM instance data.
 * @param fTakeUsedLock Whether to take the used lock or not.
 * Be very careful if not taking the lock as it's possible that
 * the VM will disappear then.
 *
 * @remark This will not assert on an invalid pVM but try return silently.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Validate.
 */
return VERR_INVALID_POINTER;
return VERR_INVALID_POINTER;
return VERR_INVALID_HANDLE;
/*
 * Look it up.
 */
if (fTakeUsedLock)
{
{
return VERR_INVALID_HANDLE;
}
}
else
{
return VERR_INVALID_HANDLE;
return VERR_INVALID_HANDLE;
return VERR_INVALID_HANDLE;
return VERR_INVALID_HANDLE;
return VERR_INVALID_HANDLE;
}
return VINF_SUCCESS;
}
/**
 * Lookup a GVM structure by the shared VM structure.
 *
 * @returns VBox status code.
 * @param pVM The shared VM structure (the ring-0 mapping).
 * @param ppGVM Where to store the GVM pointer.
 *
 * @remark This will not take the 'used'-lock because it doesn't do
 * nesting and this function will be used from under the lock.
 */
/* NOTE(review): the function signature line and body are missing in this
   revision. */
{
}
/**
 * Lookup a GVM structure by the shared VM structure and ensuring that the
 * caller is an EMT thread.
 *
 * @returns VBox status code.
 * @param pVM The shared VM structure (the ring-0 mapping).
 * @param idCpu The Virtual CPU ID of the calling EMT.
 * @param ppGVM Where to store the GVM pointer.
 * @param ppGVMM Where to store the pointer to the GVMM instance data.
 * @thread EMT
 *
 * @remark This will assert in all failure paths.
 */
/* NOTE(review): the function signature line and body are missing in this
   revision. */
{
/*
 * Validate.
 */
/*
 * Look it up.
 */
return VINF_SUCCESS;
}
/**
 * Lookup a GVM structure by the shared VM structure
 * and ensuring that the caller is the EMT thread.
 *
 * @returns VBox status code.
 * @param pVM The shared VM structure (the ring-0 mapping).
 * @param idCpu The Virtual CPU ID of the calling EMT.
 * @param ppGVM Where to store the GVM pointer.
 * @thread EMT
 */
/* NOTE(review): the function signature line and body are missing in this
   revision. */
{
}
/**
 * Lookup a VM by its global handle.
 *
 * @returns The VM handle on success, NULL on failure.
 * @param hGVM The global VM handle. Asserts on bad handle.
 */
/* NOTE(review): the function signature line and body are missing in this
   revision. */
{
}
/**
 * Looks up the VM belonging to the specified EMT thread.
 *
 * This is used by the assertion machinery in VMMR0.cpp to avoid causing
 * unnecessary kernel panics when the EMT thread hits an assertion. The
 * caller may or may not be an EMT thread.
 *
 * @returns The VM handle on success, NULL on failure.
 * @param hEMT The native thread handle of the EMT.
 * NIL_RTNATIVETHREAD means the current thread
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * No Assertions here as we're usually called in a AssertMsgN or
 * RTAssert* context.
 */
return NULL;
if (hEMT == NIL_RTNATIVETHREAD)
hEMT = RTThreadNativeSelf();
/*
 * Search the handles in a linear fashion as we don't dare to take the lock (assert).
 */
{
{
/* This is fairly safe with the current process per VM approach. */
if ( cCpus < 1
|| cCpus > VMM_MAX_CPU_COUNT)
continue;
}
}
return NULL;
}
/**
 * This will wake up expired and soon-to-be expired VMs.
 *
 * @returns Number of VMs that have been woken up.
 * @param pGVMM Pointer to the GVMM instance data.
 * @param u64Now The current time.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Skip this if we've got disabled because of high resolution wakeups or by
 * the user.
 */
if ( !pGVMM->nsEarlyWakeUp1
&& !pGVMM->nsEarlyWakeUp2)
return 0;
/** @todo Rewrite this algorithm. See performance defect XYZ. */
/*
 * A cheap optimization to stop wasting so much time here on big setups.
 */
if ( pGVMM->cHaltedEMTs == 0
return 0;
/*
 * The first pass will wake up VMs which have actually expired
 * and look for VMs that should be woken up in the 2nd and 3rd passes.
 */
unsigned cWoken = 0;
unsigned cHalted = 0;
unsigned cTodo2nd = 0;
unsigned cTodo3rd = 0;
{
{
{
if (u64)
{
{
{
cWoken++;
}
}
else
{
cHalted++;
if (u64 <= uNsEarlyWakeUp1)
cTodo2nd++;
else if (u64 <= uNsEarlyWakeUp2)
cTodo3rd++;
}
}
}
}
}
if (cTodo2nd)
{
{
{
{
if ( u64
&& u64 <= uNsEarlyWakeUp1)
{
{
cWoken++;
}
}
}
}
}
}
if (cTodo3rd)
{
{
{
{
if ( u64
&& u64 <= uNsEarlyWakeUp2)
{
{
cWoken++;
}
}
}
}
}
}
/*
 * Set the minimum value.
 */
return cWoken;
}
/**
 * Halt the EMT thread.
 *
 * @returns VINF_SUCCESS normal wakeup (timeout or kicked by other thread).
 * VERR_INTERRUPTED if a signal was scheduled for the thread.
 * @param pVM Pointer to the shared VM structure.
 * @param idCpu The Virtual CPU ID of the calling EMT.
 * @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time.
 * @thread EMT(idCpu).
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Validate the VM structure, state and handle.
 */
if (RT_FAILURE(rc))
return rc;
/*
 * Take the UsedList semaphore, get the current time
 * and check if anyone needs waking up.
 * Interrupts must NOT be disabled at this point because we ask for GIP time!
 */
/* GIP hack: We frequently sleep for short intervals where the
 difference between GIP and system time matters on systems with high resolution
 system time. So, convert the input from GIP to System time in that case. */
/*
 * Go to sleep if we must...
 * Cap the sleep time to 1 second to be on the safe side.
 */
if ( u64NowGip < u64ExpireGipTime
: pGVMM->nsMinSleepAlone))
{
if (cNsInterval > RT_NS_1SEC)
/* Reset the semaphore to try prevent a few false wake-ups. */
if (rc == VINF_SUCCESS)
else if (rc == VERR_TIMEOUT)
{
rc = VINF_SUCCESS;
}
}
else
{
}
return rc;
}
/**
 * Worker for GVMMR0SchedWakeUp and GVMMR0SchedWakeUpAndPokeCpus that wakes up
 * a sleeping EMT.
 *
 * @retval VINF_SUCCESS if successfully woken up.
 * @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
 *
 * @param pGVM The global (ring-0) VM structure.
 * @param pGVCpu The global (ring-0) VCPU structure.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Signal the semaphore regardless of whether it's currently blocked on it.
 *
 * The reason for this is that there is absolutely no way we can be 100%
 * certain that it isn't *about* to go to sleep on it and just got
 * delayed a bit en route. So, we will always signal the semaphore when
 * it is flagged as halted in the VMM.
 */
/** @todo we can optimize some of that by means of the pVCpu->enmState now. */
int rc;
{
rc = VINF_SUCCESS;
}
else
{
}
return rc;
}
/**
 * Wakes up the halted EMT thread so it can service a pending request.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS if successfully woken up.
 * @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
 *
 * @param pVM Pointer to the shared VM structure.
 * @param idCpu The Virtual CPU ID of the EMT to wake up.
 * @param fTakeUsedLock Take the used lock or not
 * @thread Any but EMT.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Validate input and take the UsedLock.
 */
if (RT_SUCCESS(rc))
{
{
/*
 * Do the actual job.
 */
if (fTakeUsedLock)
{
/*
 * While we're here, do a round of scheduling.
 */
}
}
else
if (fTakeUsedLock)
{
}
}
return rc;
}
/**
 * Wakes up the halted EMT thread so it can service a pending request.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS if successfully woken up.
 * @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
 *
 * @param pVM Pointer to the shared VM structure.
 * @param idCpu The Virtual CPU ID of the EMT to wake up.
 * @thread Any but EMT.
 */
/* NOTE(review): the function signature line and body are missing in this
   revision. */
{
}
/**
 * Worker common to GVMMR0SchedPoke and GVMMR0SchedWakeUpAndPokeCpus that pokes
 * the Virtual CPU if it's still busy executing guest code.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS if poked successfully.
 * @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
 *
 * @param pGVM The global (ring-0) VM structure.
 * @param pVCpu The Virtual CPU handle.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
if ( idHostCpu == NIL_RTCPUID
{
return VINF_GVM_NOT_BUSY_IN_GC;
}
/* Note: this function is not implemented on Darwin and Linux (kernel < 2.6.19) */
return VINF_SUCCESS;
}
/**
 * Pokes an EMT if it's still busy running guest code.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS if poked successfully.
 * @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
 *
 * @param pVM Pointer to the shared VM structure.
 * @param idCpu The ID of the virtual CPU to poke.
 * @param fTakeUsedLock Take the used lock or not
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Validate input and take the UsedLock.
 */
if (RT_SUCCESS(rc))
{
else
if (fTakeUsedLock)
{
}
}
return rc;
}
/**
 * Pokes an EMT if it's still busy running guest code.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS if poked successfully.
 * @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
 *
 * @param pVM Pointer to the shared VM structure.
 * @param idCpu The ID of the virtual CPU to poke.
 */
/* NOTE(review): the function signature line and body are missing in this
   revision. */
{
}
/**
 * Wakes up a set of halted EMT threads so they can service pending requests.
 *
 * @returns VBox status code, no informational stuff.
 *
 * @param pVM Pointer to the shared VM structure.
 * @param pSleepSet The set of sleepers to wake up.
 * @param pPokeSet The set of CPUs to poke.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Validate input and take the UsedLock.
 */
if (RT_SUCCESS(rc))
{
rc = VINF_SUCCESS;
while (idCpu-- > 0)
{
/* Don't try poke or wake up ourselves. */
continue;
/* just ignore errors for now. */
}
}
return rc;
}
/**
 * VMMR0 request wrapper for GVMMR0SchedWakeUpAndPokeCpus.
 *
 * @returns see GVMMR0SchedWakeUpAndPokeCpus.
 * @param pVM Pointer to the shared VM structure.
 * @param pReq The request packet.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Validate input and pass it on.
 */
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
}
/**
 * Poll the schedule to see if someone else should get a chance to run.
 *
 * This is a bit hackish and will not work too well if the machine is
 * under heavy load from non-VM processes.
 *
 * @returns VINF_SUCCESS if not yielded.
 * VINF_GVM_YIELDED if an attempt to switch to a different VM task was made.
 * @param pVM Pointer to the shared VM structure.
 * @param idCpu The Virtual CPU ID of the calling EMT.
 * @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time.
 * @param fYield Whether to yield or not.
 * This is for when we're spinning in the halt loop.
 * @thread EMT(idCpu).
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Validate input.
 */
if (RT_SUCCESS(rc))
{
if (!fYield)
else
{
/** @todo implement this... */
}
}
return rc;
}
#ifdef GVMM_SCHED_WITH_PPT
/**
 * Timer callback for the periodic preemption timer.
 *
 * @param pTimer The timer handle.
 * @param pvUser Pointer to the per cpu structure.
 * @param iTick The current tick.
 */
static DECLCALLBACK(void) gvmmR0SchedPeriodicPreemptionTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
{
/* NOTE(review): most statements have been stripped from this body in this
   revision; the skeleton below is not compilable as-is. */
/*
 * Termination check
 */
return;
/*
 * Do the house keeping.
 */
{
/*
 * Historicize the max frequency.
 */
/*
 * Check if the current timer frequency needs changing.
 */
uint32_t uHistMaxHz = 0;
else if (uHistMaxHz)
{
/*
 * Reprogram it.
 */
/ cNsInterval;
else
/*SUPR0Printf("Cpu%u: change to %u Hz / %u ns\n", pCpu->idxCpuSet, uHistMaxHz, cNsInterval);*/
}
else
{
/*
 * Stop it.
 */
/*SUPR0Printf("Cpu%u: stopping (%u Hz)\n", pCpu->idxCpuSet, uHistMaxHz);*/
}
}
else
}
#endif /* GVMM_SCHED_WITH_PPT */
/**
 * Updates the periodic preemption timer for the calling CPU.
 *
 * The caller must have disabled preemption!
 * The caller must check that the host can do high resolution timers.
 *
 * @param pVM The VM handle.
 * @param idHostCpu The current host CPU id.
 * @param uHz The desired frequency.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
#ifdef GVMM_SCHED_WITH_PPT
/*
 * Resolve the per CPU data.
 */
return;
/*
 * Check whether we need to do anything about the timer.
 * We have to be a little bit careful since we might be racing the timer
 * callback here.
 */
if (uHz > 16384)
{
uint32_t cNsInterval = 0;
{
/ cNsInterval;
else
}
if (cNsInterval)
{
if (RT_FAILURE(rc))
}
}
#endif /* GVMM_SCHED_WITH_PPT */
}
/**
 * Retrieves the GVMM statistics visible to the caller.
 *
 * @returns VBox status code.
 *
 * @param pStats Where to put the statistics.
 * @param pSession The current session.
 * @param pVM The VM to obtain statistics for. Optional.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Validate input.
 */
/*
 * Take the lock and get the VM statistics.
 */
if (pVM)
{
if (RT_FAILURE(rc))
return rc;
}
else
{
}
/*
 * Enumerate the VMs and add the ones visible to the statistics.
 */
{
{
}
}
/*
 * Copy out the per host CPU statistics.
 */
{
{
#ifdef GVMM_SCHED_WITH_PPT
#else
#endif
iDstCpu++;
}
}
return VINF_SUCCESS;
}
/**
 * VMMR0 request wrapper for GVMMR0QueryStatistics.
 *
 * @returns see GVMMR0QueryStatistics.
 * @param pVM Pointer to the shared VM structure. Optional.
 * @param pReq The request packet.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Validate input and pass it on.
 */
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
}
/**
 * Resets the specified GVMM statistics.
 *
 * @returns VBox status code.
 *
 * @param pStats Which statistics to reset, that is, non-zero fields indicate which to reset.
 * @param pSession The current session.
 * @param pVM The VM to reset statistics for. Optional.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision (including the
   MAYBE_RESET_FIELD macro bodies and their #undef lines). */
{
/*
 * Validate input.
 */
/*
 * Take the lock and get the VM statistics.
 */
if (pVM)
{
if (RT_FAILURE(rc))
return rc;
# define MAYBE_RESET_FIELD(field) \
}
else
{
}
/*
 * Enumerate the VMs and add the ones visible to the statistics.
 */
{
{
{
# define MAYBE_RESET_FIELD(field) \
}
}
}
return VINF_SUCCESS;
}
/**
 * VMMR0 request wrapper for GVMMR0ResetStatistics.
 *
 * @returns see GVMMR0ResetStatistics.
 * @param pVM Pointer to the shared VM structure. Optional.
 * @param pReq The request packet.
 */
/* NOTE(review): the function signature line is missing and most statements
   have been stripped from the body in this revision. */
{
/*
 * Validate input and pass it on.
 */
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
}