/* $Id$ */
/** @file
* GVMM - Global VM Manager.
*/
/*
* Copyright (C) 2007 Sun Microsystems, Inc.
*
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/** @page pg_gvmm GVMM - The Global VM Manager
*
 * The Global VM Manager lives in ring-0. Its main function at the moment
* is to manage a list of all running VMs, keep a ring-0 only structure (GVM)
* for each of them, and assign them unique identifiers (so GMM can track
* page owners). The idea for the future is to add an idle priority kernel
* thread that can take care of tasks like page sharing.
*
 * The GVMM will create a ring-0 object for each VM when it is registered;
 * this is both for session cleanup purposes and for having a point where
 * it's possible to implement usage policies later (in SUPR0ObjRegister).
*/
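/*
 * Illustrative sketch (not part of the original file, all names hypothetical):
 * the handle table idea described above, reduced to its core. Every VM gets a
 * slot in a fixed array, the slot index doubles as the VM's unique identifier,
 * entry zero is reserved as the NIL handle, and free entries are chained
 * together through an index field, so allocating a handle is just popping the
 * head of the free chain.
 */
#if 0 /* documentation sketch only */
typedef struct SKETCHHANDLE
{
    uint16_t    iNext;      /* Next entry in the free (or used) chain; 0 terminates it. */
    void       *pvObject;   /* The payload; NULL while the entry sits on the free chain. */
} SKETCHHANDLE;

static SKETCHHANDLE g_aSketchHandles[128];  /* Entry [0] is reserved as the NIL handle. */
static uint16_t     g_iSketchFreeHead;      /* Head of the free chain; 0 when exhausted. */

/** Pops a handle off the free chain; returns 0 (NIL) when the table is full. */
static uint16_t sketchAllocHandle(void)
{
    uint16_t iHandle = g_iSketchFreeHead;
    if (iHandle)
        g_iSketchFreeHead = g_aSketchHandles[iHandle].iNext;
    return iHandle;
}
#endif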
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_GVMM
#include "GVMMR0Internal.h"
#include <iprt/semaphore.h>
/*******************************************************************************
* Structures and Typedefs *
*******************************************************************************/
/**
* Global VM handle.
*/
typedef struct GVMHANDLE
{
    /** The index of the next handle in the list (free or used). (0 is nil.) */
    uint16_t volatile   iNext;
    /** Our own index / handle value. */
    uint16_t            iSelf;
    /** The pointer to the ring-0 only (aka global) VM structure. */
    PGVM                pGVM;
    /** The ring-0 mapping of the shared VM instance data. */
    PVM                 pVM;
    /** The virtual machine object. */
    void               *pvObj;
    /** The session this VM is associated with. */
    PSUPDRVSESSION      pSession;
    /** The ring-0 handle of the EMT0 thread.
     * This is used for ownership checks as well as for looking up a VM handle
     * by thread, for instance in assertions. */
    RTNATIVETHREAD      hEMT0;
    /** The process ID of the handle owner.
     * This is used for access checks. */
    RTPROCESS           ProcId;
} GVMHANDLE;
/** Pointer to a global VM handle. */
typedef GVMHANDLE *PGVMHANDLE;
/** Number of GVM handles (including the NIL handle). */
#if HC_ARCH_BITS == 64
# define GVMM_MAX_HANDLES 1024
#else
# define GVMM_MAX_HANDLES 128
#endif
/**
* The GVMM instance data.
*/
typedef struct GVMM
{
    /** Eyecatcher / magic. */
    uint32_t            u32Magic;
    /** The index of the head of the free handle chain. (0 is nil.) */
    uint16_t volatile   iFreeHead;
    /** The index of the head of the active handle chain. (0 is nil.) */
    uint16_t volatile   iUsedHead;
    /** The number of VMs. */
    uint16_t volatile   cVMs;
//    /** The number of halted EMT threads. */
//    uint16_t volatile   cHaltedEMTs;
    /** The number of EMTs. */
    uint32_t volatile   cEMTs;
    /** The lock used to serialize VM creation, destruction and associated events that
     * isn't performance critical. Owners may acquire the list lock. */
    RTSEMFASTMUTEX      CreateDestroyLock;
    /** The lock used to serialize used list updates and accesses.
     * This indirectly includes scheduling since the scheduler will have to walk the
     * used list to examine running VMs. Owners may not acquire any other locks. */
    RTSEMFASTMUTEX      UsedLock;
    /** The handle array.
     * The size of this array defines the maximum number of currently running VMs.
     * The first entry is unused as it represents the NIL handle. */
    GVMHANDLE           aHandles[GVMM_MAX_HANDLES];

    /** @gcfgm{/GVMM/cEMTsMeansCompany, 32-bit, 0, UINT32_MAX, 1}
     * The number of EMTs that means we no longer consider ourselves alone on a
     * CPU/Core.
     */
    uint32_t            cEMTsMeansCompany;
    /** @gcfgm{/GVMM/MinSleepAlone, 32-bit, 0, 100000000, 750000, ns}
     * The minimum sleep time for when we're alone, in nanoseconds.
     */
    uint32_t            nsMinSleepAlone;
    /** @gcfgm{/GVMM/MinSleepCompany, 32-bit, 0, 100000000, 15000, ns}
     * The minimum sleep time for when we've got company, in nanoseconds.
     */
    uint32_t            nsMinSleepCompany;
    /** @gcfgm{/GVMM/EarlyWakeUp1, 32-bit, 0, 100000000, 25000, ns}
     * The limit for the first round of early wakeups, given in nanoseconds.
     */
    uint32_t            nsEarlyWakeUp1;
    /** @gcfgm{/GVMM/EarlyWakeUp2, 32-bit, 0, 100000000, 50000, ns}
     * The limit for the second round of early wakeups, given in nanoseconds.
     */
    uint32_t            nsEarlyWakeUp2;
} GVMM;
/** Pointer to the GVMM instance data. */
typedef GVMM *PGVMM;

/** The GVMM::u32Magic value (Charlie Haden). */
#define GVMM_MAGIC 0x19370806
/*******************************************************************************
* Global Variables *
*******************************************************************************/
/** Pointer to the GVMM instance data.
 * (Just my general dislike for global variables.) */
static PGVMM g_pGVMM = NULL;
/** Macro for obtaining and validating the g_pGVMM pointer.
* On failure it will return from the invoking function with the specified return value.
*
* @param pGVMM The name of the pGVMM variable.
* @param rc The return value on failure. Use VERR_INTERNAL_ERROR for
* VBox status codes.
*/
#define GVMM_GET_VALID_INSTANCE(pGVMM, rc) \
    do { \
        (pGVMM) = g_pGVMM; \
        AssertPtrReturn((pGVMM), (rc)); \
        AssertMsgReturn((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic), (rc)); \
    } while (0)
/** Macro for obtaining and validating the g_pGVMM pointer, void function variant.
* On failure it will return from the invoking function.
*
* @param pGVMM The name of the pGVMM variable.
*/
#define GVMM_GET_VALID_INSTANCE_VOID(pGVMM) \
    do { \
        (pGVMM) = g_pGVMM; \
        AssertPtrReturnVoid((pGVMM)); \
        AssertMsgReturnVoid((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic)); \
    } while (0)
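/*
 * Usage sketch (hypothetical function, not part of the original file): every
 * exported GVMM entry point starts by fetching and validating the instance
 * pointer with one of the macros above before touching any state, returning
 * VERR_INTERNAL_ERROR (or nothing, for void functions) if it is missing or
 * corrupt.
 */
#if 0 /* documentation sketch only */
GVMMR0DECL(int) GVMMR0SketchGetNumVMs(uint32_t *pcVMs)
{
    PGVMM pGVMM;
    GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
    *pcVMs = pGVMM->cVMs;
    return VINF_SUCCESS;
}
#endif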
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
/**
* Initializes the GVMM.
*
 * This is called while owning the loader semaphore (see supdrvIOCtl_LdrLoad()).
*
* @returns VBox status code.
*/
GVMMR0DECL(int) GVMMR0Init(void)
{
LogFlow(("GVMMR0Init:\n"));
/*
* Allocate and initialize the instance data.
*/
if (!pGVMM)
return VERR_NO_MEMORY;
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
{
/* the nil handle */
/* the tail */
/* the rest */
while (i-- > 1)
{
}
/* The default configuration values. */
    pGVMM->cEMTsMeansCompany = 1; /** @todo should be adjusted relative to the cpu count or something... */
pGVMM->nsMinSleepAlone = 750000 /* ns (0.750 ms) */; /** @todo this should be adjusted to be 75% (or something) of the scheduler granularity... */
return VINF_SUCCESS;
}
}
return rc;
}
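/*
 * Illustrative sketch (not part of the original file): how the handle table
 * free chain referred to above ("the nil handle", "the tail", "the rest") is
 * typically strung together. Entry 0 stays reserved as NIL, the last entry
 * terminates the chain, and every other entry points at its successor.
 */
#if 0 /* documentation sketch only */
static void sketchInitFreeChain(PGVMM pGVMM)
{
    /* The NIL handle: never allocated, terminates both chains. */
    pGVMM->aHandles[0].iSelf = 0;
    pGVMM->aHandles[0].iNext = 0;

    /* The tail of the free chain. */
    unsigned i = RT_ELEMENTS(pGVMM->aHandles) - 1;
    pGVMM->aHandles[i].iSelf = (uint16_t)i;
    pGVMM->aHandles[i].iNext = 0;

    /* The rest, each pointing at the next entry. */
    while (i-- > 1)
    {
        pGVMM->aHandles[i].iSelf = (uint16_t)i;
        pGVMM->aHandles[i].iNext = (uint16_t)(i + 1);
    }

    pGVMM->iFreeHead = 1;
    pGVMM->iUsedHead = 0;
}
#endif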
/**
 * Terminates the GVMM.
*
* This is called while owning the loader semaphore (see supdrvLdrFree()).
* And unless something is wrong, there should be absolutely no VMs
* registered at this point.
*/
GVMMR0DECL(void) GVMMR0Term(void)
{
LogFlow(("GVMMR0Term:\n"));
{
return;
}
{
SUPR0Printf("GVMMR0Term: iUsedHead=%#x! (cVMs=%#x cEMTs=%#x)\n", pGVMM->iUsedHead, pGVMM->cVMs, pGVMM->cEMTs);
}
}
/**
* A quick hack for setting global config values.
*
* @returns VBox status code.
*
* @param pSession The session handle. Used for authentication.
* @param pszName The variable name.
* @param u64Value The new value.
*/
{
/*
* Validate input.
*/
/*
* String switch time!
*/
return VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
int rc = VINF_SUCCESS;
{
if (u64Value <= UINT32_MAX)
else
}
{
if (u64Value <= 100000000)
else
}
{
if (u64Value <= 100000000)
else
}
{
if (u64Value <= 100000000)
else
}
{
if (u64Value <= 100000000)
else
}
else
return rc;
}
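/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * the "string switch" pattern used by the config setter above - compare the
 * name against each known knob, range-check the value, and borrow CFGM-style
 * status codes for the failure cases. Only two of the knobs documented in the
 * GVMM structure are shown here.
 */
#if 0 /* documentation sketch only */
static int sketchSetConfig(PGVMM pGVMM, const char *pszName, uint64_t u64Value)
{
    int rc = VINF_SUCCESS;
    if (!strcmp(pszName, "/GVMM/MinSleepAlone"))
    {
        if (u64Value <= 100000000 /* 100 ms */)
            pGVMM->nsMinSleepAlone = (uint32_t)u64Value;
        else
            rc = VERR_OUT_OF_RANGE;
    }
    else if (!strcmp(pszName, "/GVMM/cEMTsMeansCompany"))
    {
        if (u64Value <= UINT32_MAX)
            pGVMM->cEMTsMeansCompany = (uint32_t)u64Value;
        else
            rc = VERR_OUT_OF_RANGE;
    }
    else
        rc = VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
    return rc;
}
#endif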
/**
* A quick hack for getting global config values.
*
* @returns VBox status code.
*
* @param pSession The session handle. Used for authentication.
* @param pszName The variable name.
 * @param pu64Value Where to store the value.
*/
GVMMR0DECL(int) GVMMR0QueryConfig(PSUPDRVSESSION pSession, const char *pszName, uint64_t *pu64Value)
{
/*
* Validate input.
*/
/*
* String switch time!
*/
return VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
int rc = VINF_SUCCESS;
else
return rc;
}
/**
* Try acquire the 'used' lock.
*
* @returns IPRT status code, see RTSemFastMutexRequest.
* @param pGVMM The GVMM instance data.
*/
{
return rc;
}
/**
* Release the 'used' lock.
*
* @returns IPRT status code, see RTSemFastMutexRelease.
* @param pGVMM The GVMM instance data.
*/
{
return rc;
}
/**
* Try acquire the 'create & destroy' lock.
*
* @returns IPRT status code, see RTSemFastMutexRequest.
* @param pGVMM The GVMM instance data.
*/
{
return rc;
}
/**
* Release the 'create & destroy' lock.
*
 * @returns IPRT status code, see RTSemFastMutexRelease.
* @param pGVMM The GVMM instance data.
*/
{
return rc;
}
/**
* Request wrapper for the GVMMR0CreateVM API.
*
* @returns VBox status code.
* @param pReq The request buffer.
*/
{
/*
* Validate the request.
*/
return VERR_INVALID_POINTER;
return VERR_INVALID_PARAMETER;
return VERR_INVALID_POINTER;
/*
* Execute it.
*/
if (RT_SUCCESS(rc))
{
}
return rc;
}
/**
* Allocates the VM structure and registers it with GVM.
*
 * The caller will become the VM owner and thereby the EMT.
*
* @returns VBox status code.
* @param pSession The support driver session.
* @param cCpus Number of virtual CPUs for the new VM.
* @param ppVM Where to store the pointer to the VM structure.
*
* @thread EMT.
*/
{
if ( cCpus == 0
|| cCpus > VMM_MAX_CPU_COUNT)
return VERR_INVALID_PARAMETER;
/*
* The whole allocation process is protected by the lock.
*/
/*
* Allocate a handle first so we don't waste resources unnecessarily.
*/
if (iHandle)
{
/* consistency checks, a bit paranoid as always. */
{
pHandle->pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, gvmmR0HandleObjDestructor, pGVMM, pHandle);
{
/*
* Move the handle from the free to used list and perform permission checks.
*/
if (RT_SUCCESS(rc))
{
/*
* Allocate the global VM structure (GVM) and initialize it.
*/
if (pGVM)
{
/*
* Allocate the shared VM structure and associated page array.
*/
if (RT_SUCCESS(rc))
{
rc = RTR0MemObjAllocPage(&pGVM->gvmm.s.VMPagesMemObj, cPages * sizeof(SUPPAGE), false /* fExecutable */);
if (RT_SUCCESS(rc))
{
{
}
/*
* Map them into ring-3.
*/
if (RT_SUCCESS(rc))
{
/* Initialize all the VM pointers. */
{
}
if (RT_SUCCESS(rc))
{
/* complete the handle - take the UsedLock sem just to be careful. */
return VINF_SUCCESS;
}
}
}
}
}
}
/* else: The user wasn't permitted to create this VM. */
/*
* The handle will be freed by gvmmR0HandleObjDestructor as we release the
 * object reference here. A little extra mess because of the non-recursive lock.
*/
return rc;
}
rc = VERR_NO_MEMORY;
}
else
}
else
return rc;
}
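/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * the overall shape of the creation path above. Grab the create/destroy lock,
 * pop a handle off the free chain before committing any other resources,
 * register a ring-0 object for the session so cleanup can reclaim everything,
 * and only then allocate the GVM and shared VM structures for cCpus virtual
 * CPUs. The error status for a full handle table is a placeholder; the elided
 * steps are summarised in the comment.
 */
#if 0 /* documentation sketch only */
static int sketchCreateVM(PGVMM pGVMM, PSUPDRVSESSION pSession, uint32_t cCpus, PVM *ppVM)
{
    int rc = RTSemFastMutexRequest(pGVMM->CreateDestroyLock);
    AssertRCReturn(rc, rc);

    uint16_t const iHandle = pGVMM->iFreeHead;
    if (iHandle)
    {
        PGVMHANDLE pHandle = &pGVMM->aHandles[iHandle];
        pGVMM->iFreeHead = pHandle->iNext;

        /* The ring-0 object ties the handle to the session for cleanup purposes. */
        pHandle->pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, gvmmR0HandleObjDestructor, pGVMM, pHandle);
        if (pHandle->pvObj)
        {
            /* ... move the handle to the used list, allocate and wire up the
               GVM and shared VM structures, map the latter into ring-3 and
               publish it via *ppVM on success; on failure release the object
               reference so the destructor puts the handle back ... */
            rc = VINF_SUCCESS;
        }
        else
            rc = VERR_NO_MEMORY;
    }
    else
        rc = VERR_NO_MEMORY; /* the handle table is full */

    RTSemFastMutexRelease(pGVMM->CreateDestroyLock);
    return rc;
}
#endif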
/**
* Initializes the per VM data belonging to GVMM.
*
* @param pGVM Pointer to the global VM structure.
*/
{
{
}
}
/**
* Does the VM initialization.
*
* @returns VBox status code.
* @param pVM Pointer to the shared VM structure.
*/
{
/*
* Validate the VM structure, state and handle.
*/
if (RT_SUCCESS(rc))
{
{
{
if (RT_FAILURE(rc))
{
break;
}
}
}
else
}
return rc;
}
/**
* Indicates that we're done with the ring-0 initialization
* of the VM.
*
* @param pVM Pointer to the shared VM structure.
* @thread EMT(0)
*/
{
/* Validate the VM structure, state and handle. */
/* Set the indicator. */
}
/**
* Indicates that we're doing the ring-0 termination of the VM.
*
* @returns true if termination hasn't been done already, false if it has.
* @param pVM Pointer to the shared VM structure.
* @param pGVM Pointer to the global VM structure. Optional.
* @thread EMT(0)
*/
{
/* Validate the VM structure, state and handle. */
AssertPtrNullReturn(pGVM, false);
if (!pGVM)
{
AssertRCReturn(rc, false);
}
/* Set the indicator. */
return false;
return true;
}
/**
* Destroys the VM, freeing all associated resources (the ring-0 ones anyway).
*
 * This is called from vmR3DestroyFinalBit and from an error path in VMR3Create,
* and the caller is not the EMT thread, unfortunately. For security reasons, it
* would've been nice if the caller was actually the EMT thread or that we somehow
* could've associated the calling thread with the VM up front.
*
* @returns VBox status code.
 * @param pVM Pointer to the shared VM structure.
*
* @thread EMT(0) if it's associated with the VM, otherwise any thread.
*/
{
/*
* Validate the VM structure, state and caller.
*/
AssertMsgReturn(pVM->enmVMState >= VMSTATE_CREATING && pVM->enmVMState <= VMSTATE_TERMINATED, ("%d\n", pVM->enmVMState), VERR_WRONG_ORDER);
/*
* Lookup the handle and destroy the object.
* Since the lock isn't recursive and we'll have to leave it before dereferencing the
* object, we take some precautions against racing callers just in case...
*/
/* be careful here because we might theoretically be racing someone else cleaning up. */
{
}
else
{
SUPR0Printf("GVMMR0DestroyVM: pHandle=%p:{.pVM=%p, .hEMT0=%p, .ProcId=%u, .pvObj=%p} pVM=%p hSelf=%p\n",
}
return rc;
}
/**
* Performs VM cleanup task as part of object destruction.
*
* @param pGVM The GVM pointer.
*/
{
{
{
LogFlow(("gvmmR0CleanupVM: Calling VMMR0TermVM\n"));
}
else
}
}
/**
* Handle destructor.
*
 * @param pvGVMM The GVMM instance pointer.
* @param pvHandle The handle pointer.
*/
{
/*
* Some quick, paranoid, input validation.
*/
if ( !iHandle
{
return;
}
/*
 * This is a tad slow but a doubly linked list is too much hassle.
*/
{
return;
}
else
{
while (iPrev)
{
{
SUPR0Printf("GVM: used list index %d is out of range!\n", iPrev);
return;
}
if (RT_UNLIKELY(c-- <= 0))
{
iPrev = 0;
break;
}
break;
}
if (!iPrev)
{
return;
}
}
/*
* Do the global cleanup round.
*/
{
/*
* Do the GVMM cleanup - must be done last.
*/
/* The VM and VM pages mappings/allocations. */
{
}
{
}
{
}
{
}
{
{
}
}
/* the GVM structure itself. */
/* Re-acquire the UsedLock before freeing the handle since we're updating handle fields. */
}
/* else: GVMMR0CreateVM cleanup. */
/*
* Free the handle.
*/
LogFlow(("gvmmR0HandleObjDestructor: returns\n"));
}
/**
* Registers the calling thread as the EMT of a Virtual CPU.
*
* Note that VCPU 0 is automatically registered during VM creation.
*
* @returns VBox status code
* @param pVM The shared VM structure (the ring-0 mapping).
* @param idCpu VCPU id.
*/
{
/*
* Validate the VM structure, state and handle.
*/
if (RT_FAILURE(rc))
return rc;
return VINF_SUCCESS;
}
/**
* Lookup a GVM structure by its handle.
*
* @returns The GVM pointer on success, NULL on failure.
* @param hGVM The global VM handle. Asserts on bad handle.
*/
{
/*
* Validate.
*/
/*
* Look it up.
*/
}
/**
* Lookup a GVM structure by the shared VM structure.
*
* The calling thread must be in the same process as the VM. All current lookups
* are by threads inside the same process, so this will not be an issue.
*
* @returns VBox status code.
* @param pVM The shared VM structure (the ring-0 mapping).
* @param ppGVM Where to store the GVM pointer.
* @param ppGVMM Where to store the pointer to the GVMM instance data.
* @param fTakeUsedLock Whether to take the used lock or not.
* Be very careful if not taking the lock as it's possible that
* the VM will disappear then.
*
 * @remark This will not assert on an invalid pVM but try to return silently.
*/
{
/*
* Validate.
*/
return VERR_INVALID_POINTER;
return VERR_INVALID_POINTER;
return VERR_INVALID_HANDLE;
/*
* Look it up.
*/
if (fTakeUsedLock)
{
{
return VERR_INVALID_HANDLE;
}
}
else
{
return VERR_INVALID_HANDLE;
return VERR_INVALID_HANDLE;
return VERR_INVALID_HANDLE;
return VERR_INVALID_HANDLE;
return VERR_INVALID_HANDLE;
}
return VINF_SUCCESS;
}
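/*
 * Illustrative sketch (not part of the original file; assumes the shared VM
 * structure carries its own handle in pVM->hSelf, cf. the hSelf in the
 * destroy-path log message above): the lock-free flavour of the lookup - use
 * the self handle as an index into the handle table and cross-check that the
 * entry still points back at the caller's pVM and still has its ring-0 object.
 */
#if 0 /* documentation sketch only */
static int sketchByVM(PGVMM pGVMM, PVM pVM, PGVM *ppGVM)
{
    uint32_t const hGVM = pVM->hSelf;
    if (RT_UNLIKELY(!hGVM || hGVM >= GVMM_MAX_HANDLES))
        return VERR_INVALID_HANDLE;

    PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
    if (RT_UNLIKELY(   pHandle->pVM != pVM
                    || !pHandle->pvObj))
        return VERR_INVALID_HANDLE;

    *ppGVM = pHandle->pGVM;
    return VINF_SUCCESS;
}
#endif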
/**
* Lookup a GVM structure by the shared VM structure.
*
* @returns The GVM pointer on success, NULL on failure.
* @param pVM The shared VM structure (the ring-0 mapping).
*
 * @remark This will not take the 'used'-lock because it doesn't support
 * nesting and this function will be used from under the lock.
*/
{
if (RT_SUCCESS(rc))
return pGVM;
return NULL;
}
/**
 * Lookup a GVM structure by the shared VM structure and ensure that the
* caller is an EMT thread.
*
* @returns VBox status code.
* @param pVM The shared VM structure (the ring-0 mapping).
* @param idCpu The Virtual CPU ID of the calling EMT.
* @param ppGVM Where to store the GVM pointer.
* @param ppGVMM Where to store the pointer to the GVMM instance data.
* @thread EMT
*
* @remark This will assert in all failure paths.
*/
{
/*
* Validate.
*/
/*
* Look it up.
*/
return VINF_SUCCESS;
}
/**
* Lookup a GVM structure by the shared VM structure
 * and ensure that the caller is the EMT thread.
*
* @returns VBox status code.
* @param pVM The shared VM structure (the ring-0 mapping).
* @param idCpu The Virtual CPU ID of the calling EMT.
* @param ppGVM Where to store the GVM pointer.
* @thread EMT
*/
{
}
/**
* Lookup a VM by its global handle.
*
* @returns The VM handle on success, NULL on failure.
* @param hGVM The global VM handle. Asserts on bad handle.
*/
{
}
/**
* Looks up the VM belonging to the specified EMT thread.
*
* This is used by the assertion machinery in VMMR0.cpp to avoid causing
* unnecessary kernel panics when the EMT thread hits an assertion. The
 * caller may or may not be an EMT thread.
*
* @returns The VM handle on success, NULL on failure.
* @param hEMT The native thread handle of the EMT.
 * NIL_RTNATIVETHREAD means the current thread.
*/
{
/*
 * No assertions here as we're usually called in an AssertMsgN or
* RTAssert* context.
*/
return NULL;
if (hEMT == NIL_RTNATIVETHREAD)
hEMT = RTThreadNativeSelf();
/*
* Search the handles in a linear fashion as we don't dare to take the lock (assert).
*/
{
{
/* This is fairly safe with the current process per VM approach. */
if ( cCpus < 1
|| cCpus > VMM_MAX_CPU_COUNT)
continue;
}
}
return NULL;
}
/**
 * This will wake up expired and soon-to-be expired VMs.
 *
 * @returns Number of VMs that have been woken up.
* @param pGVMM Pointer to the GVMM instance data.
* @param u64Now The current time.
*/
{
/** @todo Rewrite this algorithm. See performance defect XYZ. */
/*
* The first pass will wake up VMs which have actually expired
* and look for VMs that should be woken up in the 2nd and 3rd passes.
*/
unsigned cWoken = 0;
unsigned cHalted = 0;
unsigned cTodo2nd = 0;
unsigned cTodo3rd = 0;
{
{
{
if (u64)
{
{
{
cWoken++;
}
}
else
{
cHalted++;
cTodo2nd++;
cTodo3rd++;
}
}
}
}
}
if (cTodo2nd)
{
{
{
{
{
{
cWoken++;
}
}
}
}
}
}
if (cTodo3rd)
{
{
{
{
{
{
cWoken++;
}
}
}
}
}
}
return cWoken;
}
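/*
 * Illustrative sketch (hypothetical names, not part of the original file):
 * the three-pass idea described above, reduced to a plain array of expiry
 * times. Pass 1 wakes everything that has already expired and counts sleepers
 * falling inside the two early wake-up windows; the later passes only run
 * when pass 1 saw such candidates. Signalling the EMT's halt semaphore is
 * elided and represented by clearing the expiry.
 */
#if 0 /* documentation sketch only */
static unsigned sketchDoWakeUps(uint64_t *pau64HaltExpire, unsigned cSleepers, uint64_t u64Now,
                                uint64_t nsEarlyWakeUp1, uint64_t nsEarlyWakeUp2)
{
    unsigned cWoken   = 0;
    unsigned cTodo2nd = 0;
    unsigned cTodo3rd = 0;

    /* Pass 1: wake the expired sleepers, count the early wake-up candidates. */
    for (unsigned i = 0; i < cSleepers; i++)
    {
        uint64_t const u64 = pau64HaltExpire[i];
        if (!u64)
            continue;                   /* not halted */
        if (u64 <= u64Now)
        {
            pau64HaltExpire[i] = 0;     /* ... signal the EMT's halt semaphore here ... */
            cWoken++;
        }
        else if (u64 <= u64Now + nsEarlyWakeUp1)
            cTodo2nd++;
        else if (u64 <= u64Now + nsEarlyWakeUp2)
            cTodo3rd++;
    }

    /* Pass 2: sleepers expiring within the first early wake-up window. */
    if (cTodo2nd)
        for (unsigned i = 0; i < cSleepers; i++)
            if (pau64HaltExpire[i] && pau64HaltExpire[i] <= u64Now + nsEarlyWakeUp1)
            {
                pau64HaltExpire[i] = 0;
                cWoken++;
            }

    /* Pass 3: the same for the second, wider window. */
    if (cTodo3rd)
        for (unsigned i = 0; i < cSleepers; i++)
            if (pau64HaltExpire[i] && pau64HaltExpire[i] <= u64Now + nsEarlyWakeUp2)
            {
                pau64HaltExpire[i] = 0;
                cWoken++;
            }

    return cWoken;
}
#endif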
/**
* Halt the EMT thread.
*
* @returns VINF_SUCCESS normal wakeup (timeout or kicked by other thread).
* VERR_INTERRUPTED if a signal was scheduled for the thread.
* @param pVM Pointer to the shared VM structure.
* @param idCpu The Virtual CPU ID of the calling EMT.
* @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time.
* @thread EMT(idCpu).
*/
{
/*
* Validate the VM structure, state and handle.
*/
if (RT_FAILURE(rc))
return rc;
/*
* Take the UsedList semaphore, get the current time
* and check if anyone needs waking up.
* Interrupts must NOT be disabled at this point because we ask for GIP time!
*/
/*
* Go to sleep if we must...
*/
if ( u64Now < u64ExpireGipTime
: pGVMM->nsMinSleepAlone))
{
if (rc == VERR_TIMEOUT)
{
rc = VINF_SUCCESS;
}
}
else
{
}
/* Make sure false wake up calls (gvmmR0SchedDoWakeUps) cause us to spin. */
return rc;
}
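/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * the sleep decision made by the halt path above. The EMT only goes to sleep
 * if the requested expiry is far enough away - the threshold being
 * nsMinSleepCompany when enough other EMTs are around and nsMinSleepAlone
 * otherwise - and a VERR_TIMEOUT from the wait is folded into VINF_SUCCESS
 * since timing out is the normal way to wake up. RTTimeNanoTS() stands in for
 * the GIP time source here.
 */
#if 0 /* documentation sketch only */
static int sketchHaltEMT(PGVMM pGVMM, RTSEMEVENTMULTI hHaltEvent, uint64_t u64ExpireGipTime)
{
    uint64_t const u64Now = RTTimeNanoTS();
    uint64_t const cNsMin = pGVMM->cEMTs >= pGVMM->cEMTsMeansCompany
                          ? pGVMM->nsMinSleepCompany
                          : pGVMM->nsMinSleepAlone;
    int rc = VINF_SUCCESS;
    if (    u64Now < u64ExpireGipTime
        &&  u64ExpireGipTime - u64Now > cNsMin)
    {
        /* Sleep; round the interval up to whole milliseconds for the wait API. */
        uint32_t const cMillies = (uint32_t)((u64ExpireGipTime - u64Now + 999999) / 1000000);
        rc = RTSemEventMultiWaitNoResume(hHaltEvent, cMillies);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
    }
    return rc;
}
#endif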
/**
* Worker for GVMMR0SchedWakeUp and GVMMR0SchedWakeUpAndPokeCpus that wakes up
 * a sleeping EMT.
*
* @retval VINF_SUCCESS if successfully woken up.
* @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
*
* @param pGVM The global (ring-0) VM structure.
* @param pGVCpu The global (ring-0) VCPU structure.
*/
{
/*
 * Signal the semaphore regardless of whether it's currently blocked on it.
 *
 * The reason for this is that there is absolutely no way we can be 100%
 * certain that it isn't *about* to go to sleep on it and just got
 * delayed a bit en route. So, we will always signal the semaphore when
 * it is flagged as halted in the VMM.
*/
/** @todo we can optimize some of that by means of the pVCpu->enmState now. */
int rc;
{
rc = VINF_SUCCESS;
}
else
{
}
return rc;
}
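/*
 * Illustrative sketch (hypothetical parameter names, not part of the original
 * file): the "always signal" policy described above. The halt event semaphore
 * is signalled unconditionally; the return code merely reports whether the
 * EMT was actually flagged as halted.
 */
#if 0 /* documentation sketch only */
static int sketchWakeUpOne(RTSEMEVENTMULTI hHaltEvent, uint64_t volatile *pu64HaltExpire)
{
    int rc;
    if (*pu64HaltExpire)
    {
        rc = VINF_SUCCESS;
        ASMAtomicXchgU64(pu64HaltExpire, 0);
    }
    else
        rc = VINF_GVM_NOT_BLOCKED;

    int rc2 = RTSemEventMultiSignal(hHaltEvent);
    AssertRC(rc2);
    return rc;
}
#endif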
/**
* Wakes up the halted EMT thread so it can service a pending request.
*
* @returns VBox status code.
* @retval VINF_SUCCESS if successfully woken up.
* @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
*
* @param pVM Pointer to the shared VM structure.
* @param idCpu The Virtual CPU ID of the EMT to wake up.
* @param fTakeUsedLock Take the used lock or not
* @thread Any but EMT.
*/
{
/*
* Validate input and take the UsedLock.
*/
if (RT_SUCCESS(rc))
{
{
/*
* Do the actual job.
*/
if (fTakeUsedLock)
{
/*
* While we're here, do a round of scheduling.
*/
}
}
else
if (fTakeUsedLock)
{
}
}
return rc;
}
/**
* Wakes up the halted EMT thread so it can service a pending request.
*
* @returns VBox status code.
* @retval VINF_SUCCESS if successfully woken up.
* @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
*
* @param pVM Pointer to the shared VM structure.
* @param idCpu The Virtual CPU ID of the EMT to wake up.
* @thread Any but EMT.
*/
{
}
/**
* Worker common to GVMMR0SchedPoke and GVMMR0SchedWakeUpAndPokeCpus that pokes
* the Virtual CPU if it's still busy executing guest code.
*
* @returns VBox status code.
* @retval VINF_SUCCESS if poked successfully.
* @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
*
* @param pGVM The global (ring-0) VM structure.
* @param pVCpu The Virtual CPU handle.
*/
{
if ( idHostCpu == NIL_RTCPUID
{
return VINF_GVM_NOT_BUSY_IN_GC;
}
return VINF_SUCCESS;
}
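/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * the poke described above - if the VCPU has a valid host CPU id recorded
 * while executing guest code, kick that CPU with an IPI via RTMpPokeCpu so it
 * exits guest context; otherwise report that it wasn't busy in GC.
 */
#if 0 /* documentation sketch only */
static int sketchPokeCpu(RTCPUID idHostCpu)
{
    if (idHostCpu == NIL_RTCPUID)
        return VINF_GVM_NOT_BUSY_IN_GC;
    RTMpPokeCpu(idHostCpu);
    return VINF_SUCCESS;
}
#endif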
/**
* Pokes an EMT if it's still busy running guest code.
*
* @returns VBox status code.
* @retval VINF_SUCCESS if poked successfully.
* @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
*
* @param pVM Pointer to the shared VM structure.
* @param idCpu The ID of the virtual CPU to poke.
* @param fTakeUsedLock Take the used lock or not
*/
{
/*
* Validate input and take the UsedLock.
*/
if (RT_SUCCESS(rc))
{
else
if (fTakeUsedLock)
{
}
}
return rc;
}
/**
* Pokes an EMT if it's still busy running guest code.
*
* @returns VBox status code.
* @retval VINF_SUCCESS if poked successfully.
* @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
*
* @param pVM Pointer to the shared VM structure.
* @param idCpu The ID of the virtual CPU to poke.
*/
{
}
/**
 * Wakes up a set of halted EMT threads so they can service pending requests.
*
* @returns VBox status code, no informational stuff.
*
* @param pVM Pointer to the shared VM structure.
* @param pSleepSet The set of sleepers to wake up.
* @param pPokeSet The set of CPUs to poke.
*/
{
/*
* Validate input and take the UsedLock.
*/
if (RT_SUCCESS(rc))
{
rc = VINF_SUCCESS;
while (idCpu-- > 0)
{
/* Don't try poke or wake up ourselves. */
continue;
/* just ignore errors for now. */
}
}
return rc;
}
/**
* VMMR0 request wrapper for GVMMR0SchedWakeUpAndPokeCpus.
*
* @returns see GVMMR0SchedWakeUpAndPokeCpus.
* @param pVM Pointer to the shared VM structure.
* @param pReq The request packet.
*/
{
/*
* Validate input and pass it on.
*/
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
}
/**
* Poll the schedule to see if someone else should get a chance to run.
*
* This is a bit hackish and will not work too well if the machine is
* under heavy load from non-VM processes.
*
* @returns VINF_SUCCESS if not yielded.
* VINF_GVM_YIELDED if an attempt to switch to a different VM task was made.
* @param pVM Pointer to the shared VM structure.
* @param idCpu The Virtual CPU ID of the calling EMT.
* @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time.
* @param fYield Whether to yield or not.
* This is for when we're spinning in the halt loop.
* @thread EMT(idCpu).
*/
{
/*
* Validate input.
*/
if (RT_SUCCESS(rc))
{
if (!fYield)
else
{
/** @todo implement this... */
}
}
return rc;
}
/**
* Retrieves the GVMM statistics visible to the caller.
*
* @returns VBox status code.
*
* @param pStats Where to put the statistics.
* @param pSession The current session.
* @param pVM The VM to obtain statistics for. Optional.
*/
{
/*
* Validate input.
*/
/*
* Take the lock and get the VM statistics.
*/
if (pVM)
{
if (RT_FAILURE(rc))
return rc;
}
else
{
}
/*
 * Enumerate the VMs and add the ones visible to the statistics.
*/
{
{
}
}
return VINF_SUCCESS;
}
/**
* VMMR0 request wrapper for GVMMR0QueryStatistics.
*
* @returns see GVMMR0QueryStatistics.
* @param pVM Pointer to the shared VM structure. Optional.
* @param pReq The request packet.
*/
{
/*
* Validate input and pass it on.
*/
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
}
/**
* Resets the specified GVMM statistics.
*
* @returns VBox status code.
*
* @param pStats Which statistics to reset, that is, non-zero fields indicates which to reset.
* @param pSession The current session.
* @param pVM The VM to reset statistics for. Optional.
*/
{
/*
* Validate input.
*/
/*
* Take the lock and get the VM statistics.
*/
if (pVM)
{
if (RT_FAILURE(rc))
return rc;
# define MAYBE_RESET_FIELD(field) \
}
else
{
}
/*
 * Enumerate the VMs and add the ones visible to the statistics.
*/
{
{
{
# define MAYBE_RESET_FIELD(field) \
}
}
}
return VINF_SUCCESS;
}
/**
* VMMR0 request wrapper for GVMMR0ResetStatistics.
*
* @returns see GVMMR0ResetStatistics.
* @param pVM Pointer to the shared VM structure. Optional.
* @param pReq The request packet.
*/
{
/*
* Validate input and pass it on.
*/
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
}