// VMM.cpp revision 52194993b9d83198a057fea598ec8506e1a3bbaa
/* $Id$ */
/** @file
* VMM - The Virtual Machine Monitor Core.
*/
/*
* Copyright (C) 2006-2007 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
//#define NO_SUPCALLR0VMM
/** @page pg_vmm VMM - The Virtual Machine Monitor
*
* The VMM component is two things at the moment, it's a component doing a few
* management and routing tasks, and it's the whole virtual machine monitor
* thing. For hysterical reasons, it is not doing all the management that one
* would expect, this is instead done by @ref pg_vm. We'll address this
* misdesign eventually.
*
* @see grp_vmm, grp_vm
*
*
* @section sec_vmmstate VMM State
*
* @image html VM_Statechart_Diagram.gif
*
* To be written.
*
*
* @subsection subsec_vmm_init VMM Initialization
*
* To be written.
*
*
* @subsection subsec_vmm_term VMM Termination
*
* To be written.
*
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/pdmqueue.h>
#include "VMMInternal.h"
#include "VMMSwitcher/VMMSwitcher.h"
/** The saved state version. */
#define VMM_SAVED_STATE_VERSION 3
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
/**
* Initializes the VMM.
*
* @returns VBox status code.
* @param pVM The VM to operate on.
*/
{
LogFlow(("VMMR3Init\n"));
/*
* Assert alignment, sizes and order.
*/
/*
* Init basic VM VMM members.
*/
if (rc == VERR_CFGM_VALUE_NOT_FOUND)
pVM->vmm.s.cYieldEveryMillies = 23; /* Value arrived at after experimenting with the grub boot prompt. */
//pVM->vmm.s.cYieldEveryMillies = 8; //debugging
else
AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Rrc\n", rc), rc);
/*
* Initialize the VMM sync critical section.
*/
/* GC switchers are enabled by default. Turned off by HWACCM. */
/*
* Register the saved state data unit.
*/
rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
if (RT_FAILURE(rc))
return rc;
/*
* Register the Ring-0 VM handle with the session for fast ioctl calls.
*/
if (RT_FAILURE(rc))
return rc;
/*
* Init various sub-components.
*/
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
{
#ifdef VBOX_WITH_NMI
/*
* Allocate mapping for the host APIC.
*/
if (RT_SUCCESS(rc))
{
}
#endif
if (RT_SUCCESS(rc))
{
/*
* Debug info and statistics.
*/
return VINF_SUCCESS;
}
}
/** @todo: Need failure cleanup. */
//more todo in here?
//if (RT_SUCCESS(rc))
//{
//}
//int rc2 = vmmR3TermCoreCode(pVM);
//AssertRC(rc2));
}
return rc;
}
/**
* Allocate & setup the VMM RC stack(s) (for EMTs).
*
* The stacks are also used for long jumps in Ring-0.
*
* @returns VBox status code.
* @param pVM Pointer to the shared VM structure.
*
* @remarks The optional guard page gets it protection setup up during R3 init
* completion because of init order issues.
*/
{
int rc = VINF_SUCCESS;
{
#ifdef VBOX_STRICT_VMM_STACK
rc = MMR3HyperAllocOnceNoRel(pVM, VMM_STACK_SIZE + PAGE_SIZE + PAGE_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVCpu->vmm.s.pbEMTStackR3);
#else
rc = MMR3HyperAllocOnceNoRel(pVM, VMM_STACK_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVCpu->vmm.s.pbEMTStackR3);
#endif
if (RT_SUCCESS(rc))
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
/* MMHyperR3ToR0 returns R3 when not doing hardware assisted virtualization. */
if (!VMMIsHwVirtExtForced(pVM))
else
#endif
}
}
return rc;
}
/**
* Initialize the loggers.
*
* @returns VBox status code.
* @param pVM Pointer to the shared VM structure.
*/
{
int rc;
/*
* Allocate RC & R0 Logger instances (they are finalized in the relocator).
*/
#ifdef LOG_ENABLED
if (pLogger)
{
rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCLoggerR3);
if (RT_FAILURE(rc))
return rc;
# ifdef VBOX_WITH_R0_LOGGING
if (RT_FAILURE(rc))
return rc;
//pVM->vmm.s.pR0LoggerR3->fCreated = false;
# endif
}
#endif /* LOG_ENABLED */
#ifdef VBOX_WITH_RC_RELEASE_LOGGING
/*
* Allocate RC release logger instances (finalized in the relocator).
*/
if (pRelLogger)
{
rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCRelLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCRelLoggerR3);
if (RT_FAILURE(rc))
return rc;
}
#endif /* VBOX_WITH_RC_RELEASE_LOGGING */
return VINF_SUCCESS;
}
/**
* VMMR3Init worker that register the statistics with STAM.
*
* @param pVM The shared VM structure.
*/
{
/*
* Statistics.
*/
STAM_REG(pVM, &pVM->vmm.s.StatRunRC, STAMTYPE_COUNTER, "/VMM/RunRC", STAMUNIT_OCCURENCES, "Number of context switches.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetNormal, STAMTYPE_COUNTER, "/VMM/RZRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterrupt, STAMTYPE_COUNTER, "/VMM/RZRet/Interrupt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptHyper, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptHyper", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetGuestTrap, STAMTYPE_COUNTER, "/VMM/RZRet/GuestTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitch, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitch", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitchInt, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitchInt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetExceptionPrivilege, STAMTYPE_COUNTER, "/VMM/RZRet/ExceptionPrivilege", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EXCEPTION_PRIVILEGED returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetStaleSelector, STAMTYPE_COUNTER, "/VMM/RZRet/StaleSelector", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetIRETTrap, STAMTYPE_COUNTER, "/VMM/RZRet/IRETTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/Emulate", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOBlockEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/EmulateIOBlock", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_IO_BLOCK returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/PatchEmulate", STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetIORead, STAMTYPE_COUNTER, "/VMM/RZRet/IORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_READ returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/IOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_WRITE returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIORead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_WRITE returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOReadWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ_WRITE returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchRead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchRead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetLDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/LDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetGDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/GDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetIDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/IDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetTSSFault, STAMTYPE_COUNTER, "/VMM/RZRet/TSSFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetPDFault, STAMTYPE_COUNTER, "/VMM/RZRet/PDFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_PD_FAULT returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetCSAMTask, STAMTYPE_COUNTER, "/VMM/RZRet/CSAMTask", STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetSyncCR3, STAMTYPE_COUNTER, "/VMM/RZRet/SyncCR", STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetMisc, STAMTYPE_COUNTER, "/VMM/RZRet/Misc", STAMUNIT_OCCURENCES, "Number of misc returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchInt3, STAMTYPE_COUNTER, "/VMM/RZRet/PatchInt3", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchPF, STAMTYPE_COUNTER, "/VMM/RZRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchGP, STAMTYPE_COUNTER, "/VMM/RZRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/RZRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetPageOverflow, STAMTYPE_COUNTER, "/VMM/RZRet/InvlpgOverflow", STAMUNIT_OCCURENCES, "Number of VERR_REM_FLUSHED_PAGES_OVERFLOW returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/RZRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetTimerPending, STAMTYPE_COUNTER, "/VMM/RZRet/TimerPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptPending, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/RZRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMChangeMode, STAMTYPE_COUNTER, "/VMM/RZRet/PGMChangeMode", STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetEmulHlt, STAMTYPE_COUNTER, "/VMM/RZRet/EmulHlt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_INSTR_HLT returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest, STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetCallHost, STAMTYPE_COUNTER, "/VMM/RZCallR3/Misc", STAMUNIT_OCCURENCES, "Number of Other ring-3 calls.");
STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMLock, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMLock", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PDM_LOCK calls.");
STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMQueueFlush, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMQueueFlush", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PDM_QUEUE_FLUSH calls.");
STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMLock, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMLock", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PGM_LOCK calls.");
STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMPoolGrow, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMPoolGrow", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PGM_POOL_GROW calls.");
STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMMapChunk, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMMapChunk", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PGM_MAP_CHUNK calls.");
STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMAllocHandy, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMAllocHandy", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES calls.");
STAM_REG(pVM, &pVM->vmm.s.StatRZCallRemReplay, STAMTYPE_COUNTER, "/VMM/RZCallR3/REMReplay", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS calls.");
STAM_REG(pVM, &pVM->vmm.s.StatRZCallLogFlush, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMMLogFlush", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_VMM_LOGGER_FLUSH calls.");
STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMSetError", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_VM_SET_ERROR calls.");
STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetRuntimeError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMRuntimeError", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_VM_SET_RUNTIME_ERROR calls.");
}
/**
* Initializes the per-VCPU VMM.
*
* @returns VBox status code.
* @param pVM The VM to operate on.
*/
{
LogFlow(("VMMR3InitCPU\n"));
return VINF_SUCCESS;
}
/**
* Ring-3 init finalizing.
*
* @returns VBox status code.
* @param pVM The VM handle.
*/
{
int rc = VINF_SUCCESS;
{
#ifdef VBOX_STRICT_VMM_STACK
/*
* Two inaccessible pages at each sides of the stack to catch over/under-flows.
*/
#endif
/*
* Set page attributes to r/w for stack pages.
*/
rc = PGMMapSetPage(pVM, pVCpu->vmm.s.pbEMTStackRC, VMM_STACK_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
if (RT_FAILURE(rc))
break;
}
if (RT_SUCCESS(rc))
{
/*
* Create the EMT yield timer.
*/
rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, "EMT Yielder", &pVM->vmm.s.pYieldTimer);
if (RT_SUCCESS(rc))
}
#ifdef VBOX_WITH_NMI
/*
*/
if (RT_SUCCESS(rc))
#endif
return rc;
}
/**
* Initializes the R0 VMM.
*
* @returns VBox status code.
* @param pVM The VM to operate on.
*/
{
int rc;
/*
* Initialize the ring-0 logger if we haven't done so yet.
*/
{
if (RT_FAILURE(rc))
return rc;
}
/*
* Call Ring-0 entry with init code.
*/
for (;;)
{
#ifdef NO_SUPCALLR0VMM
//rc = VERR_GENERAL_FAILURE;
rc = VINF_SUCCESS;
#else
#endif
if (rc != VINF_VMM_CALL_HOST)
break;
break;
/* Resume R0 */
}
{
if (RT_SUCCESS(rc))
}
return rc;
}
/**
* Initializes the RC VMM.
*
* @returns VBox status code.
* @param pVM The VM to operate on.
*/
{
/* In VMX mode, there's no need to init RC. */
return VINF_SUCCESS;
/*
* Call VMMGCInit():
* -# resolve the address.
* -# setup stackframe and EIP to use the trampoline.
* -# do a generic hypervisor call.
*/
if (RT_SUCCESS(rc))
{
for (;;)
{
#ifdef NO_SUPCALLR0VMM
//rc = VERR_GENERAL_FAILURE;
rc = VINF_SUCCESS;
#else
#endif
#ifdef LOG_ENABLED
if ( pLogger
&& pLogger->offScratch > 0)
#endif
#ifdef VBOX_WITH_RC_RELEASE_LOGGING
#endif
if (rc != VINF_VMM_CALL_HOST)
break;
break;
}
{
}
}
return rc;
}
/**
* Terminate the VMM bits.
*
* @returns VINF_SUCCESS.
* @param pVM The VM handle.
*/
{
/*
* Call Ring-0 entry with termination code.
*/
int rc;
for (;;)
{
#ifdef NO_SUPCALLR0VMM
//rc = VERR_GENERAL_FAILURE;
rc = VINF_SUCCESS;
#else
#endif
if (rc != VINF_VMM_CALL_HOST)
break;
break;
/* Resume R0 */
}
{
if (RT_SUCCESS(rc))
}
#ifdef VBOX_STRICT_VMM_STACK
/*
* Make the two stack guard pages present again.
*/
RTMemProtect(pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
#endif
return rc;
}
/**
* Terminates the per-VCPU VMM.
*
* Termination means cleaning up and freeing all resources,
* the VM it self is at this point powered off or suspended.
*
* @returns VBox status code.
* @param pVM The VM to operate on.
*/
{
return VINF_SUCCESS;
}
/**
* Applies relocations to data and code managed by this
* component. This function will be called at init and
* whenever the VMM need to relocate it self inside the GC.
*
* The VMM will need to apply relocations to the core code.
*
* @param pVM The VM handle.
* @param offDelta The relocation delta.
*/
{
/*
* Recalc the RC address.
*/
/*
* The stack.
*/
{
}
/*
* All the switchers.
*/
/*
* Get other RC entry points.
*/
int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMRCResumeGuest);
rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMRCResumeGuestV86);
/*
* Update the logger.
*/
}
/**
* Updates the settings for the RC and R0 loggers.
*
* @returns VBox status code.
* @param pVM The VM handle.
*/
{
/*
* Simply clone the logger instance (for RC).
*/
int rc = VINF_SUCCESS;
RTRCPTR RCPtrLoggerFlush = 0;
#ifdef VBOX_WITH_RC_RELEASE_LOGGING
#endif
)
{
}
{
}
#ifdef VBOX_WITH_RC_RELEASE_LOGGING
{
rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &RCPtrLoggerWrapper);
}
#endif /* VBOX_WITH_RC_RELEASE_LOGGING */
/*
* For the ring-0 EMT logger, we use a per-thread logger instance
* in ring-0. Only initialize it once.
*/
if (pR0LoggerR3)
{
if (!pR0LoggerR3->fCreated)
{
pR0LoggerR3->fCreated = true;
pR0LoggerR3->fFlushingDisabled = false;
}
rc = RTLogCopyGroupsAndFlags(&pR0LoggerR3->Logger, NULL /* default */, pVM->vmm.s.pRCLoggerR3->fFlags, RTLOGFLAGS_BUFFERED);
}
return rc;
}
/**
*
* @returns Pointer to the buffer.
* @param pVM The VM handle.
*/
{
if (HWACCMIsEnabled(pVM))
if (RT_SUCCESS(rc))
return NULL;
}
/**
*
* @returns Pointer to the buffer.
* @param pVM The VM handle.
*/
{
if (HWACCMIsEnabled(pVM))
if (RT_SUCCESS(rc))
return NULL;
}
/**
* Execute state save operation.
*
* @returns VBox status code.
* @param pVM VM Handle.
* @param pSSM SSM operation handle.
*/
{
LogFlow(("vmmR3Save:\n"));
/*
* The hypervisor stack.
* Note! See note in vmmR3Load (remove this on version change).
*/
AssertMsg(pVCpu0->vmm.s.pbEMTStackBottomRC - RCPtrESP <= VMM_STACK_SIZE, ("Bottom %RRv ESP=%RRv\n", pVCpu0->vmm.s.pbEMTStackBottomRC, RCPtrESP));
/*
* be running. This avoids breaking the saved state version. :-)
*/
}
/**
* Execute state load operation.
*
* @returns VBox status code.
* @param pVM VM Handle.
* @param pSSM SSM operation handle.
* @param u32Version Data layout version.
*/
{
LogFlow(("vmmR3Load:\n"));
/*
* Validate version.
*/
if (u32Version != VMM_SAVED_STATE_VERSION)
{
}
/*
* Check that the stack is in the same place, or that it's fearly empty.
*
* Note! This can be skipped next time we update saved state as we will
* stack and the two associated pointers are not required.
*/
if (RT_FAILURE(rc))
return rc;
/* Restore the VMCPU states. VCPU 0 is always started. */
{
bool fStarted;
if (RT_FAILURE(rc))
return rc;
}
/* terminator */
if (RT_FAILURE(rc))
return rc;
if (u32 != ~0U)
{
}
return VINF_SUCCESS;
}
/**
* Resolve a builtin RC symbol.
*
* Called by PDM when loading or relocating RC modules.
*
* @returns VBox status
* @param pVM VM Handle.
* @param pszSymbol Symbol to resolv
* @param pRCPtrValue Where to store the symbol value.
*
* @remark This has to work before VMMR3Relocate() is called.
*/
{
{
}
{
#ifdef VBOX_WITH_RC_RELEASE_LOGGING
#else
#endif
}
else
return VERR_SYMBOL_NOT_FOUND;
return VINF_SUCCESS;
}
/**
* Suspends the CPU yielder.
*
* @param pVM The VM handle.
*/
{
{
else
}
}
/**
* Stops the CPU yielder.
*
* @param pVM The VM handle.
*/
{
}
/**
* Resumes the CPU yielder when it has been a suspended or stopped.
*
* @param pVM The VM handle.
*/
{
{
}
}
/**
* Internal timer callback function.
*
* @param pVM The VM.
* @param pTimer The timer handle.
* @param pvUser User argument specified upon timer creation.
*/
{
/*
* This really needs some careful tuning. While we shouldn't be too greedy since
* that'll cause the rest of the system to stop up, we shouldn't be too nice either
* because that'll cause us to stop up.
*
* The current logic is to use the default interval when there is no lag worth
* mentioning, but when we start accumulating lag we don't bother yielding at all.
*
* (This depends on the TMCLOCK_VIRTUAL_SYNC to be scheduled before TMCLOCK_REAL
* so the lag is up to date.)
*/
)
{
#ifdef LOG_ENABLED
#endif
}
}
/**
* Executes guest code in the raw-mode context.
*
* @param pVM VM handle.
* @param pVCpu The VMCPU to operate on.
*/
{
/*
* Set the EIP and ESP.
*/
/*
* We hide log flushes (outer) and hypervisor interrupts (inner).
*/
for (;;)
{
#ifdef VBOX_STRICT
#endif
int rc;
do
{
#ifdef NO_SUPCALLR0VMM
#else
#endif
} while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
/*
* Flush the logs.
*/
#ifdef LOG_ENABLED
if ( pLogger
&& pLogger->offScratch > 0)
#endif
#ifdef VBOX_WITH_RC_RELEASE_LOGGING
#endif
if (rc != VINF_VMM_CALL_HOST)
{
Log2(("VMMR3RawRunGC: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
return rc;
}
if (RT_FAILURE(rc))
return rc;
/* Resume GC */
}
}
/**
* Executes guest code (Intel VT-x and AMD-V).
*
* @param pVM VM handle.
* @param pVCpu The VMCPU to operate on.
*/
{
for (;;)
{
int rc;
do
{
#ifdef NO_SUPCALLR0VMM
#else
#endif
} while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
#ifdef LOG_ENABLED
/*
* Flush the log
*/
if ( pR0LoggerR3
#endif /* !LOG_ENABLED */
if (rc != VINF_VMM_CALL_HOST)
{
Log2(("VMMR3HwAccRunGC: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
return rc;
}
if (RT_FAILURE(rc))
return rc;
/* Resume R0 */
}
}
/**
* VCPU worker for VMMSendSipi.
*
* @param pVM The VM to operate on.
* @param idCpu Virtual CPU to perform SIPI on
* @param uVector SIPI vector
*/
{
/** @todo what are we supposed to do if the processor is already running? */
return VERR_ACCESS_DENIED;
# if 1 /* If we keep the EMSTATE_WAIT_SIPI method, then move this to EM.cpp. */
return VINF_EM_RESCHEDULE;
# else /* And if we go the VMCPU::enmState way it can stay here. */
return VINF_SUCCESS;
# endif
}
{
return VINF_EM_WAIT_SIPI;
}
/**
* Sends SIPI to the virtual CPU by setting CS:EIP into vector-dependent state
* and unhalting processor
*
* @param pVM The VM to operate on.
* @param idCpu Virtual CPU to perform SIPI on
* @param uVector SIPI vector
*/
{
}
/**
* Sends init IPI to the virtual CPU.
*
* @param pVM The VM to operate on.
* @param idCpu Virtual CPU to perform int IPI on
*/
{
}
/**
* VCPU worker for VMMR3SynchronizeAllVCpus.
*
* @param pVM The VM to operate on.
* @param idCpu Virtual CPU to perform SIPI on
* @param uVector SIPI vector
*/
{
/* Block until the job in the caller has finished. */
return VINF_SUCCESS;
}
/**
* Atomically execute a callback handler
* Note: This is very expensive; avoid using it frequently!
*
* @param pVM The VM to operate on.
* @param pfnHandler Callback handler
* @param pvUser User specified parameter
*/
{
int rc;
/* Shortcut for the uniprocessor case. */
{
{
}
}
/* Wait until all other VCPUs are waiting for us. */
RTThreadSleep(1);
return rc;
}
/**
* Read from the ring 0 jump buffer stack
*
* @returns VBox status code.
*
* @param pVM Pointer to the shared VM structure.
* @param idCpu The ID of the source CPU context (for the address).
* @param pAddress Where to start reading.
* @param pvBuf Where to store the data we've read.
* @param cbRead The number of bytes to read.
*/
VMMR3DECL(int) VMMR3ReadR0Stack(PVM pVM, VMCPUID idCpu, RTHCUINTPTR pAddress, void *pvBuf, size_t cbRead)
{
return VERR_INVALID_POINTER;
memcpy(pvBuf, pVCpu->vmm.s.pbEMTStackR3 + pVCpu->vmm.s.CallHostR0JmpBuf.cbSavedStack - offset, cbRead);
return VINF_SUCCESS;
}
/**
* Calls a RC function.
*
* @param pVM The VM handle.
* @param RCPtrEntry The address of the RC function.
* @param cArgs The number of arguments in the ....
* @param ... Arguments to the function.
*/
{
return rc;
}
/**
* Calls a RC function.
*
* @param pVM The VM handle.
* @param RCPtrEntry The address of the RC function.
* @param cArgs The number of arguments in the ....
* @param args Arguments to the function.
*/
{
/* Raw mode implies 1 VCPU. */
/*
* Setup the call frame using the trampoline.
*/
int i = cArgs;
while (i-- > 0)
/*
* We hide log flushes (outer) and hypervisor interrupts (inner).
*/
for (;;)
{
int rc;
do
{
#ifdef NO_SUPCALLR0VMM
#else
#endif
} while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
/*
* Flush the logs.
*/
#ifdef LOG_ENABLED
if ( pLogger
&& pLogger->offScratch > 0)
#endif
#ifdef VBOX_WITH_RC_RELEASE_LOGGING
#endif
if (rc != VINF_VMM_CALL_HOST)
{
Log2(("VMMR3CallGCV: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
return rc;
}
if (RT_FAILURE(rc))
return rc;
}
}
/**
* Wrapper for SUPCallVMMR0Ex which will deal with
* VINF_VMM_CALL_HOST returns.
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param uOperation Operation to execute.
* @param u64Arg Constant argument.
* @param pReqHdr Pointer to a request header. See SUPCallVMMR0Ex for
* details.
*/
{
/*
* Call Ring-0 entry with init code.
*/
int rc;
for (;;)
{
#ifdef NO_SUPCALLR0VMM
#else
#endif
if (rc != VINF_VMM_CALL_HOST)
break;
break;
/* Resume R0 */
}
return rc;
}
/**
* Resumes executing hypervisor code when interrupted by a queue flush or a
* debug event.
*
* @returns VBox status code.
* @param pVM VM handle.
* @param pVCpu VMCPU handle.
*/
{
/*
* We hide log flushes (outer) and hypervisor interrupts (inner).
*/
for (;;)
{
int rc;
do
{
#ifdef NO_SUPCALLR0VMM
#else
#endif
} while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
/*
* Flush the loggers,
*/
#ifdef LOG_ENABLED
if ( pLogger
&& pLogger->offScratch > 0)
#endif
#ifdef VBOX_WITH_RC_RELEASE_LOGGING
#endif
if (rc != VINF_VMM_CALL_HOST)
{
return rc;
}
if (RT_FAILURE(rc))
return rc;
}
}
/**
* Service a call to the ring-3 host code.
*
* @returns VBox status code.
* @param pVM VM handle.
* @param pVCpu VMCPU handle
* @remark Careful with critsects.
*/
{
{
/*
* Acquire the PDM lock.
*/
case VMMCALLHOST_PDM_LOCK:
{
break;
}
/*
* Flush a PDM queue.
*/
{
break;
}
/*
* Grow the PGM pool.
*/
{
break;
}
/*
* Maps an page allocation chunk into ring-3 so ring-0 can use it.
*/
{
break;
}
/*
* Allocates more handy pages.
*/
{
break;
}
/*
* Acquire the PGM lock.
*/
case VMMCALLHOST_PGM_LOCK:
{
break;
}
/*
* Acquire the MM hypervisor heap lock.
*/
case VMMCALLHOST_MMHYPER_LOCK:
{
break;
}
/*
* Flush REM handler notifications.
*/
{
break;
}
/*
* This is a noop. We just take this route to avoid unnecessary
* tests in the loops.
*/
LogAlways(("*FLUSH*\n"));
break;
/*
* Set the VM error message.
*/
case VMMCALLHOST_VM_SET_ERROR:
break;
/*
* Set the VM runtime error message.
*/
break;
/*
* Signal a ring 0 hypervisor assertion.
* Cancel the longjmp operation that's in progress.
*/
#ifdef RT_ARCH_X86
#else
#endif
return VERR_VMM_RING0_ASSERTION;
/*
* A forced switch to ring 0 for preemption purposes.
*/
break;
default:
return VERR_INTERNAL_ERROR;
}
return VINF_SUCCESS;
}
/**
* Displays the Force action Flags.
*
* @param pVM The VM handle.
* @param pHlp The output helpers.
* @param pszArgs The additional arguments (ignored).
*/
{
int c;
uint32_t f;
{ \
if (!(c % 6)) \
else \
c++; \
} \
} while (0)
{ \
if (!(c % 5)) \
else \
c++; \
} \
} while (0)
/*
* The global flags.
*/
/* show the flag mnemonics */
c = 0;
f = fGlobalForcedActions;
if (f)
else
/* the groups */
c = 0;
f = fGlobalForcedActions;
if (c)
/*
* Per CPU flags.
*/
{
/* show the flag mnemonics */
c = 0;
f = fLocalForcedActions;
if (f)
else
/* the groups */
c = 0;
f = fLocalForcedActions;
if (c)
}
}