/* $Id$ */
/** @file
 * VMM - Virtual Machine Monitor, Raw-mode and ring-0 context code.
 */

/*
 * Copyright (C) 2009 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include <VBox/vmm.h>
#include "VMMInternal.h"
#include <VBox/vm.h>
#include <VBox/err.h>

#include <iprt/assert.h>
#include <iprt/string.h>


/**
 * Calls the ring-3 host code.
 *
 * @returns VBox status code of the ring-3 call.
 * @retval  VERR_VMM_RING3_CALL_DISABLED if called at the wrong time. This must
 *          be passed up the stack, or if that isn't possible then VMMRZCallRing3
 *          needs to change it into an assertion.
 *
 * @param   pVM             The VM handle.
 * @param   pVCpu           The virtual CPU handle of the calling EMT.
 * @param   enmOperation    The operation.
 * @param   uArg            The argument to the operation.
 */
VMMRZDECL(int) VMMRZCallRing3(PVM pVM, PVMCPU pVCpu, VMMCALLRING3 enmOperation, uint64_t uArg)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Check if calling ring-3 has been disabled and only let fatal calls through.
     */
    if (RT_UNLIKELY(    pVCpu->vmm.s.cCallRing3Disabled != 0
                    &&  enmOperation != VMMCALLRING3_VM_R0_ASSERTION))
    {
        /*
         * In most cases, it's sufficient to return a status code which
         * will then be propagated up the code, usually encountering several
         * AssertRC invocations along the way. Hitting one of those is more
         * helpful than stopping here.
         *
         * However, some callers don't check the status code because they are
         * called from void functions, and for these we'll turn this into a
         * ring-0 assertion host call.
         */
        if (enmOperation != VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS)
            return VERR_VMM_RING3_CALL_DISABLED;
#ifdef IN_RC
        RTStrPrintf(g_szRTAssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
                    "VMMRZCallRing3: enmOperation=%d uArg=%#llx idCpu=%#x\n", enmOperation, uArg, pVCpu->idCpu);
#endif
        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
                    "VMMRZCallRing3: enmOperation=%d uArg=%#llx idCpu=%#x\n", enmOperation, uArg, pVCpu->idCpu);
        enmOperation = VMMCALLRING3_VM_R0_ASSERTION;
    }

    /*
     * The normal path.
     */
/** @todo profile this! */
    pVCpu->vmm.s.enmCallRing3Operation = enmOperation;
    pVCpu->vmm.s.u64CallRing3Arg = uArg;
    pVCpu->vmm.s.rcCallRing3 = VERR_INTERNAL_ERROR;
#ifdef IN_RC
    pVM->vmm.s.pfnGuestToHostRC(VINF_VMM_CALL_HOST);
#else
    int rc = vmmR0CallRing3LongJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, VINF_VMM_CALL_HOST);
    if (RT_FAILURE(rc))
        return rc;
#endif
    return pVCpu->vmm.s.rcCallRing3;
}
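

/*
 * Usage sketch (illustrative only, not taken from this file): a typical
 * ring-0 or raw-mode caller hands its request to VMMRZCallRing3 and simply
 * passes the status, including VERR_VMM_RING3_CALL_DISABLED, up the stack.
 * The enclosing function and the particular enmOperation/uArg values are
 * assumptions made for the example.
 *
 *      int rc = VMMRZCallRing3(pVM, pVCpu, enmOperation, uArg);
 *      if (RT_FAILURE(rc))
 *          return rc;
 */
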
/**
* Simple wrapper that adds the pVCpu argument.
*
* @returns VBox status code of the ring-3 call.
* @retval VERR_VMM_RING3_CALL_DISABLED if called at the wrong time. This must
* be passed up the stack, or if that isn't possible then VMMRZCallRing3
* needs to change it into an assertion.
*
* @param pVM The VM handle.
* @param enmOperation The operation.
* @param uArg The argument to the operation.
*/
VMMRZDECL(int) VMMRZCallRing3NoCpu(PVM pVM, VMMCALLRING3 enmOperation, uint64_t uArg)
{
return VMMRZCallRing3(pVM, VMMGetCpu(pVM), enmOperation, uArg);
}


/**
* Disables all host calls, except certain fatal ones.
*
* @param pVCpu The CPU struct for the calling EMT.
* @thread EMT.
*/
VMMRZDECL(void) VMMRZCallRing3Disable(PVMCPU pVCpu)
{
VMCPU_ASSERT_EMT(pVCpu);
Assert(pVCpu->vmm.s.cCallRing3Disabled < 16);
if (++pVCpu->vmm.s.cCallRing3Disabled == 1)
{
/** @todo it might make more sense to just disable logging here, then we
* won't flush away important bits... but that goes both ways really. */
#ifdef IN_RC
pVCpu->pVMRC->vmm.s.fRCLoggerFlushingDisabled = true;
#else
# ifdef LOG_ENABLED
if (pVCpu->vmm.s.pR0LoggerR0)
pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
# endif
#endif
}
}


/**
 * Counteracts VMMRZCallRing3Disable() and re-enables host calls.
*
* @param pVCpu The CPU struct for the calling EMT.
* @thread EMT.
*/
VMMRZDECL(void) VMMRZCallRing3Enable(PVMCPU pVCpu)
{
VMCPU_ASSERT_EMT(pVCpu);
Assert(pVCpu->vmm.s.cCallRing3Disabled > 0);
if (--pVCpu->vmm.s.cCallRing3Disabled == 0)
{
#ifdef IN_RC
pVCpu->pVMRC->vmm.s.fRCLoggerFlushingDisabled = false;
#else
# ifdef LOG_ENABLED
if (pVCpu->vmm.s.pR0LoggerR0)
pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
# endif
#endif
}
}
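

/*
 * Usage sketch (illustrative only, not taken from this file): the disable and
 * enable calls are used as a bracket around code that must not be interrupted
 * by a ring-3 round trip; the counter lets such brackets nest.  The helper
 * name in the middle is hypothetical and stands in for the protected work.
 *
 *      VMMRZCallRing3Disable(pVCpu);
 *      rc = SomeWorkThatMustNotGoToRing3(pVM, pVCpu);
 *      VMMRZCallRing3Enable(pVCpu);
 */
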
/**
 * Checks whether it's possible to call host context or not.
*
* @returns true if it's safe, false if it isn't.
* @param pVCpu The CPU struct for the calling EMT.
*/
VMMRZDECL(bool) VMMRZCallRing3IsEnabled(PVMCPU pVCpu)
{
VMCPU_ASSERT_EMT(pVCpu);
Assert(pVCpu->vmm.s.cCallRing3Disabled <= 16);
return pVCpu->vmm.s.cCallRing3Disabled == 0;
}
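

/*
 * Usage sketch (illustrative only, not taken from this file): code with a
 * fallback path can probe with VMMRZCallRing3IsEnabled before requesting a
 * ring-3 operation instead of relying on the VERR_VMM_RING3_CALL_DISABLED
 * status.  The enmOperation/uArg values and the fallback are assumptions made
 * for the example.
 *
 *      if (VMMRZCallRing3IsEnabled(pVCpu))
 *          rc = VMMRZCallRing3(pVM, pVCpu, enmOperation, uArg);
 *      else
 *          rc = VERR_VMM_RING3_CALL_DISABLED;
 */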