/* $Id$ */
/** @file
* EM - emR3[Raw|Hm]HandleRC template.
*/
/*
* Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#ifndef ___EMHandleRCTmpl_h
#define ___EMHandleRCTmpl_h
#if defined(EMHANDLERC_WITH_PATM) && defined(EMHANDLERC_WITH_HM)
# error "Only one define"
#endif
/**
* Process a subset of the raw-mode and hm return codes.
*
* Since we have to share this with raw-mode single stepping, this inline
* function has been created to avoid code duplication.
*
* @returns VINF_SUCCESS if it's ok to continue raw mode.
* @returns VBox status code to return to the EM main loop.
*
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   rc      The return code.
 * @param   pCtx    Pointer to the guest CPU context.
*/
#ifdef EMHANDLERC_WITH_PATM
int emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
#elif defined(EMHANDLERC_WITH_HM)
int emR3HmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
#endif
{
switch (rc)
{
/*
* Common & simple ones.
*/
case VINF_SUCCESS:
break;
case VINF_EM_RESCHEDULE_RAW:
case VINF_EM_RESCHEDULE_HM:
case VINF_EM_RAW_INTERRUPT:
case VINF_EM_RAW_TO_R3:
case VINF_EM_PENDING_REQUEST:
rc = VINF_SUCCESS;
break;
#ifdef EMHANDLERC_WITH_PATM
/*
* Privileged instruction.
*/
case VINF_PATM_PATCH_TRAP_GP:
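/* Sketch, assuming the usual raw-mode helper: privileged instructions and the
 * patched #GP trap are handed to the ring-3 privileged-instruction handler. */
rc = emR3RawPrivileged(pVM, pVCpu);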
break;
case VINF_EM_RAW_GUEST_TRAP:
/*
 * Got a trap which needs dispatching.
 */
if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))   /* guard assumed; the assert below must not fire for normal guest code */
{
    AssertReleaseMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", CPUMGetGuestEIP(pVCpu)));
    rc = VERR_EM_RAW_PATCH_CONFLICT;
    break;
}
rc = emR3RawGuestTrap(pVM, pVCpu);   /* trap dispatcher assumed */
break;
/*
* Trap in patch code.
*/
case VINF_PATM_PATCH_TRAP_PF:
case VINF_PATM_PATCH_INT3:
rc = emR3RawPatchTrap(pVM, pVCpu, pCtx, rc);   /* dispatch helper assumed */
break;

/* PATM ring-3 requests that only need acknowledging; the two labels below are assumed. */
case VINF_PATM_DUPLICATE_FUNCTION:
rc = VINF_SUCCESS;
break;

case VINF_PATM_CHECK_PATCH_PAGE:
rc = VINF_SUCCESS;
break;
/*
 * Patch manager.
 */
case VERR_EM_RAW_PATCH_CONFLICT:   /* assumed: patch conflicts surface here */
AssertReleaseMsgFailed(("%Rrc handling is not yet implemented\n", rc));
break;
#endif /* EMHANDLERC_WITH_PATM */
#ifdef EMHANDLERC_WITH_PATM
/*
 * Memory mapped I/O access - attempt to patch the instruction
 */
case VINF_PATM_HC_MMIO_PATCH_READ:   /* assumed status code */
/* Have PATM try to patch the access (install call assumed/simplified); if that
 * fails, just emulate the instruction this time around. */
rc = PATMR3InstallPatch(pVM, pCtx->eip, PATMFL_MMIO_ACCESS);
if (RT_FAILURE(rc))
    rc = emR3ExecuteInstruction(pVM, pVCpu, "MMIO");
break;

case VINF_PATM_HC_MMIO_PATCH_WRITE:  /* assumed status code */
AssertFailed(); /* not yet implemented. */
break;
#endif /* EMHANDLERC_WITH_PATM */
/*
* Conflict or out of page tables.
*
* VM_FF_PGM_SYNC_CR3 is set by the hypervisor and all we need to
* do here is to execute the pending forced actions.
*/
case VINF_PGM_SYNC_CR3:
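/* The sync itself happens when the caller processes forced actions; roughly
 * (sketch, the PGMSyncCR3 call is assumed from the PGM API of this period):
 *
 *     rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
 *                     VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
 */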
("VINF_PGM_SYNC_CR3 and no VMCPU_FF_PGM_SYNC_CR3*!\n"));
rc = VINF_SUCCESS;
break;
/*
* PGM pool flush pending (guest SMP only).
*/
/** @todo jumping back and forth between ring 0 and 3 can burn a lot of cycles
* if the EMT thread that's supposed to handle the flush is currently not active
* (e.g. waiting to be scheduled) -> fix this properly!
*
* bird: Since the clearing is global and done via a rendezvous any CPU can do
* it. They would have to choose who to call VMMR3EmtRendezvous and send
* the rest to VMMR3EmtRendezvousFF ... Hmm ... that's not going to work
* all that well since the latter will race the setup done by the
* first. Guess that means we need some new magic in that area for
* handling this case. :/
*/
case VINF_PGM_POOL_FLUSH_PENDING:
rc = VINF_SUCCESS;
break;
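/* Sketch of the rendezvous idea from the @todo above. Assumptions: the flag and
 * callback shape follow the VMMR3EmtRendezvous API of this period; both names
 * inside the callback are illustrative, not taken from this file.
 *
 *     static DECLCALLBACK(VBOXSTRICTRC) emR3PoolClearRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
 *     {
 *         pgmR3PoolClearAll(pVM);   // hypothetical clearing helper
 *         return VINF_SUCCESS;
 *     }
 *     ...
 *     rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, emR3PoolClearRendezvous, NULL);
 */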
/*
* Paging mode change.
*/
case VINF_PGM_CHANGE_MODE:
/* Follow the guest's paging mode (PGMChangeMode call assumed); reschedule on success. */
rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
if (rc == VINF_SUCCESS)
    rc = VINF_EM_RESCHEDULE;
break;
#ifdef EMHANDLERC_WITH_PATM
/*
* CSAM wants to perform a task in ring-3. It has set an FF action flag.
*/
case VINF_CSAM_PENDING_ACTION:
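/* Nothing to do here beyond acknowledging; the FF that CSAM set is picked up by
 * the forced-action processing in the EM main loop. */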
rc = VINF_SUCCESS;
break;
/*
* Invoked Interrupt gate - must directly (!) go to the recompiler.
*/
case VINF_EM_RAW_INTERRUPT_PENDING:   /* assumed label for this block */
if (TRPMHasTrap(pVCpu))
{
    /* If the guest gate is marked unpatched, then we will check again if we can patch it. */
    uint8_t u8Interrupt = 0;
    if (   RT_SUCCESS(TRPMQueryTrap(pVCpu, &u8Interrupt, NULL))
        && TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) == TRPM_INVALID_HANDLER)
    {
        CSAMR3CheckGates(pVM, u8Interrupt, 1);   /* recheck call assumed */
        Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8Interrupt, TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) != TRPM_INVALID_HANDLER));
        /* Note: If it was successful, then we could go back to raw mode, but let's keep things simple for now. */
    }
}
rc = VINF_EM_RESCHEDULE_REM;   /* must go directly to the recompiler */
break;
/*
* Other ring switch types.
*/
case VINF_EM_RAW_RING_SWITCH:
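/* Sketch; the ring-switch helper name below is assumed, not taken from this file. */
rc = emR3RawRingSwitch(pVM, pVCpu);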
break;
#endif /* EMHANDLERC_WITH_PATM */
/*
* I/O Port access - emulate the instruction.
*/
case VINF_IOM_R3_IOPORT_READ:
case VINF_IOM_R3_IOPORT_WRITE:
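/* Sketch: emulate the faulting IN/OUT instruction in ring-3 (helper name assumed). */
rc = emR3ExecuteIOInstruction(pVM, pVCpu);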
break;
/*
* Memory mapped I/O access - emulate the instruction.
*/
case VINF_IOM_R3_MMIO_READ:
case VINF_IOM_R3_MMIO_WRITE:
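/* Sketch: emulate the faulting MMIO access in ring-3 (helper name assumed). */
rc = emR3ExecuteInstruction(pVM, pVCpu, "MMIO");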
break;
#ifdef EMHANDLERC_WITH_HM
/*
 * (MM)IO intensive code block detected; fall back to the recompiler for better performance
 */
case VINF_EM_RAW_EMULATE_IO_BLOCK:   /* assumed label and HM helper */
rc = HMR3EmulateIoBlock(pVM, pCtx);
break;

case VINF_EM_HM_PATCH_TPR_INSTR:     /* assumed label and HM helper */
rc = HMR3PatchTprInstr(pVM, pVCpu, pCtx);
break;
#endif
#ifdef EMHANDLERC_WITH_PATM
/*
* Execute instruction.
*/
case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:   /* assumed labels; these all just */
case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:   /* emulate the faulting instruction */
case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:   /* in ring-3. */
case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
rc = emR3ExecuteInstruction(pVM, pVCpu, "EMUL: ");   /* helper name assumed */
break;

case VINF_EM_RAW_EMULATE_INSTR_HLT:         /* assumed label for the HLT variant */
/** @todo skip instruction and go directly to the halt state. (see REM for implementation details) */
rc = emR3RawPrivileged(pVM, pVCpu);         /* call assumed */
break;
#endif
#ifdef EMHANDLERC_WITH_PATM
case VINF_PATCH_EMULATE_INSTR:
#else
case VINF_EM_RAW_GUEST_TRAP:
#endif
case VINF_EM_RAW_EMULATE_INSTR:
/* Shared by both configurations: plain ring-3 instruction emulation
 * (the shared label and the helper name are assumed). */
rc = emR3ExecuteInstruction(pVM, pVCpu, "EMUL: ");
break;
case VINF_EM_RAW_INJECT_TRPM_EVENT:   /* assumed label; the IEM call below is likewise assumed */
#ifdef VBOX_WITH_FIRST_IEM_STEP
rc = VBOXSTRICTRC_TODO(IEMInjectTrpmEvent(pVCpu));
/* The following condition should be removed when IEM_IMPLEMENTS_TASKSWITCH becomes true. */
if (rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
    rc = emR3ExecuteInstruction(pVM, pVCpu, "EVENT: ");
#else
/* Do the same thing as VINF_EM_RAW_EMULATE_INSTR. */
rc = emR3ExecuteInstruction(pVM, pVCpu, "EVENT: ");
#endif
break;
#ifdef EMHANDLERC_WITH_PATM
/*
* Stale selector and iret traps => REM.
*/
case VINF_EM_RAW_STALE_SELECTOR:   /* assumed second label ("stale selector") */
case VINF_EM_RAW_IRET_TRAP:
/* We will not go to the recompiler if EIP points to patch code. */
if (!PATMIsPatchGCAddr(pVM, pCtx->eip))   /* check assumed */
    rc = VINF_EM_RESCHEDULE_REM;
break;
/*
* Conflict in GDT, resync and continue.
*/
case VINF_SELM_SYNC_GDT:
AssertMsg(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS),
("VINF_SELM_SYNC_GDT without VMCPU_FF_SELM_SYNC_GDT/LDT/TSS!\n"));
rc = VINF_SUCCESS;
break;
#endif
/*
* Up a level.
*/
case VINF_EM_TERMINATE:
case VINF_EM_OFF:
case VINF_EM_RESET:
case VINF_EM_SUSPEND:
case VINF_EM_HALT:
case VINF_EM_RESUME:
case VINF_EM_NO_MEMORY:
case VINF_EM_RESCHEDULE:
case VINF_EM_RESCHEDULE_REM:
case VINF_EM_WAIT_SIPI:
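/* All of these are passed straight up; the EM main loop owns the corresponding
 * state transitions. */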
break;
/*
* Up a level and invoke the debugger.
*/
case VINF_EM_DBG_STEPPED:
case VINF_EM_DBG_BREAKPOINT:
case VINF_EM_DBG_STEP:
case VINF_EM_DBG_STOP:
break;
/*
* Up a level, dump and debug.
*/
case VERR_TRPM_DONT_PANIC:
case VERR_TRPM_PANIC:
case VERR_VMM_RING0_ASSERTION:
case VINF_EM_TRIPLE_FAULT:
break;
#ifdef EMHANDLERC_WITH_HM
/*
 * Up a level, after HM has done some release logging.
*/
case VERR_VMX_UNEXPECTED_EXIT:
case VERR_SVM_UNKNOWN_EXIT:
case VERR_SVM_UNEXPECTED_EXIT:
break;
/* Up a level; fatal */
case VERR_SVM_IN_USE:
break;
#endif
/*
* Anything which is not known to us means an internal error
* and the termination of the VM!
*/
default:
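/* An assertion here is assumed; the unknown status itself is returned and will
 * take the VM down in the main loop, as the comment above indicates. */
AssertMsgFailed(("Unknown return code %Rrc\n", rc));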
break;
}
return rc;
}
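/*
 * Usage sketch (illustrative only; the calling loop is not part of this file):
 *
 *     rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);   // emR3HmHandleRC in the HM build
 *     if (rc != VINF_SUCCESS)
 *         return rc;                                // hand the status up to the EM main loop
 */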
#endif