/* $Id$ */
/** @file
* IEM - Interpreted Execution Manager - All Contexts.
*/
/*
* Copyright (C) 2011-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/** @page pg_iem IEM - Interpreted Execution Manager
*
 * The interpreted execution manager (IEM) is for executing short guest code
* sequences that are causing too many exits / virtualization traps. It will
* also be used to interpret single instructions, thus replacing the selective
* interpreters in EM and IOM.
*
* Design goals:
* - Relatively small footprint, although we favour speed and correctness
* over size.
* - Reasonably fast.
* - Correctly handle lock prefixed instructions.
* - Complete instruction set - eventually.
* - Refactorable into a recompiler, maybe.
* - Replace EMInterpret*.
*
* Using the existing disassembler has been considered, however this is thought
* to conflict with speed as the disassembler chews things a bit too much while
* leaving us with a somewhat complicated state to interpret afterwards.
*
*
* The current code is very much work in progress. You've been warned!
*
*
* @section sec_iem_fpu_instr FPU Instructions
*
* On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
* same or equivalent instructions on the host FPU. To make life easy, we also
* let the FPU prioritize the unmasked exceptions for us. This however, only
 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
* for FPU exception delivery, because with CR0.NE=0 there is a window where we
* can trigger spurious FPU exceptions.
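 *
 * A minimal guard sketch for the CR0.NE point above (illustrative only; the
 * helper name is hypothetical and not part of this file):
 * @code
 *   DECLINLINE(bool) iemFpuHostCanPrioritizeXcpts(PCCPUMCTX pCtx)
 *   {
 *       // NE=0 delivers FPU errors via IRQ 13 and leaves the spurious window.
 *       return RT_BOOL(pCtx->cr0 & X86_CR0_NE);
 *   }
 * @endcode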
*
* The guest FPU state is not loaded into the host CPU and kept there till we
* leave IEM because the calling conventions have declared an all year open
* season on much of the FPU state. For instance an innocent looking call to
* memcpy might end up using a whole bunch of XMM or MM registers if the
* particular implementation finds it worthwhile.
*
*
* @section sec_iem_logging Logging
*
 * The IEM code uses the \"IEM\" log group for the main logging. The different
 * logging levels have the following assignments:
* - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
* - Level 2 (Log2): ?
* - Level 4 (Log4): Decoding mnemonics w/ EIP.
* - Level 5 (Log5): Decoding details.
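 *
 * For example (illustrative statements, not taken from this file):
 * @code
 *   Log(("iemRaiseXcptOrInt: vec=%#x\n", u8Vector));                           // level 1: major events
 *   Log4(("decode - %04x:%08RX64 push es\n", pCtx->cs.Sel, pCtx->rip));        // level 4: mnemonics w/ EIP
 *   Log5(("ModRM: mod=%u reg=%u rm=%u\n", bRm >> 6, (bRm >> 3) & 7, bRm & 7)); // level 5: decoding details
 * @endcode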
*
*/
/** @def IEM_VERIFICATION_MODE_MINIMAL
* Use for pitting IEM against EM or something else in ring-0 or raw-mode
* context. */
//#define IEM_VERIFICATION_MODE_MINIMAL
//#define IEM_LOG_MEMORY_WRITES
#define IEM_IMPLEMENTS_TASKSWITCH
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
# endif
#endif
#include "IEMInternal.h"
#ifdef IEM_VERIFICATION_MODE_FULL
#endif
#include <VBox/disopcode.h>
/*******************************************************************************
* Structures and Typedefs *
*******************************************************************************/
/** @typedef PFNIEMOP
* Pointer to an opcode decoder function.
*/
/** @def FNIEMOP_DEF
* Define an opcode decoder function.
*
 * We're using macros for this so that adding and removing parameters as well as
 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL.
*
* @param a_Name The function name.
*/
#if defined(__GNUC__) && defined(RT_ARCH_X86)
# define FNIEMOP_DEF(a_Name) \
    static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
#elif defined(__GNUC__)
# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
#else
# define FNIEMOP_DEF(a_Name) \
    static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
#endif
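/*
 * Usage sketch for the FNIEMOP_DEF family above (illustrative only; this
 * handler is hypothetical and compiled out):
 */
#if 0
FNIEMOP_DEF(iemOp_nop_example)
{
    /* A real handler would fetch further opcode bytes and run IEM_MC blocks. */
    return VINF_SUCCESS;
}
#endif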
/**
* Selector descriptor table entry as fetched by iemMemFetchSelDesc.
*/
typedef union IEMSELDESC
{
/** The legacy view. */
/** The long mode view. */
} IEMSELDESC;
/** Pointer to a selector descriptor table entry. */
typedef IEMSELDESC *PIEMSELDESC;
/*******************************************************************************
* Defined Constants And Macros *
*******************************************************************************/
/** @name IEM status codes.
*
* Not quite sure how this will play out in the end, just aliasing safe status
* codes for now.
*
* @{ */
/** @} */
/** Temporary hack to disable the double execution. Will be removed in favor
* of a dedicated execution mode in EM. */
//#define IEM_VERIFICATION_MODE_NO_REM
/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
* due to GCC lacking knowledge about the value range of a switch. */
#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
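/*
 * Usage sketch (illustrative only, compiled out): terminate a fully
 * enumerated switch so the compiler can see that every path either assigns
 * the variable or returns.
 */
#if 0
    uint8_t cbValue;
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT: cbValue = 2; break;
        case IEMMODE_32BIT: cbValue = 4; break;
        case IEMMODE_64BIT: cbValue = 8; break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
#endif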
/**
* Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
 * occasion.
*/
#ifdef LOG_ENABLED
# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
do { \
/*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
} while (0)
#else
# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
    return VERR_IEM_ASPECT_NOT_IMPLEMENTED
#endif
/**
* Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
 * occasion using the supplied logger statement.
*
* @param a_LoggerArgs What to log on failure.
*/
#ifdef LOG_ENABLED
# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
do { \
/*LogFunc(a_LoggerArgs);*/ \
return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
} while (0)
#else
# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
    return VERR_IEM_ASPECT_NOT_IMPLEMENTED
#endif
/**
* Call an opcode decoder function.
*
 * We're using macros for this so that adding and removing parameters can be
* done as we please. See FNIEMOP_DEF.
*/
/**
* Call a common opcode decoder function taking one extra argument.
*
 * We're using macros for this so that adding and removing parameters can be
* done as we please. See FNIEMOP_DEF_1.
*/
/**
 * Call a common opcode decoder function taking two extra arguments.
 *
 * We're using macros for this so that adding and removing parameters can be
 * done as we please. See FNIEMOP_DEF_2.
 */
/**
* Check if we're currently executing in real or virtual 8086 mode.
*
* @returns @c true if it is, @c false if not.
* @param a_pIemCpu The IEM state of the current CPU.
*/
#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
/**
* Check if we're currently executing in virtual 8086 mode.
*
* @returns @c true if it is, @c false if not.
* @param a_pIemCpu The IEM state of the current CPU.
*/
/**
* Check if we're currently executing in long mode.
*
* @returns @c true if it is, @c false if not.
* @param a_pIemCpu The IEM state of the current CPU.
*/
/**
* Check if we're currently executing in real mode.
*
* @returns @c true if it is, @c false if not.
* @param a_pIemCpu The IEM state of the current CPU.
*/
/**
* Tests if an AMD CPUID feature (extended) is marked present - ECX.
*/
#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
/**
* Tests if an AMD CPUID feature (extended) is marked present - EDX.
*/
#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
/**
 * Tests if at least one of the specified AMD CPUID features (extended) is
 * marked present.
*/
#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
/**
* Checks if an Intel CPUID feature is present.
*/
#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
    (   ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
     || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
/**
* Checks if an Intel CPUID feature is present.
*/
#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(a_fEcx) \
    iemRegIsIntelCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
/**
* Checks if an Intel CPUID feature is present in the host CPU.
*/
/**
* Evaluates to true if we're presenting an Intel CPU to the guest.
*/
/**
* Evaluates to true if we're presenting an AMD CPU to the guest.
*/
/**
* Check if the address is canonical.
*/
/*******************************************************************************
* Global Variables *
*******************************************************************************/
/** Function table for the ADD instruction. */
static const IEMOPBINSIZES g_iemAImpl_add =
{
};
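/*
 * The initializer bodies are elided in this excerpt. As a sketch of the
 * expected shape (assuming the usual IEM worker naming convention, which is
 * not shown here), each IEMOPBINSIZES table pairs each operand-size worker
 * with its LOCK-prefixed sibling, e.g. for ADD:
 *
 *     iemAImpl_add_u8,  iemAImpl_add_u8_locked,
 *     iemAImpl_add_u16, iemAImpl_add_u16_locked,
 *     iemAImpl_add_u32, iemAImpl_add_u32_locked,
 *     iemAImpl_add_u64, iemAImpl_add_u64_locked
 *
 * The decoder indexes by effective operand size and picks the locked column
 * when a LOCK prefix is present.
 */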
/** Function table for the ADC instruction. */
static const IEMOPBINSIZES g_iemAImpl_adc =
{
};
/** Function table for the SUB instruction. */
static const IEMOPBINSIZES g_iemAImpl_sub =
{
};
/** Function table for the SBB instruction. */
static const IEMOPBINSIZES g_iemAImpl_sbb =
{
};
/** Function table for the OR instruction. */
static const IEMOPBINSIZES g_iemAImpl_or =
{
};
/** Function table for the XOR instruction. */
static const IEMOPBINSIZES g_iemAImpl_xor =
{
};
/** Function table for the AND instruction. */
static const IEMOPBINSIZES g_iemAImpl_and =
{
};
/** Function table for the CMP instruction.
* @remarks Making operand order ASSUMPTIONS.
*/
static const IEMOPBINSIZES g_iemAImpl_cmp =
{
};
/** Function table for the TEST instruction.
* @remarks Making operand order ASSUMPTIONS.
*/
static const IEMOPBINSIZES g_iemAImpl_test =
{
};
/** Function table for the BT instruction. */
static const IEMOPBINSIZES g_iemAImpl_bt =
{
};
/** Function table for the BTC instruction. */
static const IEMOPBINSIZES g_iemAImpl_btc =
{
};
/** Function table for the BTR instruction. */
static const IEMOPBINSIZES g_iemAImpl_btr =
{
};
/** Function table for the BTS instruction. */
static const IEMOPBINSIZES g_iemAImpl_bts =
{
};
/** Function table for the BSF instruction. */
static const IEMOPBINSIZES g_iemAImpl_bsf =
{
};
/** Function table for the BSR instruction. */
static const IEMOPBINSIZES g_iemAImpl_bsr =
{
};
/** Function table for the IMUL instruction. */
static const IEMOPBINSIZES g_iemAImpl_imul_two =
{
};
/** Group 1 /r lookup table. */
static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
{
    &g_iemAImpl_add, &g_iemAImpl_or,  &g_iemAImpl_adc, &g_iemAImpl_sbb,
    &g_iemAImpl_and, &g_iemAImpl_sub, &g_iemAImpl_xor, &g_iemAImpl_cmp
};
/** Function table for the INC instruction. */
static const IEMOPUNARYSIZES g_iemAImpl_inc =
{
};
/** Function table for the DEC instruction. */
static const IEMOPUNARYSIZES g_iemAImpl_dec =
{
};
/** Function table for the NEG instruction. */
static const IEMOPUNARYSIZES g_iemAImpl_neg =
{
};
/** Function table for the NOT instruction. */
static const IEMOPUNARYSIZES g_iemAImpl_not =
{
};
/** Function table for the ROL instruction. */
static const IEMOPSHIFTSIZES g_iemAImpl_rol =
{
};
/** Function table for the ROR instruction. */
static const IEMOPSHIFTSIZES g_iemAImpl_ror =
{
};
/** Function table for the RCL instruction. */
static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
{
};
/** Function table for the RCR instruction. */
static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
{
};
/** Function table for the SHL instruction. */
static const IEMOPSHIFTSIZES g_iemAImpl_shl =
{
};
/** Function table for the SHR instruction. */
static const IEMOPSHIFTSIZES g_iemAImpl_shr =
{
};
/** Function table for the SAR instruction. */
static const IEMOPSHIFTSIZES g_iemAImpl_sar =
{
};
/** Function table for the MUL instruction. */
static const IEMOPMULDIVSIZES g_iemAImpl_mul =
{
};
/** Function table for the IMUL instruction working implicitly on rAX. */
static const IEMOPMULDIVSIZES g_iemAImpl_imul =
{
};
/** Function table for the DIV instruction. */
static const IEMOPMULDIVSIZES g_iemAImpl_div =
{
};
/** Function table for the IDIV instruction. */
static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
{
};
/** Function table for the SHLD instruction */
static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
{
};
/** Function table for the SHRD instruction */
static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
{
};
/** Function table for the PUNPCKLBW instruction */
static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
/** Function table for the PUNPCKLWD instruction */
static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
/** Function table for the PUNPCKLDQ instruction */
static const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
/** Function table for the PUNPCKLQDQ instruction */
/** Function table for the PUNPCKHBW instruction */
static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
/** Function table for the PUNPCKHWD instruction */
static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
/** Function table for the PUNPCKHDQ instruction */
static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
/** Function table for the PUNPCKHQDQ instruction */
/** Function table for the PXOR instruction */
/** Function table for the PCMPEQB instruction */
/** Function table for the PCMPEQW instruction */
/** Function table for the PCMPEQD instruction */
#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
/** What IEM just wrote. */
/** How much IEM just wrote. */
#endif
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
static VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
#endif
static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
/**
* Sets the pass up status.
*
* @returns VINF_SUCCESS.
* @param pIemCpu The per CPU IEM state of the calling thread.
* @param rcPassUp The pass up status. Must be informational.
* VINF_SUCCESS is not allowed.
*/
static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
{
    int32_t const rcOldPassUp = pIemCpu->rcPassUp;
    if (rcOldPassUp == VINF_SUCCESS)
        pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    /* If both are EM scheduling codes, use EM priority rules. */
    else if (   rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
             && rcPassUp    >= VINF_EM_FIRST && rcPassUp    <= VINF_EM_LAST)
    {
        if (rcPassUp < rcOldPassUp)
        {
            Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
            pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
        }
        else
            Log(("IEM: rcPassUp=%Rrc  rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    }
    /* Override EM scheduling with specific status code. */
    else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
    {
        Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
        pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    }
    /* Don't override specific status code, first come first served. */
    else
        Log(("IEM: rcPassUp=%Rrc  rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    return VINF_SUCCESS;
}
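/*
 * Usage sketch (illustrative only, compiled out): a commit that gets an
 * informational status back from PGM records it for the caller while the
 * instruction is still allowed to complete.
 */
#if 0
    VBOXSTRICTRC rcStrict = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu), GCPhys, pvBuf, cbBuf);
    if (rcStrict != VINF_SUCCESS && RT_SUCCESS(rcStrict))
        rc = iemSetPassUpStatus(pIemCpu, rcStrict);
#endif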
/**
* Initializes the execution state.
*
* @param pIemCpu The per CPU IEM state.
* @param fBypassHandlers Whether to bypass access handlers.
*/
DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
{
#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
#endif
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
#endif
#ifdef VBOX_STRICT
#endif
pIemCpu->cActiveMappings = 0;
pIemCpu->iNextMapping = 0;
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
if (!pIemCpu->fInPatchCode)
#endif
}
/**
* Initializes the decoder state.
*
* @param pIemCpu The per CPU IEM state.
* @param fBypassHandlers Whether to bypass access handlers.
*/
DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
{
#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
#endif
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
#endif
#ifdef IEM_VERIFICATION_MODE_FULL
#endif
if (enmMode != IEMMODE_64BIT)
{
}
else
{
}
pIemCpu->cActiveMappings = 0;
pIemCpu->iNextMapping = 0;
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
if (!pIemCpu->fInPatchCode)
#endif
#ifdef DBGFTRACE_ENABLED
switch (enmMode)
{
case IEMMODE_64BIT:
RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
break;
case IEMMODE_32BIT:
RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
break;
case IEMMODE_16BIT:
RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
break;
}
#endif
}
/**
 * Prefetches the opcodes the first time when starting to execute.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param fBypassHandlers Whether to bypass access handlers.
*/
static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
{
#ifdef IEM_VERIFICATION_MODE_FULL
#endif
/*
* What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
*
* First translate CS:rIP to a physical address.
*/
{
if (!IEM_IS_CANONICAL(GCPtrPC))
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
else
{
AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
if (!cbToTryRead) /* overflowed */
{
}
}
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/* Allow interpretation of patch manager code blocks since they can for
instance throw #PFs for perfectly good reasons. */
if (pIemCpu->fInPatchCode)
{
int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
return VINF_SUCCESS;
}
#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
if (RT_FAILURE(rc))
{
}
{
}
{
}
/** @todo Check reserved bits and such stuff. PGM is better at doing
* that, so do it when implementing the guest virtual address
* TLB... */
#ifdef IEM_VERIFICATION_MODE_FULL
/*
* Optimistic optimization: Use unconsumed opcode bytes from the previous
* instruction.
*/
/** @todo optimize this differently by not using PGMPhysRead. */
if ( offPrevOpcodes < cbOldOpcodes
{
return VINF_SUCCESS;
}
#endif
/*
* Read the bytes at this address.
*/
#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
if ( PATMIsEnabled(pVM)
&& RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
{
}
else
#endif
{
if (cbToTryRead > cbLeftOnPage)
if (!pIemCpu->fBypassHandlers)
else
if (rc != VINF_SUCCESS)
{
/** @todo status code handling */
Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
return rc;
}
}
return VINF_SUCCESS;
}
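/*
 * A sketch of the CS:rIP translation the elided code above performs
 * (variable names follow the surrounding fragments): outside 64-bit mode
 * the linear PC is cs.u64Base + eip, checked against cs.u32Limit; in
 * 64-bit mode it is rip itself, subject only to the canonical check at
 * the top.
 *
 *     GCPtrPC = pIemCpu->enmCpuMode == IEMMODE_64BIT
 *             ? pCtx->rip
 *             : pCtx->cs.u64Base + pCtx->eip;
 */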
/**
 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
 * exception if it fails.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
 * @param cbMin The minimum number of bytes relative to offOpcode
* that must be read.
*/
static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
{
/*
* What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
*
* First translate CS:rIP to a physical address.
*/
{
if (!IEM_IS_CANONICAL(GCPtrNext))
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
else
{
if (!cbToTryRead) /* overflowed */
{
/** @todo check out wrapping around the code segment. */
}
}
/* Only read up to the end of the page, and make sure we don't read more
than the opcode buffer can hold. */
if (cbToTryRead > cbLeftOnPage)
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/* Allow interpretation of patch manager code blocks since they can for
instance throw #PFs for perfectly good reasons. */
if (pIemCpu->fInPatchCode)
{
int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
return VINF_SUCCESS;
}
#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
if (RT_FAILURE(rc))
{
}
{
}
{
}
/** @todo Check reserved bits and such stuff. PGM is better at doing
* that, so do it when implementing the guest virtual address
* TLB... */
/*
* Read the bytes at this address.
*
* We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
* and since PATM should only patch the start of an instruction there
* should be no need to check again here.
*/
if (!pIemCpu->fBypassHandlers)
rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
else
rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
if (rc != VINF_SUCCESS)
{
/** @todo status code handling */
return rc;
}
return VINF_SUCCESS;
}
/**
* Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pb Where to return the opcode byte.
*/
DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
{
if (rcStrict == VINF_SUCCESS)
{
}
else
*pb = 0;
return rcStrict;
}
/**
* Fetches the next opcode byte.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu8 Where to return the opcode byte.
*/
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
{
return VINF_SUCCESS;
}
/**
* Fetches the next opcode byte, returns automatically on failure.
*
* @param a_pu8 Where to return the opcode byte.
* @remark Implicitly references pIemCpu.
*/
#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
if (rcStrict2 != VINF_SUCCESS) \
return rcStrict2; \
} while (0)
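/*
 * Usage sketch (illustrative only, compiled out): how a decoder function
 * consumes the opcode stream via this macro, e.g. to pick up a ModRM byte.
 */
#if 0
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm); /* returns rcStrict2 to the caller on any fetch error */
#endif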
/**
* Fetches the next signed byte from the opcode stream.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pi8 Where to return the signed byte.
*/
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
{
}
/**
* Fetches the next signed byte from the opcode stream, returning automatically
* on failure.
*
* @param pi8 Where to return the signed byte.
* @remark Implicitly references pIemCpu.
*/
#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
if (rcStrict2 != VINF_SUCCESS) \
return rcStrict2; \
} while (0)
/**
* Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
 * @param pu16 Where to return the opcode word.
*/
DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
{
if (rcStrict == VINF_SUCCESS)
return rcStrict;
}
/**
* Fetches the next signed byte from the opcode stream, extending it to
* unsigned 16-bit.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu16 Where to return the unsigned word.
*/
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
{
return VINF_SUCCESS;
}
/**
 * Fetches the next signed byte from the opcode stream and sign-extends it to
 * a word, returning automatically on failure.
*
* @param pu16 Where to return the word.
* @remark Implicitly references pIemCpu.
*/
#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
if (rcStrict2 != VINF_SUCCESS) \
return rcStrict2; \
} while (0)
/**
* Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu32 Where to return the opcode dword.
*/
DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
{
if (rcStrict == VINF_SUCCESS)
return rcStrict;
}
/**
* Fetches the next signed byte from the opcode stream, extending it to
* unsigned 32-bit.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu32 Where to return the unsigned dword.
*/
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
{
return VINF_SUCCESS;
}
/**
 * Fetches the next signed byte from the opcode stream and sign-extends it to
 * a double word, returning automatically on failure.
 *
 * @param pu32 Where to return the double word.
* @remark Implicitly references pIemCpu.
*/
#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
if (rcStrict2 != VINF_SUCCESS) \
return rcStrict2; \
} while (0)
/**
* Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu64 Where to return the opcode qword.
*/
DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
{
if (rcStrict == VINF_SUCCESS)
return rcStrict;
}
/**
* Fetches the next signed byte from the opcode stream, extending it to
* unsigned 64-bit.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu64 Where to return the unsigned qword.
*/
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
{
return VINF_SUCCESS;
}
/**
 * Fetches the next signed byte from the opcode stream and sign-extends it to
 * a quad word, returning automatically on failure.
 *
 * @param pu64 Where to return the quad word.
* @remark Implicitly references pIemCpu.
*/
#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
if (rcStrict2 != VINF_SUCCESS) \
return rcStrict2; \
} while (0)
/**
* Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu16 Where to return the opcode word.
*/
DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
{
if (rcStrict == VINF_SUCCESS)
{
}
else
*pu16 = 0;
return rcStrict;
}
/**
* Fetches the next opcode word.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu16 Where to return the opcode word.
*/
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
{
return VINF_SUCCESS;
}
/**
* Fetches the next opcode word, returns automatically on failure.
*
* @param a_pu16 Where to return the opcode word.
* @remark Implicitly references pIemCpu.
*/
#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
if (rcStrict2 != VINF_SUCCESS) \
return rcStrict2; \
} while (0)
/**
* Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu32 Where to return the opcode double word.
*/
DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
{
if (rcStrict == VINF_SUCCESS)
{
}
else
*pu32 = 0;
return rcStrict;
}
/**
* Fetches the next opcode word, zero extending it to a double word.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu32 Where to return the opcode double word.
*/
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
{
return VINF_SUCCESS;
}
/**
* Fetches the next opcode word and zero extends it to a double word, returns
* automatically on failure.
*
* @param a_pu32 Where to return the opcode double word.
* @remark Implicitly references pIemCpu.
*/
#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
if (rcStrict2 != VINF_SUCCESS) \
return rcStrict2; \
} while (0)
/**
* Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu64 Where to return the opcode quad word.
*/
DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
{
if (rcStrict == VINF_SUCCESS)
{
}
else
*pu64 = 0;
return rcStrict;
}
/**
* Fetches the next opcode word, zero extending it to a quad word.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu64 Where to return the opcode quad word.
*/
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
{
return VINF_SUCCESS;
}
/**
* Fetches the next opcode word and zero extends it to a quad word, returns
* automatically on failure.
*
* @param a_pu64 Where to return the opcode quad word.
* @remark Implicitly references pIemCpu.
*/
#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
if (rcStrict2 != VINF_SUCCESS) \
return rcStrict2; \
} while (0)
/**
* Fetches the next signed word from the opcode stream.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pi16 Where to return the signed word.
*/
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
{
}
/**
* Fetches the next signed word from the opcode stream, returning automatically
* on failure.
*
* @param pi16 Where to return the signed word.
* @remark Implicitly references pIemCpu.
*/
#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
if (rcStrict2 != VINF_SUCCESS) \
return rcStrict2; \
} while (0)
/**
* Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu32 Where to return the opcode dword.
*/
DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
{
if (rcStrict == VINF_SUCCESS)
{
}
else
*pu32 = 0;
return rcStrict;
}
/**
* Fetches the next opcode dword.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu32 Where to return the opcode double word.
*/
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
{
return VINF_SUCCESS;
}
/**
* Fetches the next opcode dword, returns automatically on failure.
*
* @param a_pu32 Where to return the opcode dword.
* @remark Implicitly references pIemCpu.
*/
#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
if (rcStrict2 != VINF_SUCCESS) \
return rcStrict2; \
} while (0)
/**
* Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
 * @param pu64 Where to return the opcode quad word.
*/
DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
{
if (rcStrict == VINF_SUCCESS)
{
}
else
*pu64 = 0;
return rcStrict;
}
/**
* Fetches the next opcode dword, zero extending it to a quad word.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu64 Where to return the opcode quad word.
*/
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
{
return VINF_SUCCESS;
}
/**
* Fetches the next opcode dword and zero extends it to a quad word, returns
* automatically on failure.
*
* @param a_pu64 Where to return the opcode quad word.
* @remark Implicitly references pIemCpu.
*/
#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
if (rcStrict2 != VINF_SUCCESS) \
return rcStrict2; \
} while (0)
/**
* Fetches the next signed double word from the opcode stream.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pi32 Where to return the signed double word.
*/
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
{
}
/**
* Fetches the next signed double word from the opcode stream, returning
* automatically on failure.
*
* @param pi32 Where to return the signed double word.
* @remark Implicitly references pIemCpu.
*/
#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
if (rcStrict2 != VINF_SUCCESS) \
return rcStrict2; \
} while (0)
/**
* Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu64 Where to return the opcode qword.
*/
DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
{
if (rcStrict == VINF_SUCCESS)
{
}
else
*pu64 = 0;
return rcStrict;
}
/**
* Fetches the next opcode dword, sign extending it into a quad word.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu64 Where to return the opcode quad word.
*/
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
{
return VINF_SUCCESS;
}
/**
* Fetches the next opcode double word and sign extends it to a quad word,
* returns automatically on failure.
*
* @param a_pu64 Where to return the opcode quad word.
* @remark Implicitly references pIemCpu.
*/
#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
if (rcStrict2 != VINF_SUCCESS) \
return rcStrict2; \
} while (0)
/**
* Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu64 Where to return the opcode qword.
*/
DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
{
if (rcStrict == VINF_SUCCESS)
{
}
else
*pu64 = 0;
return rcStrict;
}
/**
* Fetches the next opcode qword.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM state.
* @param pu64 Where to return the opcode qword.
*/
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
{
return VINF_SUCCESS;
}
/**
* Fetches the next opcode quad word, returns automatically on failure.
*
* @param a_pu64 Where to return the opcode quad word.
* @remark Implicitly references pIemCpu.
*/
#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
if (rcStrict2 != VINF_SUCCESS) \
return rcStrict2; \
} while (0)
/** @name Misc Worker Functions.
* @{
*/
/**
* Validates a new SS segment.
*
* @returns VBox strict status code.
* @param pIemCpu The IEM per CPU instance data.
* @param pCtx The CPU context.
 * @param NewSS The new SS selector.
* @param uCpl The CPL to load the stack for.
* @param pDesc Where to return the descriptor.
*/
static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
{
/* Null selectors are not allowed (we're not called for dispatching
interrupts with SS=0 in long mode). */
if (!(NewSS & X86_SEL_MASK_OFF_RPL))
{
return iemRaiseTaskSwitchFault0(pIemCpu);
}
/** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
{
}
/*
* Read the descriptor.
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* Perform the descriptor validation documented for LSS, POP SS and MOV SS.
*/
{
Log(("iemMiscValidateNewSSandRsp: %#x - system selector -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
}
{
Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
}
{
Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
}
/* Is it there? */
/** @todo testcase: Is this checked before the canonical / limit check below? */
{
}
return VINF_SUCCESS;
}
/**
* Gets the correct EFLAGS regardless of whether PATM stores parts of them or
* not.
*
* @param a_pIemCpu The IEM per CPU data.
* @param a_pCtx The CPU context.
*/
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
    ( IEM_VERIFICATION_ENABLED(a_pIemCpu) ? (a_pCtx)->eflags.u : CPUMRawGetEFlags(IEMCPU_TO_VCPU(a_pIemCpu)) )
#else
# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) ( (a_pCtx)->eflags.u )
#endif
/**
* Updates the EFLAGS in the correct manner wrt. PATM.
*
* @param a_pIemCpu The IEM per CPU data.
* @param a_pCtx The CPU context.
*/
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
    do { \
        if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
            (a_pCtx)->eflags.u = (a_fEfl); \
        else \
            CPUMRawSetEFlags(IEMCPU_TO_VCPU(a_pIemCpu), a_fEfl); \
    } while (0)
#else
# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
    do { \
        (a_pCtx)->eflags.u = (a_fEfl); \
    } while (0)
#endif
/** @} */
/** @name Raising Exceptions.
*
* @{
*/
/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
* @{ */
/** CPU exception. */
#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
/** External interrupt (from PIC, APIC, whatever). */
#define IEM_XCPT_FLAGS_T_EXT_INT        RT_BIT_32(1)
/** Software interrupt (int or into, not bound).
 * Returns to the following instruction */
#define IEM_XCPT_FLAGS_T_SOFT_INT       RT_BIT_32(2)
/** Takes an error code. */
#define IEM_XCPT_FLAGS_ERR              RT_BIT_32(3)
/** Takes a CR2. */
#define IEM_XCPT_FLAGS_CR2              RT_BIT_32(4)
/** Generated by the breakpoint instruction. */
#define IEM_XCPT_FLAGS_BP_INSTR         RT_BIT_32(5)
/** Generated by a DRx instruction breakpoint and RF should be cleared. */
#define IEM_XCPT_FLAGS_DRx_INSTR_BP     RT_BIT_32(6)
/** @} */
/**
* Loads the specified stack far pointer from the TSS.
*
* @returns VBox strict status code.
* @param pIemCpu The IEM per CPU instance data.
* @param pCtx The CPU context.
* @param uCpl The CPL to load the stack for.
* @param pSelSS Where to return the new stack segment.
* @param puEsp Where to return the new stack pointer.
*/
static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
                                                   PRTSEL pSelSS, uint32_t *puEsp)
{
*puEsp = 0; /* make gcc happy */
*pSelSS = 0; /* make gcc happy */
{
/*
* 16-bit TSS (X86TSS16).
*/
{
{
Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
}
if (rcStrict == VINF_SUCCESS)
{
return VINF_SUCCESS;
}
break;
}
/*
* 32-bit TSS (X86TSS32).
*/
{
{
Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
}
if (rcStrict == VINF_SUCCESS)
{
return VINF_SUCCESS;
}
break;
}
IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
return rcStrict;
}
/**
* Loads the specified stack pointer from the 64-bit TSS.
*
* @returns VBox strict status code.
* @param pIemCpu The IEM per CPU instance data.
* @param pCtx The CPU context.
* @param uCpl The CPL to load the stack for.
* @param uIst The interrupt stack table index, 0 if to use uCpl.
* @param puRsp Where to return the new stack pointer.
*/
static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst,
                                               uint64_t *puRsp)
{
*puRsp = 0; /* make gcc happy */
if (uIst)
else
{
Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
}
}
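/*
 * Offset sketch for the lookup above (assuming the X86TSS64 layout from
 * x86.h, where rsp0..rsp2 follow a 4-byte reserved field and ist1..ist7
 * start at offset 36):
 *
 *     offTss = uIst ? RT_OFFSETOF(X86TSS64, ist1) + (uIst - 1) * sizeof(uint64_t)
 *                   : RT_OFFSETOF(X86TSS64, rsp0) +  uCpl      * sizeof(uint64_t);
 */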
/**
* Adjust the CPU state according to the exception being raised.
*
* @param pCtx The CPU context.
* @param u8Vector The exception that has been raised.
*/
static void iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
{
switch (u8Vector)
{
case X86_XCPT_DB:
break;
/** @todo Read the AMD and Intel exception reference... */
}
}
/**
* Implements exceptions and interrupts for real mode.
*
* @returns VBox strict status code.
* @param pIemCpu The IEM per CPU instance data.
* @param pCtx The CPU context.
* @param cbInstr The number of bytes to offset rIP by in the return
* address.
* @param u8Vector The interrupt / exception vector number.
* @param fFlags The flags.
* @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
* @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
*/
static VBOXSTRICTRC
iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags,
                            uint16_t uErr, uint64_t uCr2)
{
/*
* Read the IDT entry.
*/
    if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
    {
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
}
return rcStrict;
/*
* Push the stack frame.
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return rcStrict;
/*
* Load the vector address into cs:ip and make exception specific state
* adjustments.
*/
/** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
fEfl &= ~X86_EFL_IF;
/** @todo do we actually do this in real mode? */
if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
}
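/*
 * Frame sketch for the real-mode path above: the elided code pushes FLAGS,
 * CS and IP (IP biased by cbInstr for software interrupts so that IRET
 * returns to the following instruction), then loads CS:IP from the 4-byte
 * IVT entry at IDTR.base + u8Vector * 4.
 */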
/**
 * Loads a NULL data selector into a segment register when coming from V8086 mode.
*
* @param pIemCpu The IEM per CPU instance data.
* @param pSReg Pointer to the segment register.
*/
static void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
{
{
/* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
}
else
{
/** @todo check this on AMD-V */
}
}
/**
* Loads a segment selector during a task switch in V8086 mode.
*
* @param pIemCpu The IEM per CPU instance data.
* @param pSReg Pointer to the segment register.
* @param uSel The selector value to load.
*/
static void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
{
/* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
}
/**
* Loads a NULL data selector into a selector register, both the hidden and
* visible parts, in protected mode.
*
* @param pIemCpu The IEM state of the calling EMT.
* @param pSReg Pointer to the segment register.
* @param uRpl The RPL.
*/
static void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
{
    /** @todo Testcase: write a testcase checking what happens when loading a NULL
     *        data selector in protected mode. */
{
/* VT-x (Intel 3960x) observed doing something like this. */
pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
}
else
{
}
}
/**
* Loads a segment selector during a task switch in protected mode. In this task
* switch scenario, we would throw #TS exceptions rather than #GPs.
*
* @returns VBox strict status code.
* @param pIemCpu The IEM per CPU instance data.
* @param pSReg Pointer to the segment register.
* @param uSel The new selector value.
*
* @remarks This does -NOT- handle CS or SS.
* @remarks This expects pIemCpu->uCpl to be up to date.
*/
static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
{
/* Null data selector. */
if (!(uSel & X86_SEL_MASK_OFF_RPL))
{
return VINF_SUCCESS;
}
/* Fetch the descriptor. */
if (rcStrict != VINF_SUCCESS)
{
Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
return rcStrict;
}
/* Must be a data segment or readable code segment. */
{
Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
}
/* Check privileges for data segments and non-conforming code segments. */
!= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
{
/* The RPL and the new CPL must be less than or equal to the DPL. */
{
Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
}
}
/* Is it there? */
{
}
/* The base and limit. */
/*
* Ok, everything checked out fine. Now set the accessed bit before
* committing the result into the registers.
*/
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
/* Commit */
pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
return VINF_SUCCESS;
}
/**
* Performs a task switch.
*
 * If the task switch is the result of a JMP, CALL or IRET instruction, the
 * caller is responsible for performing the necessary checks (like DPL, TSS
 * limit etc.) beforehand. See Intel Instruction reference for JMP, CALL, IRET.
 *
 * If the task switch is due to a software interrupt or a hardware exception,
 * the caller is responsible for validating the TSS selector and descriptor. See
 * Intel Instruction reference for INT n.
*
* @returns VBox strict status code.
* @param pIemCpu The IEM per CPU instance data.
* @param pCtx The CPU context.
* @param enmTaskSwitch What caused this task switch.
* @param uNextEip The EIP effective after the task switch.
* @param fFlags The flags.
* @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
* @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
* @param SelTSS The TSS selector of the new task.
* @param pNewDescTSS Pointer to the new TSS descriptor.
*/
static VBOXSTRICTRC iemTaskSwitch(PIEMCPU pIemCpu, PCPUMCTX pCtx, IEMTASKSWITCH enmTaskSwitch, uint32_t uNextEip,
                                  uint32_t fFlags, uint16_t uErr, uint64_t uCr2, RTSEL SelTSS, PIEMSELDESC pNewDescTSS)
{
Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
    /* Update CR2 in case it's a page-fault; see @bugref{5653} comment #49. */
if (fFlags & IEM_XCPT_FLAGS_CR2)
/*
* Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
* subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
*/
uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
if (uNewTSSLimit < uNewTSSLimitMin)
{
Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
}
/*
* Check the current TSS limit. The last written byte to the current TSS during the
* task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
* See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
*
 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
 * end up with smaller than "legal" TSS limits.
*/
if (uCurTSSLimit < uCurTSSLimitMin)
{
Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
}
/*
* Verify that the new TSS can be accessed and map it. Map only the required contents
* and not the entire TSS.
*/
void *pvNewTSS;
AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, IntRedirBitmap) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
/** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
* not perform correct translation if this happens. See Intel spec. 7.2.1
* "Task-State Segment" */
VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
if (rcStrict != VINF_SUCCESS)
{
Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
return rcStrict;
}
    /*
     * Clear the busy bit in the current TSS descriptor, if it's a JMP or IRET task switch.
     */
if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
|| enmTaskSwitch == IEMTASKSWITCH_IRET)
{
if (rcStrict != VINF_SUCCESS)
{
Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
return rcStrict;
}
if (rcStrict != VINF_SUCCESS)
{
Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
return rcStrict;
}
/* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
if (enmTaskSwitch == IEMTASKSWITCH_IRET)
{
u32EFlags &= ~X86_EFL_NT;
}
}
/*
* Save the CPU state into the current TSS.
*/
if (GCPtrNewTSS == GCPtrCurTSS)
{
Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
}
if (fIsNewTSS386)
{
/*
* Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
* See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
*/
void *pvCurTSS32;
rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
if (rcStrict != VINF_SUCCESS)
{
Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
return rcStrict;
}
/* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTSS..cbCurTSS). */
if (rcStrict != VINF_SUCCESS)
{
Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
return rcStrict;
}
}
else
{
/*
* Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
*/
void *pvCurTSS16;
rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
if (rcStrict != VINF_SUCCESS)
{
Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
return rcStrict;
}
/* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTSS..cbCurTSS). */
if (rcStrict != VINF_SUCCESS)
{
Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
return rcStrict;
}
}
/*
* Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
*/
    if (   enmTaskSwitch == IEMTASKSWITCH_CALL
        || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
{
/* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
}
/*
* Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
* it's done further below with error handling (e.g. CR3 changes will go through PGM).
*/
uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
bool fNewDebugTrap;
if (fIsNewTSS386)
{
}
else
{
uNewCr3 = 0;
uNewFS = 0;
uNewGS = 0;
fNewDebugTrap = false;
}
if (GCPtrNewTSS == GCPtrCurTSS)
Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
/*
* We're done accessing the new TSS.
*/
if (rcStrict != VINF_SUCCESS)
{
Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
return rcStrict;
}
    /*
     * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
     */
if (enmTaskSwitch != IEMTASKSWITCH_IRET)
{
if (rcStrict != VINF_SUCCESS)
{
Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
return rcStrict;
}
/* Check that the descriptor indicates the new TSS is available (not busy). */
if (rcStrict != VINF_SUCCESS)
{
Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
return rcStrict;
}
}
/*
* From this point on, we're technically in the new task. We will defer exceptions
* until the completion of the task switch but before executing any instructions in the new task.
*/
/* Set the busy bit in TR. */
/* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
    if (   enmTaskSwitch == IEMTASKSWITCH_CALL
        || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
{
uNewEflags |= X86_EFL_NT;
}
/*
* Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
* will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
*/
{
}
/*
* Switch CR3 for the new task.
*/
    if (   fIsNewTSS386
        && (pCtx->cr0 & X86_CR0_PG))
{
/** @todo Should we update and flush TLBs only if CR3 value actually changes? */
{
}
else
/* Inform PGM. */
{
/* ignore informational status codes */
}
}
/*
* Switch LDTR for the new task.
*/
if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
else
{
Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
if (rcStrict != VINF_SUCCESS)
{
Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
return rcStrict;
}
{
Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
}
}
if (IEM_IS_V86_MODE(pIemCpu))
{
}
else
{
/*
* Load the stack segment for the new task.
*/
if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
{
Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
}
/* Fetch the descriptor. */
if (rcStrict != VINF_SUCCESS)
{
return rcStrict;
}
/* SS must be a data segment and writable. */
{
Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
}
/* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
{
Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
uNewCpl));
}
/* Is it there? */
{
}
/* Set the accessed bit before committing the result into SS. */
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
/* Commit SS. */
/* CPL has changed, update IEM before loading rest of segments. */
/*
* Load the data segments for the new task.
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* Load the code segment for the new task.
*/
if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
{
Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
}
/* Fetch the descriptor. */
if (rcStrict != VINF_SUCCESS)
{
Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
return rcStrict;
}
/* CS must be a code segment. */
{
Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
}
/* For conforming CS, DPL must be less than or equal to the RPL. */
{
Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
}
/* For non-conforming CS, DPL must match RPL. */
{
Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
}
/* Is it there? */
{
}
/* Set the accessed bit before committing the result into CS. */
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
/* Commit CS. */
}
/** @todo Debug trap. */
if (fIsNewTSS386 && fNewDebugTrap)
Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
/*
* Construct the error code masks based on what caused this task switch.
* See Intel Instruction reference for INT.
*/
if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
&& !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
{
uExt = 1;
}
else
uExt = 0;
/*
* Push any error code on to the new stack.
*/
if (fFlags & IEM_XCPT_FLAGS_ERR)
{
        /* Check that there is sufficient space on the stack. */
        /** @todo Factor the stack frame checks here and the ones in
         *        iemRaiseXcptOrIntInProtMode into a separate function. */
{
{
Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
cbStackFrame));
}
}
else
{
{
Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
cbStackFrame));
}
}
if (fIsNewTSS386)
else
if (rcStrict != VINF_SUCCESS)
{
Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
return rcStrict;
}
}
/* Check the new EIP against the new CS limit. */
{
Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
}
Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
}
/**
* Implements exceptions and interrupts for protected mode.
*
* @returns VBox strict status code.
* @param pIemCpu The IEM per CPU instance data.
* @param pCtx The CPU context.
* @param cbInstr The number of bytes to offset rIP by in the return
* address.
* @param u8Vector The interrupt / exception vector number.
* @param fFlags The flags.
* @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
* @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
*/
static VBOXSTRICTRC
iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags,
                            uint16_t uErr, uint64_t uCr2)
{
/*
* Read the IDT entry.
*/
    if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
    {
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
}
return rcStrict;
Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
/*
* Check the descriptor type, DPL and such.
* ASSUMES this is done in the same order as described for call-gate calls.
*/
{
Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
}
bool fTaskGate = false;
uint8_t f32BitGate = true;
{
case X86_SEL_TYPE_SYS_LDT:
{
/** @todo check what actually happens when the type is wrong...
* esp. call gates. */
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
}
f32BitGate = false;
break;
fTaskGate = true;
#ifndef IEM_IMPLEMENTS_TASKSWITCH
IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
#endif
break;
f32BitGate = false;
break;
}
/* Check DPL against CPL if applicable. */
if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
{
{
Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
}
}
/* Is it there? */
{
return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
}
/* Is it a task-gate? */
if (fTaskGate)
{
/*
* Construct the error code masks based on what caused this task switch.
* See Intel Instruction reference for INT.
*/
/*
* Fetch the TSS descriptor in the GDT.
*/
rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
if (rcStrict != VINF_SUCCESS)
{
Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
return rcStrict;
}
/* The TSS descriptor must be a system segment and be available (not busy). */
{
Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
}
/* The TSS must be present. */
{
Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
}
/* Do the actual task switch. */
return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
}
/* A null CS is bad. */
if (!(NewCS & X86_SEL_MASK_OFF_RPL))
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* Fetch the descriptor for the new CS. */
rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
if (rcStrict != VINF_SUCCESS)
{
Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
return rcStrict;
}
/* Must be a code segment. */
{
Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
}
{
Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
}
/* Don't allow lowering the privilege level. */
/** @todo Does the lowering of privileges apply to software interrupts
* only? This has bearings on the more-privileged or
* same-privilege stack behavior further down. A testcase would
* be nice. */
{
Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
}
/* Make sure the selector is present. */
{
}
/* Check the new EIP against the new CS limit. */
{
Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
return iemRaiseGeneralProtectionFault(pIemCpu, 0);
}
/* Calc the flag image to push. */
    if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
        fEfl &= ~X86_EFL_RF;
else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
/* From V8086 mode only go to CPL 0. */
{
Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
return iemRaiseGeneralProtectionFault(pIemCpu, 0);
}
/*
* If the privilege level changes, we need to get a new stack from the TSS.
* This in turns means validating the new SS and ESP...
*/
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Check that there is sufficient space for the stack frame. */
{
|| uNewEsp < cbStackFrame)
{
Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
}
}
else
{
{
Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
}
}
/*
* Start making changes.
*/
/* Create the stack frame. */
uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (f32BitGate)
{
if (fFlags & IEM_XCPT_FLAGS_ERR)
if (fEfl & X86_EFL_VM)
{
}
}
else
{
if (fFlags & IEM_XCPT_FLAGS_ERR)
if (fEfl & X86_EFL_VM)
{
}
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Mark the selectors 'accessed' (hope this is the correct time). */
/** @todo testcase: exactly _when_ are the accessed bits set - before or
* after pushing the stack frame? (Write protect the gdt + stack to
* find out.) */
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
/*
 * Start committing the register changes (joins with the DPL=CPL branch).
*/
pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
if (fEfl & X86_EFL_VM)
{
}
}
/*
* Same privilege, no stack change and smaller stack frame.
*/
else
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (f32BitGate)
{
if (fFlags & IEM_XCPT_FLAGS_ERR)
}
else
{
if (fFlags & IEM_XCPT_FLAGS_ERR)
}
rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Mark the CS selector as 'accessed'. */
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
/*
* Start committing the register changes (joins with the other branch).
*/
}
/* ... register committing continues. */
fEfl &= ~fEflToClear;
if (fFlags & IEM_XCPT_FLAGS_CR2)
if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
}
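#if 0
/* Illustrative sketch (not IEM code): the stack frame size logic the
   protected-mode worker above implements.  The basic frame is EIP/CS/EFLAGS
   (three slots), plus an error code slot when one is delivered, plus ESP/SS
   on a privilege change, plus ES/DS/FS/GS when coming out of V8086 mode; the
   slot size follows the gate size. */
static uint32_t sketchXcptStackFrameSize(bool f32BitGate, bool fErrCode, bool fPrivChange, bool fV86)
{
    uint32_t cSlots = 3 + (fErrCode ? 1 : 0);
    if (fPrivChange || fV86)    /* V8086 interrupts always switch to ring-0. */
        cSlots += 2;            /* ESP + SS */
    if (fV86)
        cSlots += 4;            /* ES + DS + FS + GS */
    return cSlots * (f32BitGate ? 4 : 2);
}
#endif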
/**
* Implements exceptions and interrupts for long mode.
*
* @returns VBox strict status code.
* @param pIemCpu The IEM per CPU instance data.
* @param pCtx The CPU context.
* @param cbInstr The number of bytes to offset rIP by in the return
* address.
* @param u8Vector The interrupt / exception vector number.
* @param fFlags The flags.
* @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
* @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
*/
static VBOXSTRICTRC
{
/*
* Read the IDT entry.
*/
{
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
}
VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
if (rcStrict != VINF_SUCCESS)
    return rcStrict;
Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
/*
* Check the descriptor type, DPL and such.
* ASSUMES this is done in the same order as described for call-gate calls.
*/
{
Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
}
{
break;
break;
default:
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
}
/* Check DPL against CPL if applicable. */
if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
{
{
Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
}
}
/* Is it there? */
{
return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
}
/* A null CS is bad. */
if (!(NewCS & X86_SEL_MASK_OFF_RPL))
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* Fetch the descriptor for the new CS. */
if (rcStrict != VINF_SUCCESS)
{
Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
return rcStrict;
}
/* Must be a 64-bit code segment. */
{
Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
}
{
Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
}
/* Don't allow lowering the privilege level.  For non-conforming CS
   selectors, the new CS.DPL is the privilege level the trap/interrupt
   handler runs at.  For conforming CS selectors, the CPL remains
   unchanged, but the CS.DPL must be <= CPL. */
/** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
* when CPU in Ring-0. Result \#GP? */
{
Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
}
/* Make sure the selector is present. */
{
}
/* Check that the new RIP is canonical. */
if (!IEM_IS_CANONICAL(uNewRip))
{
Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* If the privilege level changes or if the IST isn't zero, we need to get
* a new stack from the TSS.
*/
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
else
/*
* Calc the flag image to push.
*/
fEfl &= ~X86_EFL_RF;
else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
/*
* Start making changes.
*/
/* Create the stack frame. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (fFlags & IEM_XCPT_FLAGS_ERR)
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Mark the CS selectors 'accessed' (hope this is the correct time). */
/** @todo testcase: exactly _when_ are the accessed bits set - before or
* after pushing the stack frame? (Write protect the gdt + stack to
* find out.) */
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
/*
 * Start committing the register changes.
*/
/** @todo testcase: what gets loaded into the hidden segment registers when
 *        interrupting 32-bit or 16-bit code! */
{
}
fEfl &= ~fEflToClear;
if (fFlags & IEM_XCPT_FLAGS_CR2)
if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
}
/**
* Implements exceptions and interrupts.
*
 * All exceptions and interrupts go through this function!
*
* @returns VBox strict status code.
* @param pIemCpu The IEM per CPU instance data.
* @param cbInstr The number of bytes to offset rIP by in the return
* address.
* @param u8Vector The interrupt / exception vector number.
* @param fFlags The flags.
* @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
* @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
*/
DECL_NO_INLINE(static, VBOXSTRICTRC)
{
/*
* Perform the V8086 IOPL check and upgrade the fault without nesting.
*/
{
uErr = 0;
}
#ifdef DBGFTRACE_ENABLED
RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
#endif
/*
* Do recursion accounting.
*/
if (pIemCpu->cXcptRecursions == 0)
Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
else
{
Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
/** @todo double and triple faults. */
{
#ifdef DEBUG_bird
AssertFailed();
#endif
IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
}
/** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
{
....
} */
}
/*
* Extensive logging.
*/
#if defined(LOG_ENABLED) && defined(IN_RING3)
if (LogIs3Enabled())
{
char szRegs[4096];
"rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
"rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
"r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
"r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
"rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
"cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
"ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
"es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
"fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
"gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
"ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
"dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
"dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
"gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
"ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
"tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
" sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
" efer=%016VR{efer}\n"
" pat=%016VR{pat}\n"
" sf_mask=%016VR{sf_mask}\n"
"krnl_gs_base=%016VR{krnl_gs_base}\n"
" lstar=%016VR{lstar}\n"
" star=%016VR{star} cstar=%016VR{cstar}\n"
"fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
);
char szInstr[256];
}
#endif /* LOG_ENABLED */
/*
* Call the mode specific worker function.
*/
else
/*
* Unwind.
*/
Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
return rcStrict;
}
/** \#DE - 00. */
{
}
/** \#DB - 01.
 * @note This automatically clears DR7.GD. */
{
}
/** \#UD - 06. */
{
}
/** \#NM - 07. */
{
}
/** \#TS(err) - 0a. */
{
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
}
/** \#TS(tr) - 0a. */
{
}
/** \#TS(0) - 0a. */
{
0, 0);
}
/** \#TS(err) - 0a. */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
{
uSel & X86_SEL_MASK_OFF_RPL, 0);
}
/** \#NP(err) - 0b. */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
{
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
}
/** \#NP(seg) - 0b. */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
{
}
/** \#NP(sel) - 0b. */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
{
uSel & ~X86_SEL_RPL, 0);
}
/** \#SS(seg) - 0c. */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
{
uSel & ~X86_SEL_RPL, 0);
}
/** \#SS(err) - 0c. */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
{
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
}
/** \#GP(n) - 0d. */
{
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
}
/** \#GP(0) - 0d. */
{
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
}
/** \#GP(sel) - 0d. */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
{
Sel & ~X86_SEL_RPL, 0);
}
/** \#GP(0) - 0d. */
{
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
}
/** \#GP(sel) - 0d. */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
{
IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
}
/** \#GP(sel) - 0d. */
{
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
}
/** \#GP(sel) - 0d. */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
{
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
}
/** \#PF(n) - 0e. */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
{
switch (rc)
{
case VERR_PAGE_NOT_PRESENT:
uErr = 0;
break;
default:
case VERR_ACCESS_DENIED:
    uErr = X86_TRAP_PF_P; /* the page was present */
    break;
/** @todo reserved */
}
if (pIemCpu->uCpl == 3)
    uErr |= X86_TRAP_PF_US;     /* user-mode access */
if (fAccess & IEM_ACCESS_TYPE_EXEC)
    uErr |= X86_TRAP_PF_ID;     /* instruction fetch; strictly only reported when NX is enabled */
#if 0 /* This is so much non-sense, really. Why was it done like that? */
/* Note! RW access callers reporting a WRITE protection fault, will clear
the READ flag before calling. So, read-modify-write accesses (RW)
can safely be reported as READ faults. */
uErr |= X86_TRAP_PF_RW;
#else
if (fAccess & IEM_ACCESS_TYPE_WRITE)
{
uErr |= X86_TRAP_PF_RW;
}
#endif
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
uErr, GCPtrWhere);
}
/** \#MF(0) - 10. */
{
}
/** \#AC(0) - 11. */
{
}
/**
* Macro for calling iemCImplRaiseDivideError().
*
 * This enables us to add/remove arguments and force different levels of
 * inlining as we wish.
*
* @return Strict VBox status code.
*/
{
}
/**
* Macro for calling iemCImplRaiseInvalidLockPrefix().
*
 * This enables us to add/remove arguments and force different levels of
 * inlining as we wish.
*
* @return Strict VBox status code.
*/
{
}
/**
* Macro for calling iemCImplRaiseInvalidOpcode().
*
 * This enables us to add/remove arguments and force different levels of
 * inlining as we wish.
*
* @return Strict VBox status code.
*/
{
}
/** @} */
/*
*
 * Helper routines.
 * Helper routines.
 * Helper routines.
*
*/
/**
* Recalculates the effective operand size.
*
* @param pIemCpu The IEM state.
*/
{
switch (pIemCpu->enmCpuMode)
{
case IEMMODE_16BIT:
break;
case IEMMODE_32BIT:
break;
case IEMMODE_64BIT:
{
case 0:
break;
case IEM_OP_PRF_SIZE_OP:
break;
case IEM_OP_PRF_SIZE_REX_W:
break;
}
break;
default:
AssertFailed();
}
}
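#if 0
/* Illustrative sketch (not IEM code) of the effective operand size rules the
   switch above implements: 16-bit and 32-bit modes toggle between their
   default and the other size on an operand-size (0x66) prefix; 64-bit mode
   defaults to 32-bit, with 0x66 selecting 16-bit and REX.W forcing 64-bit
   (REX.W takes precedence over 0x66). */
static IEMMODE sketchCalcEffOpSize(IEMMODE enmCpuMode, bool f66Prefix, bool fRexW)
{
    switch (enmCpuMode)
    {
        case IEMMODE_16BIT: return f66Prefix ? IEMMODE_32BIT : IEMMODE_16BIT;
        case IEMMODE_32BIT: return f66Prefix ? IEMMODE_16BIT : IEMMODE_32BIT;
        default:
        case IEMMODE_64BIT: return fRexW ? IEMMODE_64BIT : f66Prefix ? IEMMODE_16BIT : IEMMODE_32BIT;
    }
}
#endif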
/**
* Sets the default operand size to 64-bit and recalculates the effective
* operand size.
*
* @param pIemCpu The IEM state.
*/
{
else
}
/*
*
* Common opcode decoders.
* Common opcode decoders.
* Common opcode decoders.
*
*/
/**
* Used to add extra details about a stub case.
* @param pIemCpu The IEM per CPU state.
*/
{
#if defined(LOG_ENABLED) && defined(IN_RING3)
char szRegs[4096];
"rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
"rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
"r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
"r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
"rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
"cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
"ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
"es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
"fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
"gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
"ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
"dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
"dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
"gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
"ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
"tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
" sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
" efer=%016VR{efer}\n"
" pat=%016VR{pat}\n"
" sf_mask=%016VR{sf_mask}\n"
"krnl_gs_base=%016VR{krnl_gs_base}\n"
" lstar=%016VR{lstar}\n"
" star=%016VR{star} cstar=%016VR{cstar}\n"
"fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
);
char szInstr[256];
#else
#endif
}
/**
* Complains about a stub.
*
* Providing two versions of this macro, one for daily use and one for use when
* working on IEM.
*/
#if 0
# define IEMOP_BITCH_ABOUT_STUB() \
do { \
iemOpStubMsg2(pIemCpu); \
RTAssertPanic(); \
} while (0)
#else
#endif
/** Stubs an opcode. */
#define FNIEMOP_STUB(a_Name) \
{ \
return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
} \
typedef int ignore_semicolon
/** Stubs an opcode. */
{ \
return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
} \
typedef int ignore_semicolon
/** Stubs an opcode which currently should raise \#UD. */
#define FNIEMOP_UD_STUB(a_Name) \
{ \
return IEMOP_RAISE_INVALID_OPCODE(); \
} \
typedef int ignore_semicolon
/** Stubs an opcode which currently should raise \#UD. */
{ \
return IEMOP_RAISE_INVALID_OPCODE(); \
} \
typedef int ignore_semicolon
/** @name Register Access.
* @{
*/
/**
* Gets a reference (pointer) to the specified hidden segment register.
*
* @returns Hidden register reference.
* @param pIemCpu The per CPU data.
* @param iSegReg The segment register.
*/
{
switch (iSegReg)
{
default:
}
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
#else
#endif
return pSReg;
}
/**
* Gets a reference (pointer) to the specified segment register (the selector
* value).
*
* @returns Pointer to the selector variable.
* @param pIemCpu The per CPU data.
* @param iSegReg The segment register.
*/
{
switch (iSegReg)
{
}
}
/**
* Fetches the selector value of a segment register.
*
* @returns The selector value.
* @param pIemCpu The per CPU data.
* @param iSegReg The segment register.
*/
{
switch (iSegReg)
{
}
AssertFailedReturn(0xffff);
}
/**
* Gets a reference (pointer) to the specified general register.
*
* @returns Register reference.
* @param pIemCpu The per CPU data.
* @param iReg The general register.
*/
{
switch (iReg)
{
}
}
/**
* Gets a reference (pointer) to the specified 8-bit general register.
*
* Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
*
* @returns Register reference.
* @param pIemCpu The per CPU data.
* @param iReg The register.
*/
{
    /* Assuming no REX prefix: register indices 4-7 select AH/CH/DH/BH, which
       live at byte 1 of rAX/rCX/rDX/rBX. */
    uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
    if (iReg >= 4)
        pu8Reg++;
    return pu8Reg;
}
/**
 * Fetches the value of an 8-bit general register.
*
* @returns The register value.
* @param pIemCpu The per CPU data.
* @param iReg The register.
*/
{
return *pbSrc;
}
/**
* Fetches the value of a 16-bit general register.
*
* @returns The register value.
* @param pIemCpu The per CPU data.
* @param iReg The register.
*/
{
}
/**
* Fetches the value of a 32-bit general register.
*
* @returns The register value.
* @param pIemCpu The per CPU data.
* @param iReg The register.
*/
{
}
/**
* Fetches the value of a 64-bit general register.
*
* @returns The register value.
* @param pIemCpu The per CPU data.
* @param iReg The register.
*/
{
}
/**
 * Performs a relative near jump, honouring the effective operand size.
 *
* May raise a \#GP(0) if the new RIP is non-canonical or outside the code
* segment limit.
*
* @param pIemCpu The per CPU data.
* @param offNextInstr The offset of the next instruction.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
break;
}
case IEMMODE_32BIT:
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
break;
}
case IEMMODE_64BIT:
{
if (!IEM_IS_CANONICAL(uNewRip))
return iemRaiseGeneralProtectionFault0(pIemCpu);
break;
}
}
return VINF_SUCCESS;
}
/**
 * Performs a 16-bit relative near jump.
 *
* May raise a \#GP(0) if the new RIP is non-canonical or outside the code
* segment limit.
*
* @returns Strict VBox status code.
* @param pIemCpu The per CPU data.
* @param offNextInstr The offset of the next instruction.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
/** @todo Test 16-bit jump in 64-bit mode. possible? */
return VINF_SUCCESS;
}
/**
 * Performs a 32-bit relative near jump.
 *
* May raise a \#GP(0) if the new RIP is non-canonical or outside the code
* segment limit.
*
* @returns Strict VBox status code.
* @param pIemCpu The per CPU data.
* @param offNextInstr The offset of the next instruction.
*/
{
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
else
{
if (!IEM_IS_CANONICAL(uNewRip))
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
return VINF_SUCCESS;
}
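#if 0
/* Illustrative sketch (not IEM code) of the jump target checks the relative
   jump helpers above perform: 16-bit and 32-bit code checks the new IP
   against the CS limit (inclusive), while 64-bit code only requires the new
   RIP to be canonical. */
static bool sketchJumpTargetOk(uint64_t uNewIp, uint32_t cbCsLimit, bool fIs64Bit)
{
    if (fIs64Bit)
        return uNewIp <  UINT64_C(0x0000800000000000)   /* low canonical half */
            || uNewIp >= UINT64_C(0xffff800000000000);  /* high canonical half */
    return uNewIp <= cbCsLimit;
}
#endif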
/**
* Performs a near jump to the specified address.
*
* May raise a \#GP(0) if the new RIP is non-canonical or outside the code
* segment limit.
*
* @param pIemCpu The per CPU data.
* @param uNewRip The new RIP value.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
/** @todo Test 16-bit jump in 64-bit mode. */
break;
}
case IEMMODE_32BIT:
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
break;
}
case IEMMODE_64BIT:
{
if (!IEM_IS_CANONICAL(uNewRip))
return iemRaiseGeneralProtectionFault0(pIemCpu);
break;
}
}
return VINF_SUCCESS;
}
/**
* Get the address of the top of the stack.
*
* @param pIemCpu The per CPU data.
 * @param   pCtx        The CPU context which SP/ESP/RSP should be
 *                      read.
*/
{
}
/**
 * Updates the RIP/EIP/IP to point to the next instruction.
 *
* This function leaves the EFLAGS.RF flag alone.
*
* @param pIemCpu The per CPU data.
* @param cbInstr The number of bytes to add.
*/
{
switch (pIemCpu->enmCpuMode)
{
case IEMMODE_16BIT:
break;
case IEMMODE_32BIT:
break;
case IEMMODE_64BIT:
break;
default: AssertFailed();
}
}
#if 0
/**
*
* @param pIemCpu The per CPU data.
*/
{
}
#endif
/**
 * Updates the RIP/EIP/IP to point to the next instruction.
 *
* @param pIemCpu The per CPU data.
* @param cbInstr The number of bytes to add.
*/
{
switch (pIemCpu->enmCpuMode)
{
case IEMMODE_16BIT:
break;
case IEMMODE_32BIT:
break;
case IEMMODE_64BIT:
break;
default: AssertFailed();
}
}
/**
 * Updates the RIP/EIP/IP to point to the next instruction.
 *
* @param pIemCpu The per CPU data.
*/
{
}
/**
* Adds to the stack pointer.
*
* @param pIemCpu The per CPU data.
* updated.
* @param cbToAdd The number of bytes to add.
*/
{
else
}
/**
* Subtracts from the stack pointer.
*
* @param pIemCpu The per CPU data.
* updated.
* @param cbToSub The number of bytes to subtract.
*/
{
else
}
/**
* Adds to the temporary stack pointer.
*
* @param pIemCpu The per CPU data.
* @param cbToAdd The number of bytes to add.
* @param pCtx Where to get the current stack mode.
*/
DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
{
else
}
/**
* Subtracts from the temporary stack pointer.
*
* @param pIemCpu The per CPU data.
* @param cbToSub The number of bytes to subtract.
* @param pCtx Where to get the current stack mode.
* @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
* expecting that.
*/
DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
{
else
}
/**
* Calculates the effective stack address for a push of the specified size as
* well as the new RSP value (upper bits may be masked).
*
 * @returns Effective stack address for the push.
* @param pIemCpu The IEM per CPU data.
* @param pCtx Where to get the current stack mode.
 * @param cbItem The size of the stack item to push.
* @param puNewRsp Where to return the new RSP value.
*/
DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
{
else
return GCPtrTop;
}
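#if 0
/* Illustrative sketch (not IEM code) of the push address math described
   above, using plain integers instead of the IEM state: 64-bit code moves
   the whole RSP, a 32-bit stack (SS.B set) only the low 32 bits, and a
   16-bit stack only the low 16 bits; the returned effective address is the
   decremented, width-masked value. */
static uint64_t sketchRspForPush(uint64_t uRsp, bool fIs64Bit, bool fSsDefBig, uint8_t cbItem, uint64_t *puNewRsp)
{
    if (fIs64Bit)
    {
        *puNewRsp = uRsp - cbItem;
        return uRsp - cbItem;
    }
    if (fSsDefBig)
    {
        uint32_t uNewEsp = (uint32_t)uRsp - cbItem;
        *puNewRsp = (uRsp & UINT64_C(0xffffffff00000000)) | uNewEsp;
        return uNewEsp;
    }
    uint16_t uNewSp = (uint16_t)((uint16_t)uRsp - cbItem);
    *puNewRsp = (uRsp & UINT64_C(0xffffffffffff0000)) | uNewSp;
    return uNewSp;
}
#endif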
/**
* Gets the current stack pointer and calculates the value after a pop of the
* specified size.
*
* @returns Current stack pointer.
* @param pIemCpu The per CPU data.
* @param pCtx Where to get the current stack mode.
* @param cbItem The size of the stack item to pop.
* @param puNewRsp Where to return the new RSP value.
*/
DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
{
{
}
{
}
else
{
}
return GCPtrTop;
}
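#if 0
/* Pop counterpart of the push sketch above (again illustrative, not IEM
   code): the returned address is the current top of stack, while *puNewRsp
   receives the incremented value, respecting the same width rules. */
static uint64_t sketchRspForPop(uint64_t uRsp, bool fIs64Bit, bool fSsDefBig, uint8_t cbItem, uint64_t *puNewRsp)
{
    if (fIs64Bit)
    {
        *puNewRsp = uRsp + cbItem;
        return uRsp;
    }
    if (fSsDefBig)
    {
        *puNewRsp = (uRsp & UINT64_C(0xffffffff00000000)) | (uint32_t)((uint32_t)uRsp + cbItem);
        return (uint32_t)uRsp;
    }
    *puNewRsp = (uRsp & UINT64_C(0xffffffffffff0000)) | (uint16_t)((uint16_t)uRsp + cbItem);
    return (uint16_t)uRsp;
}
#endif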
/**
* Calculates the effective stack address for a push of the specified size as
* well as the new temporary RSP value (upper bits may be masked).
*
 * @returns Effective stack address for the push.
* @param pIemCpu The per CPU data.
* @param pTmpRsp The temporary stack pointer. This is updated.
 * @param cbItem The size of the stack item to push.
 * @param pCtx Where to get the current stack mode.
*/
DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
{
else
return GCPtrTop;
}
/**
* Gets the effective stack address for a pop of the specified size and
* calculates and updates the temporary RSP.
*
* @returns Current stack pointer.
* @param pIemCpu The per CPU data.
* @param pTmpRsp The temporary stack pointer. This is updated.
* @param pCtx Where to get the current stack mode.
* @param cbItem The size of the stack item to pop.
*/
DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
{
{
}
{
}
else
{
}
return GCPtrTop;
}
/**
* Checks if an Intel CPUID feature bit is set.
*
* @returns true / false.
*
* @param pIemCpu The IEM per CPU data.
* @param fEdx The EDX bit to test, or 0 if ECX.
* @param fEcx The ECX bit to test, or 0 if EDX.
* @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
* IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
*/
{
}
/**
* Checks if an AMD CPUID feature bit is set.
*
* @returns true / false.
*
* @param pIemCpu The IEM per CPU data.
* @param fEdx The EDX bit to test, or 0 if ECX.
* @param fEcx The ECX bit to test, or 0 if EDX.
* @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
* IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
*/
{
}
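#if 0
/* Illustrative sketch (not IEM code) of the CPUID feature tests above, taking
   the already-fetched leaf values as inputs (leaf 1 for the Intel variant,
   leaf 0x80000001 for the AMD one): exactly one of fEdx/fEcx is non-zero and
   selects which register the bit is tested in. */
static bool sketchIsCpuIdFeaturePresent(uint32_t uLeafEdx, uint32_t uLeafEcx, uint32_t fEdx, uint32_t fEcx)
{
    if (fEdx)
        return (uLeafEdx & fEdx) != 0;
    return (uLeafEcx & fEcx) != 0;
}
#endif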
/** @} */
/** @name FPU access and helpers.
*
* @{
*/
/**
* Hook for preparing to use the host FPU.
*
* This is necessary in ring-0 and raw-mode context.
*
* @param pIemCpu The IEM per CPU data.
*/
{
#ifdef IN_RING3
#else
/** @todo RZ: FIXME */
//# error "Implement me"
#endif
}
/**
 * Hook for preparing to use the host FPU for SSE instructions.
*
* This is necessary in ring-0 and raw-mode context.
*
* @param pIemCpu The IEM per CPU data.
*/
{
}
/**
* Stores a QNaN value into a FPU register.
*
* @param pReg Pointer to the register.
*/
{
    /* The x87 'real indefinite' QNaN: sign set, exponent all ones, and only
       the integer and top mantissa bits set.  (Reconstructed body.) */
    pReg->au32[0] = UINT32_C(0x00000000);
    pReg->au32[1] = UINT32_C(0xc0000000);
    pReg->au16[4] = UINT16_C(0xffff);
}
/**
* Updates the FOP, FPU.CS and FPUIP registers.
*
* @param pIemCpu The IEM per CPU data.
* @param pCtx The CPU context.
* @param pFpuCtx The FPU context.
*/
{
/** @todo x87.CS and FPUIP need to be kept separately. */
{
/** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
* happens in real mode here based on the fnsave and fnstenv images. */
}
else
{
}
}
/**
* Updates the x87.DS and FPUDP registers.
*
* @param pIemCpu The IEM per CPU data.
* @param pCtx The CPU context.
* @param pFpuCtx The FPU context.
* @param iEffSeg The effective segment register.
* @param GCPtrEff The effective address relative to @a iEffSeg.
*/
DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
{
switch (iEffSeg)
{
default:
}
/** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
{
}
else
{
}
}
/**
* Rotates the stack registers in the push direction.
*
* @param pFpuCtx The FPU context.
* @remarks This is a complete waste of time, but fxsave stores the registers in
* stack order.
*/
{
}
/**
* Rotates the stack registers in the pop direction.
*
* @param pFpuCtx The FPU context.
* @remarks This is a complete waste of time, but fxsave stores the registers in
* stack order.
*/
{
}
/**
* Updates FSW and pushes a FPU result onto the FPU stack if no pending
* exception prevents it.
*
* @param pIemCpu The IEM per CPU data.
* @param pResult The FPU operation result to push.
* @param pFpuCtx The FPU context.
*/
{
/* Update FSW and bail if there are pending exceptions afterwards. */
{
return;
}
{
/* All is fine, push the actual value. */
}
{
/* Masked stack overflow, push QNaN. */
}
else
{
/* Raise stack overflow, don't push anything. */
return;
}
fFsw &= ~X86_FSW_TOP_MASK;
}
/**
* Stores a result in a FPU register and updates the FSW and FTW.
*
* @param pFpuCtx The FPU context.
* @param pResult The result to store.
* @param iStReg Which FPU register to store it in.
*/
{
}
/**
* Only updates the FPU status word (FSW) with the result of the current
* instruction.
*
* @param pFpuCtx The FPU context.
* @param u16FSW The FSW output of the current instruction.
*/
{
}
/**
* Pops one item off the FPU stack if no pending exception prevents it.
*
* @param pFpuCtx The FPU context.
*/
{
/* Check pending exceptions. */
return;
/* TOP--. */
uFSW &= ~X86_FSW_TOP_MASK;
/* Mark the previous ST0 as empty. */
iOldTop >>= X86_FSW_TOP_SHIFT;
/* Rotate the registers. */
}
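#if 0
/* Illustrative sketch (not IEM code) of the FSW.TOP arithmetic used by the
   push/pop helpers around here: TOP lives in bits 11-13 of the status word
   and wraps modulo 8. */
static uint16_t sketchFswIncTop(uint16_t uFsw)
{
    uint16_t uTop = (uFsw & X86_FSW_TOP_MASK) >> X86_FSW_TOP_SHIFT;
    uTop = (uTop + 1) & 7;
    return (uint16_t)((uFsw & ~X86_FSW_TOP_MASK) | (uTop << X86_FSW_TOP_SHIFT));
}
#endif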
/**
* Pushes a FPU result onto the FPU stack if no pending exception prevents it.
*
* @param pIemCpu The IEM per CPU data.
* @param pResult The FPU operation result to push.
*/
{
}
/**
* Pushes a FPU result onto the FPU stack if no pending exception prevents it,
* and sets FPUDP and FPUDS.
*
* @param pIemCpu The IEM per CPU data.
* @param pResult The FPU operation result to push.
* @param iEffSeg The effective segment register.
* @param GCPtrEff The effective address relative to @a iEffSeg.
*/
static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
{
}
/**
* Replace ST0 with the first value and push the second onto the FPU stack,
* unless a pending exception prevents it.
*
* @param pIemCpu The IEM per CPU data.
* @param pResult The FPU operation result to store and push.
*/
{
/* Update FSW and bail if there are pending exceptions afterwards. */
{
return;
}
{
/* All is fine, push the actual value. */
}
{
/* Masked stack overflow, push QNaN. */
}
else
{
/* Raise stack overflow, don't push anything. */
return;
}
fFsw &= ~X86_FSW_TOP_MASK;
}
/**
* Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
* FOP.
*
* @param pIemCpu The IEM per CPU data.
* @param pResult The result to store.
* @param iStReg Which FPU register to store it in.
* @param pCtx The CPU context.
*/
{
}
/**
* Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
* FOP, and then pops the stack.
*
* @param pIemCpu The IEM per CPU data.
* @param pResult The result to store.
* @param iStReg Which FPU register to store it in.
* @param pCtx The CPU context.
*/
{
}
/**
* Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
* FPUDP, and FPUDS.
*
* @param pIemCpu The IEM per CPU data.
* @param pResult The result to store.
* @param iStReg Which FPU register to store it in.
* @param pCtx The CPU context.
* @param iEffSeg The effective memory operand selector register.
* @param GCPtrEff The effective memory operand offset.
*/
static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
{
}
/**
* Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
* FPUDP, and FPUDS, and then pops the stack.
*
* @param pIemCpu The IEM per CPU data.
* @param pResult The result to store.
* @param iStReg Which FPU register to store it in.
* @param pCtx The CPU context.
* @param iEffSeg The effective memory operand selector register.
* @param GCPtrEff The effective memory operand offset.
*/
{
}
/**
* Updates the FOP, FPUIP, and FPUCS. For FNOP.
*
* @param pIemCpu The IEM per CPU data.
*/
{
}
/**
* Marks the specified stack register as free (for FFREE).
*
* @param pIemCpu The IEM per CPU data.
* @param iStReg The register to free.
*/
{
}
/**
* Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
*
* @param pIemCpu The IEM per CPU data.
*/
{
uFsw &= ~X86_FSW_TOP_MASK;
}
/**
 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
*
* @param pIemCpu The IEM per CPU data.
*/
{
uFsw &= ~X86_FSW_TOP_MASK;
}
/**
* Updates the FSW, FOP, FPUIP, and FPUCS.
*
* @param pIemCpu The IEM per CPU data.
* @param u16FSW The FSW from the current instruction.
*/
{
}
/**
* Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
*
* @param pIemCpu The IEM per CPU data.
* @param u16FSW The FSW from the current instruction.
*/
{
}
/**
* Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
*
* @param pIemCpu The IEM per CPU data.
* @param u16FSW The FSW from the current instruction.
* @param iEffSeg The effective memory operand selector register.
* @param GCPtrEff The effective memory operand offset.
*/
static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
{
}
/**
* Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
*
* @param pIemCpu The IEM per CPU data.
* @param u16FSW The FSW from the current instruction.
*/
{
}
/**
* Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
*
* @param pIemCpu The IEM per CPU data.
* @param u16FSW The FSW from the current instruction.
* @param iEffSeg The effective memory operand selector register.
* @param GCPtrEff The effective memory operand offset.
*/
static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
{
}
/**
* Worker routine for raising an FPU stack underflow exception.
*
* @param pIemCpu The IEM per CPU data.
* @param pFpuCtx The FPU context.
* @param iStReg The stack register being accessed.
*/
{
{
/* Masked underflow. */
{
}
}
else
{
}
}
/**
* Raises a FPU stack underflow exception.
*
* @param pIemCpu The IEM per CPU data.
* @param iStReg The destination register that should be loaded
* with QNaN if \#IS is not masked. Specify
* UINT8_MAX if none (like for fcom).
*/
{
}
DECL_NO_INLINE(static, void)
{
}
{
}
DECL_NO_INLINE(static, void)
iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
{
}
{
}
DECL_NO_INLINE(static, void)
{
{
/* Masked overflow - Push QNaN. */
}
else
{
/* Exception pending - don't change TOP or the register stack. */
}
}
DECL_NO_INLINE(static, void)
{
{
/* Masked overflow - Push QNaN. */
}
else
{
/* Exception pending - don't change TOP or the register stack. */
}
}
/**
* Worker routine for raising an FPU stack overflow exception on a push.
*
* @param pFpuCtx The FPU context.
*/
{
{
/* Masked overflow. */
}
else
{
/* Exception pending - don't change TOP or the register stack. */
}
}
/**
* Raises a FPU stack overflow exception on a push.
*
* @param pIemCpu The IEM per CPU data.
*/
{
}
/**
* Raises a FPU stack overflow exception on a push with a memory operand.
*
* @param pIemCpu The IEM per CPU data.
* @param iEffSeg The effective memory operand selector register.
* @param GCPtrEff The effective memory operand offset.
*/
DECL_NO_INLINE(static, void)
{
}
{
return VINF_SUCCESS;
return VERR_NOT_FOUND;
}
{
{
return VINF_SUCCESS;
}
return VERR_NOT_FOUND;
}
{
{
return VINF_SUCCESS;
}
return VERR_NOT_FOUND;
}
static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
{
{
return VINF_SUCCESS;
}
return VERR_NOT_FOUND;
}
/**
* Updates the FPU exception status after FCW is changed.
*
* @param pFpuCtx The FPU context.
*/
{
else
}
/**
* Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
*
* @returns The full FTW.
* @param pFpuCtx The FPU context.
*/
{
{
else
{
{
else
}
uTag = 0; /* Valid. */
else
}
}
return u16Ftw;
}
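#if 0
/* Illustrative sketch (not IEM code) of the per-register tag classification
   the full-FTW calculation above needs: the compressed tag byte only says
   in-use/empty, while the full tag word distinguishes valid (0), zero (1),
   special (2) and empty (3); the first three must be derived from the 80-bit
   register image (uExponent is the 15-bit biased exponent without the sign). */
static unsigned sketchFtwTagForReg(bool fInUse, uint16_t uExponent, uint64_t uMantissa)
{
    if (!fInUse)
        return 3;                                       /* empty */
    if (uExponent == 0 && uMantissa == 0)
        return 1;                                       /* zero */
    if (uExponent == 0x7fff || uExponent == 0 || !(uMantissa >> 63))
        return 2;                                       /* NaN/Inf/denormal/unnormal */
    return 0;                                           /* valid */
}
#endif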
/**
* Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
*
* @returns The compressed FTW.
* @param u16FullFtw The full FTW to convert.
*/
{
    uint8_t u8Ftw = 0;
    for (unsigned i = 0; i < 8; i++)
    {
        if ((u16FullFtw & 3) != 3 /* empty */)
            u8Ftw |= (uint8_t)RT_BIT(i);    /* in use */
        u16FullFtw >>= 2;
    }
    return u8Ftw;
}
/** @} */
/** @name Memory access.
*
* @{
*/
/**
* Updates the IEMCPU::cbWritten counter if applicable.
*
* @param pIemCpu The IEM per CPU data.
* @param fAccess The access being accounted for.
* @param cbMem The access size.
*/
{
if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
|| (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
}
/**
* Checks if the given segment can be written to, raise the appropriate
* exception if not.
*
* @returns VBox strict status code.
*
* @param pIemCpu The IEM per CPU data.
* @param pHid Pointer to the hidden register.
* @param iSegReg The register number.
* @param pu64BaseAddr Where to return the base address to use for the
* segment. (In 64-bit code it may differ from the
* base in the hidden segment.)
*/
static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
{
else
{
}
return VINF_SUCCESS;
}
/**
* Checks if the given segment can be read from, raise the appropriate
* exception if not.
*
* @returns VBox strict status code.
*
* @param pIemCpu The IEM per CPU data.
* @param pHid Pointer to the hidden register.
* @param iSegReg The register number.
* @param pu64BaseAddr Where to return the base address to use for the
* segment. (In 64-bit code it may differ from the
* base in the hidden segment.)
*/
static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
{
else
{
}
return VINF_SUCCESS;
}
/**
* Applies the segment limit, base and attributes.
*
* This may raise a \#GP or \#SS.
*
* @returns VBox strict status code.
*
* @param pIemCpu The IEM per CPU data.
* @param fAccess The kind of access which is being performed.
* @param iSegReg The index of the segment register to apply.
* This is UINT8_MAX if none (for IDT, GDT, LDT,
* TSS, ++).
* @param pGCPtrMem Pointer to the guest memory address to apply
* segmentation to. Input and output parameter.
*/
{
return VINF_SUCCESS;
switch (pIemCpu->enmCpuMode)
{
case IEMMODE_16BIT:
case IEMMODE_32BIT:
{
{
if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
{
/** @todo CPL check. */
}
/*
* There are two kinds of data selectors, normal and expand down.
*/
{
}
else
{
/*
* The upper boundary is defined by the B bit, not the G bit!
*/
}
}
else
{
/*
 * Code selectors can usually be read through; writing is
 * only permitted in real and V8086 mode.
*/
if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
|| ( (fAccess & IEM_ACCESS_TYPE_READ)
&& !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
{
/** @todo CPL check. */
}
}
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
return VINF_SUCCESS;
default:
}
}
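#if 0
/* Illustrative sketch (not IEM code) of the expand-down data segment check
   mentioned above: the valid offsets are (limit, upper boundary], where the
   upper boundary is 0xffffffff when D/B=1 and 0xffff when D/B=0 - the limit
   bounds the range from below, not from above. */
static bool sketchExpandDownOk(uint32_t offSeg, uint32_t cbMem, uint32_t uLimit, bool fDefBig)
{
    uint32_t const uUpper = fDefBig ? UINT32_MAX : UINT16_MAX;
    return offSeg > uLimit
        && offSeg <= uUpper - (cbMem - 1);  /* last byte must not pass the boundary */
}
#endif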
/**
 * Translates a virtual address to a physical address and checks if we
* can access the page as specified.
*
* @param pIemCpu The IEM per CPU data.
* @param GCPtrMem The virtual address.
* @param fAccess The intended access.
* @param pGCPhysMem Where to return the physical address.
*/
static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
{
/** @todo Need a different PGM interface here. We're currently using
* generic / REM interfaces. this won't cut it for R0 & RC. */
if (RT_FAILURE(rc))
{
/** @todo Check unassigned memory in unpaged mode. */
/** @todo Reserved bits in page tables. Requires new PGM interface. */
}
/* If the page is writable and does not have the no-exec bit set, all
access is allowed. Otherwise we'll have to check more carefully... */
{
/* Write to read only memory? */
if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
&& !(fFlags & X86_PTE_RW)
{
}
/* Kernel memory accessed by userland? */
if ( !(fFlags & X86_PTE_US)
&& !(fAccess & IEM_ACCESS_WHAT_SYS))
{
Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
}
/* Executing non-executable memory? */
if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
&& (fFlags & X86_PTE_PAE_NX)
{
return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
}
}
/*
* Set the dirty / access flags.
 * ASSUMES this is set when the address is translated rather than on commit...
*/
/** @todo testcase: check when A and D bits are actually set by the CPU. */
{
int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
}
*pGCPhysMem = GCPhys;
return VINF_SUCCESS;
}
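#if 0
/* Illustrative sketch (not IEM code) of the accessed/dirty flag selection
   used above: the A bit is set on any successful translation, the D bit only
   when the access includes a write. */
static uint32_t sketchAccessedDirtyFlags(uint32_t fAccess)
{
    return X86_PTE_A | ((fAccess & IEM_ACCESS_TYPE_WRITE) ? X86_PTE_D : 0);
}
#endif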
/**
* Maps a physical page.
*
* @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
* @param pIemCpu The IEM per CPU data.
* @param GCPhysMem The physical address.
* @param fAccess The intended access.
* @param ppvMem Where to return the mapping address.
* @param pLock The PGM lock.
*/
static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
{
#ifdef IEM_VERIFICATION_MODE_FULL
/* Force the alternative path so we can ignore writes. */
{
{
if (RT_FAILURE(rc2))
pIemCpu->fProblematicMemory = true;
}
return VERR_PGM_PHYS_TLB_CATCH_ALL;
}
#endif
#ifdef IEM_LOG_MEMORY_WRITES
if (fAccess & IEM_ACCESS_TYPE_WRITE)
return VERR_PGM_PHYS_TLB_CATCH_ALL;
#endif
return VERR_PGM_PHYS_TLB_CATCH_ALL;
#endif
/** @todo This API may require some improving later. A private deal with PGM
 * regarding locking and unlocking needs to be struck. A couple of TLBs
* living in PGM, but with publicly accessible inlined access methods
* could perhaps be an even better solution. */
pLock);
/*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
#ifdef IEM_VERIFICATION_MODE_FULL
pIemCpu->fProblematicMemory = true;
#endif
return rc;
}
/**
* Unmap a page previously mapped by iemMemPageMap.
*
* @param pIemCpu The IEM per CPU data.
* @param GCPhysMem The physical address.
* @param fAccess The intended access.
* @param pvMem What iemMemPageMap returned.
* @param pLock The PGM lock.
*/
DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
{
}
/**
* Looks up a memory mapping entry.
*
* @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
* @param pIemCpu The IEM per CPU data.
* @param pvMem The memory address.
 * @param fAccess The kind of access.
*/
{
return 0;
return 1;
return 2;
return VERR_NOT_FOUND;
}
/**
* Finds a free memmap entry when using iNextMapping doesn't work.
*
* @returns Memory mapping index, 1024 on failure.
* @param pIemCpu The IEM per CPU data.
*/
{
/*
* The easy case.
*/
if (pIemCpu->cActiveMappings == 0)
{
return 0;
}
/* There should be enough mappings for all instructions. */
return i;
AssertFailedReturn(1024);
}
/**
* Commits a bounce buffer that needs writing back and unmaps it.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param iMemMap The index of the buffer to commit.
*/
{
/*
* Do the writing.
*/
int rc;
#ifndef IEM_VERIFICATION_MODE_MINIMAL
{
if (!pIemCpu->fBypassHandlers)
{
cbFirst);
cbSecond);
}
else
{
cbFirst);
cbSecond);
}
if (rc != VINF_SUCCESS)
{
/** @todo status code handling */
Log(("iemMemBounceBufferCommitAndUnmap: %s GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
}
}
else
#endif
rc = VINF_SUCCESS;
#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
/*
* Record the write(s).
*/
{
if (pEvtRec)
{
memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
}
{
if (pEvtRec)
{
}
}
}
#endif
#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
if (rc == VINF_SUCCESS)
{
RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
}
#endif
/*
* Free the mapping entry.
*/
return rc;
}
/**
* iemMemMap worker that deals with a request crossing pages.
*/
{
/*
* Do the address translations.
*/
VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* The second page is translated/checked using the address of the last byte of the access. */
rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
#ifdef IEM_VERIFICATION_MODE_FULL
/*
* Detect problematic memory when verifying so we can select
* the right execution engine. (TLB: Redo this.)
*/
{
if (RT_SUCCESS(rc2))
if (RT_FAILURE(rc2))
pIemCpu->fProblematicMemory = true;
}
#endif
/*
* Read in the current memory content if it's a read, execute or partial
* write access.
*/
{
int rc;
if (!pIemCpu->fBypassHandlers)
{
if (rc != VINF_SUCCESS)
{
/** @todo status code handling */
return rc;
}
if (rc != VINF_SUCCESS)
{
/** @todo status code handling */
return rc;
}
}
else
{
if (rc != VINF_SUCCESS)
{
/** @todo status code handling */
Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
return rc;
}
rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
if (rc != VINF_SUCCESS)
{
/** @todo status code handling */
Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
return rc;
}
}
#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
{
/*
* Record the reads.
*/
if (pEvtRec)
{
}
if (pEvtRec)
{
}
}
#endif
}
#ifdef VBOX_STRICT
else
#endif
#ifdef VBOX_STRICT
#endif
/*
* Commit the bounce buffer entry.
*/
return VINF_SUCCESS;
}
/**
 * iemMemMap worker that deals with iemMemPageMap failures.
*/
static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
{
/*
* Filter out conditions we can handle and the ones which shouldn't happen.
*/
if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
{
return rcMap;
}
/*
* Read in the current memory content if it's a read, execute or partial
* write access.
*/
{
if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
else
{
int rc;
if (!pIemCpu->fBypassHandlers)
else
if (rc != VINF_SUCCESS)
{
/** @todo status code handling */
Log(("iemMemBounceBufferMapPhys: %s GCPhysFirst=%RGp rc=%Rrc (!!)\n",
return rc;
}
}
#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
{
/*
* Record the read.
*/
if (pEvtRec)
{
}
}
#endif
}
#ifdef VBOX_STRICT
else
#endif
#ifdef VBOX_STRICT
#endif
/*
* Commit the bounce buffer entry.
*/
return VINF_SUCCESS;
}
/**
* Maps the specified guest memory for the given kind of access.
*
* This may be using bounce buffering of the memory if it's crossing a page
* boundary or if there is an access handler installed for any of it. Because
* of lock prefix guarantees, we're in for some extra clutter when this
* happens.
*
* This may raise a \#GP, \#SS, \#PF or \#AC.
*
* @returns VBox strict status code.
*
* @param pIemCpu The IEM per CPU data.
* @param ppvMem Where to return the pointer to the mapped
* memory.
* @param cbMem The number of bytes to map. This is usually 1,
* 2, 4, 6, 8, 12, 16, 32 or 512. When used by
* string operations it can be up to a page.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* Use UINT8_MAX to indicate that no segmentation
* is required (for IDT, GDT and LDT accesses).
* @param GCPtrMem The address of the guest memory.
* @param a_fAccess How the memory is being accessed. The
* IEM_ACCESS_TYPE_XXX bit is used to figure out
* how to map the memory, while the
* IEM_ACCESS_WHAT_XXX bit is used when raising
* exceptions.
*/
static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
{
/*
* Check the input and figure out which mapping entry to use.
*/
Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
{
}
/*
* Map the memory, checking that we can actually access it. If something
* slightly complicated happens, fall back on bounce buffering.
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (rcStrict != VINF_SUCCESS)
return rcStrict;
void *pvMem;
rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
if (rcStrict != VINF_SUCCESS)
/*
* Fill in the mapping table entry.
*/
return VINF_SUCCESS;
}
/**
* Commits the guest memory if bounce buffered and unmaps it.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pvMem The mapping.
* @param fAccess The kind of access.
*/
{
/* If it's bounce buffered, we may need to write back the buffer. */
{
}
/* Otherwise unlock it. */
else
/* Free the entry. */
return VINF_SUCCESS;
}
/**
 * Rolls back mappings, releasing page locks and such.
*
* The caller shall only call this after checking cActiveMappings.
*
* @returns Strict VBox status code to pass up.
* @param pIemCpu The IEM per CPU data.
*/
{
while (iMemMap-- > 0)
{
if (fAccess != IEM_ACCESS_INVALID)
{
if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
}
}
}
/**
* Fetches a data byte.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pu8Dst Where to return the byte.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
*/
static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    /* The lazy approach for now... */
    uint8_t const *pu8Src;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    if (rc == VINF_SUCCESS)
    {
        *pu8Dst = *pu8Src;
        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
    }
return rc;
}
/**
* Fetches a data word.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pu16Dst Where to return the word.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
*/
static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    /* The lazy approach for now... */
    uint16_t const *pu16Src;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    if (rc == VINF_SUCCESS)
    {
        *pu16Dst = *pu16Src;
        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
    }
return rc;
}
/**
* Fetches a data dword.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pu32Dst Where to return the dword.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
*/
static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    /* The lazy approach for now... */
    uint32_t const *pu32Src;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    if (rc == VINF_SUCCESS)
    {
        *pu32Dst = *pu32Src;
        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
    }
return rc;
}
#ifdef SOME_UNUSED_FUNCTION
/**
* Fetches a data dword and sign extends it to a qword.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pu64Dst Where to return the sign extended value.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
*/
static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
/* The lazy approach for now... */
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
if (rc == VINF_SUCCESS)
{
}
#ifdef __GNUC__ /* warning: GCC may be a royal pain */
else
*pu64Dst = 0;
#endif
return rc;
}
#endif
/**
* Fetches a data qword.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pu64Dst Where to return the qword.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
*/
static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    /* The lazy approach for now... */
    uint64_t const *pu64Src;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    if (rc == VINF_SUCCESS)
    {
        *pu64Dst = *pu64Src;
        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
    }
return rc;
}
/**
 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pu64Dst Where to return the qword.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
*/
static VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
/* The lazy approach for now... */
/** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
return iemRaiseGeneralProtectionFault0(pIemCpu);
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
if (rc == VINF_SUCCESS)
{
}
return rc;
}
/**
* Fetches a data tword.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pr80Dst Where to return the tword.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
*/
static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
/* The lazy approach for now... */
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
if (rc == VINF_SUCCESS)
{
}
return rc;
}
/**
* Fetches a data dqword (double qword), generally SSE related.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
 * @param pu128Dst Where to return the double qword.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
*/
static VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
/* The lazy approach for now... */
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
if (rc == VINF_SUCCESS)
{
}
return rc;
}
/**
* Fetches a data dqword (double qword) at an aligned address, generally SSE
* related.
*
* Raises \#GP(0) if not aligned.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
 * @param pu128Dst Where to return the double qword.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
*/
static VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
/* The lazy approach for now... */
/** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
if ( (GCPtrMem & 15)
&& !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
return iemRaiseGeneralProtectionFault0(pIemCpu);
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
if (rc == VINF_SUCCESS)
{
}
return rc;
}
/**
* Fetches a descriptor register (lgdt, lidt).
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pcbLimit Where to return the limit.
 * @param pGCPtrBase Where to return the base.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
* @param enmOpSize The effective operand size.
*/
{
(void **)&pu8Src,
? 2 + 8
: enmOpSize == IEMMODE_32BIT
? 2 + 4
: 2 + 3,
if (rcStrict == VINF_SUCCESS)
{
switch (enmOpSize)
{
case IEMMODE_16BIT:
break;
case IEMMODE_32BIT:
break;
case IEMMODE_64BIT:
break;
}
}
return rcStrict;
}
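#if 0
/* Illustrative sketch (not IEM code) of decoding the descriptor-register
   image fetched above: the operand is a 16-bit limit followed by the base,
   of which 3 bytes are used with a 16-bit operand size, 4 with a 32-bit one
   and 8 in 64-bit mode, all little endian. */
static void sketchDecodeXdtr(uint8_t const *pb, unsigned cbBase, uint16_t *pcbLimit, uint64_t *puBase)
{
    *pcbLimit = (uint16_t)(pb[0] | ((uint16_t)pb[1] << 8));
    uint64_t uBase = 0;
    for (unsigned i = 0; i < cbBase; i++)   /* cbBase = 3, 4 or 8 */
        uBase |= (uint64_t)pb[2 + i] << (8 * i);
    *puBase = uBase;
}
#endif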
/**
* Stores a data byte.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
* @param u8Value The value to store.
*/
static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
{
    /* The lazy approach for now... */
    uint8_t *pu8Dst;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rc == VINF_SUCCESS)
    {
        *pu8Dst = u8Value;
        rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
    }
return rc;
}
/**
* Stores a data word.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
* @param u16Value The value to store.
*/
static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
{
    /* The lazy approach for now... */
    uint16_t *pu16Dst;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rc == VINF_SUCCESS)
    {
        *pu16Dst = u16Value;
        rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
    }
return rc;
}
/**
* Stores a data dword.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
* @param u32Value The value to store.
*/
static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
{
    /* The lazy approach for now... */
    uint32_t *pu32Dst;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rc == VINF_SUCCESS)
    {
        *pu32Dst = u32Value;
        rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
    }
return rc;
}
/**
* Stores a data qword.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
* @param u64Value The value to store.
*/
static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
{
    /* The lazy approach for now... */
    uint64_t *pu64Dst;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rc == VINF_SUCCESS)
    {
        *pu64Dst = u64Value;
        rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
    }
return rc;
}
/**
* Stores a data dqword.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
* @param u128Value The value to store.
*/
static VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
{
    /* The lazy approach for now... */
    uint128_t *pu128Dst;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rc == VINF_SUCCESS)
    {
        *pu128Dst = u128Value;
        rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
    }
    return rc;
}
/**
* Stores a data dqword, SSE aligned.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
* @param u128Value The value to store.
*/
static VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
{
/* The lazy approach for now... */
if ( (GCPtrMem & 15)
&& !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
return iemRaiseGeneralProtectionFault0(pIemCpu);
    uint128_t *pu128Dst;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rc == VINF_SUCCESS)
    {
        *pu128Dst = u128Value;
        rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
    }
    return rc;
}
/**
* Stores a descriptor register (sgdt, sidt).
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param cbLimit The limit.
* @param GCPtrBase The base address.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
* @param enmOpSize The effective operand size.
*/
static VBOXSTRICTRC iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase,
                                        uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
{
    /* The operand is 2 + 3/4/8 bytes wide; map it for writing. */
    uint8_t *pu8Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
                                      (void **)&pu8Dst,
                                      enmOpSize == IEMMODE_64BIT
                                      ? 2 + 8
                                      : enmOpSize == IEMMODE_32BIT
                                      ? 2 + 4
                                      : 2 + 3,
                                      iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rcStrict == VINF_SUCCESS)
    {
        pu8Dst[0] = RT_BYTE1(cbLimit);   pu8Dst[1] = RT_BYTE2(cbLimit);
        pu8Dst[2] = RT_BYTE1(GCPtrBase); pu8Dst[3] = RT_BYTE2(GCPtrBase); pu8Dst[4] = RT_BYTE3(GCPtrBase);
        if (enmOpSize == IEMMODE_16BIT)
            pu8Dst[5] = 0; /* with a 16-bit operand only a 24-bit base is stored */
        else
        {
            pu8Dst[5] = RT_BYTE4(GCPtrBase);
            if (enmOpSize == IEMMODE_64BIT)
            {
                pu8Dst[6] = RT_BYTE5(GCPtrBase); pu8Dst[7] = RT_BYTE6(GCPtrBase);
                pu8Dst[8] = RT_BYTE7(GCPtrBase); pu8Dst[9] = RT_BYTE8(GCPtrBase);
            }
        }
        rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Dst, IEM_ACCESS_DATA_W);
    }
    return rcStrict;
}
/**
* Pushes a word onto the stack.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param u16Value The value to push.
*/
static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
{
    /* Decrement the stack pointer (prep). */
    uint64_t uNewRsp;
    PCPUMCTX pCtx     = pIemCpu->CTX_SUFF(pCtx);
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
    /* Write the word the lazy way. */
    uint16_t *pu16Dst;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
    if (rc == VINF_SUCCESS)
    {
        *pu16Dst = u16Value;
        rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
    }
    /* Commit the new RSP value unless an access handler made trouble. */
    if (rc == VINF_SUCCESS)
        pCtx->rsp = uNewRsp;
    return rc;
}
/**
* Pushes a dword onto the stack.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param u32Value The value to push.
*/
static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
{
    /* Decrement the stack pointer (prep). */
    uint64_t uNewRsp;
    PCPUMCTX pCtx     = pIemCpu->CTX_SUFF(pCtx);
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
    /* Write the dword the lazy way. */
    uint32_t *pu32Dst;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
    if (rc == VINF_SUCCESS)
    {
        *pu32Dst = u32Value;
        rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
    }
    /* Commit the new RSP value unless an access handler made trouble. */
    if (rc == VINF_SUCCESS)
        pCtx->rsp = uNewRsp;
    return rc;
}
/**
* Pushes a dword segment register value onto the stack.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param u16Value The value to push.
*/
static VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
{
    /* Decrement the stack pointer (prep). */
    uint64_t uNewRsp;
    PCPUMCTX pCtx     = pIemCpu->CTX_SUFF(pCtx);
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
    VBOXSTRICTRC rc;
    if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
    {
        /* The recompiler writes a full dword. */
        uint32_t *pu32Dst;
        rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
        if (rc == VINF_SUCCESS)
        {
            *pu32Dst = u32Value;
            rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
        }
    }
    else
    {
        /* The Intel docs talk about zero extending the selector register
           value.  My actual Intel CPU here might be zero extending the value
           but it still only writes the lower word... */
        /** @todo Test this on new HW and on AMD and in 64-bit mode.  Also test what
         * happens when crossing an electric page boundary, is the high word
         * checked for write accessibility or not? Probably it is.  What about
         * segment limits? */
        uint16_t *pu16Dst;
        rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
        if (rc == VINF_SUCCESS)
        {
            *pu16Dst = (uint16_t)u32Value;
            rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
        }
    }
    /* Commit the new RSP value unless an access handler made trouble. */
    if (rc == VINF_SUCCESS)
        pCtx->rsp = uNewRsp;
    return rc;
}
/**
* Pushes a qword onto the stack.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param u64Value The value to push.
*/
static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
{
    /* Decrement the stack pointer (prep). */
    uint64_t uNewRsp;
    PCPUMCTX pCtx     = pIemCpu->CTX_SUFF(pCtx);
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
    /* Write the qword the lazy way. */
    uint64_t *pu64Dst;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
    if (rc == VINF_SUCCESS)
    {
        *pu64Dst = u64Value;
        rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
    }
    /* Commit the new RSP value unless an access handler made trouble. */
    if (rc == VINF_SUCCESS)
        pCtx->rsp = uNewRsp;
    return rc;
}
/**
* Pops a word from the stack.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pu16Value Where to store the popped value.
*/
static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
{
    /* Increment the stack pointer (prep). */
    uint64_t uNewRsp;
    PCPUMCTX pCtx     = pIemCpu->CTX_SUFF(pCtx);
    RTGCPTR  GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
    /* Fetch the word the lazy way. */
    uint16_t const *pu16Src;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
    if (rc == VINF_SUCCESS)
    {
        *pu16Value = *pu16Src;
        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
        /* Commit the new RSP value. */
        if (rc == VINF_SUCCESS)
            pCtx->rsp = uNewRsp;
    }
    return rc;
}
/**
* Pops a dword from the stack.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pu32Value Where to store the popped value.
*/
static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
{
    /* Increment the stack pointer (prep). */
    uint64_t uNewRsp;
    PCPUMCTX pCtx     = pIemCpu->CTX_SUFF(pCtx);
    RTGCPTR  GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
    /* Fetch the dword the lazy way. */
    uint32_t const *pu32Src;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
    if (rc == VINF_SUCCESS)
    {
        *pu32Value = *pu32Src;
        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
        /* Commit the new RSP value. */
        if (rc == VINF_SUCCESS)
            pCtx->rsp = uNewRsp;
    }
    return rc;
}
/**
* Pops a qword from the stack.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pu64Value Where to store the popped value.
*/
static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
{
    /* Increment the stack pointer (prep). */
    uint64_t uNewRsp;
    PCPUMCTX pCtx     = pIemCpu->CTX_SUFF(pCtx);
    RTGCPTR  GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
    /* Fetch the qword the lazy way. */
    uint64_t const *pu64Src;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
    if (rc == VINF_SUCCESS)
    {
        *pu64Value = *pu64Src;
        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
        /* Commit the new RSP value. */
        if (rc == VINF_SUCCESS)
            pCtx->rsp = uNewRsp;
    }
    return rc;
}
/**
* Pushes a word onto the stack, using a temporary stack pointer.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param u16Value The value to push.
* @param pTmpRsp Pointer to the temporary stack pointer.
*/
static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
{
    /* Decrement the temporary stack pointer (prep). */
    PCPUMCTX  pCtx     = pIemCpu->CTX_SUFF(pCtx);
    RTUINT64U NewRsp   = *pTmpRsp;
    RTGCPTR   GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
    /* Write the word the lazy way. */
    uint16_t *pu16Dst;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
    if (rc == VINF_SUCCESS)
    {
        *pu16Dst = u16Value;
        rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
    }
    /* Commit the new RSP value unless an access handler made trouble. */
    if (rc == VINF_SUCCESS)
        *pTmpRsp = NewRsp;
    return rc;
}
/**
* Pushes a dword onto the stack, using a temporary stack pointer.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param u32Value The value to push.
* @param pTmpRsp Pointer to the temporary stack pointer.
*/
static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
{
    /* Decrement the temporary stack pointer (prep). */
    PCPUMCTX  pCtx     = pIemCpu->CTX_SUFF(pCtx);
    RTUINT64U NewRsp   = *pTmpRsp;
    RTGCPTR   GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
    /* Write the dword the lazy way. */
    uint32_t *pu32Dst;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
    if (rc == VINF_SUCCESS)
    {
        *pu32Dst = u32Value;
        rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
    }
    /* Commit the new RSP value unless an access handler made trouble. */
    if (rc == VINF_SUCCESS)
        *pTmpRsp = NewRsp;
    return rc;
}
/**
* Pushes a qword onto the stack, using a temporary stack pointer.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param u64Value The value to push.
* @param pTmpRsp Pointer to the temporary stack pointer.
*/
static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
{
    /* Decrement the temporary stack pointer (prep). */
    PCPUMCTX  pCtx     = pIemCpu->CTX_SUFF(pCtx);
    RTUINT64U NewRsp   = *pTmpRsp;
    RTGCPTR   GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
    /* Write the qword the lazy way. */
    uint64_t *pu64Dst;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
    if (rc == VINF_SUCCESS)
    {
        *pu64Dst = u64Value;
        rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
    }
    /* Commit the new RSP value unless an access handler made trouble. */
    if (rc == VINF_SUCCESS)
        *pTmpRsp = NewRsp;
    return rc;
}
/**
* Pops a word from the stack, using a temporary stack pointer.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pu16Value Where to store the popped value.
* @param pTmpRsp Pointer to the temporary stack pointer.
*/
static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
{
    /* Increment the temporary stack pointer (prep). */
    PCPUMCTX  pCtx     = pIemCpu->CTX_SUFF(pCtx);
    RTUINT64U NewRsp   = *pTmpRsp;
    RTGCPTR   GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
    /* Fetch the word the lazy way. */
    uint16_t const *pu16Src;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
    if (rc == VINF_SUCCESS)
    {
        *pu16Value = *pu16Src;
        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
        /* Commit the new RSP value. */
        if (rc == VINF_SUCCESS)
            *pTmpRsp = NewRsp;
    }
    return rc;
}
/**
* Pops a dword from the stack, using a temporary stack pointer.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pu32Value Where to store the popped value.
* @param pTmpRsp Pointer to the temporary stack pointer.
*/
static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
{
    /* Increment the temporary stack pointer (prep). */
    PCPUMCTX  pCtx     = pIemCpu->CTX_SUFF(pCtx);
    RTUINT64U NewRsp   = *pTmpRsp;
    RTGCPTR   GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
    /* Fetch the dword the lazy way. */
    uint32_t const *pu32Src;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
    if (rc == VINF_SUCCESS)
    {
        *pu32Value = *pu32Src;
        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
        /* Commit the new RSP value. */
        if (rc == VINF_SUCCESS)
            *pTmpRsp = NewRsp;
    }
    return rc;
}
/**
* Pops a qword from the stack, using a temporary stack pointer.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pu64Value Where to store the popped value.
* @param pTmpRsp Pointer to the temporary stack pointer.
*/
static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
{
    /* Increment the temporary stack pointer (prep). */
    PCPUMCTX  pCtx     = pIemCpu->CTX_SUFF(pCtx);
    RTUINT64U NewRsp   = *pTmpRsp;
    RTGCPTR   GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
    /* Fetch the qword the lazy way. */
    uint64_t const *pu64Src;
    VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu64Value = *pu64Src;
        rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
        /* Commit the new RSP value. */
        if (rcStrict == VINF_SUCCESS)
            *pTmpRsp = NewRsp;
    }
    return rcStrict;
}
/**
* Begin a special stack push (used by interrupt, exceptions and such).
*
* This will raise \#SS or \#PF if appropriate.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param cbMem The number of bytes to push onto the stack.
* @param ppvMem Where to return the pointer to the stack memory.
* As with the other memory functions this could be
* direct access or bounce buffered access, so
* don't commit register until the commit call
* succeeds.
* @param puNewRsp Where to return the new RSP value. This must be
* passed unchanged to
* iemMemStackPushCommitSpecial().
*/
static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
{
    Assert(cbMem < UINT8_MAX);
    PCPUMCTX pCtx     = pIemCpu->CTX_SUFF(pCtx);
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
    return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
}
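/* Illustrative only: how the begin/commit pair is meant to be used when
   building an exception/interrupt stack frame.  The frame layout and local
   names here are hypothetical; nothing is guest-visible until the commit. */
#if 0 /* sketch, not compiled */
uint64_t  uNewRsp;
uint16_t *pu16Frame;
VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
if (rcStrict != VINF_SUCCESS)
    return rcStrict;
pu16Frame[2] = (uint16_t)pCtx->eflags.u;    /* still only in the mapping/bounce buffer... */
pu16Frame[1] = pCtx->cs.Sel;
pu16Frame[0] = pCtx->ip;
rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp); /* ...until committed here. */
#endif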
/**
* Commits a special stack push (started by iemMemStackPushBeginSpecial).
*
* This will update the rSP.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pvMem The pointer returned by
* iemMemStackPushBeginSpecial().
* @param uNewRsp The new RSP value returned by
* iemMemStackPushBeginSpecial().
*/
static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
{
    VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
    if (rcStrict == VINF_SUCCESS)
        pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
    return rcStrict;
}
/**
* Begin a special stack pop (used by iret, retf and such).
*
* This will raise \#SS or \#PF if appropriate.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param cbMem The number of bytes to pop off the stack.
* @param ppvMem Where to return the pointer to the stack memory.
* @param puNewRsp Where to return the new RSP value. This must be
* passed unchanged to
* iemMemStackPopCommitSpecial() or applied
* manually if iemMemStackPopDoneSpecial() is used.
*/
static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
{
    Assert(cbMem < UINT8_MAX);
    PCPUMCTX pCtx     = pIemCpu->CTX_SUFF(pCtx);
    RTGCPTR  GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
    return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
}
/**
* Continue a special stack pop (used by iret and retf).
*
* This will raise \#SS or \#PF if appropriate.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param cbMem The number of bytes to pop off the stack.
* @param ppvMem Where to return the pointer to the stack memory.
* @param puNewRsp Where to return the new RSP value. This must be
* passed unchanged to
* iemMemStackPopCommitSpecial() or applied
* manually if iemMemStackPopDoneSpecial() is used.
*/
static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
{
    Assert(cbMem < UINT8_MAX);
    PCPUMCTX  pCtx = pIemCpu->CTX_SUFF(pCtx);
    RTUINT64U NewRsp;
    NewRsp.u = *puNewRsp;
    RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, (uint8_t)cbMem);
    *puNewRsp = NewRsp.u;
    return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
}
/**
* Commits a special stack pop (started by iemMemStackPopBeginSpecial).
*
* This will update the rSP.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pvMem The pointer returned by
* iemMemStackPopBeginSpecial().
* @param uNewRsp The new RSP value returned by
* iemMemStackPopBeginSpecial().
*/
static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
{
    VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
    if (rcStrict == VINF_SUCCESS)
        pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
    return rcStrict;
}
/**
* Done with a special stack pop (started by iemMemStackPopBeginSpecial or
* iemMemStackPopContinueSpecial).
*
* The caller will manually commit the rSP.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pvMem The pointer returned by
* iemMemStackPopBeginSpecial() or
* iemMemStackPopContinueSpecial().
*/
static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
{
    return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
}
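/* Illustrative only: the pop counterpart, e.g. for an iret-style return with
   a hypothetical frame layout.  The *Done* variant is for callers that apply
   the final RSP themselves instead of simply committing uNewRsp. */
#if 0 /* sketch, not compiled */
uint16_t const *pu16Frame;
uint64_t        uNewRsp;
VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, (void const **)&pu16Frame, &uNewRsp);
if (rcStrict != VINF_SUCCESS)
    return rcStrict;
uint16_t const uNewIp = pu16Frame[0]; /* read out everything needed first */
rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
#endif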
/**
* Fetches a system table byte.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pbDst Where to return the byte.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
*/
static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    /* The lazy approach for now... */
    uint8_t const *pbSrc;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
    if (rc == VINF_SUCCESS)
    {
        *pbDst = *pbSrc;
        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
    }
    return rc;
}
/**
* Fetches a system table word.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pu16Dst Where to return the word.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
*/
static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    /* The lazy approach for now... */
    uint16_t const *pu16Src;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
    if (rc == VINF_SUCCESS)
    {
        *pu16Dst = *pu16Src;
        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
    }
    return rc;
}
/**
* Fetches a system table dword.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pu32Dst Where to return the dword.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
*/
static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    /* The lazy approach for now... */
    uint32_t const *pu32Src;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
    if (rc == VINF_SUCCESS)
    {
        *pu32Dst = *pu32Src;
        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
    }
    return rc;
}
/**
* Fetches a system table qword.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param pu64Dst Where to return the qword.
* @param iSegReg The index of the segment register to use for
* this access. The base and limits are checked.
* @param GCPtrMem The address of the guest memory.
*/
static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    /* The lazy approach for now... */
    uint64_t const *pu64Src;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
    if (rc == VINF_SUCCESS)
    {
        *pu64Dst = *pu64Src;
        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
    }
    return rc;
}
/**
* Fetches a descriptor table entry with caller specified error code.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU.
* @param pDesc Where to return the descriptor table entry.
* @param uSel The selector which table entry to fetch.
* @param uXcpt The exception to raise on table lookup error.
* @param uErrorCode The error code associated with the exception.
*/
static VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt,
                                              uint16_t uErrorCode)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    /** @todo did the 286 require all 8 bytes to be accessible? */
    /*
     * Get the selector table base and check bounds.
     */
    RTGCPTR GCPtrBase;
    if (uSel & X86_SEL_LDT)
    {
        if (   !pCtx->ldtr.Attr.n.u1Present
            || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
        {
            Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
                 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
            return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                                     uErrorCode, 0);
        }
        GCPtrBase = pCtx->ldtr.u64Base;
    }
    else
    {
        if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
        {
            Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
            return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                                     uErrorCode, 0);
        }
        GCPtrBase = pCtx->gdtr.pGdt;
    }
    /*
     * Read the legacy descriptor and maybe the long mode extensions if
     * required.
     */
    VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
    if (rcStrict == VINF_SUCCESS)
    {
        if (   !IEM_IS_LONG_MODE(pIemCpu)
            || pDesc->Legacy.Gen.u1DescType)
            pDesc->Long.au64[1] = 0;
        else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
            rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
        else
        {
            Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
            /** @todo is this the right exception? */
            return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
        }
    }
    return rcStrict;
}
/**
* Fetches a descriptor table entry.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU.
* @param pDesc Where to return the descriptor table entry.
* @param uSel The selector which table entry to fetch.
* @param uXcpt The exception to raise on table lookup error.
*/
static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
{
    return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
}
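/* Illustrative only: typical use of the descriptor fetch helper when loading
   a segment register; uSel is hypothetical and the checks shown are only a
   subset of what a real segment load performs. */
#if 0 /* sketch, not compiled */
IEMSELDESC Desc;
VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
if (rcStrict != VINF_SUCCESS)
    return rcStrict;
if (!Desc.Legacy.Gen.u1Present)
    return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
#endif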
/**
* Fakes a long mode stack selector for SS = 0.
*
* @param pDescSs Where to return the fake stack descriptor.
* @param uDpl The DPL we want.
*/
{
}
/**
* Marks the selector descriptor as accessed (only non-system descriptors).
*
* This function ASSUMES that iemMemFetchSelDesc has been called previously and
* will therefore skip the limit checks.
*
* @returns Strict VBox status code.
* @param pIemCpu The IEM per CPU.
* @param uSel The selector.
*/
static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
{
/*
* Get the selector table base and calculate the entry address.
*/
/*
* ASMAtomicBitSet will assert if the address is misaligned, so do some
* ugly stuff to avoid this. This will make sure it's an atomic access
* as well as more or less remove any question about 8-bit or 32-bit accesses.
*/
if ((GCPtr & 3) == 0)
{
/* The normal case, map the 32-bit bits around the accessed bit (40). */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
else
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
{
}
}
}
/** @} */
/*
* Include the C/C++ implementation of instructions.
*/
#include "IEMAllCImpl.cpp.h"
/** @name "Microcode" macros.
*
* The idea is that we should be able to use the same code to interpret
* instructions as well as recompiler instructions. Thus this obfuscation.
*
* @{
*/
#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
#define IEM_MC_END() }
#define IEM_MC_PAUSE() do {} while (0)
#define IEM_MC_CONTINUE() do {} while (0)
/** Internal macro. */
#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = a_Expr; \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
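/* Illustrative only: how the IEM_MC_* "microcode" macros compose inside an
   opcode decoder.  The opcode function, the immediate fetch placement and
   the RIP-advance macro used are hypothetical. */
#if 0 /* sketch, not compiled */
FNIEMOP_DEF(iemOp_example_jz_rel16)
{
    int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); /* assumed opcode-fetch macro of this family */
    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)       /* one of the EFLAGS test macros of this family */
        IEM_MC_REL_JMP_S16(i16Imm);
    IEM_MC_ELSE()
        IEM_MC_ADVANCE_RIP();               /* assumed RIP-advance macro of the same family */
    IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif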
#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
    do { \
        if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
            return iemRaiseDeviceNotAvailable(pIemCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
    do { \
        if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
            return iemRaiseMathFault(pIemCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
    do { \
        if (   (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
            || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
            || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
            return iemRaiseUndefinedOpcode(pIemCpu); \
        if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
            return iemRaiseDeviceNotAvailable(pIemCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
    do { \
        if (   (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
            || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
            return iemRaiseUndefinedOpcode(pIemCpu); \
        if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
            return iemRaiseDeviceNotAvailable(pIemCpu); \
    } while (0)
do { \
return iemRaiseUndefinedOpcode(pIemCpu); \
return iemRaiseDeviceNotAvailable(pIemCpu); \
} while (0)
#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
    do { \
        if (pIemCpu->uCpl != 0) \
            return iemRaiseGeneralProtectionFault0(pIemCpu); \
    } while (0)
#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
/** @note Not for IOPL or IF testing or modification. */
#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
* Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
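/* Illustrative only: why a IEM_MC_REF_GREG_U32 user must clear the high half
   on commit (hypothetical read-modify-write sequence).  A direct write
   through the 32-bit reference leaves RAX[63:32] untouched, unlike a real
   32-bit register write. */
#if 0 /* sketch, not compiled */
uint32_t *pu32Dst;
IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
*pu32Dst |= UINT32_C(0x80000000);           /* high half of RAX still stale here */
IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); /* restores the 64-bit zero-extension rule */
#endif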
/** @note Not for IOPL or IF testing or modification. */
#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
    do { \
        uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
        *pu32Reg += (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
    } while (0)
#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
    do { \
        uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
        *pu32Reg -= (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
    } while (0)
#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
    do { \
        uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
        *pu32Reg &= (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
    } while (0)
#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
    do { \
        uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
        *pu32Reg |= (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
    } while (0)
#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
/** @note Not for IOPL or IF modification. */
#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
/** @note Not for IOPL or IF modification. */
#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
/** @note Not for IOPL or IF modification. */
#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
    do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
    do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
    do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
    do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
    (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
    (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
    do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
    do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
    do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
    do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
    do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
         pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
    } while (0)
#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
    do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
         pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
    } while (0)
#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
    (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
    (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_U128_ALIGNED_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
do { \
} while (0)
do { \
} while (0)
do { \
} while (0)
do { \
} while (0)
do { \
} while (0)
do { \
} while (0)
do { \
} while (0)
do { \
} while (0)
do { \
} while (0)
do { \
} while (0)
do { \
} while (0)
do { \
} while (0)
#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
    do { \
        (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
        (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
    } while (0)
#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
#define IEM_MC_PUSH_U16(a_u16Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
#define IEM_MC_PUSH_U32(a_u32Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
#define IEM_MC_PUSH_U64(a_u64Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
#define IEM_MC_POP_U16(a_pu16Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
#define IEM_MC_POP_U32(a_pu32Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
#define IEM_MC_POP_U64(a_pu64Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
/** Maps guest memory for direct or bounce buffered access.
* The purpose is to pass it to an operand implementation, thus the a_iArg.
* @remarks May return.
*/
#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
/** Maps guest memory for direct or bounce buffered access.
* The purpose is to pass it to an operand implementation, thus the a_iArg.
* @remarks May return.
*/
#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_iSeg, a_GCPtrMem, a_cbMem, a_iArg) \
    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
/** Commits the memory and unmaps the guest memory.
 * @remarks May return.
 */
#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
    IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
/** Commits the memory and unmaps the guest memory unless the FPU status word
 * (@a a_u16FSW) and FPU control word indicate a pending exception that would
 * cause FLD not to store.
*
* The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
* store, while \#P will not.
*
* @remarks May in theory return - for now.
*/
#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
    do { \
        if (   !(a_u16FSW & X86_FSW_ES) \
            || !(  (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
                 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
            IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
    } while (0)
/** Calculates the effective address from the ModR/M byte. */
#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
    IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
/**
* Defers the rest of the instruction emulation to a C implementation routine
* and returns, only taking the standard parameters.
*
* @param a_pfnCImpl The pointer to the C routine.
* @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
 */
#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
/**
* Defers the rest of instruction emulation to a C implementation routine and
* returns, taking one argument in addition to the standard ones.
*
* @param a_pfnCImpl The pointer to the C routine.
* @param a0 The argument.
 */
#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
/**
* Defers the rest of the instruction emulation to a C implementation routine
* and returns, taking two arguments in addition to the standard ones.
*
* @param a_pfnCImpl The pointer to the C routine.
* @param a0 The first extra argument.
* @param a1 The second extra argument.
*/
#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
/**
* Defers the rest of the instruction emulation to a C implementation routine
* and returns, taking three arguments in addition to the standard ones.
*
* @param a_pfnCImpl The pointer to the C routine.
* @param a0 The first extra argument.
* @param a1 The second extra argument.
* @param a2 The third extra argument.
*/
#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
/**
* Defers the rest of the instruction emulation to a C implementation routine
* and returns, taking four arguments in addition to the standard ones.
*
* @param a_pfnCImpl The pointer to the C routine.
* @param a0 The first extra argument.
* @param a1 The second extra argument.
* @param a2 The third extra argument.
* @param a3 The fourth extra argument.
*/
#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
/**
* Defers the rest of the instruction emulation to a C implementation routine
* and returns, taking five arguments in addition to the standard ones.
*
* @param a_pfnCImpl The pointer to the C routine.
* @param a0 The first extra argument.
* @param a1 The second extra argument.
* @param a2 The third extra argument.
* @param a3 The fourth extra argument.
* @param a4 The fifth extra argument.
*/
#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
/**
* Defers the entire instruction emulation to a C implementation routine and
* returns, only taking the standard parameters.
*
* This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
*
* @param a_pfnCImpl The pointer to the C routine.
* @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
 */
#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
/**
* Defers the entire instruction emulation to a C implementation routine and
* returns, taking one argument in addition to the standard ones.
*
* This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
*
* @param a_pfnCImpl The pointer to the C routine.
* @param a0 The argument.
 */
#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
/**
* Defers the entire instruction emulation to a C implementation routine and
* returns, taking two arguments in addition to the standard ones.
*
* This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
*
* @param a_pfnCImpl The pointer to the C routine.
* @param a0 The first extra argument.
* @param a1 The second extra argument.
*/
#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
/**
* Defers the entire instruction emulation to a C implementation routine and
* returns, taking three arguments in addition to the standard ones.
*
* This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
*
* @param a_pfnCImpl The pointer to the C routine.
* @param a0 The first extra argument.
* @param a1 The second extra argument.
* @param a2 The third extra argument.
*/
#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
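/* Illustrative only: a decoder that does no micro-op work itself and defers
   the whole instruction to a C worker.  Both the opcode function and the
   worker name are hypothetical. */
#if 0 /* sketch, not compiled */
FNIEMOP_DEF(iemOp_example_hlt)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_example_hlt);
}
#endif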
/**
* Calls a FPU assembly implementation taking one visible argument.
*
* @param a_pfnAImpl Pointer to the assembly FPU routine.
* @param a0 The first extra argument.
*/
#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
    do { \
        iemFpuPrepareUsage(pIemCpu); \
        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
    } while (0)
/**
* Calls a FPU assembly implementation taking two visible arguments.
*
* @param a_pfnAImpl Pointer to the assembly FPU routine.
* @param a0 The first extra argument.
* @param a1 The second extra argument.
*/
#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
    do { \
        iemFpuPrepareUsage(pIemCpu); \
        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
    } while (0)
/**
* Calls a FPU assembly implementation taking three visible arguments.
*
* @param a_pfnAImpl Pointer to the assembly FPU routine.
* @param a0 The first extra argument.
* @param a1 The second extra argument.
* @param a2 The third extra argument.
*/
#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
    do { \
        iemFpuPrepareUsage(pIemCpu); \
        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
    } while (0)
do { \
} while (0)
/** Pushes FPU result onto the stack. */
#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
    iemFpuPushResult(pIemCpu, &a_FpuData)
/** Pushes FPU result onto the stack and sets the FPUDP. */
#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
    iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
/** Replaces ST0 with value one and pushes value 2 onto the FPU stack. */
#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
    iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
/** Stores FPU result in a stack register. */
#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
    iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
/** Stores FPU result in a stack register and pops the stack. */
#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
    iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
/** Stores FPU result in a stack register and sets the FPUDP. */
#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
    iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
/** Stores FPU result in a stack register, sets the FPUDP, and pops the
 * stack. */
#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
    iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
    iemFpuUpdateOpcodeAndIp(pIemCpu)
/** Free a stack register (for FFREE and FFREEP). */
#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
    iemFpuStackFree(pIemCpu, a_iStReg)
/** Increment the FPU stack pointer. */
#define IEM_MC_FPU_STACK_INC_TOP() \
    iemFpuStackIncTop(pIemCpu)
/** Decrement the FPU stack pointer. */
#define IEM_MC_FPU_STACK_DEC_TOP() \
    iemFpuStackDecTop(pIemCpu)
/** Updates the FSW, FOP, FPUIP, and FPUCS. */
#define IEM_MC_UPDATE_FSW(a_u16FSW) \
    iemFpuUpdateFSW(pIemCpu, a_u16FSW)
/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
    iemFpuUpdateFSW(pIemCpu, a_u16FSW)
/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
    iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
    iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
 * stack. */
#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
    iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
    iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
    iemFpuStackUnderflow(pIemCpu, a_iStDst)
/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
 * stack. */
#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
    iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
 * FPUDS. */
#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
    iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
 * FPUDS. Pops stack. */
#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
    iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
 * stack twice. */
#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
    iemFpuStackUnderflowThenPopPop(pIemCpu)
/** Raises a FPU stack underflow exception for an instruction pushing a result
 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
    iemFpuStackPushUnderflow(pIemCpu)
/** Raises a FPU stack underflow exception for an instruction pushing a result
 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
    iemFpuStackPushUnderflowTwo(pIemCpu)
/** Raises a FPU stack overflow exception as part of a push attempt. Sets
 * FPUIP, FPUCS and FOP. */
#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
    iemFpuStackPushOverflow(pIemCpu)
/** Raises a FPU stack overflow exception as part of a push attempt. Sets
 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
    iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
/** Indicates that we (might) have modified the FPU state. */
#define IEM_MC_USED_FPU() \
    CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
/**
* Calls a MMX assembly implementation taking two visible arguments.
*
* @param a_pfnAImpl Pointer to the assembly MMX routine.
* @param a0 The first extra argument.
* @param a1 The second extra argument.
*/
#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
    do { \
        iemFpuPrepareUsage(pIemCpu); \
        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
    } while (0)
/**
* Calls a MMX assembly implementation taking three visible arguments.
*
* @param a_pfnAImpl Pointer to the assembly MMX routine.
* @param a0 The first extra argument.
* @param a1 The second extra argument.
* @param a2 The third extra argument.
*/
#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
    do { \
        iemFpuPrepareUsage(pIemCpu); \
        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
    } while (0)
/**
* Calls a SSE assembly implementation taking two visible arguments.
*
* @param a_pfnAImpl Pointer to the assembly SSE routine.
* @param a0 The first extra argument.
* @param a1 The second extra argument.
*/
#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
    do { \
        iemFpuPrepareUsageSse(pIemCpu); \
        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
    } while (0)
/**
* Calls a SSE assembly implementation taking three visible arguments.
*
* @param a_pfnAImpl Pointer to the assembly SSE routine.
* @param a0 The first extra argument.
* @param a1 The second extra argument.
* @param a2 The third extra argument.
*/
#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
    do { \
        iemFpuPrepareUsageSse(pIemCpu); \
        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
    } while (0)
/** @note Not for IOPL or IF testing. */
/** @note Not for IOPL or IF testing. */
/** @note Not for IOPL or IF testing. */
/** @note Not for IOPL or IF testing. */
/** @note Not for IOPL or IF testing. */
/** @note Not for IOPL or IF testing. */
/** @note Not for IOPL or IF testing. */
/** @note Not for IOPL or IF testing. */
/** @note Not for IOPL or IF testing. */
#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
    if (pIemCpu->CTX_SUFF(pCtx)->cx != 0 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
/** @note Not for IOPL or IF testing. */
#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
    if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
/** @note Not for IOPL or IF testing. */
#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
    if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
/** @note Not for IOPL or IF testing. */
#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
    if (pIemCpu->CTX_SUFF(pCtx)->cx != 0 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
/** @note Not for IOPL or IF testing. */
#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
    if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
/** @note Not for IOPL or IF testing. */
#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
    if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
    if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
    if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_BOTH(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
    if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
#define IEM_MC_IF_FCW_IM() \
    if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
#define IEM_MC_ELSE() } else {
#define IEM_MC_ENDIF() } do {} while (0)
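/* Illustrative only: the IF/ELSE/ENDIF macros expand to plain C blocks, so a
   conditional micro-op sequence reads like this (i16Imm is a hypothetical
   local fetched earlier by the decoder). */
#if 0 /* sketch, not compiled */
IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF)
    IEM_MC_REL_JMP_S16(i16Imm);
IEM_MC_ELSE()
    IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
IEM_MC_ENDIF();
#endif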
/** @} */
/** @name Opcode Debug Helpers.
* @{
*/
#ifdef DEBUG
# define IEMOP_MNEMONIC(a_szMnemonic) \
    Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
          pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
    Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
          pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
#else
# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
#endif
/** @} */
/** @name Opcode Helpers.
* @{
*/
/** The instruction raises an \#UD in real and V8086 mode. */
#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
do \
{ \
if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
} while (0)
/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
 * lock prefixed.
 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
#define IEMOP_HLP_NO_LOCK_PREFIX() \
    do \
    { \
        if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
            return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
    } while (0)
/** The instruction is not available in 64-bit mode, throw \#UD if we're in
 * 64-bit mode. */
#define IEMOP_HLP_NO_64BIT() \
    do \
    { \
        if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
            return IEMOP_RAISE_INVALID_OPCODE(); \
    } while (0)
/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
 * 64-bit mode. */
#define IEMOP_HLP_ONLY_64BIT() \
    do \
    { \
        if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
            return IEMOP_RAISE_INVALID_OPCODE(); \
    } while (0)
/** The instruction defaults to 64-bit operand size if 64-bit mode. */
#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
    do \
    { \
        if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
            iemRecalEffOpSize64Default(pIemCpu); \
    } while (0)
/** The instruction has 64-bit operand size if 64-bit mode. */
#define IEMOP_HLP_64BIT_OP_SIZE() \
    do \
    { \
        if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
            pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
    } while (0)
/** Only a REX prefix immediately preceding the first opcode byte takes
 * effect. This macro helps ensure this as well as logging bad guest code. */
#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
do \
{ \
{ \
} \
} while (0)
/**
* Done decoding.
*/
#define IEMOP_HLP_DONE_DECODING() \
do \
{ \
/*nothing for now, maybe later... */ \
} while (0)
/**
* Done decoding, raise \#UD exception if lock prefix present.
*/
#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
    do \
    { \
        if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)) \
            return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
    } while (0)
#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
    do \
    { \
        if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)) \
        { \
            NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
            return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
        } \
    } while (0)
#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
do \
{ \
        if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)) \
        { \
NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
} \
} while (0)
/**
* Calculates the effective address of a ModR/M memory operand.
*
* Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
*
* @return Strict VBox status code.
* @param pIemCpu The IEM per CPU data.
* @param bRm The ModRM byte.
* @param cbImm The size of any immediate following the
* effective address opcode bytes. Important for
* RIP relative addressing.
* @param pGCPtrEff Where to return the effective address.
*/
static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
{
#define SET_SS_DEF() \
    do \
    { \
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
            pIemCpu->iEffSeg = X86_SREG_SS; \
    } while (0)
{
/** @todo Check the effective address size crap! */
{
/* Handle the disp16 form with no registers first. */
else
{
/* Get the displacement. */
{
case 0: u16EffAddr = 0; break;
}
/* Add the base and index registers to the disp. */
switch (bRm & X86_MODRM_RM_MASK)
{
}
}
*pGCPtrEff = u16EffAddr;
}
else
{
/* Handle the disp32 form with no registers first. */
else
{
/* Get the register (or SIB) value. */
switch ((bRm & X86_MODRM_RM_MASK))
{
case 4: /* SIB */
{
/* Get the index and scale it. */
{
}
/* add base */
switch (bSib & X86_SIB_BASE_MASK)
{
case 5:
if ((bRm & X86_MODRM_MOD_MASK) != 0)
{
SET_SS_DEF();
}
else
{
u32EffAddr += u32Disp;
}
break;
}
break;
}
}
/* Get and add the displacement. */
{
case 0:
break;
case 1:
{
u32EffAddr += i8Disp;
break;
}
case 2:
{
u32EffAddr += u32Disp;
break;
}
default:
}
}
*pGCPtrEff = u32EffAddr;
else
{
}
}
}
else
{
/* Handle the rip+disp32 form with no registers first. */
{
}
else
{
/* Get the register (or SIB) value. */
{
/* SIB */
case 4:
case 12:
{
/* Get the index and scale it. */
{
}
/* add base */
{
/* complicated encodings */
case 5:
case 13:
if ((bRm & X86_MODRM_MOD_MASK) != 0)
{
{
SET_SS_DEF();
}
else
}
else
{
}
break;
}
break;
}
}
/* Get and add the displacement. */
{
case 0:
break;
case 1:
{
u64EffAddr += i8Disp;
break;
}
case 2:
{
break;
}
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
}
}
*pGCPtrEff = u64EffAddr;
else
{
}
}
return VINF_SUCCESS;
}
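/* Illustrative only: the SIB decomposition above reduces to
       EffAddr = Base + (Index << Scale) + Disp.
   For "mov eax, [ebx+esi*4+8]" with hypothetical register values, the 32-bit
   case is simply: */
#if 0 /* sketch, not compiled */
uint32_t const u32EffAddr = pCtx->ebx + (pCtx->esi << 2) + 8;
#endif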
/** @} */
/*
* Include the instructions
*/
#include "IEMAllInstructions.cpp.h"
#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
/**
* Sets up execution verification mode.
*/
{
/*
* Always note down the address of the current instruction.
*/
/*
*/
if ( fNewNoRem
&& ( 0
#if 0 /* auto enable on first paged protected mode interrupt */
&& TRPMHasTrap(pVCpu)
#endif
#if 0
#endif
#if 0 /* Auto enable DSL - FPU stuff. */
&& (// pOrgCtx->rip == 0xc02ec07f
//|| pOrgCtx->rip == 0xc02ec082
//|| pOrgCtx->rip == 0xc02ec0c9
0
#endif
#if 0 /* Auto enable DSL - fstp st0 stuff. */
#endif
#if 0
#endif
#if 0
#endif
#if 0
#endif
#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
#endif
#if 0 /* NT4SP1 - xadd early boot. */
#endif
#if 0 /* NT4SP1 - wrmsr (intel MSR). */
#endif
#if 0 /* NT4SP1 - cmpxchg (AMD). */
#endif
#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
#endif
#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
#endif
#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
#endif
#if 0 /* NT4SP1 - frstor [ecx] */
#endif
#if 0 /* xxxxxx - All long mode code. */
#endif
#if 0 /* rep movsq linux 3.7 64-bit boot. */
#endif
#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
#endif
#if 0 /* DOS's size-overridden iret to v8086. */
#endif
)
)
{
fNewNoRem = false;
}
{
if (!fNewNoRem)
{
LogAlways(("Enabling verification mode!\n"));
}
else
LogAlways(("Disabling verification mode!\n"));
}
/*
* Switch state.
*/
{
s_DebugCtx = *pOrgCtx;
}
/*
* See if there is an interrupt pending in TRPM and inject it if we can.
*/
&& TRPMHasTrap(pVCpu)
{
int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
}
/*
* Reset the counters.
*/
pIemCpu->fIgnoreRaxRdx = false;
pIemCpu->fOverlappingMovs = false;
pIemCpu->fProblematicMemory = false;
pIemCpu->fUndefinedEFlags = 0;
{
/*
* Free all verification records.
*/
do
{
while (pEvtRec)
{
}
} while (pEvtRec);
}
}
/**
* Allocate an event record.
* @returns Pointer to a record.
*/
static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
{
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
return NULL;
if (pEvtRec)
else
{
if (!pIemCpu->ppIemEvtRecNext)
return NULL; /* Too early (fake PCIBIOS), ignore notification. */
pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
if (!pEvtRec)
return NULL;
}
return pEvtRec;
}
/**
* IOMMMIORead notification.
*/
{
if (!pVCpu)
return;
if (!pEvtRec)
return;
}
/**
* IOMMMIOWrite notification.
*/
{
if (!pVCpu)
return;
if (!pEvtRec)
return;
}
/**
* IOMIOPortRead notification.
*/
{
if (!pVCpu)
return;
if (!pEvtRec)
return;
}
/**
* IOMIOPortWrite notification.
*/
{
if (!pVCpu)
return;
if (!pEvtRec)
return;
}
VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
{
AssertFailed();
}
VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
{
AssertFailed();
}
/**
* Fakes and records an I/O port read.
*
* @returns VINF_SUCCESS.
* @param pIemCpu The IEM per CPU data.
* @param Port The I/O port.
* @param pu32Value Where to store the fake value.
* @param cbValue The size of the access.
*/
static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
{
if (pEvtRec)
{
}
*pu32Value = 0xcccccccc;
return VINF_SUCCESS;
}
/**
* Fakes and records an I/O port write.
*
* @returns VINF_SUCCESS.
* @param pIemCpu The IEM per CPU data.
* @param Port The I/O port.
* @param u32Value The value being written.
* @param cbValue The size of the access.
*/
static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
{
if (pEvtRec)
{
}
return VINF_SUCCESS;
}
/**
* Used to add extra details about a stub case.
* @param pIemCpu The IEM per CPU state.
*/
{
char szRegs[4096];
"rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
"rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
"r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
"r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
"rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
"cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
"ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
"es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
"fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
"gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
"ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
"dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
"dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
"gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
"ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
"tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
" sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
" efer=%016VR{efer}\n"
" pat=%016VR{pat}\n"
" sf_mask=%016VR{sf_mask}\n"
"krnl_gs_base=%016VR{krnl_gs_base}\n"
" lstar=%016VR{lstar}\n"
" star=%016VR{star} cstar=%016VR{cstar}\n"
"fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
);
char szInstr1[256];
char szInstr2[256];
}
/**
* Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
* dump to the assertion info.
*
* @param pEvtRec The record to dump.
*/
{
{
RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
break;
RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
break;
case IEMVERIFYEVENT_RAM_READ:
RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
break;
case IEMVERIFYEVENT_RAM_WRITE:
RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
break;
default:
break;
}
}
/**
* Raises an assertion on the specified record, showing the given message with
* a record dump attached.
*
* @param pIemCpu The IEM per CPU data.
* @param pEvtRec1 The first record.
* @param pEvtRec2 The second record.
* @param pszMsg The message explaining why we're asserting.
*/
static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
{
}
/**
* Raises an assertion on the specified record, showing the given message with
* a record dump attached.
*
* @param pIemCpu The IEM per CPU data.
* @param pEvtRec1 The first record.
* @param pszMsg The message explaining why we're asserting.
*/
{
}
/**
* Verifies a write record.
*
* @param pIemCpu The IEM per CPU data.
* @param pEvtRec The write record.
* @param fRem Set if REM was doing the other execution. If clear,
* it was HM.
*/
{
int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
if ( RT_FAILURE(rc)
{
/* fend off ins */
{
/* fend off ROMs and MMIO */
{
/* fend off fxsave */
{
RTAssertMsg2Add("%s: %.*Rhxs\n"
"iem: %.*Rhxs\n",
}
}
}
}
}
/**
* Performs the post-execution verification checks.
*/
{
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
return;
/*
* Switch back the state.
*/
/*
* Execute the instruction in REM.
*/
bool fRem = false;
if ( HMIsEnabled(pVM)
&& !pIemCpu->fProblematicMemory)
{
unsigned iLoops = 0;
do
{
iLoops++;
} while ( rc == VINF_SUCCESS
|| ( rc == VINF_EM_DBG_STEPPED
&& iLoops < 8) );
rc = VINF_SUCCESS;
}
#endif
if ( rc == VERR_EM_CANNOT_EXEC_GUEST
|| rc == VINF_IOM_R3_IOPORT_READ
|| rc == VINF_IOM_R3_IOPORT_WRITE
|| rc == VINF_IOM_R3_MMIO_READ
|| rc == VINF_IOM_R3_MMIO_WRITE
|| rc == VINF_CPUM_R3_MSR_READ
|| rc == VINF_CPUM_R3_MSR_WRITE
|| rc == VINF_EM_RESCHEDULE
)
{
fRem = true;
}
/*
* Compare the register states.
*/
unsigned cDiffs = 0;
{
//Log(("REM and IEM ends up with different registers!\n"));
# define CHECK_FIELD(a_Field) \
do \
{ \
{ \
{ \
case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
} \
cDiffs++; \
} \
} while (0)
# define CHECK_XSTATE_FIELD(a_Field) \
do \
{ \
{ \
{ \
case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
} \
cDiffs++; \
} \
} while (0)
# define CHECK_BIT_FIELD(a_Field) \
do \
{ \
{ \
RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
cDiffs++; \
} \
} while (0)
do \
{ \
} while (0)
#if 1 /* The recompiler doesn't update these the intel way. */
if (fRem)
{
//pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
}
#endif
{
RTAssertMsg2Weak(" the FPU state differs\n");
cDiffs++;
}
{
RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
}
if (!pIemCpu->fIgnoreRaxRdx)
/* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
   the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
/* Kludge #2: CR2 differs slightly on cross-page boundary faults; we report the last address of the access
   while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
{
{ /* ignore */ }
&& fRem)
{ /* ignore */ }
else
}
CHECK_FIELD(dr[0]);
if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
if (cDiffs != 0)
{
}
}
/*
* If the register state compared fine, check the verification event
* records.
*/
{
/*
* Compare verification event records.
* - I/O port accesses should be a 1:1 match.
*/
{
/* Since we might miss RAM writes and reads, ignore reads and check
   that any written memory matches, skipping extra records. */
{
}
/* Do the compare. */
{
break;
}
bool fEquals;
{
break;
break;
case IEMVERIFYEVENT_RAM_READ:
break;
case IEMVERIFYEVENT_RAM_WRITE:
break;
default:
fEquals = false;
break;
}
if (!fEquals)
{
break;
}
/* advance */
}
/* Ignore extra writes and reads. */
{
}
}
}
#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
/* stubs */
static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
{
return VERR_INTERNAL_ERROR;
}
static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
{
return VERR_INTERNAL_ERROR;
}
#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
#ifdef LOG_ENABLED
/**
* Logs the current instruction.
* @param pVCpu The cross context virtual CPU structure of the caller.
* @param pCtx The current CPU context.
* @param fSameCtx Set if we have the same context information as the VMM,
* clear if we may have already executed an instruction in
* our debug context. When clear, we assume IEMCPU holds
* valid CPU mode info.
*/
{
# ifdef IN_RING3
if (LogIs2Enabled())
{
char szInstr[256];
if (fSameCtx)
else
{
{
case IEMMODE_16BIT:
else
break;
}
}
Log2(("****\n"
" eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
" eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
" cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
" fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
" %s\n"
,
szInstr));
if (LogIs3Enabled())
}
else
# endif
LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
}
#endif
/**
* Makes status code adjustments (pass up from I/O and access handlers)
* as well as maintaining statistics.
*
* @returns Strict VBox status code to pass up.
* @param pIemCpu The IEM per CPU data.
* @param rcStrict The status from executing an instruction.
*/
{
if (rcStrict != VINF_SUCCESS)
{
if (RT_SUCCESS(rcStrict))
{
if (rcPassUp == VINF_SUCCESS)
else if ( rcPassUp < VINF_EM_FIRST
|| rcPassUp > VINF_EM_LAST
{
}
else
{
}
}
else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
#ifdef IEM_VERIFICATION_MODE_FULL
else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
#endif
else
}
{
}
return rcStrict;
}
/**
* The actual code execution bits of IEMExecOne, IEMExecOneEx, and
* IEMExecOneWithPrefetchedByPC.
*
* @return Strict VBox status code.
* @param pVCpu The current virtual CPU.
* @param pIemCpu The IEM per CPU data.
* @param fExecuteInhibit If set, execute the instruction following CLI,
* POP SS and MOV SS,GR.
*/
DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
{
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
if (rcStrict == VINF_SUCCESS)
pIemCpu->cInstructions++;
if (pIemCpu->cActiveMappings > 0)
//#ifdef DEBUG
// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
//#endif
/* Execute the next instruction as well if a cli, pop ss or
mov ss, Gr has just completed successfully. */
if ( fExecuteInhibit
&& rcStrict == VINF_SUCCESS
{
if (rcStrict == VINF_SUCCESS)
{
# ifdef LOG_ENABLED
# endif
if (rcStrict == VINF_SUCCESS)
pIemCpu->cInstructions++;
if (pIemCpu->cActiveMappings > 0)
}
}
/*
* Return value fiddling, statistics and sanity assertions.
*/
#if defined(IEM_VERIFICATION_MODE_FULL)
#endif
return rcStrict;
}
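/* Hedged sketch of the interrupt shadow handling referenced above: after a
   successful CLI / POP SS / MOV SS,Gr the very next instruction must run
   before any interrupt can be delivered, so the inner worker does one extra
   decode+execute round while the inhibit condition holds.  The force flag
   and helper named below are assumptions about the elided checks. */
#if 0 /* illustration only */
    if (   fExecuteInhibit
        && rcStrict == VINF_SUCCESS
        && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)          /* assumed flag */
        && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip  /* assumed helper */)
    {
        /* ... decode and execute exactly one more instruction here ... */
    }
#endif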
#ifdef IN_RC
/**
* Re-enters raw-mode or ensures we return to ring-3.
*
* @returns rcStrict, maybe modified.
* @param pIemCpu The IEM CPU structure.
* @param pVCpu The cross context virtual CPU structure of the caller.
* @param pCtx The current CPU context.
* @param rcStrict The status code returned by the interpreter.
*/
DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
{
if (!pIemCpu->fInPatchCode)
return rcStrict;
}
#endif
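/* Usage sketch: the IN_RC sections of the IEMExecOne* entry points below
   are expected to route their final status through this helper, along the
   lines of (illustration only, pCtx access pattern assumed): */
#if 0
    rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
#endif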
/**
* Execute one instruction.
*
* @return Strict VBox status code.
* @param pVCpu The current virtual CPU.
*/
{
#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
#endif
#ifdef LOG_ENABLED
#endif
/*
* Do the decoding and emulation.
*/
if (rcStrict == VINF_SUCCESS)
#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
/*
* Assert some sanity.
*/
#endif
#ifdef IN_RC
#endif
if (rcStrict != VINF_SUCCESS)
LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
return rcStrict;
}
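/* Hedged usage sketch: a caller that has decided to interpret the current
   instruction hands the virtual CPU to IEMExecOne and acts on the strict
   status code; the fallback shown is illustrative only. */
#if 0 /* illustration only */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
    {
        /* e.g. fall back to another interpreter or return to ring-3 here */
    }
#endif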
{
if (rcStrict == VINF_SUCCESS)
{
if (pcbWritten)
}
#ifdef IN_RC
#endif
return rcStrict;
}
VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
{
if ( cbOpcodeBytes
{
iemInitDecoder(pIemCpu, false);
}
else
if (rcStrict == VINF_SUCCESS)
{
}
#ifdef IN_RC
#endif
return rcStrict;
}
{
if (rcStrict == VINF_SUCCESS)
{
if (pcbWritten)
}
#ifdef IN_RC
#endif
return rcStrict;
}
VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
{
if ( cbOpcodeBytes
{
iemInitDecoder(pIemCpu, true);
}
else
if (rcStrict == VINF_SUCCESS)
#ifdef IN_RC
#endif
return rcStrict;
}
{
/*
* See if there is an interrupt pending in TRPM and inject it if we can.
*/
#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
# ifdef IEM_VERIFICATION_MODE_FULL
# endif
&& TRPMHasTrap(pVCpu)
{
int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
}
#else
#endif
/*
* Log the state.
*/
#ifdef LOG_ENABLED
#endif
/*
* Do the decoding and emulation.
*/
if (rcStrict == VINF_SUCCESS)
#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
/*
* Assert some sanity.
*/
#endif
/*
* Maybe re-enter raw-mode and log.
*/
#ifdef IN_RC
#endif
if (rcStrict != VINF_SUCCESS)
LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
return rcStrict;
}
/**
* Injects a trap, fault, abort, software interrupt or external interrupt.
*
* The parameter list matches TRPMQueryTrapAll pretty closely.
*
* @returns Strict VBox status code.
* @param pVCpu The current virtual CPU.
* @param u8TrapNo The trap number.
* @param enmType What type is it (trap/fault/abort), software
* interrupt or hardware interrupt.
* @param uErrCode The error code if applicable.
* @param uCr2 The CR2 value if applicable.
* @param cbInstr The instruction length (only relevant for
* software interrupts).
*/
VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
{
#ifdef DBGFTRACE_ENABLED
#endif
switch (enmType)
{
case TRPM_HARDWARE_INT:
break;
case TRPM_SOFTWARE_INT:
break;
case TRPM_TRAP:
if (u8TrapNo == X86_XCPT_PF)
switch (u8TrapNo)
{
case X86_XCPT_DF:
case X86_XCPT_TS:
case X86_XCPT_NP:
case X86_XCPT_SS:
case X86_XCPT_PF:
case X86_XCPT_AC:
break;
case X86_XCPT_NMI:
break;
}
break;
}
}
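/* Hedged usage sketch mirroring the TRPM query in IEMExecLots above: a
   pending TRPM event is pulled apart and handed to IEMInjectTrap.  The
   output parameter types, the error-code cast and the 0 instruction length
   (the trailing cbInstr parameter mentioned in the doc comment) are
   illustrative assumptions. */
#if 0 /* illustration only */
    if (TRPMHasTrap(pVCpu))
    {
        uint8_t   u8TrapNo;
        TRPMEVENT enmType;
        RTGCUINT  uErrCode;
        RTGCPTR   uCr2;
        int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */);
        AssertRC(rc2);
        VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
    }
#endif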
/**
* Injects the active TRPM event.
*
* @returns Strict VBox status code.
* @param pVCpu Pointer to the VMCPU.
*/
{
#ifndef IEM_IMPLEMENTS_TASKSWITCH
IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
#else
if (RT_FAILURE(rc))
return rc;
/** @todo Are there any other codes that imply the event was successfully
* delivered to the guest? See @bugref{6607}. */
if ( rcStrict == VINF_SUCCESS
|| rcStrict == VINF_IEM_RAISED_XCPT)
{
}
return rcStrict;
#endif
}
{
return VERR_NOT_IMPLEMENTED;
}
{
return VERR_NOT_IMPLEMENTED;
}
#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
/**
* Executes a IRET instruction with default operand size.
*
* This is for PATM.
*
* @returns VBox status code.
* @param pVCpu The current virtual CPU.
* @param pCtxCore The register frame.
*/
{
if (rcStrict == VINF_SUCCESS)
else
LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
return rcStrict;
}
#endif
/**
* Interface for HM and EM for executing string I/O OUT (write) instructions.
*
* This API ASSUMES that the caller has already verified that the guest code is
* allowed to access the I/O port. (The I/O port is in the DX register in the
* guest state.)
*
* @returns Strict VBox status code.
* @param pVCpu The cross context per virtual CPU structure.
* @param cbValue The size of the I/O port access (1, 2, or 4).
* @param enmAddrMode The addressing mode.
* @param fRepPrefix Indicates whether a repeat prefix is used
* (doesn't matter which for this instruction).
* @param cbInstr The instruction length in bytes.
* @param iEffSeg The effective segment address.
*/
{
/*
* State init.
*/
/*
* Switch orgy for getting to the right handler.
*/
if (fRepPrefix)
{
switch (enmAddrMode)
{
case IEMMODE_16BIT:
switch (cbValue)
{
case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
default:
}
break;
case IEMMODE_32BIT:
switch (cbValue)
{
case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
default:
}
break;
case IEMMODE_64BIT:
switch (cbValue)
{
case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
default:
}
break;
default:
}
}
else
{
switch (enmAddrMode)
{
case IEMMODE_16BIT:
switch (cbValue)
{
case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
default:
}
break;
case IEMMODE_32BIT:
switch (cbValue)
{
case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
default:
}
break;
case IEMMODE_64BIT:
switch (cbValue)
{
case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
default:
}
break;
default:
}
}
}
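/* Hedged usage sketch: an HM exit handler dispatching a REP OUTSB exit to
   the string I/O writer above.  The exported name IEMExecStringIoWrite and
   the exact parameter order are assumptions based on the doc comment. */
#if 0 /* illustration only */
    VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu,
                                                 1 /*cbValue - byte sized OUTS*/,
                                                 IEMMODE_32BIT /*enmAddrMode*/,
                                                 true /*fRepPrefix*/,
                                                 cbInstr /* from the exit info */,
                                                 X86_SREG_DS /*iEffSeg*/);
#endif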
/**
* Interface for HM and EM for executing string I/O IN (read) instructions.
*
* This API ASSUMES that the caller has already verified that the guest code is
* allowed to access the I/O port. (The I/O port is in the DX register in the
* guest state.)
*
* @returns Strict VBox status code.
* @param pVCpu The cross context per virtual CPU structure.
* @param cbValue The size of the I/O port access (1, 2, or 4).
* @param enmAddrMode The addressing mode.
* @param fRepPrefix Indicates whether a repeat prefix is used
* (doesn't matter which for this instruction).
* @param cbInstr The instruction length in bytes.
*/
{
/*
* State init.
*/
/*
* Switch orgy for getting to the right handler.
*/
if (fRepPrefix)
{
switch (enmAddrMode)
{
case IEMMODE_16BIT:
switch (cbValue)
{
default:
}
break;
case IEMMODE_32BIT:
switch (cbValue)
{
default:
}
break;
case IEMMODE_64BIT:
switch (cbValue)
{
default:
}
break;
default:
}
}
else
{
switch (enmAddrMode)
{
case IEMMODE_16BIT:
switch (cbValue)
{
default:
}
break;
case IEMMODE_32BIT:
switch (cbValue)
{
default:
}
break;
case IEMMODE_64BIT:
switch (cbValue)
{
default:
}
break;
default:
}
}
}
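/* Note: unlike the OUT variant there is no iEffSeg parameter here, since
   INS always stores to ES:[e]DI and that segment cannot be overridden.
   Hedged usage sketch (the exported name IEMExecStringIoRead is an
   assumption based on the doc comment above): */
#if 0 /* illustration only */
    VBOXSTRICTRC rcStrict = IEMExecStringIoRead(pVCpu,
                                                2 /*cbValue - word sized INS*/,
                                                IEMMODE_16BIT /*enmAddrMode*/,
                                                false /*fRepPrefix*/,
                                                cbInstr /* from the exit info */);
#endif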